import avea # pylint: disable=import-error
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
LightEntity,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.util.color as color_util
SUPPORT_AVEA = SUPPORT_BRIGHTNESS | SUPPORT_COLOR
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Avea platform."""
try:
nearby_bulbs = avea.discover_avea_bulbs()
for bulb in nearby_bulbs:
bulb.get_name()
bulb.get_brightness()
except OSError as err:
raise PlatformNotReady from err
add_entities(AveaLight(bulb) for bulb in nearby_bulbs)
class AveaLight(LightEntity):
"""Representation of an Avea."""
def __init__(self, light):
"""Initialize an AveaLight."""
self._light = light
self._name = light.name
self._state = None
self._brightness = light.brightness
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_AVEA
@property
def name(self):
"""Return the display name of this light."""
return self._name
@property
def brightness(self):
"""Return the brightness of the light."""
return self._brightness
@property
def is_on(self):
"""Return true if light is on."""
return self._state
def turn_on(self, **kwargs):
"""Instruct the light to turn on."""
if not kwargs:
self._light.set_brightness(4095)
else:
if ATTR_BRIGHTNESS in kwargs:
bright = round((kwargs[ATTR_BRIGHTNESS] / 255) * 4095)
self._light.set_brightness(bright)
if ATTR_HS_COLOR in kwargs:
rgb = color_util.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])
self._light.set_rgb(rgb[0], rgb[1], rgb[2])
def turn_off(self, **kwargs):
"""Instruct the light to turn off."""
self._light.set_brightness(0)
def update(self):
"""Fetch new state data for this light.
This is the only method that should fetch new data for Home Assistant.
"""
brightness = self._light.get_brightness()
if brightness is not None:
if brightness == 0:
self._state = False
else:
self._state = True
self._brightness = round(255 * (brightness / 4095))
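# Hedged illustration (not part of the integration above): Home Assistant uses a
# 0-255 brightness scale while the Avea bulb uses 0-4095, so turn_on() and update()
# convert between the two. A minimal standalone sketch of that conversion:
def _ha_to_avea_brightness(brightness_255):
    """Convert a Home Assistant brightness (0-255) to the Avea scale (0-4095)."""
    return round((brightness_255 / 255) * 4095)

def _avea_to_ha_brightness(brightness_4095):
    """Convert an Avea brightness (0-4095) back to the Home Assistant scale (0-255)."""
    return round(255 * (brightness_4095 / 4095))

assert _ha_to_avea_brightness(255) == 4095
assert _avea_to_ha_brightness(2048) == 128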
|
import asyncio
import logging
from bravia_tv.braviarc import NoIPControl
import voluptuous as vol
from homeassistant.components.media_player import (
DEVICE_CLASS_TV,
PLATFORM_SCHEMA,
MediaPlayerEntity,
)
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PIN, STATE_OFF, STATE_ON
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json
from .const import (
ATTR_MANUFACTURER,
BRAVIA_CONFIG_FILE,
BRAVIARC,
CLIENTID_PREFIX,
CONF_IGNORED_SOURCES,
DEFAULT_NAME,
DOMAIN,
NICKNAME,
)
_LOGGER = logging.getLogger(__name__)
SUPPORT_BRAVIA = (
SUPPORT_PAUSE
| SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_SET
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_SELECT_SOURCE
| SUPPORT_PLAY
| SUPPORT_STOP
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Bravia TV platform."""
host = config[CONF_HOST]
bravia_config_file_path = hass.config.path(BRAVIA_CONFIG_FILE)
bravia_config = await hass.async_add_executor_job(
load_json, bravia_config_file_path
)
if not bravia_config:
_LOGGER.error(
"Configuration import failed, there is no bravia.conf file in the configuration folder"
)
return
while bravia_config:
# Import a configured TV
host_ip, host_config = bravia_config.popitem()
if host_ip == host:
pin = host_config[CONF_PIN]
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_HOST: host, CONF_PIN: pin},
)
)
return
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add BraviaTV entities from a config_entry."""
ignored_sources = []
pin = config_entry.data[CONF_PIN]
unique_id = config_entry.unique_id
device_info = {
"identifiers": {(DOMAIN, unique_id)},
"name": DEFAULT_NAME,
"manufacturer": ATTR_MANUFACTURER,
"model": config_entry.title,
}
braviarc = hass.data[DOMAIN][config_entry.entry_id][BRAVIARC]
ignored_sources = config_entry.options.get(CONF_IGNORED_SOURCES, [])
async_add_entities(
[
BraviaTVDevice(
braviarc, DEFAULT_NAME, pin, unique_id, device_info, ignored_sources
)
]
)
class BraviaTVDevice(MediaPlayerEntity):
"""Representation of a Bravia TV."""
def __init__(self, client, name, pin, unique_id, device_info, ignored_sources):
"""Initialize the Bravia TV device."""
self._pin = pin
self._braviarc = client
self._name = name
self._state = STATE_OFF
self._muted = False
self._program_name = None
self._channel_name = None
self._channel_number = None
self._source = None
self._source_list = []
self._original_content_list = []
self._content_mapping = {}
self._duration = None
self._content_uri = None
self._playing = False
self._start_date_time = None
self._program_media_type = None
self._min_volume = None
self._max_volume = None
self._volume = None
self._unique_id = unique_id
self._device_info = device_info
self._ignored_sources = ignored_sources
self._state_lock = asyncio.Lock()
async def async_update(self):
"""Update TV info."""
if self._state_lock.locked():
return
power_status = await self.hass.async_add_executor_job(
self._braviarc.get_power_status
)
if power_status != "off":
connected = await self.hass.async_add_executor_job(
self._braviarc.is_connected
)
if not connected:
try:
connected = await self.hass.async_add_executor_job(
self._braviarc.connect, self._pin, CLIENTID_PREFIX, NICKNAME
)
except NoIPControl:
_LOGGER.error("IP Control is disabled in the TV settings")
if not connected:
power_status = "off"
if power_status == "active":
self._state = STATE_ON
if (
await self._async_refresh_volume()
and await self._async_refresh_channels()
):
await self._async_refresh_playing_info()
return
self._state = STATE_OFF
def _get_source(self):
"""Return the name of the source."""
for key, value in self._content_mapping.items():
if value == self._content_uri:
return key
async def _async_refresh_volume(self):
"""Refresh volume information."""
volume_info = await self.hass.async_add_executor_job(
self._braviarc.get_volume_info
)
if volume_info is not None:
self._volume = volume_info.get("volume")
self._min_volume = volume_info.get("minVolume")
self._max_volume = volume_info.get("maxVolume")
self._muted = volume_info.get("mute")
return True
return False
async def _async_refresh_channels(self):
"""Refresh source and channels list."""
if not self._source_list:
self._content_mapping = await self.hass.async_add_executor_job(
self._braviarc.load_source_list
)
self._source_list = []
if not self._content_mapping:
return False
for key in self._content_mapping:
if key not in self._ignored_sources:
self._source_list.append(key)
return True
async def _async_refresh_playing_info(self):
"""Refresh Playing information."""
playing_info = await self.hass.async_add_executor_job(
self._braviarc.get_playing_info
)
self._program_name = playing_info.get("programTitle")
self._channel_name = playing_info.get("title")
self._program_media_type = playing_info.get("programMediaType")
self._channel_number = playing_info.get("dispNum")
self._content_uri = playing_info.get("uri")
self._source = self._get_source()
self._duration = playing_info.get("durationSec")
self._start_date_time = playing_info.get("startDateTime")
if not playing_info:
self._channel_name = "App"
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def device_class(self):
"""Set the device class to TV."""
return DEVICE_CLASS_TV
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return self._unique_id
@property
def device_info(self):
"""Return the device info."""
return self._device_info
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def source(self):
"""Return the current input source."""
return self._source
@property
def source_list(self):
"""List of available input sources."""
return self._source_list
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
if self._volume is not None:
return self._volume / 100
return None
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_BRAVIA
@property
def media_title(self):
"""Title of current playing media."""
return_value = None
if self._channel_name is not None:
return_value = self._channel_name
if self._program_name is not None:
return_value = f"{return_value}: {self._program_name}"
return return_value
@property
def media_content_id(self):
"""Content ID of current playing media."""
return self._channel_name
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self._duration
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._braviarc.set_volume_level(volume)
async def async_turn_on(self):
"""Turn the media player on."""
async with self._state_lock:
await self.hass.async_add_executor_job(self._braviarc.turn_on)
async def async_turn_off(self):
"""Turn off media player."""
async with self._state_lock:
await self.hass.async_add_executor_job(self._braviarc.turn_off)
def volume_up(self):
"""Volume up the media player."""
self._braviarc.volume_up()
def volume_down(self):
"""Volume down media player."""
self._braviarc.volume_down()
def mute_volume(self, mute):
"""Send mute command."""
self._braviarc.mute_volume(mute)
def select_source(self, source):
"""Set the input source."""
if source in self._content_mapping:
uri = self._content_mapping[source]
self._braviarc.play_content(uri)
def media_play_pause(self):
"""Simulate play pause media player."""
if self._playing:
self.media_pause()
else:
self.media_play()
def media_play(self):
"""Send play command."""
self._playing = True
self._braviarc.media_play()
def media_pause(self):
"""Send media pause command to media player."""
self._playing = False
self._braviarc.media_pause()
def media_stop(self):
"""Send media stop command to media player."""
self._playing = False
self._braviarc.media_stop()
def media_next_track(self):
"""Send next track command."""
self._braviarc.media_next_track()
def media_previous_track(self):
"""Send the previous track command."""
self._braviarc.media_previous_track()
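# Hedged illustration (not part of the integration above): _get_source() performs a
# reverse lookup in the name -> URI mapping returned by load_source_list(). The same
# idea as a standalone sketch, using made-up example data:
_example_content_mapping = {"HDMI 1": "extInput:hdmi?port=1", "HDMI 2": "extInput:hdmi?port=2"}
_example_content_uri = "extInput:hdmi?port=2"
_example_source = next(
    (name for name, uri in _example_content_mapping.items() if uri == _example_content_uri),
    None,
)
assert _example_source == "HDMI 2"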
|
import itertools
import warnings
from collections import Counter
from scattertext.ParsedCorpus import ParsedCorpus
class FeatsFromGensim(object):
def __init__(self, phrases, gram_size):
'''
Parameters
----------
phrases : list[gensim.models.Phrases]
gram_size : int, maximum number of words per phrase
'''
        from gensim.models import Phrases
        assert isinstance(phrases, Phrases)
        self.gram_size = gram_size
        self.phrases = phrases
def get_doc_metadata(self, doc):
return Counter()
def get_feats(self, doc):
'''
Parameters
----------
        doc : spacy.tokens.Doc
Returns
-------
Counter (unigram, bigram) -> count
'''
ngram_counter = Counter()
for sent in doc.sents:
ngrams = self.phrases[str(sent)]
for subphrases in self.phrases[1:]:
ngrams = subphrases[str(sent)]
for ngram in ngrams:
ngram_counter[ngram] += 1
return ngram_counter
class GensimPhraseAdder(object):
def __init__(self, max_tokens_per_phrase=3, phrases=None):
'''
Parameters
----------
max_tokens_per_phrase: int, must be > 1. Default 3
phrases: Instance of Gensim phrases class, default None
'''
self.max_tokens_per_phrase = max_tokens_per_phrase
self.phrases = phrases
def add_phrases(self, corpus):
'''
Parameters
----------
corpus: Corpus for phrase augmentation
Returns
-------
        self : GensimPhraseAdder, with the fitted gensim Phrases models stored in self.phrases
'''
from gensim.models import Phrases
assert isinstance(corpus, ParsedCorpus)
self.phrases = [Phrases(CorpusAdapterForGensim.get_sentences(corpus), delimiter=' ')]
for i in range(1, self.max_tokens_per_phrase):
self.phrases.append(Phrases(self.phrases[-1][CorpusAdapterForGensim.get_sentences(corpus)]))
return self
class CorpusAdapterForGensim(object):
@staticmethod
def get_token_format(token):
return token.lower_
@classmethod
def get_sentences(cls, corpus):
'''
Parameters
----------
corpus, ParsedCorpus
Returns
-------
iter: [sentence1word1, ...], [sentence2word1, ...]
'''
assert isinstance(corpus, ParsedCorpus)
return itertools.chain(*[[[cls.get_token_format(t)
for t in sent
if not t.is_punct]
for sent in doc.sents]
for doc in corpus.get_parsed_docs()])
class Word2VecDefault(object):
def _default_word2vec_model(self):
from gensim.models import word2vec
return word2vec.Word2Vec(size=100,
alpha=0.025,
window=5,
min_count=5,
max_vocab_size=None,
sample=0,
seed=1,
workers=1,
min_alpha=0.0001,
sg=1,
hs=1,
negative=0,
cbow_mean=0,
iter=1,
null_word=0,
trim_rule=None,
sorted_vocab=1)
class Word2VecFromParsedCorpus(Word2VecDefault):
def __init__(self, corpus, word2vec_model=None):
'''
Parameters
----------
corpus: ParsedCorpus
from which to build word2vec model
word2vec_model: word2vec.Word2Vec
Gensim instance to be used to train word2vec model
'''
try:
from gensim.models import word2vec
assert word2vec_model is None or isinstance(word2vec_model, word2vec.Word2Vec)
        except Exception:
warnings.warn("You should really install gensim, but we're going to duck-type your model and pray it works")
assert isinstance(corpus, ParsedCorpus)
self.corpus = corpus
self.model = self._get_word2vec_model(word2vec_model)
def train(self, epochs=2000, training_iterations=5):
'''
Parameters
----------
epochs : int
Number of epochs to train for. Default is 2000.
training_iterations : int
            Number of times to repeat the training process. Default is 5.
Returns
-------
A trained word2vec model.
'''
self._scan_and_build_vocab()
for _ in range(training_iterations):
self.model.train(CorpusAdapterForGensim.get_sentences(self.corpus),
total_examples=self.model.corpus_count,
epochs=epochs)
return self.model
def _get_word2vec_model(self, word2vec_model):
return (self._default_word2vec_model()
if word2vec_model is None
else word2vec_model)
def _scan_and_build_vocab(self):
try:
self.model.scan_vocab(CorpusAdapterForGensim.get_sentences(self.corpus))
        except Exception:  # scan_vocab may be missing or fail on some gensim versions
            pass
self.model.build_vocab(CorpusAdapterForGensim.get_sentences(self.corpus))
class Word2VecFromParsedCorpusBigrams(Word2VecFromParsedCorpus):
def _scan_and_build_vocab(self):
from gensim.models import Phrases
bigram_transformer = Phrases(CorpusAdapterForGensim.get_sentences(self.corpus))
try:
self.model.scan_vocab(CorpusAdapterForGensim.get_sentences(self.corpus))
        except Exception:  # scan_vocab may be missing or fail on some gensim versions
            pass
self.model.build_vocab(bigram_transformer[CorpusAdapterForGensim.get_sentences(self.corpus)])
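# Hedged usage sketch (not from the library above): `my_corpus` is assumed to be a
# scattertext ParsedCorpus built elsewhere; parameter values are illustrative only.
#
#   model = Word2VecFromParsedCorpus(my_corpus).train(epochs=100, training_iterations=2)
#   similar_terms = model.wv.most_similar("president")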
|
from weblate.machinery.base import MachineTranslation
class DummyTranslation(MachineTranslation):
"""Dummy machine translation for testing purposes."""
name = "Dummy"
def download_languages(self):
"""Dummy translation supports just Czech language."""
return ("en", "cs")
def download_translations(
self,
source,
language,
text: str,
unit,
user,
search: bool,
threshold: int = 75,
):
"""Dummy translation supports just single phrase."""
if source == "en" and text.strip() == "Hello, world!":
yield {
"text": "Nazdar světe!",
"quality": self.max_score,
"service": "Dummy",
"source": text,
}
yield {
"text": "Ahoj světe!",
"quality": self.max_score,
"service": "Dummy",
"source": text,
}
if source == "en" and text.strip() == "Hello, [7]!":
yield {
"text": "Nazdar [7]!",
"quality": self.max_score,
"service": "Dummy",
"source": text,
}
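# Hedged usage sketch (constructor arguments depend on Weblate internals and are
# omitted here): download_translations() is a generator, so callers simply iterate
# over the yielded dicts.
#
#   dummy = DummyTranslation(...)  # instantiation details are an assumption
#   results = list(
#       dummy.download_translations("en", "cs", "Hello, world!", unit=None, user=None, search=False)
#   )
#   assert [r["text"] for r in results] == ["Nazdar světe!", "Ahoj světe!"]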
|
import functools
import itertools
import warnings
import numpy as np
from ..core.formatting import format_item
from .utils import (
_infer_xy_labels,
_process_cmap_cbar_kwargs,
import_matplotlib_pyplot,
label_from_attrs,
)
# Overrides axes.labelsize, xtick.major.size, ytick.major.size
# from mpl.rcParams
_FONTSIZE = "small"
# For major ticks on x, y axes
_NTICKS = 5
def _nicetitle(coord, value, maxchar, template):
"""
Put coord, value in template and truncate at maxchar
"""
prettyvalue = format_item(value, quote_strings=False)
title = template.format(coord=coord, value=prettyvalue)
if len(title) > maxchar:
title = title[: (maxchar - 3)] + "..."
return title
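# Hedged illustration of the truncation above: a title longer than `maxchar` is cut
# to `maxchar - 3` characters and suffixed with "...", e.g.
# "temperature = 42"[: (10 - 3)] + "..." == "tempera..."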
class FacetGrid:
"""
Initialize the matplotlib figure and FacetGrid object.
The :class:`FacetGrid` is an object that links a xarray DataArray to
a matplotlib figure with a particular structure.
In particular, :class:`FacetGrid` is used to draw plots with multiple
Axes where each Axes shows the same relationship conditioned on
different levels of some dimension. It's possible to condition on up to
two variables by assigning variables to the rows and columns of the
grid.
The general approach to plotting here is called "small multiples",
where the same kind of plot is repeated multiple times, and the
specific use of small multiples to display the same relationship
    conditioned on one or more other variables is often called a "trellis
plot".
The basic workflow is to initialize the :class:`FacetGrid` object with
the DataArray and the variable names that are used to structure the grid.
Then plotting functions can be applied to each subset by calling
:meth:`FacetGrid.map_dataarray` or :meth:`FacetGrid.map`.
Attributes
----------
axes : numpy object array
Contains axes in corresponding position, as returned from
plt.subplots
col_labels : list
list of :class:`matplotlib.text.Text` instances corresponding to column titles.
row_labels : list
list of :class:`matplotlib.text.Text` instances corresponding to row titles.
fig : matplotlib.Figure
The figure containing all the axes
name_dicts : numpy object array
Contains dictionaries mapping coordinate names to values. None is
        used as a sentinel value for axes which should remain empty, i.e.
sometimes the bottom right grid
"""
def __init__(
self,
data,
col=None,
row=None,
col_wrap=None,
sharex=True,
sharey=True,
figsize=None,
aspect=1,
size=3,
subplot_kws=None,
):
"""
Parameters
----------
data : DataArray
xarray DataArray to be plotted
row, col : strings
            Dimension names that define subsets of the data, which will be drawn
on separate facets in the grid.
col_wrap : int, optional
"Wrap" the column variable at this width, so that the column facets
sharex : bool, optional
If true, the facets will share x axes
sharey : bool, optional
If true, the facets will share y axes
figsize : tuple, optional
A tuple (width, height) of the figure in inches.
If set, overrides ``size`` and ``aspect``.
aspect : scalar, optional
Aspect ratio of each facet, so that ``aspect * size`` gives the
width of each facet in inches
size : scalar, optional
Height (in inches) of each facet. See also: ``aspect``
subplot_kws : dict, optional
Dictionary of keyword arguments for matplotlib subplots
"""
plt = import_matplotlib_pyplot()
# Handle corner case of nonunique coordinates
rep_col = col is not None and not data[col].to_index().is_unique
rep_row = row is not None and not data[row].to_index().is_unique
if rep_col or rep_row:
raise ValueError(
"Coordinates used for faceting cannot "
"contain repeated (nonunique) values."
)
# single_group is the grouping variable, if there is exactly one
if col and row:
single_group = False
nrow = len(data[row])
ncol = len(data[col])
nfacet = nrow * ncol
if col_wrap is not None:
warnings.warn("Ignoring col_wrap since both col and row were passed")
elif row and not col:
single_group = row
elif not row and col:
single_group = col
else:
raise ValueError("Pass a coordinate name as an argument for row or col")
# Compute grid shape
if single_group:
nfacet = len(data[single_group])
if col:
# idea - could add heuristic for nice shapes like 3x4
ncol = nfacet
if row:
ncol = 1
if col_wrap is not None:
# Overrides previous settings
ncol = col_wrap
nrow = int(np.ceil(nfacet / ncol))
# Set the subplot kwargs
subplot_kws = {} if subplot_kws is None else subplot_kws
if figsize is None:
# Calculate the base figure size with extra horizontal space for a
# colorbar
cbar_space = 1
figsize = (ncol * size * aspect + cbar_space, nrow * size)
fig, axes = plt.subplots(
nrow,
ncol,
sharex=sharex,
sharey=sharey,
squeeze=False,
figsize=figsize,
subplot_kw=subplot_kws,
)
# Set up the lists of names for the row and column facet variables
col_names = list(data[col].values) if col else []
row_names = list(data[row].values) if row else []
if single_group:
full = [{single_group: x} for x in data[single_group].values]
empty = [None for x in range(nrow * ncol - len(full))]
name_dicts = full + empty
else:
rowcols = itertools.product(row_names, col_names)
name_dicts = [{row: r, col: c} for r, c in rowcols]
name_dicts = np.array(name_dicts).reshape(nrow, ncol)
# Set up the class attributes
# ---------------------------
# First the public API
self.data = data
self.name_dicts = name_dicts
self.fig = fig
self.axes = axes
self.row_names = row_names
self.col_names = col_names
self.figlegend = None
# Next the private variables
self._single_group = single_group
self._nrow = nrow
self._row_var = row
self._ncol = ncol
self._col_var = col
self._col_wrap = col_wrap
self.row_labels = [None] * nrow
self.col_labels = [None] * ncol
self._x_var = None
self._y_var = None
self._cmap_extend = None
self._mappables = []
self._finalized = False
@property
def _left_axes(self):
return self.axes[:, 0]
@property
def _bottom_axes(self):
return self.axes[-1, :]
def map_dataarray(self, func, x, y, **kwargs):
"""
Apply a plotting function to a 2d facet's subset of the data.
This is more convenient and less general than ``FacetGrid.map``
Parameters
----------
func : callable
A plotting function with the same signature as a 2d xarray
plotting method such as `xarray.plot.imshow`
x, y : string
Names of the coordinates to plot on x, y axes
kwargs :
additional keyword arguments to func
Returns
-------
self : FacetGrid object
"""
if kwargs.get("cbar_ax", None) is not None:
raise ValueError("cbar_ax not supported by FacetGrid.")
cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(
func, self.data.values, **kwargs
)
self._cmap_extend = cmap_params.get("extend")
# Order is important
func_kwargs = {
k: v
for k, v in kwargs.items()
if k not in {"cmap", "colors", "cbar_kwargs", "levels"}
}
func_kwargs.update(cmap_params)
func_kwargs.update({"add_colorbar": False, "add_labels": False})
# Get x, y labels for the first subplot
x, y = _infer_xy_labels(
darray=self.data.loc[self.name_dicts.flat[0]],
x=x,
y=y,
imshow=func.__name__ == "imshow",
rgb=kwargs.get("rgb", None),
)
for d, ax in zip(self.name_dicts.flat, self.axes.flat):
# None is the sentinel value
if d is not None:
subset = self.data.loc[d]
mappable = func(
subset, x=x, y=y, ax=ax, **func_kwargs, _is_facetgrid=True
)
self._mappables.append(mappable)
self._finalize_grid(x, y)
if kwargs.get("add_colorbar", True):
self.add_colorbar(**cbar_kwargs)
return self
def map_dataarray_line(
self, func, x, y, hue, add_legend=True, _labels=None, **kwargs
):
from .plot import _infer_line_data
for d, ax in zip(self.name_dicts.flat, self.axes.flat):
# None is the sentinel value
if d is not None:
subset = self.data.loc[d]
mappable = func(
subset,
x=x,
y=y,
ax=ax,
hue=hue,
add_legend=False,
_labels=False,
**kwargs,
)
self._mappables.append(mappable)
_, _, hueplt, xlabel, ylabel, huelabel = _infer_line_data(
darray=self.data.loc[self.name_dicts.flat[0]], x=x, y=y, hue=hue
)
self._hue_var = hueplt
self._hue_label = huelabel
self._finalize_grid(xlabel, ylabel)
if add_legend and hueplt is not None and huelabel is not None:
self.add_legend()
return self
def map_dataset(
self, func, x=None, y=None, hue=None, hue_style=None, add_guide=None, **kwargs
):
from .dataset_plot import _infer_meta_data, _parse_size
kwargs["add_guide"] = False
kwargs["_is_facetgrid"] = True
if kwargs.get("markersize", None):
kwargs["size_mapping"] = _parse_size(
self.data[kwargs["markersize"]], kwargs.pop("size_norm", None)
)
meta_data = _infer_meta_data(self.data, x, y, hue, hue_style, add_guide)
kwargs["meta_data"] = meta_data
if hue and meta_data["hue_style"] == "continuous":
cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(
func, self.data[hue].values, **kwargs
)
kwargs["meta_data"]["cmap_params"] = cmap_params
kwargs["meta_data"]["cbar_kwargs"] = cbar_kwargs
for d, ax in zip(self.name_dicts.flat, self.axes.flat):
# None is the sentinel value
if d is not None:
subset = self.data.loc[d]
maybe_mappable = func(
ds=subset, x=x, y=y, hue=hue, hue_style=hue_style, ax=ax, **kwargs
)
# TODO: this is needed to get legends to work.
# but maybe_mappable is a list in that case :/
self._mappables.append(maybe_mappable)
self._finalize_grid(meta_data["xlabel"], meta_data["ylabel"])
if hue:
self._hue_label = meta_data.pop("hue_label", None)
if meta_data["add_legend"]:
self._hue_var = meta_data["hue"]
self.add_legend()
elif meta_data["add_colorbar"]:
self.add_colorbar(label=self._hue_label, **cbar_kwargs)
return self
def _finalize_grid(self, *axlabels):
"""Finalize the annotations and layout."""
if not self._finalized:
self.set_axis_labels(*axlabels)
self.set_titles()
self.fig.tight_layout()
for ax, namedict in zip(self.axes.flat, self.name_dicts.flat):
if namedict is None:
ax.set_visible(False)
self._finalized = True
def add_legend(self, **kwargs):
figlegend = self.fig.legend(
handles=self._mappables[-1],
labels=list(self._hue_var.values),
title=self._hue_label,
loc="center right",
**kwargs,
)
self.figlegend = figlegend
# Draw the plot to set the bounding boxes correctly
self.fig.draw(self.fig.canvas.get_renderer())
# Calculate and set the new width of the figure so the legend fits
legend_width = figlegend.get_window_extent().width / self.fig.dpi
figure_width = self.fig.get_figwidth()
self.fig.set_figwidth(figure_width + legend_width)
# Draw the plot again to get the new transformations
self.fig.draw(self.fig.canvas.get_renderer())
# Now calculate how much space we need on the right side
legend_width = figlegend.get_window_extent().width / self.fig.dpi
space_needed = legend_width / (figure_width + legend_width) + 0.02
# margin = .01
# _space_needed = margin + space_needed
right = 1 - space_needed
# Place the subplot axes to give space for the legend
self.fig.subplots_adjust(right=right)
def add_colorbar(self, **kwargs):
"""Draw a colorbar"""
kwargs = kwargs.copy()
if self._cmap_extend is not None:
kwargs.setdefault("extend", self._cmap_extend)
# dont pass extend as kwarg if it is in the mappable
if hasattr(self._mappables[-1], "extend"):
kwargs.pop("extend", None)
if "label" not in kwargs:
kwargs.setdefault("label", label_from_attrs(self.data))
self.cbar = self.fig.colorbar(
self._mappables[-1], ax=list(self.axes.flat), **kwargs
)
return self
def set_axis_labels(self, x_var=None, y_var=None):
"""Set axis labels on the left column and bottom row of the grid."""
if x_var is not None:
if x_var in self.data.coords:
self._x_var = x_var
self.set_xlabels(label_from_attrs(self.data[x_var]))
else:
# x_var is a string
self.set_xlabels(x_var)
if y_var is not None:
if y_var in self.data.coords:
self._y_var = y_var
self.set_ylabels(label_from_attrs(self.data[y_var]))
else:
self.set_ylabels(y_var)
return self
def set_xlabels(self, label=None, **kwargs):
"""Label the x axis on the bottom row of the grid."""
if label is None:
label = label_from_attrs(self.data[self._x_var])
for ax in self._bottom_axes:
ax.set_xlabel(label, **kwargs)
return self
def set_ylabels(self, label=None, **kwargs):
"""Label the y axis on the left column of the grid."""
if label is None:
label = label_from_attrs(self.data[self._y_var])
for ax in self._left_axes:
ax.set_ylabel(label, **kwargs)
return self
def set_titles(self, template="{coord} = {value}", maxchar=30, size=None, **kwargs):
"""
Draw titles either above each facet or on the grid margins.
Parameters
----------
template : string
Template for plot titles containing {coord} and {value}
maxchar : int
Truncate titles at maxchar
kwargs : keyword args
additional arguments to matplotlib.text
Returns
-------
self: FacetGrid object
"""
import matplotlib as mpl
if size is None:
size = mpl.rcParams["axes.labelsize"]
nicetitle = functools.partial(_nicetitle, maxchar=maxchar, template=template)
if self._single_group:
for d, ax in zip(self.name_dicts.flat, self.axes.flat):
# Only label the ones with data
if d is not None:
coord, value = list(d.items()).pop()
title = nicetitle(coord, value, maxchar=maxchar)
ax.set_title(title, size=size, **kwargs)
else:
# The row titles on the right edge of the grid
for index, (ax, row_name, handle) in enumerate(
zip(self.axes[:, -1], self.row_names, self.row_labels)
):
title = nicetitle(coord=self._row_var, value=row_name, maxchar=maxchar)
if not handle:
self.row_labels[index] = ax.annotate(
title,
xy=(1.02, 0.5),
xycoords="axes fraction",
rotation=270,
ha="left",
va="center",
**kwargs,
)
else:
handle.set_text(title)
# The column titles on the top row
for index, (ax, col_name, handle) in enumerate(
zip(self.axes[0, :], self.col_names, self.col_labels)
):
title = nicetitle(coord=self._col_var, value=col_name, maxchar=maxchar)
if not handle:
self.col_labels[index] = ax.set_title(title, size=size, **kwargs)
else:
handle.set_text(title)
return self
def set_ticks(self, max_xticks=_NTICKS, max_yticks=_NTICKS, fontsize=_FONTSIZE):
"""
Set and control tick behavior
Parameters
----------
max_xticks, max_yticks : int, optional
Maximum number of labeled ticks to plot on x, y axes
fontsize : string or int
Font size as used by matplotlib text
Returns
-------
self : FacetGrid object
"""
from matplotlib.ticker import MaxNLocator
# Both are necessary
x_major_locator = MaxNLocator(nbins=max_xticks)
y_major_locator = MaxNLocator(nbins=max_yticks)
for ax in self.axes.flat:
ax.xaxis.set_major_locator(x_major_locator)
ax.yaxis.set_major_locator(y_major_locator)
for tick in itertools.chain(
ax.xaxis.get_major_ticks(), ax.yaxis.get_major_ticks()
):
tick.label1.set_fontsize(fontsize)
return self
def map(self, func, *args, **kwargs):
"""
Apply a plotting function to each facet's subset of the data.
Parameters
----------
func : callable
A plotting function that takes data and keyword arguments. It
must plot to the currently active matplotlib Axes and take a
`color` keyword argument. If faceting on the `hue` dimension,
it must also take a `label` keyword argument.
args : strings
Column names in self.data that identify variables with data to
plot. The data for each variable is passed to `func` in the
order the variables are specified in the call.
kwargs : keyword arguments
All keyword arguments are passed to the plotting function.
Returns
-------
self : FacetGrid object
"""
plt = import_matplotlib_pyplot()
for ax, namedict in zip(self.axes.flat, self.name_dicts.flat):
if namedict is not None:
data = self.data.loc[namedict]
plt.sca(ax)
innerargs = [data[a].values for a in args]
maybe_mappable = func(*innerargs, **kwargs)
# TODO: better way to verify that an artist is mappable?
# https://stackoverflow.com/questions/33023036/is-it-possible-to-detect-if-a-matplotlib-artist-is-a-mappable-suitable-for-use-w#33023522
if maybe_mappable and hasattr(maybe_mappable, "autoscale_None"):
self._mappables.append(maybe_mappable)
self._finalize_grid(*args[:2])
return self
def _easy_facetgrid(
data,
plotfunc,
kind,
x=None,
y=None,
row=None,
col=None,
col_wrap=None,
sharex=True,
sharey=True,
aspect=None,
size=None,
subplot_kws=None,
ax=None,
figsize=None,
**kwargs,
):
"""
Convenience method to call xarray.plot.FacetGrid from 2d plotting methods
kwargs are the arguments to 2d plotting method
"""
if ax is not None:
raise ValueError("Can't use axes when making faceted plots.")
if aspect is None:
aspect = 1
if size is None:
size = 3
elif figsize is not None:
raise ValueError("cannot provide both `figsize` and `size` arguments")
g = FacetGrid(
data=data,
col=col,
row=row,
col_wrap=col_wrap,
sharex=sharex,
sharey=sharey,
figsize=figsize,
aspect=aspect,
size=size,
subplot_kws=subplot_kws,
)
if kind == "line":
return g.map_dataarray_line(plotfunc, x, y, **kwargs)
if kind == "dataarray":
return g.map_dataarray(plotfunc, x, y, **kwargs)
if kind == "dataset":
return g.map_dataset(plotfunc, x, y, **kwargs)
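# Hedged usage sketch (not part of this module): in user code a FacetGrid is normally
# created indirectly through DataArray.plot with `col`/`col_wrap`; `air` below is an
# assumed 3D DataArray with "time", "lat" and "lon" dimensions.
#
#   g = air.isel(time=slice(0, 4)).plot(x="lon", y="lat", col="time", col_wrap=2)
#   g.set_titles(template="{coord} = {value}", maxchar=20)
#   g.set_ticks(max_xticks=4, max_yticks=4)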
|
from pathlib import Path
from typing import Final
from redbot.core.i18n import Translator
_ = Translator("Audio", Path(__file__))
class Equalizer:
def __init__(self):
self.band_count: Final[int] = 15
self.bands = [0.0 for _loop_counter in range(self.band_count)]
def set_gain(self, band: int, gain: float):
if band < 0 or band >= self.band_count:
raise IndexError(f"Band {band} does not exist!")
gain = min(max(gain, -0.25), 1.0)
self.bands[band] = gain
def get_gain(self, band: int):
if band < 0 or band >= self.band_count:
raise IndexError(f"Band {band} does not exist!")
return self.bands[band]
def visualise(self):
block = ""
bands = [str(band + 1).zfill(2) for band in range(self.band_count)]
bottom = (" " * 8) + " ".join(bands)
gains = [1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0, -0.1, -0.2, -0.25]
for gain in gains:
prefix = ""
if gain > 0:
prefix = "+"
elif gain == 0:
prefix = " "
block += f"{prefix}{gain:.2f} | "
for value in self.bands:
if value >= gain:
block += "[] "
else:
block += " "
block += "\n"
block += bottom
return block
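# Hedged usage sketch of the Equalizer defined above (gain values are illustrative):
_demo_eq = Equalizer()
_demo_eq.set_gain(0, 0.5)    # boost the lowest band
_demo_eq.set_gain(14, -0.3)  # clamped to -0.25, the minimum allowed gain
assert _demo_eq.get_gain(14) == -0.25
# _demo_eq.visualise() returns an ASCII bar chart with one column per band.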
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import time
from tensornetwork.backends import backend_factory
from tensornetwork.matrixproductstates.finite_mps import FiniteMPS
from tensornetwork.ncon_interface import ncon
@pytest.fixture(
name="backend_dtype_values",
params=[('numpy', np.float64), ('numpy', np.complex128),
('tensorflow', np.float64), ('tensorflow', np.complex128),
('pytorch', np.float64), ('jax', np.float64)])
def backend_dtype(request):
return request.param
def get_random_np(shape, dtype, seed=0):
np.random.seed(seed) #get the same tensors every time you call this function
if dtype is np.complex64:
return np.random.randn(*shape).astype(
np.float32) + 1j * np.random.randn(*shape).astype(np.float32)
if dtype is np.complex128:
return np.random.randn(*shape).astype(
np.float64) + 1j * np.random.randn(*shape).astype(np.float64)
return np.random.randn(*shape).astype(dtype)
@pytest.mark.parametrize("N, pos", [(10, -1), (10, 10)])
def test_finite_mps_init_invalid_position_raises_value_error(backend, N, pos):
D, d = 10, 2
tensors = [np.random.randn(1, d, D)] + [
np.random.randn(D, d, D) for _ in range(N - 2)
] + [np.random.randn(D, d, 1)]
with pytest.raises(ValueError):
FiniteMPS(tensors, center_position=pos, backend=backend)
@pytest.mark.parametrize("N, pos", [(10, 0), (10, 9), (10, 5)])
def test_finite_mps_init(backend, N, pos):
D, d = 10, 2
tensors = [np.random.randn(1, d, D)] + [
np.random.randn(D, d, D) for _ in range(N - 2)
] + [np.random.randn(D, d, 1)]
mps = FiniteMPS(tensors, center_position=pos, backend=backend)
assert mps.center_position == pos
def test_canonical_finite_mps(backend_dtype_values):
backend = backend_dtype_values[0]
dtype = backend_dtype_values[1]
D, d, N = 10, 2, 10
tensors = [get_random_np((1, d, D), dtype)] + [
get_random_np((D, d, D), dtype) for _ in range(N - 2)
] + [get_random_np((D, d, 1), dtype)]
mps = FiniteMPS(
tensors, center_position=N//2, backend=backend, canonicalize=True)
mps.center_position += 1
assert abs(mps.check_canonical()) > 1E-12
mps.canonicalize()
assert abs(mps.check_canonical()) < 1E-12
def test_local_measurement_finite_mps(backend_dtype_values):
backend = backend_dtype_values[0]
dtype = backend_dtype_values[1]
D, d, N = 1, 2, 10
tensors_1 = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps_1 = FiniteMPS(tensors_1, center_position=0, backend=backend)
tensors_2 = [np.zeros((1, d, D), dtype=dtype)] + [
np.zeros((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.zeros((D, d, 1), dtype=dtype)]
for t in tensors_2:
t[0, 0, 0] = 1
mps_2 = FiniteMPS(tensors_2, center_position=0, backend=backend)
sz = np.diag([0.5, -0.5]).astype(dtype)
result_1 = np.array(mps_1.measure_local_operator([sz] * N, range(N)))
result_2 = np.array(mps_2.measure_local_operator([sz] * N, range(N)))
np.testing.assert_almost_equal(result_1, np.zeros(N))
np.testing.assert_allclose(result_2, np.ones(N) * 0.5)
@pytest.mark.parametrize("N1", [0, 5, 9])
def test_correlation_measurement_finite_mps(backend_dtype_values, N1):
backend = backend_dtype_values[0]
dtype = backend_dtype_values[1]
D, d, N = 1, 2, 10
tensors_1 = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps_1 = FiniteMPS(tensors_1, center_position=0, backend=backend)
mps_1.position(N - 1)
mps_1.position(0)
tensors_2 = [np.zeros((1, d, D), dtype=dtype)] + [
np.zeros((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.zeros((D, d, 1), dtype=dtype)]
for t in tensors_2:
t[0, 0, 0] = 1
mps_2 = FiniteMPS(tensors_2, center_position=0, backend=backend)
mps_2.position(N - 1)
mps_2.position(0)
sz = np.diag([0.5, -0.5]).astype(dtype)
result_1 = np.array(mps_1.measure_two_body_correlator(sz, sz, N1, range(N)))
result_2 = np.array(mps_2.measure_two_body_correlator(sz, sz, N1, range(N)))
actual = np.zeros(N)
actual[N1] = 0.25
np.testing.assert_almost_equal(result_1, actual)
np.testing.assert_allclose(result_2, np.ones(N) * 0.25)
def test_left_envs_one_site(backend_dtype_values):
backend = backend_factory.get_backend(backend_dtype_values[0])
dtype = backend_dtype_values[1]
D, d, N = 3, 2, 5
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=0, backend=backend_dtype_values[0])
envs = mps.left_envs(sites=[2])
assert list(envs.keys()) == [2]
expected = backend.convert_to_tensor(
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]]))
np.testing.assert_array_almost_equal(envs[2], expected)
def test_left_envs_one_site_center_position_to_right(backend_dtype_values):
dtype = backend_dtype_values[1]
D, d, N = 3, 2, 5
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=4, backend=backend_dtype_values[0])
envs = mps.left_envs(sites=[2])
assert list(envs.keys()) == [2]
np.testing.assert_array_almost_equal(envs[2], np.eye(3))
def test_left_envs_first_site(backend_dtype_values):
dtype = backend_dtype_values[1]
D, d, N = 3, 2, 5
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=0, backend=backend_dtype_values[0])
envs = mps.left_envs(sites=[0])
assert list(envs.keys()) == [0]
expected = 1.
np.testing.assert_array_almost_equal(envs[0], expected)
def test_left_envs_last_site(backend_dtype_values):
dtype = backend_dtype_values[1]
D, d, N = 3, 2, 5
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=0, backend=backend_dtype_values[0])
envs = mps.left_envs(sites=[5])
assert list(envs.keys()) == [5]
expected = 1.
np.testing.assert_array_almost_equal(envs[5], expected)
def test_left_envs_two_sites(backend_dtype_values):
backend = backend_factory.get_backend(backend_dtype_values[0])
dtype = backend_dtype_values[1]
D, d, N = 3, 2, 5
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=0, backend=backend_dtype_values[0])
envs = mps.left_envs(sites=[2, 3])
assert list(envs.keys()) == [2, 3]
expected = backend.convert_to_tensor(
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]]))
np.testing.assert_array_almost_equal(envs[2], expected)
np.testing.assert_array_almost_equal(envs[3], expected)
def test_left_envs_two_non_consecutive_sites(backend_dtype_values):
backend = backend_factory.get_backend(backend_dtype_values[0])
dtype = backend_dtype_values[1]
D, d, N = 3, 2, 5
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(
tensors,
center_position=None,
backend=backend_dtype_values[0],
canonicalize=False)
l = backend.convert_to_tensor(np.ones((1, 1), dtype=dtype))
exp = {}
for n, t in enumerate(mps.tensors):
if n in [1, 3]:
exp[n] = l
l = ncon([t, l, t], [[1, 2, -1], [1, 3], [3, 2, -2]], backend=backend)
envs = mps.left_envs(sites=[1, 3])
assert list(envs.keys()) == [1, 3]
for n in [1, 3]:
expected = exp[n]
actual = envs[n]
np.testing.assert_array_almost_equal(expected, actual)
def test_left_envs_two_non_consecutive_sites_2(backend_dtype_values):
dtype = backend_dtype_values[1]
D, d, N = 3, 2, 5
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=4, backend=backend_dtype_values[0])
envs = mps.left_envs(sites=[1, 3])
assert list(envs.keys()) == [1, 3]
np.testing.assert_array_almost_equal(envs[1], np.eye(2))
np.testing.assert_array_almost_equal(envs[3], np.eye(3))
def test_left_envs_all_sites(backend_dtype_values):
backend = backend_factory.get_backend(backend_dtype_values[0])
dtype = backend_dtype_values[1]
D, d, N = 3, 2, 5
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(
tensors,
center_position=N//2,
backend=backend_dtype_values[0],
canonicalize=True)
l = backend.convert_to_tensor(np.ones((1, 1), dtype=dtype))
exp = {}
for n, t in enumerate(mps.tensors):
exp[n] = l
l = ncon([t, l, t], [[1, 2, -1], [1, 3], [3, 2, -2]], backend=backend)
envs = mps.left_envs(sites=range(N))
assert list(envs.keys()) == list(range(N))
for n in range(N):
expected = exp[n]
actual = envs[n]
np.testing.assert_array_almost_equal(expected, actual)
def test_left_envs_all_sites_non_0_center_position(backend_dtype_values):
backend = backend_factory.get_backend(backend_dtype_values[0])
dtype = backend_dtype_values[1]
D, d, N = 3, 2, 5
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=2, backend=backend_dtype_values[0])
envs = mps.left_envs(sites=[0, 1, 2, 3, 4, 5])
assert list(envs.keys()) == [0, 1, 2, 3, 4, 5]
expected = backend.convert_to_tensor(
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]]))
np.testing.assert_array_almost_equal(envs[0], 1.)
np.testing.assert_array_almost_equal(envs[3], expected)
def test_left_envs_empty_seq(backend_dtype_values):
dtype = backend_dtype_values[1]
D, d, N = 1, 2, 10
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=0, backend=backend_dtype_values[0])
assert mps.left_envs(()) == {}
assert mps.left_envs([]) == {}
assert mps.left_envs(range(0)) == {}
def test_left_envs_invalid_sites_raises_error(backend_dtype_values):
dtype = backend_dtype_values[1]
D, d, N = 1, 2, 10
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=0, backend=backend_dtype_values[0])
with pytest.raises(ValueError):
mps.left_envs(sites=[0, N + 1])
with pytest.raises(ValueError):
mps.left_envs(sites=[-1, N - 1])
def test_right_envs_one_site(backend_dtype_values):
dtype = backend_dtype_values[1]
D, d, N = 3, 2, 5
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=0, backend=backend_dtype_values[0])
envs = mps.right_envs(sites=[2])
assert list(envs.keys()) == [2]
np.testing.assert_array_almost_equal(envs[2], np.eye(3))
def test_right_envs_one_site_center_position_to_right(backend_dtype_values):
backend = backend_factory.get_backend(backend_dtype_values[0])
dtype = backend_dtype_values[1]
D, d, N = 3, 2, 5
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=4, backend=backend_dtype_values[0])
envs = mps.right_envs(sites=[2])
assert list(envs.keys()) == [2]
expected = backend.convert_to_tensor(
np.array([[1, 0, 0], [0, 0, 0], [0, 0, 0]]))
np.testing.assert_array_almost_equal(envs[2], expected)
def test_right_envs_first_site(backend_dtype_values):
dtype = backend_dtype_values[1]
D, d, N = 3, 2, 5
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=0, backend=backend_dtype_values[0])
envs = mps.right_envs(sites=[-1])
assert list(envs.keys()) == [-1]
expected = 1.
np.testing.assert_array_almost_equal(envs[-1], expected)
def test_right_envs_last_site(backend_dtype_values):
dtype = backend_dtype_values[1]
D, d, N = 3, 2, 5
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=0, backend=backend_dtype_values[0])
envs = mps.right_envs(sites=[4])
assert list(envs.keys()) == [4]
expected = 1.
np.testing.assert_array_almost_equal(envs[4], expected)
def test_right_envs_two_sites(backend_dtype_values):
dtype = backend_dtype_values[1]
D, d, N = 3, 2, 5
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=0, backend=backend_dtype_values[0])
envs = mps.right_envs(sites=[2, 3])
assert list(envs.keys()) == [2, 3]
np.testing.assert_array_almost_equal(envs[2], np.eye(3))
np.testing.assert_array_almost_equal(envs[3], np.eye(2))
def test_right_envs_two_non_consecutive_sites(backend_dtype_values):
dtype = backend_dtype_values[1]
backend = backend_factory.get_backend(backend_dtype_values[0])
D, d, N = 3, 2, 5
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(
tensors,
center_position=None,
backend=backend_dtype_values[0],
canonicalize=False)
r = backend.convert_to_tensor(np.ones((1, 1), dtype=dtype))
exp = {}
for n in reversed(range(N)):
t = mps.tensors[n]
if n in [1, 3]:
exp[n] = r
r = ncon([t, r, t], [[-1, 2, 1], [1, 3], [-2, 2, 3]], backend=backend)
envs = mps.right_envs(sites=[1, 3])
assert set(envs.keys()) == {3, 1}
for n in [1, 3]:
expected = exp[n]
actual = envs[n]
np.testing.assert_array_almost_equal(expected, actual)
def test_right_envs_two_non_consecutive_sites_2(backend_dtype_values):
backend = backend_factory.get_backend(backend_dtype_values[0])
dtype = backend_dtype_values[1]
D, d, N = 3, 2, 5
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=1, backend=backend_dtype_values[0])
envs = mps.right_envs(sites=[1, 3])
assert set(envs.keys()) == {1, 3}
exp1 = backend.convert_to_tensor(np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))
exp3 = backend.convert_to_tensor(np.array([[1, 0], [0, 1]]))
np.testing.assert_array_almost_equal(envs[1], exp1)
np.testing.assert_array_almost_equal(envs[3], exp3)
def test_right_envs_all_sites(backend_dtype_values):
dtype = backend_dtype_values[1]
D, d, N = 3, 2, 5
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=0, backend=backend_dtype_values[0])
envs = mps.right_envs(sites=[-1, 0, 1, 2, 3, 4])
assert set(envs.keys()) == {-1, 0, 1, 2, 3, 4}
np.testing.assert_array_almost_equal(envs[-1], 1.)
np.testing.assert_array_almost_equal(envs[2], np.eye(3))
def test_right_envs_all_sites_non_0_center_position(backend_dtype_values):
dtype = backend_dtype_values[1]
D, d, N = 3, 2, 5
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=2, backend=backend_dtype_values[0])
envs = mps.right_envs(sites=[-1, 0, 1, 2, 3, 4])
assert set(envs.keys()) == {-1, 0, 1, 2, 3, 4}
np.testing.assert_array_almost_equal(envs[-1], 1.)
np.testing.assert_array_almost_equal(envs[2], np.eye(3))
def test_right_envs_empty_seq(backend_dtype_values):
dtype = backend_dtype_values[1]
D, d, N = 1, 2, 10
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=0, backend=backend_dtype_values[0])
assert mps.right_envs(()) == {}
assert mps.right_envs([]) == {}
assert mps.right_envs(range(0)) == {}
def test_right_envs_invalid_sites_raises_error(backend_dtype_values):
backend = backend_dtype_values[0]
dtype = backend_dtype_values[1]
D, d, N = 1, 2, 10
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=0, backend=backend)
with pytest.raises(ValueError):
mps.right_envs(sites=[-1, N])
with pytest.raises(ValueError):
mps.right_envs(sites=[-2, N - 1])
def test_random_mps(backend_dtype_values):
mps = FiniteMPS.random(
d=[3, 4, 5],
D=[2, 3],
dtype=backend_dtype_values[1],
backend=backend_dtype_values[0])
assert len(mps) == 3
assert mps.physical_dimensions == [3, 4, 5]
assert mps.bond_dimensions == [1, 2, 3, 1]
def test_random_mps_invalid_dimensions_raises_error(backend_dtype_values):
with pytest.raises(ValueError):
FiniteMPS.random(
d=[3, 4],
D=[2, 3],
dtype=backend_dtype_values[1],
backend=backend_dtype_values[0])
with pytest.raises(ValueError):
FiniteMPS.random(
d=[3, 4, 4, 2],
D=[2, 3],
dtype=backend_dtype_values[1],
backend=backend_dtype_values[0])
def test_save_not_implemented(backend_dtype_values):
backend = backend_dtype_values[0]
dtype = backend_dtype_values[1]
D, d, N = 1, 2, 10
tensors = [np.ones((1, d, D), dtype=dtype)] + [
np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)
] + [np.ones((D, d, 1), dtype=dtype)]
mps = FiniteMPS(tensors, center_position=0, backend=backend)
with pytest.raises(NotImplementedError):
mps.save('tmp')
def test_check_canonical_raises(backend):
N, D, d = 10, 10, 2
tensors = [np.random.randn(1, d, D)] + [
np.random.randn(D, d, D) for _ in range(N - 2)
] + [np.random.randn(D, d, 1)]
mps = FiniteMPS(
tensors, center_position=None, canonicalize=False, backend=backend)
with pytest.raises(
ValueError,
match="FiniteMPS.center_positions is `None`. "
"Cannot check canonical form."):
mps.check_canonical()
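# Hedged refactoring sketch (not part of the tests above): the all-ones tensor list is
# rebuilt in nearly every test; a small helper like this could remove the duplication.
def _ones_mps_tensors(D, d, N, dtype):
    """Return the [(1, d, D), (D, d, D) * (N - 2), (D, d, 1)] all-ones tensor list used in the tests."""
    return ([np.ones((1, d, D), dtype=dtype)]
            + [np.ones((D, d, D), dtype=dtype) for _ in range(N - 2)]
            + [np.ones((D, d, 1), dtype=dtype)])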
|
from datetime import datetime
from typing import cast
import discord
from redbot.core import commands, i18n, checks
from redbot.core.utils.common_filters import (
filter_invites,
filter_various_mentions,
escape_spoilers_and_mass_mentions,
)
from redbot.core.utils.mod import get_audit_reason
from .abc import MixinMeta
_ = i18n.Translator("Mod", __file__)
class ModInfo(MixinMeta):
"""
Commands regarding names, userinfo, etc.
"""
async def get_names_and_nicks(self, user):
names = await self.config.user(user).past_names()
nicks = await self.config.member(user).past_nicks()
if names:
names = [escape_spoilers_and_mass_mentions(name) for name in names if name]
if nicks:
nicks = [escape_spoilers_and_mass_mentions(nick) for nick in nicks if nick]
return names, nicks
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(manage_nicknames=True)
@checks.admin_or_permissions(manage_nicknames=True)
async def rename(self, ctx: commands.Context, user: discord.Member, *, nickname: str = ""):
"""Change a user's nickname.
Leaving the nickname empty will remove it.
"""
nickname = nickname.strip()
me = cast(discord.Member, ctx.me)
if not nickname:
nickname = None
elif not 2 <= len(nickname) <= 32:
await ctx.send(_("Nicknames must be between 2 and 32 characters long."))
return
if not (
(me.guild_permissions.manage_nicknames or me.guild_permissions.administrator)
and me.top_role > user.top_role
and user != ctx.guild.owner
):
await ctx.send(
_(
"I do not have permission to rename that member. They may be higher than or "
"equal to me in the role hierarchy."
)
)
else:
try:
await user.edit(reason=get_audit_reason(ctx.author, None), nick=nickname)
except discord.Forbidden:
# Just in case we missed something in the permissions check above
await ctx.send(_("I do not have permission to rename that member."))
except discord.HTTPException as exc:
if exc.status == 400: # BAD REQUEST
await ctx.send(_("That nickname is invalid."))
else:
await ctx.send(_("An unexpected error has occured."))
else:
await ctx.send(_("Done."))
def handle_custom(self, user):
a = [c for c in user.activities if c.type == discord.ActivityType.custom]
if not a:
return None, discord.ActivityType.custom
a = a[0]
c_status = None
if not a.name and not a.emoji:
return None, discord.ActivityType.custom
elif a.name and a.emoji:
c_status = _("Custom: {emoji} {name}").format(emoji=a.emoji, name=a.name)
elif a.emoji:
c_status = _("Custom: {emoji}").format(emoji=a.emoji)
elif a.name:
c_status = _("Custom: {name}").format(name=a.name)
return c_status, discord.ActivityType.custom
def handle_playing(self, user):
p_acts = [c for c in user.activities if c.type == discord.ActivityType.playing]
if not p_acts:
return None, discord.ActivityType.playing
p_act = p_acts[0]
act = _("Playing: {name}").format(name=p_act.name)
return act, discord.ActivityType.playing
def handle_streaming(self, user):
s_acts = [c for c in user.activities if c.type == discord.ActivityType.streaming]
if not s_acts:
return None, discord.ActivityType.streaming
s_act = s_acts[0]
if isinstance(s_act, discord.Streaming):
act = _("Streaming: [{name}{sep}{game}]({url})").format(
name=discord.utils.escape_markdown(s_act.name),
sep=" | " if s_act.game else "",
game=discord.utils.escape_markdown(s_act.game) if s_act.game else "",
url=s_act.url,
)
else:
act = _("Streaming: {name}").format(name=s_act.name)
return act, discord.ActivityType.streaming
def handle_listening(self, user):
l_acts = [c for c in user.activities if c.type == discord.ActivityType.listening]
if not l_acts:
return None, discord.ActivityType.listening
l_act = l_acts[0]
if isinstance(l_act, discord.Spotify):
act = _("Listening: [{title}{sep}{artist}]({url})").format(
title=discord.utils.escape_markdown(l_act.title),
sep=" | " if l_act.artist else "",
artist=discord.utils.escape_markdown(l_act.artist) if l_act.artist else "",
url=f"https://open.spotify.com/track/{l_act.track_id}",
)
else:
act = _("Listening: {title}").format(title=l_act.name)
return act, discord.ActivityType.listening
def handle_watching(self, user):
w_acts = [c for c in user.activities if c.type == discord.ActivityType.watching]
if not w_acts:
return None, discord.ActivityType.watching
w_act = w_acts[0]
act = _("Watching: {name}").format(name=w_act.name)
return act, discord.ActivityType.watching
def handle_competing(self, user):
w_acts = [c for c in user.activities if c.type == discord.ActivityType.competing]
if not w_acts:
return None, discord.ActivityType.competing
w_act = w_acts[0]
act = _("Competing in: {competing}").format(competing=w_act.name)
return act, discord.ActivityType.competing
def get_status_string(self, user):
string = ""
for a in [
self.handle_custom(user),
self.handle_playing(user),
self.handle_listening(user),
self.handle_streaming(user),
self.handle_watching(user),
self.handle_competing(user),
]:
status_string, status_type = a
if status_string is None:
continue
string += f"{status_string}\n"
return string
@commands.command()
@commands.guild_only()
@commands.bot_has_permissions(embed_links=True)
async def userinfo(self, ctx, *, user: discord.Member = None):
"""Show information about a user.
This includes fields for status, discord join date, server
join date, voice state and previous names/nicknames.
If the user has no roles, previous names or previous nicknames,
these fields will be omitted.
"""
author = ctx.author
guild = ctx.guild
if not user:
user = author
# A special case for a special someone :^)
special_date = datetime(2016, 1, 10, 6, 8, 4, 443000)
is_special = user.id == 96130341705637888 and guild.id == 133049272517001216
roles = user.roles[-1:0:-1]
names, nicks = await self.get_names_and_nicks(user)
joined_at = user.joined_at if not is_special else special_date
since_created = (ctx.message.created_at - user.created_at).days
if joined_at is not None:
since_joined = (ctx.message.created_at - joined_at).days
user_joined = joined_at.strftime("%d %b %Y %H:%M")
else:
since_joined = "?"
user_joined = _("Unknown")
user_created = user.created_at.strftime("%d %b %Y %H:%M")
voice_state = user.voice
member_number = (
sorted(guild.members, key=lambda m: m.joined_at or ctx.message.created_at).index(user)
+ 1
)
created_on = _("{}\n({} days ago)").format(user_created, since_created)
joined_on = _("{}\n({} days ago)").format(user_joined, since_joined)
if any(a.type is discord.ActivityType.streaming for a in user.activities):
statusemoji = "\N{LARGE PURPLE CIRCLE}"
elif user.status.name == "online":
statusemoji = "\N{LARGE GREEN CIRCLE}"
elif user.status.name == "offline":
statusemoji = "\N{MEDIUM WHITE CIRCLE}\N{VARIATION SELECTOR-16}"
elif user.status.name == "dnd":
statusemoji = "\N{LARGE RED CIRCLE}"
elif user.status.name == "idle":
statusemoji = "\N{LARGE ORANGE CIRCLE}"
activity = _("Chilling in {} status").format(user.status)
status_string = self.get_status_string(user)
if roles:
role_str = ", ".join([x.mention for x in roles])
# 400 BAD REQUEST (error code: 50035): Invalid Form Body
# In embed.fields.2.value: Must be 1024 or fewer in length.
if len(role_str) > 1024:
# Alternative string building time.
# This is not the most optimal, but if you're hitting this, you are losing more time
# to every single check running on users than the occasional user info invoke
# We don't start by building this way, since the number of times we hit this should be
# infinitesimally small compared to when we don't across all uses of Red.
continuation_string = _(
"and {numeric_number} more roles not displayed due to embed limits."
)
available_length = 1024 - len(continuation_string) # do not attempt to tweak, i18n
role_chunks = []
remaining_roles = 0
for r in roles:
chunk = f"{r.mention}, "
chunk_size = len(chunk)
if chunk_size < available_length:
available_length -= chunk_size
role_chunks.append(chunk)
else:
remaining_roles += 1
role_chunks.append(continuation_string.format(numeric_number=remaining_roles))
role_str = "".join(role_chunks)
else:
role_str = None
data = discord.Embed(description=status_string or activity, colour=user.colour)
data.add_field(name=_("Joined Discord on"), value=created_on)
data.add_field(name=_("Joined this server on"), value=joined_on)
if role_str is not None:
data.add_field(
name=_("Roles") if len(roles) > 1 else _("Role"), value=role_str, inline=False
)
if names:
# May need sanitizing later, but mentions do not ping in embeds currently
val = filter_invites(", ".join(names))
data.add_field(
name=_("Previous Names") if len(names) > 1 else _("Previous Name"),
value=val,
inline=False,
)
if nicks:
# May need sanitizing later, but mentions do not ping in embeds currently
val = filter_invites(", ".join(nicks))
data.add_field(
name=_("Previous Nicknames") if len(nicks) > 1 else _("Previous Nickname"),
value=val,
inline=False,
)
if voice_state and voice_state.channel:
data.add_field(
name=_("Current voice channel"),
value="{0.mention} ID: {0.id}".format(voice_state.channel),
inline=False,
)
data.set_footer(text=_("Member #{} | User ID: {}").format(member_number, user.id))
name = str(user)
name = " ~ ".join((name, user.nick)) if user.nick else name
name = filter_invites(name)
avatar = user.avatar_url_as(static_format="png")
data.set_author(name=f"{statusemoji} {name}", url=avatar)
data.set_thumbnail(url=avatar)
await ctx.send(embed=data)
@commands.command()
async def names(self, ctx: commands.Context, *, user: discord.Member):
"""Show previous names and nicknames of a user."""
names, nicks = await self.get_names_and_nicks(user)
msg = ""
if names:
msg += _("**Past 20 names**:")
msg += "\n"
msg += ", ".join(names)
if nicks:
if msg:
msg += "\n\n"
msg += _("**Past 20 nicknames**:")
msg += "\n"
msg += ", ".join(nicks)
if msg:
msg = filter_various_mentions(msg)
await ctx.send(msg)
else:
await ctx.send(_("That user doesn't have any recorded name or nickname change."))
|
from scrapy import signals
from scrapy.exceptions import DontCloseSpider
from scrapy.spiders import Spider, CrawlSpider
from collections.abc import Iterable
from . import connection, defaults
from .utils import bytes_to_str
class RedisMixin(object):
"""Mixin class to implement reading urls from a redis queue."""
redis_key = None
redis_batch_size = None
redis_encoding = None
# Redis client placeholder.
server = None
def start_requests(self):
"""Returns a batch of start requests from redis."""
return self.next_requests()
def setup_redis(self, crawler=None):
"""Setup redis connection and idle signal.
This should be called after the spider has set its crawler object.
"""
if self.server is not None:
return
if crawler is None:
# We allow optional crawler argument to keep backwards
# compatibility.
# XXX: Raise a deprecation warning.
crawler = getattr(self, 'crawler', None)
if crawler is None:
raise ValueError("crawler is required")
settings = crawler.settings
if self.redis_key is None:
self.redis_key = settings.get(
'REDIS_START_URLS_KEY', defaults.START_URLS_KEY,
)
self.redis_key = self.redis_key % {'name': self.name}
if not self.redis_key.strip():
raise ValueError("redis_key must not be empty")
if self.redis_batch_size is None:
# TODO: Deprecate this setting (REDIS_START_URLS_BATCH_SIZE).
self.redis_batch_size = settings.getint(
'REDIS_START_URLS_BATCH_SIZE',
settings.getint('CONCURRENT_REQUESTS'),
)
try:
self.redis_batch_size = int(self.redis_batch_size)
except (TypeError, ValueError):
raise ValueError("redis_batch_size must be an integer")
if self.redis_encoding is None:
self.redis_encoding = settings.get('REDIS_ENCODING', defaults.REDIS_ENCODING)
self.logger.info("Reading start URLs from redis key '%(redis_key)s' "
"(batch size: %(redis_batch_size)s, encoding: %(redis_encoding)s",
self.__dict__)
self.server = connection.from_settings(crawler.settings)
        if settings.getbool('REDIS_START_URLS_AS_SET', defaults.START_URLS_AS_SET):
            self.fetch_data = self.server.spop
        elif settings.getbool('REDIS_START_URLS_AS_ZSET', defaults.START_URLS_AS_ZSET):
self.fetch_data = self.pop_priority_queue
else:
self.fetch_data = self.pop_list_queue
# The idle signal is called when the spider has no requests left,
# that's when we will schedule new requests from redis queue
crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)
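    # The two pop_* helpers below fetch a batch and trim it from the key in a
    # single pipeline (MULTI/EXEC by default in redis-py), so the read and the
    # removal happen atomically with respect to other consumers.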
def pop_list_queue(self, redis_key, batch_size):
with self.server.pipeline() as pipe:
pipe.lrange(redis_key, 0, batch_size - 1)
pipe.ltrim(redis_key, batch_size, -1)
datas, _ = pipe.execute()
return datas
def pop_priority_queue(self, redis_key, batch_size):
with self.server.pipeline() as pipe:
pipe.zrevrange(redis_key, 0, batch_size - 1)
pipe.zremrangebyrank(redis_key, -batch_size, -1)
datas, _ = pipe.execute()
return datas
def next_requests(self):
"""Returns a request to be scheduled or none."""
# XXX: Do we need to use a timeout here?
found = 0
datas = self.fetch_data(self.redis_key, self.redis_batch_size)
for data in datas:
reqs = self.make_request_from_data(data)
if isinstance(reqs, Iterable):
for req in reqs:
yield req
# XXX: should be here?
found += 1
                    self.logger.info("start req url: %s", req.url)
elif reqs:
yield reqs
found += 1
else:
self.logger.debug("Request not made from data: %r", data)
if found:
self.logger.debug("Read %s requests from '%s'", found, self.redis_key)
def make_request_from_data(self, data):
"""Returns a Request instance from data coming from Redis.
By default, ``data`` is an encoded URL. You can override this method to
provide your own message decoding.
Parameters
----------
data : bytes
Message from redis.
"""
url = bytes_to_str(data, self.redis_encoding)
return self.make_requests_from_url(url)
def schedule_next_requests(self):
"""Schedules a request if available"""
# TODO: While there is capacity, schedule a batch of redis requests.
for req in self.next_requests():
self.crawler.engine.crawl(req, spider=self)
def spider_idle(self):
"""Schedules a request if available, otherwise waits."""
# XXX: Handle a sentinel to close the spider.
self.schedule_next_requests()
raise DontCloseSpider
class RedisSpider(RedisMixin, Spider):
"""Spider that reads urls from redis queue when idle.
Attributes
----------
redis_key : str (default: REDIS_START_URLS_KEY)
        Redis key to fetch start URLs from.
redis_batch_size : int (default: CONCURRENT_REQUESTS)
Number of messages to fetch from redis on each attempt.
redis_encoding : str (default: REDIS_ENCODING)
Encoding to use when decoding messages from redis queue.
Settings
--------
REDIS_START_URLS_KEY : str (default: "<spider.name>:start_urls")
        Default Redis key to fetch start URLs from.
REDIS_START_URLS_BATCH_SIZE : int (deprecated by CONCURRENT_REQUESTS)
Default number of messages to fetch from redis on each attempt.
REDIS_START_URLS_AS_SET : bool (default: False)
Use SET operations to retrieve messages from the redis queue. If False,
        the messages are retrieved using the LPOP command.
REDIS_ENCODING : str (default: "utf-8")
Default encoding to use when decoding messages from redis queue.
"""
@classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        obj = super(RedisSpider, cls).from_crawler(crawler, *args, **kwargs)
obj.setup_redis(crawler)
return obj
class RedisCrawlSpider(RedisMixin, CrawlSpider):
"""Spider that reads urls from redis queue when idle.
Attributes
----------
redis_key : str (default: REDIS_START_URLS_KEY)
        Redis key to fetch start URLs from.
redis_batch_size : int (default: CONCURRENT_REQUESTS)
Number of messages to fetch from redis on each attempt.
redis_encoding : str (default: REDIS_ENCODING)
Encoding to use when decoding messages from redis queue.
Settings
--------
REDIS_START_URLS_KEY : str (default: "<spider.name>:start_urls")
        Default Redis key to fetch start URLs from.
REDIS_START_URLS_BATCH_SIZE : int (deprecated by CONCURRENT_REQUESTS)
Default number of messages to fetch from redis on each attempt.
REDIS_START_URLS_AS_SET : bool (default: True)
Use SET operations to retrieve messages from the redis queue.
REDIS_ENCODING : str (default: "utf-8")
Default encoding to use when decoding messages from redis queue.
"""
@classmethod
    def from_crawler(cls, crawler, *args, **kwargs):
        obj = super(RedisCrawlSpider, cls).from_crawler(crawler, *args, **kwargs)
obj.setup_redis(crawler)
return obj
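# Hedged usage sketch (illustrative, not part of the original module): a spider
# that overrides ``make_request_from_data`` to decode JSON messages pushed to
# the redis key instead of plain URLs. The spider name, key and payload fields
# are assumptions made up for the example.
#
#   import json
#   from scrapy import Request
#   from scrapy_redis.spiders import RedisSpider
#   from scrapy_redis.utils import bytes_to_str
#
#   class JsonRedisSpider(RedisSpider):
#       name = 'json_demo'
#       redis_key = 'json_demo:start_urls'
#
#       def make_request_from_data(self, data):
#           payload = json.loads(bytes_to_str(data, self.redis_encoding))
#           return Request(payload['url'], dont_filter=True,
#                          meta={'item_id': payload.get('id')})
#
#       def parse(self, response):
#           yield {'url': response.url, 'item_id': response.meta.get('item_id')}
#
# Messages would then be queued with, e.g.:
#   redis-cli lpush json_demo:start_urls '{"url": "http://example.com", "id": 1}'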
|
import os
import unittest
class RoslibManifestlibTest(unittest.TestCase):
def test_ManifestException(self):
from roslib.manifestlib import ManifestException
self.assert_(isinstance(ManifestException(), Exception))
def test_Platform(self):
from roslib.manifestlib import Platform
for bad in [None, '']:
try:
Platform(bad, '1')
self.fail('should have failed on [%s]' % bad)
except ValueError:
pass
try:
Platform('ubuntu', bad)
self.fail('should have failed on [%s]' % bad)
except ValueError:
pass
p = Platform('ubuntu', '8.04')
self.assertEquals('ubuntu 8.04', str(p))
self.assertEquals('ubuntu 8.04', repr(p))
self.assertEquals('<platform os="ubuntu" version="8.04"/>', p.xml())
self.assertEquals(p, Platform('ubuntu', '8.04'))
self.assertEquals(p, Platform('ubuntu', '8.04', notes=None))
self.assertNotEquals(p, Platform('ubuntu', '8.04', 'some notes'))
self.assertNotEquals(p, 'foo')
self.assertNotEquals(p, 1)
# note: probably actually "osx"
p = Platform('OS X', '10.6', 'macports')
self.assertEquals('OS X 10.6', str(p))
self.assertEquals('OS X 10.6', repr(p))
self.assertEquals('<platform os="OS X" version="10.6" notes="macports"/>', p.xml())
self.assertEquals(p, p)
self.assertEquals(p, Platform('OS X', '10.6', 'macports'))
self.assertNotEquals(p, Platform('OS X', '10.6'))
self.assertNotEquals(p, 'foo')
self.assertNotEquals(p, 1)
def test_Depend(self):
from roslib.manifestlib import Depend, StackDepend
for bad in [None, '']:
try:
Depend(bad)
self.fail('should have failed on [%s]' % bad)
except ValueError:
pass
d = Depend('roslib')
self.assertEquals('roslib', str(d))
self.assertEquals('roslib', repr(d))
self.assertEquals('<depend package="roslib" />', d.xml())
self.assertEquals(d, Depend('roslib'))
self.assertNotEquals(d, StackDepend('roslib'))
self.assertNotEquals(d, Depend('roslib2'))
self.assertNotEquals(d, 1)
def test_StackDepend(self):
from roslib.manifestlib import Depend, StackDepend
for bad in [None, '']:
try:
StackDepend(bad)
self.fail('should have failed on [%s]' % bad)
except ValueError:
pass
d = StackDepend('common')
self.assertEquals('common', str(d))
self.assertEquals('common', repr(d))
self.assertEquals('<depend stack="common" />', d.xml())
self.assertEquals(d, StackDepend('common'))
self.assertNotEquals(d, Depend('common'))
self.assertNotEquals(d, StackDepend('common2'))
self.assertNotEquals(d, 1)
def test_ROSDep(self):
from roslib.manifestlib import ROSDep
for bad in [None, '']:
try:
rd = ROSDep(bad)
self.fail('should have failed on [%s]' % bad)
except ValueError:
pass
rd = ROSDep('python')
self.assertEquals('<rosdep name="python" />', rd.xml())
def test_VersionControl(self):
from roslib.manifestlib import VersionControl
ros_svn = 'https://ros.svn.sf.net/svnroot'
bad = [
(None, ros_svn),
]
for type_, url in bad:
try:
VersionControl(type_, url)
self.fail('should have failed on [%s] [%s]' % (type_, url))
except ValueError:
pass
tests = [
('svn', ros_svn, '<versioncontrol type="svn" url="%s" />' % ros_svn),
('cvs', None, '<versioncontrol type="cvs" />'),
]
for type_, url, xml in tests:
vc = VersionControl(type_, url)
self.assertEquals(type_, vc.type)
self.assertEquals(url, vc.url)
self.assertEquals(xml, vc.xml())
def _subtest_parse_example1(self, m):
from roslib.manifestlib import _Manifest
self.assert_(isinstance(m, _Manifest))
self.assertEquals('a brief description', m.brief)
self.assertEquals('Line 1\nLine 2', m.description.strip())
self.assertEquals('The authors\ngo here', m.author.strip())
self.assertEquals('Public Domain\nwith other stuff', m.license.strip())
self.assertEquals('http://pr.willowgarage.com/package/', m.url)
self.assertEquals('http://www.willowgarage.com/files/willowgarage/robot10.jpg', m.logo)
dpkgs = [d.package for d in m.depends]
self.assertEquals({'pkgname', 'common'}, set(dpkgs))
rdpkgs = [d.name for d in m.rosdeps]
self.assertEquals({'python', 'bar', 'baz'}, set(rdpkgs))
for p in m.platforms:
if p.os == 'ubuntu':
self.assertEquals('8.04', p.version)
self.assertEquals('', p.notes)
elif p.os == 'OS X':
self.assertEquals('10.6', p.version)
self.assertEquals('macports', p.notes)
else:
self.fail('unknown platform '+str(p))
def _subtest_parse_stack_example1(self, m):
from roslib.manifestlib import _Manifest
self.assert_(isinstance(m, _Manifest))
self.assertEquals('stack', m._type)
self.assertEquals('a brief description', m.brief)
self.assertEquals('Line 1\nLine 2', m.description.strip())
self.assertEquals('The authors\ngo here', m.author.strip())
self.assertEquals('Public Domain\nwith other stuff', m.license.strip())
self.assertEquals('http://ros.org/stack/', m.url)
self.assertEquals('http://www.willowgarage.com/files/willowgarage/robot10.jpg', m.logo)
dpkgs = [d.stack for d in m.depends]
self.assertEquals({'stackname', 'common'}, set(dpkgs))
self.assertEquals([], m.rosdeps)
self.assertEquals([], m.exports)
def _subtest_parse_stack_version(self, m):
self.assertEquals('1.2.3', m.version)
def test_parse_example1_file(self):
from roslib.manifestlib import parse_file, _Manifest
p = os.path.join(get_test_path(), 'manifest_tests', 'example1.xml')
self._subtest_parse_example1(parse_file(_Manifest(), p))
p = os.path.join(get_test_path(), 'manifest_tests', 'stack_example1.xml')
self._subtest_parse_stack_example1(parse_file(_Manifest('stack'), p))
p = os.path.join(get_test_path(), 'manifest_tests', 'stack_version.xml')
self._subtest_parse_stack_version(parse_file(_Manifest('stack'), p))
def test_parse_example1_string(self):
from roslib.manifestlib import parse, _Manifest
self._subtest_parse_example1(parse(_Manifest(), EXAMPLE1))
self._subtest_parse_stack_example1(parse(_Manifest('stack'), STACK_EXAMPLE1))
def test__Manifest(self):
from roslib.manifestlib import _Manifest
m = _Manifest()
# check defaults
self.assertEquals('package', m._type)
m = _Manifest('stack')
self.assertEquals('stack', m._type)
def test_Manifest_str(self):
# just make sure it doesn't crash
from roslib.manifestlib import parse, _Manifest
str(parse(_Manifest(), EXAMPLE1))
def test_Manifest_xml(self):
from roslib.manifestlib import parse, _Manifest
m = _Manifest()
parse(m, EXAMPLE1)
self._subtest_parse_example1(m)
# verify roundtrip
m2 = _Manifest()
parse(m2, m.xml())
self._subtest_parse_example1(m2)
# bad file examples should be more like the roslaunch tests where there is just 1 thing wrong
def test_parse_bad_file(self):
from roslib.manifestlib import parse_file, _Manifest, ManifestException
base_p = os.path.join(get_test_path(), 'manifest_tests')
m = _Manifest()
for b in ['bad1.xml', 'bad2.xml', 'bad3.xml']:
p = os.path.join(base_p, b)
try:
parse_file(m, p)
self.fail('parse should have failed on bad manifest')
except ManifestException as e:
print(str(e))
self.assert_(b in str(e), 'file name should be in error message [%s]' % (str(e)))
EXAMPLE1 = """<package>
<description brief="a brief description">Line 1
Line 2
</description>
<author>The authors
go here</author>
<license>Public Domain
with other stuff</license>
<url>http://pr.willowgarage.com/package/</url>
<logo>http://www.willowgarage.com/files/willowgarage/robot10.jpg</logo>
<depend package="pkgname" />
<depend package="common"/>
<export>
<cpp cflags="-I${prefix}/include" lflags="-L${prefix}/lib -lros"/>
<cpp os="osx" cflags="-I${prefix}/include" lflags="-L${prefix}/lib -lrosthread -framework CoreServices"/>
</export>
<rosdep name="python" />
<rosdep name="bar" />
<rosdep name="baz" />
<platform os="ubuntu" version="8.04" />
<platform os="OS X" version="10.6" notes="macports" />
<rosbuild2>
<depend thirdparty="thisshouldbeokay"/>
</rosbuild2>
</package>"""
STACK_EXAMPLE1 = """<stack>
<description brief="a brief description">Line 1
Line 2
</description>
<author>The authors
go here</author>
<license>Public Domain
with other stuff</license>
<url>http://ros.org/stack/</url>
<logo>http://www.willowgarage.com/files/willowgarage/robot10.jpg</logo>
<depend stack="stackname" />
<depend stack="common"/>
</stack>"""
STACK_INVALID1 = """<stack>
<description brief="a brief description">Line 1</description>
<author>The authors</author>
<license>Public Domain</license>
<rosdep name="python" />
</stack>"""
STACK_INVALID2 = """<stack>
<description brief="a brief description">Line 1</description>
<author>The authors</author>
<license>Public Domain</license>
<export>
<cpp cflags="-I${prefix}/include" lflags="-L${prefix}/lib -lros"/>
<cpp os="osx" cflags="-I${prefix}/include" lflags="-L${prefix}/lib -lrosthread -framework CoreServices"/>
</export>
</stack>"""
def get_test_path():
return os.path.abspath(os.path.dirname(__file__))
|
from homeassistant.core import Config, HomeAssistant
from .config_flow import IpmaFlowHandler # noqa: F401
from .const import DOMAIN # noqa: F401
DEFAULT_NAME = "ipma"
async def async_setup(hass: HomeAssistant, config: Config) -> bool:
"""Set up configured IPMA."""
return True
async def async_setup_entry(hass, config_entry):
"""Set up IPMA station as config entry."""
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, "weather")
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
await hass.config_entries.async_forward_entry_unload(config_entry, "weather")
return True
|
from datetime import timedelta
import logging
from aiohttp import ClientConnectorError
from pygti.exceptions import InvalidAuth
from pytz import timezone
from homeassistant.const import ATTR_ATTRIBUTION, ATTR_ID, DEVICE_CLASS_TIMESTAMP
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.util.dt import utcnow
from .const import ATTRIBUTION, CONF_STATION, DOMAIN, MANUFACTURER
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
MAX_LIST = 20
MAX_TIME_OFFSET = 360
ICON = "mdi:bus"
UNIT_OF_MEASUREMENT = "min"
ATTR_DEPARTURE = "departure"
ATTR_LINE = "line"
ATTR_ORIGIN = "origin"
ATTR_DIRECTION = "direction"
ATTR_TYPE = "type"
ATTR_DELAY = "delay"
ATTR_NEXT = "next"
PARALLEL_UPDATES = 0
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_devices):
"""Set up the sensor platform."""
hub = hass.data[DOMAIN][config_entry.entry_id]
session = aiohttp_client.async_get_clientsession(hass)
sensor = HVVDepartureSensor(hass, config_entry, session, hub)
async_add_devices([sensor], True)
class HVVDepartureSensor(Entity):
"""HVVDepartureSensor class."""
def __init__(self, hass, config_entry, session, hub):
"""Initialize."""
self.config_entry = config_entry
self.station_name = self.config_entry.data[CONF_STATION]["name"]
self.attr = {ATTR_ATTRIBUTION: ATTRIBUTION}
self._available = False
self._state = None
self._name = f"Departures at {self.station_name}"
self._last_error = None
self.gti = hub.gti
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self, **kwargs):
"""Update the sensor."""
departure_time = utcnow() + timedelta(
minutes=self.config_entry.options.get("offset", 0)
)
departure_time_tz_berlin = departure_time.astimezone(timezone("Europe/Berlin"))
payload = {
"station": self.config_entry.data[CONF_STATION],
"time": {
"date": departure_time_tz_berlin.strftime("%d.%m.%Y"),
"time": departure_time_tz_berlin.strftime("%H:%M"),
},
"maxList": MAX_LIST,
"maxTimeOffset": MAX_TIME_OFFSET,
"useRealtime": self.config_entry.options.get("realtime", False),
}
if "filter" in self.config_entry.options:
payload.update({"filter": self.config_entry.options["filter"]})
try:
data = await self.gti.departureList(payload)
        except InvalidAuth as error:
            if self._last_error != InvalidAuth:
                _LOGGER.error("Authentication failed: %r", error)
                self._last_error = InvalidAuth
            self._available = False
            return
        except ClientConnectorError as error:
            if self._last_error != ClientConnectorError:
                _LOGGER.warning("Network unavailable: %r", error)
                self._last_error = ClientConnectorError
            self._available = False
            return
        except Exception as error:  # pylint: disable=broad-except
            if self._last_error != error:
                _LOGGER.error("Error occurred while fetching data: %r", error)
                self._last_error = error
            self._available = False
            return
if not (data["returnCode"] == "OK" and data.get("departures")):
self._available = False
return
if self._last_error == ClientConnectorError:
_LOGGER.debug("Network available again")
self._last_error = None
departure = data["departures"][0]
line = departure["line"]
delay = departure.get("delay", 0)
self._available = True
self._state = (
departure_time
+ timedelta(minutes=departure["timeOffset"])
+ timedelta(seconds=delay)
).isoformat()
self.attr.update(
{
ATTR_LINE: line["name"],
ATTR_ORIGIN: line["origin"],
ATTR_DIRECTION: line["direction"],
ATTR_TYPE: line["type"]["shortInfo"],
ATTR_ID: line["id"],
ATTR_DELAY: delay,
}
)
departures = []
for departure in data["departures"]:
line = departure["line"]
delay = departure.get("delay", 0)
departures.append(
{
ATTR_DEPARTURE: departure_time
+ timedelta(minutes=departure["timeOffset"])
+ timedelta(seconds=delay),
ATTR_LINE: line["name"],
ATTR_ORIGIN: line["origin"],
ATTR_DIRECTION: line["direction"],
ATTR_TYPE: line["type"]["shortInfo"],
ATTR_ID: line["id"],
ATTR_DELAY: delay,
}
)
self.attr[ATTR_NEXT] = departures
@property
def unique_id(self):
"""Return a unique ID to use for this sensor."""
station_id = self.config_entry.data[CONF_STATION]["id"]
station_type = self.config_entry.data[CONF_STATION]["type"]
return f"{self.config_entry.entry_id}-{station_id}-{station_type}"
@property
def device_info(self):
"""Return the device info for this sensor."""
return {
"identifiers": {
(
DOMAIN,
self.config_entry.entry_id,
self.config_entry.data[CONF_STATION]["id"],
self.config_entry.data[CONF_STATION]["type"],
)
},
"name": self._name,
"manufacturer": MANUFACTURER,
}
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Return the icon of the sensor."""
return ICON
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return DEVICE_CLASS_TIMESTAMP
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self.attr
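# Illustrative shape of a gti.departureList() response, inferred only from the
# fields read above (station names and ids are made up, not an API reference):
#   {
#       "returnCode": "OK",
#       "departures": [
#           {
#               "timeOffset": 5,      # minutes until departure
#               "delay": 60,          # delay in seconds
#               "line": {
#                   "name": "U1",
#                   "origin": "Norderstedt Mitte",
#                   "direction": "Ohlsdorf",
#                   "type": {"shortInfo": "U"},
#                   "id": "hvv:u1",
#               },
#           },
#       ],
#   }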
|
from homeassistant.components.emulated_hue import Config
from tests.async_mock import MagicMock, Mock, patch
def test_config_google_home_entity_id_to_number():
"""Test config adheres to the type."""
mock_hass = Mock()
mock_hass.config.path = MagicMock("path", return_value="test_path")
conf = Config(mock_hass, {"type": "google_home"})
with patch(
"homeassistant.components.emulated_hue.load_json",
return_value={"1": "light.test2"},
) as json_loader:
with patch("homeassistant.components.emulated_hue.save_json") as json_saver:
number = conf.entity_id_to_number("light.test")
assert number == "2"
assert json_saver.mock_calls[0][1][1] == {
"1": "light.test2",
"2": "light.test",
}
assert json_saver.call_count == 1
assert json_loader.call_count == 1
number = conf.entity_id_to_number("light.test")
assert number == "2"
assert json_saver.call_count == 1
number = conf.entity_id_to_number("light.test2")
assert number == "1"
assert json_saver.call_count == 1
entity_id = conf.number_to_entity_id("1")
assert entity_id == "light.test2"
def test_config_google_home_entity_id_to_number_altered():
"""Test config adheres to the type."""
mock_hass = Mock()
mock_hass.config.path = MagicMock("path", return_value="test_path")
conf = Config(mock_hass, {"type": "google_home"})
with patch(
"homeassistant.components.emulated_hue.load_json",
return_value={"21": "light.test2"},
) as json_loader:
with patch("homeassistant.components.emulated_hue.save_json") as json_saver:
number = conf.entity_id_to_number("light.test")
assert number == "22"
assert json_saver.call_count == 1
assert json_loader.call_count == 1
assert json_saver.mock_calls[0][1][1] == {
"21": "light.test2",
"22": "light.test",
}
number = conf.entity_id_to_number("light.test")
assert number == "22"
assert json_saver.call_count == 1
number = conf.entity_id_to_number("light.test2")
assert number == "21"
assert json_saver.call_count == 1
entity_id = conf.number_to_entity_id("21")
assert entity_id == "light.test2"
def test_config_google_home_entity_id_to_number_empty():
"""Test config adheres to the type."""
mock_hass = Mock()
mock_hass.config.path = MagicMock("path", return_value="test_path")
conf = Config(mock_hass, {"type": "google_home"})
with patch(
"homeassistant.components.emulated_hue.load_json", return_value={}
) as json_loader:
with patch("homeassistant.components.emulated_hue.save_json") as json_saver:
number = conf.entity_id_to_number("light.test")
assert number == "1"
assert json_saver.call_count == 1
assert json_loader.call_count == 1
assert json_saver.mock_calls[0][1][1] == {"1": "light.test"}
number = conf.entity_id_to_number("light.test")
assert number == "1"
assert json_saver.call_count == 1
number = conf.entity_id_to_number("light.test2")
assert number == "2"
assert json_saver.call_count == 2
entity_id = conf.number_to_entity_id("2")
assert entity_id == "light.test2"
def test_config_alexa_entity_id_to_number():
"""Test config adheres to the type."""
conf = Config(None, {"type": "alexa"})
number = conf.entity_id_to_number("light.test")
assert number == "light.test"
number = conf.entity_id_to_number("light.test")
assert number == "light.test"
number = conf.entity_id_to_number("light.test2")
assert number == "light.test2"
entity_id = conf.number_to_entity_id("light.test")
assert entity_id == "light.test"
|
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal, assert_allclose)
import pytest
from scipy import linalg
import scipy.io
import mne
from mne import pick_types, Epochs, find_events, read_events
from mne.datasets.testing import requires_testing_data
from mne.transforms import apply_trans
from mne.utils import assert_dig_allclose
from mne.io import read_raw_fif, read_raw_kit, read_epochs_kit
from mne.io.constants import FIFF
from mne.io.kit.coreg import read_sns
from mne.io.kit.constants import KIT
from mne.io.tests.test_raw import _test_raw_reader
from mne.surface import _get_ico_surface
from mne.io.kit import __file__ as _KIT_INIT_FILE
data_dir = op.join(op.dirname(_KIT_INIT_FILE), 'tests', 'data')
sqd_path = op.join(data_dir, 'test.sqd')
sqd_umd_path = op.join(data_dir, 'test_umd-raw.sqd')
epochs_path = op.join(data_dir, 'test-epoch.raw')
events_path = op.join(data_dir, 'test-eve.txt')
mrk_path = op.join(data_dir, 'test_mrk.sqd')
mrk2_path = op.join(data_dir, 'test_mrk_pre.sqd')
mrk3_path = op.join(data_dir, 'test_mrk_post.sqd')
elp_txt_path = op.join(data_dir, 'test_elp.txt')
hsp_txt_path = op.join(data_dir, 'test_hsp.txt')
elp_path = op.join(data_dir, 'test.elp')
hsp_path = op.join(data_dir, 'test.hsp')
data_path = mne.datasets.testing.data_path(download=False)
sqd_as_path = op.join(data_path, 'KIT', 'test_as-raw.con')
yokogawa_path = op.join(
data_path, 'KIT', 'ArtificalSignalData_Yokogawa_1khz.con')
ricoh_path = op.join(
data_path, 'KIT', 'ArtificalSignalData_RICOH_1khz.con')
ricoh_systems_paths = [op.join(
data_path, 'KIT', 'Example_PQA160C_1001-export_anonymyze.con')]
ricoh_systems_paths += [op.join(
data_path, 'KIT', 'Example_RICOH160-1_10020-export_anonymyze.con')]
ricoh_systems_paths += [op.join(
data_path, 'KIT', 'Example_RICOH160-1_10021-export_anonymyze.con')]
berlin_path = op.join(data_path, 'KIT', 'data_berlin.con')
@requires_testing_data
def test_data(tmpdir):
"""Test reading raw kit files."""
pytest.raises(TypeError, read_raw_kit, epochs_path)
pytest.raises(TypeError, read_epochs_kit, sqd_path)
pytest.raises(ValueError, read_raw_kit, sqd_path, mrk_path, elp_txt_path)
pytest.raises(ValueError, read_raw_kit, sqd_path, None, None, None,
list(range(200, 190, -1)))
pytest.raises(ValueError, read_raw_kit, sqd_path, None, None, None,
list(range(167, 159, -1)), '*', 1, True)
# check functionality
raw_mrk = read_raw_kit(sqd_path, [mrk2_path, mrk3_path], elp_txt_path,
hsp_txt_path)
assert raw_mrk.info['description'] == \
'NYU 160ch System since Jan24 2009 (34) V2R004 EQ1160C'
raw_py = _test_raw_reader(read_raw_kit, input_fname=sqd_path, mrk=mrk_path,
elp=elp_txt_path, hsp=hsp_txt_path,
stim=list(range(167, 159, -1)), slope='+',
stimthresh=1)
assert 'RawKIT' in repr(raw_py)
assert_equal(raw_mrk.info['kit_system_id'], KIT.SYSTEM_NYU_2010)
# check number/kind of channels
assert_equal(len(raw_py.info['chs']), 193)
kit_channels = (('kind', {FIFF.FIFFV_MEG_CH: 157, FIFF.FIFFV_REF_MEG_CH: 3,
FIFF.FIFFV_MISC_CH: 32, FIFF.FIFFV_STIM_CH: 1}),
('coil_type', {FIFF.FIFFV_COIL_KIT_GRAD: 157,
FIFF.FIFFV_COIL_KIT_REF_MAG: 3,
FIFF.FIFFV_COIL_NONE: 33}))
for label, target in kit_channels:
actual = {id_: sum(ch[label] == id_ for ch in raw_py.info['chs']) for
id_ in target.keys()}
assert_equal(actual, target)
# Test stim channel
raw_stim = read_raw_kit(sqd_path, mrk_path, elp_txt_path, hsp_txt_path,
stim='<', preload=False)
for raw in [raw_py, raw_stim, raw_mrk]:
stim_pick = pick_types(raw.info, meg=False, ref_meg=False,
stim=True, exclude='bads')
stim1, _ = raw[stim_pick]
stim2 = np.array(raw.read_stim_ch(), ndmin=2)
assert_array_equal(stim1, stim2)
# Binary file only stores the sensor channels
py_picks = pick_types(raw_py.info, meg=True, exclude='bads')
raw_bin = op.join(data_dir, 'test_bin_raw.fif')
raw_bin = read_raw_fif(raw_bin, preload=True)
bin_picks = pick_types(raw_bin.info, meg=True, stim=True, exclude='bads')
data_bin, _ = raw_bin[bin_picks]
data_py, _ = raw_py[py_picks]
# this .mat was generated using the Yokogawa MEG Reader
data_Ykgw = op.join(data_dir, 'test_Ykgw.mat')
data_Ykgw = scipy.io.loadmat(data_Ykgw)['data']
data_Ykgw = data_Ykgw[py_picks]
assert_array_almost_equal(data_py, data_Ykgw)
py_picks = pick_types(raw_py.info, meg=True, stim=True, ref_meg=False,
exclude='bads')
data_py, _ = raw_py[py_picks]
assert_array_almost_equal(data_py, data_bin)
# KIT-UMD data
_test_raw_reader(read_raw_kit, input_fname=sqd_umd_path, test_rank='less')
raw = read_raw_kit(sqd_umd_path)
assert raw.info['description'] == \
'University of Maryland/Kanazawa Institute of Technology/160-channel MEG System (53) V2R004 PQ1160R' # noqa: E501
assert_equal(raw.info['kit_system_id'], KIT.SYSTEM_UMD_2014_12)
# check number/kind of channels
assert_equal(len(raw.info['chs']), 193)
for label, target in kit_channels:
actual = {id_: sum(ch[label] == id_ for ch in raw.info['chs']) for
id_ in target.keys()}
assert_equal(actual, target)
# KIT Academia Sinica
raw = read_raw_kit(sqd_as_path, slope='+')
assert raw.info['description'] == \
'Academia Sinica/Institute of Linguistics//Magnetoencephalograph System (261) V2R004 PQ1160R-N2' # noqa: E501
assert_equal(raw.info['kit_system_id'], KIT.SYSTEM_AS_2008)
assert_equal(raw.info['chs'][100]['ch_name'], 'MEG 101')
assert_equal(raw.info['chs'][100]['kind'], FIFF.FIFFV_MEG_CH)
assert_equal(raw.info['chs'][100]['coil_type'], FIFF.FIFFV_COIL_KIT_GRAD)
assert_equal(raw.info['chs'][157]['ch_name'], 'MEG 158')
assert_equal(raw.info['chs'][157]['kind'], FIFF.FIFFV_REF_MEG_CH)
assert_equal(raw.info['chs'][157]['coil_type'],
FIFF.FIFFV_COIL_KIT_REF_MAG)
assert_equal(raw.info['chs'][160]['ch_name'], 'EEG 001')
assert_equal(raw.info['chs'][160]['kind'], FIFF.FIFFV_EEG_CH)
assert_equal(raw.info['chs'][160]['coil_type'], FIFF.FIFFV_COIL_EEG)
assert_array_equal(find_events(raw), [[91, 0, 2]])
def _assert_sinusoid(data, t, freq, amp, msg):
__tracebackhide__ = True
sinusoid = np.exp(2j * np.pi * freq * t) * amp
phase = np.angle(np.dot(data, sinusoid))
sinusoid = np.cos(2 * np.pi * freq * t - phase) * amp
assert_allclose(data, sinusoid, rtol=0.05, atol=amp * 1e-3, err_msg=msg)
@requires_testing_data
@pytest.mark.parametrize('fname, desc', [
(yokogawa_path, 'Meg160/Analysis (1001) V3R000 PQA160C'),
(ricoh_path, 'Meg160/Analysis (1001) V3R000 PQA160C'),
])
def test_ricoh_data(tmpdir, fname, desc):
"""Test reading channel names and dig information from Ricoh systems."""
raw = read_raw_kit(fname, standardize_names=True)
assert raw.ch_names[0] == 'MEG 001'
raw = read_raw_kit(fname, standardize_names=False, verbose='debug')
assert raw.info['description'] == desc
assert_allclose(raw.times[-1], 5. - 1. / raw.info['sfreq'])
assert raw.ch_names[0] == 'LF31'
eeg_picks = pick_types(raw.info, meg=False, eeg=True)
assert len(eeg_picks) == 45
assert len(raw.info['dig']) == 8 + len(eeg_picks) - 2 # EKG+ and E no pos
bad_dig = [ch['ch_name'] for ci, ch in enumerate(raw.info['chs'])
if ci in eeg_picks and (ch['loc'][:3] == 0).all()]
assert bad_dig == ['EKG+', 'E']
assert not any(np.allclose(d['r'], 0.) for d in raw.info['dig'])
assert_allclose(
raw.info['dev_head_t']['trans'],
[[0.998311, -0.056923, 0.01164, 0.001403],
[0.054469, 0.986653, 0.153458, 0.0044],
[-0.02022, -0.152564, 0.988087, 0.018634],
[0., 0., 0., 1.]], atol=1e-5)
data = raw.get_data()
# 1 pT 10 Hz on the first channel
assert raw.info['chs'][0]['coil_type'] == FIFF.FIFFV_COIL_KIT_GRAD
_assert_sinusoid(data[0], raw.times, 10, 1e-12, '1 pT 10 Hz MEG')
assert_allclose(data[1:160], 0., atol=1e-13)
# 1 V 5 Hz analog
assert raw.info['chs'][186]['coil_type'] == FIFF.FIFFV_COIL_EEG
_assert_sinusoid(data[160], raw.times, 5, 1, '1 V 5 Hz analog')
assert_allclose(data[161:185], 0., atol=1e-20)
# 50 uV 8 Hz plus 1.6 mV offset
assert raw.info['chs'][186]['coil_type'] == FIFF.FIFFV_COIL_EEG
eeg_data = data[186]
assert_allclose(eeg_data.mean(), 1.6e-3, atol=1e-5) # offset
eeg_data = eeg_data - eeg_data.mean()
_assert_sinusoid(eeg_data, raw.times, 8, 50e-6, '50 uV 8 Hz EEG')
assert_allclose(data[187:-1], 0., atol=1e-20)
assert_allclose(data[-1], 254.5, atol=0.51)
def test_epochs():
"""Test reading epoched SQD file."""
raw = read_raw_kit(sqd_path, stim=None)
events = read_events(events_path)
raw_epochs = Epochs(raw, events, None, tmin=0, tmax=.099, baseline=None)
data1 = raw_epochs.get_data()
epochs = read_epochs_kit(epochs_path, events_path)
data11 = epochs.get_data()
assert_array_equal(data1, data11)
def test_raw_events():
"""Test creating stim channel from raw SQD file."""
def evts(a, b, c, d, e, f=None):
out = [[269, a, b], [281, b, c], [1552, c, d], [1564, d, e]]
if f is not None:
out.append([2000, e, f])
return out
raw = read_raw_kit(sqd_path)
assert_array_equal(find_events(raw, output='step', consecutive=True),
evts(255, 254, 255, 254, 255, 0))
raw = read_raw_kit(sqd_path, slope='+')
assert_array_equal(find_events(raw, output='step', consecutive=True),
evts(0, 1, 0, 1, 0))
raw = read_raw_kit(sqd_path, stim='<', slope='+')
assert_array_equal(find_events(raw, output='step', consecutive=True),
evts(0, 128, 0, 128, 0))
raw = read_raw_kit(sqd_path, stim='<', slope='+', stim_code='channel')
assert_array_equal(find_events(raw, output='step', consecutive=True),
evts(0, 160, 0, 160, 0))
raw = read_raw_kit(sqd_path, stim=range(160, 162), slope='+',
stim_code='channel')
assert_array_equal(find_events(raw, output='step', consecutive=True),
evts(0, 160, 0, 160, 0))
def test_ch_loc():
"""Test raw kit loc."""
raw_py = read_raw_kit(sqd_path, mrk_path, elp_txt_path, hsp_txt_path,
stim='<')
raw_bin = read_raw_fif(op.join(data_dir, 'test_bin_raw.fif'))
ch_py = np.array([ch['loc'] for ch in
raw_py._raw_extras[0]['channels'][:160]])
# ch locs stored as m, not mm
ch_py[:, :3] *= 1e3
ch_sns = read_sns(op.join(data_dir, 'sns.txt'))
assert_array_almost_equal(ch_py, ch_sns, 2)
assert_array_almost_equal(raw_py.info['dev_head_t']['trans'],
raw_bin.info['dev_head_t']['trans'], 4)
for py_ch, bin_ch in zip(raw_py.info['chs'], raw_bin.info['chs']):
if bin_ch['ch_name'].startswith('MEG'):
# the stored ch locs have more precision than the sns.txt
assert_array_almost_equal(py_ch['loc'], bin_ch['loc'], decimal=2)
# test when more than one marker file provided
mrks = [mrk_path, mrk2_path, mrk3_path]
read_raw_kit(sqd_path, mrks, elp_txt_path, hsp_txt_path, preload=False)
# this dataset does not have the equivalent set of points :(
raw_bin.info['dig'] = raw_bin.info['dig'][:8]
raw_py.info['dig'] = raw_py.info['dig'][:8]
assert_dig_allclose(raw_py.info, raw_bin.info)
def test_hsp_elp():
"""Test KIT usage of *.elp and *.hsp files against *.txt files."""
raw_txt = read_raw_kit(sqd_path, mrk_path, elp_txt_path, hsp_txt_path)
raw_elp = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
# head points
pts_txt = np.array([dig_point['r'] for dig_point in raw_txt.info['dig']])
pts_elp = np.array([dig_point['r'] for dig_point in raw_elp.info['dig']])
assert_array_almost_equal(pts_elp, pts_txt, decimal=5)
# transforms
trans_txt = raw_txt.info['dev_head_t']['trans']
trans_elp = raw_elp.info['dev_head_t']['trans']
assert_array_almost_equal(trans_elp, trans_txt, decimal=5)
# head points in device space
pts_txt_in_dev = apply_trans(linalg.inv(trans_txt), pts_txt)
pts_elp_in_dev = apply_trans(linalg.inv(trans_elp), pts_elp)
assert_array_almost_equal(pts_elp_in_dev, pts_txt_in_dev, decimal=5)
def test_decimate(tmpdir):
"""Test decimation of digitizer headshapes with too many points."""
# load headshape and convert to meters
hsp_mm = _get_ico_surface(5)['rr'] * 100
hsp_m = hsp_mm / 1000.
# save headshape to a file in mm in temporary directory
tempdir = str(tmpdir)
sphere_hsp_path = op.join(tempdir, 'test_sphere.txt')
np.savetxt(sphere_hsp_path, hsp_mm)
# read in raw data using spherical hsp, and extract new hsp
with pytest.warns(RuntimeWarning,
match='was automatically downsampled .* FastScan'):
raw = read_raw_kit(sqd_path, mrk_path, elp_txt_path, sphere_hsp_path)
# collect headshape from raw (should now be in m)
hsp_dec = np.array([dig['r'] for dig in raw.info['dig']])[8:]
# with 10242 points and _decimate_points set to resolution of 5 mm, hsp_dec
# should be a bit over 5000 points. If not, something is wrong or
# decimation resolution has been purposefully changed
assert len(hsp_dec) > 5000
# should have similar size, distance from center
dist = np.sqrt(np.sum((hsp_m - np.mean(hsp_m, axis=0))**2, axis=1))
dist_dec = np.sqrt(np.sum((hsp_dec - np.mean(hsp_dec, axis=0))**2, axis=1))
hsp_rad = np.mean(dist)
hsp_dec_rad = np.mean(dist_dec)
assert_array_almost_equal(hsp_rad, hsp_dec_rad, decimal=3)
@requires_testing_data
@pytest.mark.parametrize('fname, desc, system_id', [
(ricoh_systems_paths[0],
'Meg160/Analysis (1001) V2R004 PQA160C', 1001),
(ricoh_systems_paths[1],
'RICOH MEG System (10020) V3R000 RICOH160-1', 10020),
(ricoh_systems_paths[2],
'RICOH MEG System (10021) V3R000 RICOH160-1', 10021),
])
def test_ricoh_systems(tmpdir, fname, desc, system_id):
"""Test reading channel names and dig information from Ricoh systems."""
raw = read_raw_kit(fname, standardize_names=False)
assert raw.info['description'] == desc
assert raw.info['kit_system_id'] == system_id
@requires_testing_data
def test_berlin():
"""Test data from Berlin."""
# gh-8535
raw = read_raw_kit(berlin_path)
assert raw.info['description'] == 'Physikalisch Technische Bundesanstalt, Berlin/128-channel MEG System (124) V2R004 PQ1128R-N2' # noqa: E501
assert raw.info['kit_system_id'] == 124
assert raw.info['highpass'] == 0.
assert raw.info['lowpass'] == 200.
assert raw.info['sfreq'] == 500.
n = int(round(28.77 * raw.info['sfreq']))
meg = raw.get_data('MEG 003', n, n + 1)[0, 0]
assert_allclose(meg, -8.89e-12, rtol=1e-3)
eeg = raw.get_data('E14', n, n + 1)[0, 0]
assert_allclose(eeg, -2.55, rtol=1e-3)
|
import re
from datetime import date
from hashlib import md5
from urllib.parse import urlencode
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.db.models import Count
from django.db.models import Q
from django.template import Library
from django.template.defaultfilters import stringfilter
from django.template.loader import select_template
from django.utils import timezone
from django.utils.encoding import smart_str
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from django_comments import get_model as get_comment_model
from django_comments.models import CommentFlag
from tagging.models import Tag
from tagging.utils import calculate_cloud
from ..breadcrumbs import retrieve_breadcrumbs
from ..calendar import Calendar
from ..comparison import EntryPublishedVectorBuilder
from ..context import get_context_first_matching_object
from ..context import get_context_first_object
from ..context import get_context_loop_positions
from ..flags import PINGBACK, TRACKBACK
from ..managers import DRAFT
from ..managers import tags_published
from ..models.author import Author
from ..models.category import Category
from ..models.entry import Entry
from ..settings import ENTRY_LOOP_TEMPLATES
from ..settings import PROTOCOL
from ..templating import loop_template_list
WIDONT_REGEXP = re.compile(
r'\s+(\S+\s*)$')
DOUBLE_SPACE_PUNCTUATION_WIDONT_REGEXP = re.compile(
r'\s+([-+*/%=;:!?]+ \S+\s*)$')
END_PUNCTUATION_WIDONT_REGEXP = re.compile(
r'\s+([?!]+\s*)$')
register = Library()
@register.inclusion_tag('zinnia/tags/dummy.html', takes_context=True)
def get_categories(context, template='zinnia/tags/categories.html'):
"""
Return the published categories.
"""
return {'template': template,
'categories': Category.published.all().order_by('title').annotate(
count_entries_published=Count('entries')),
'context_category': context.get('category')}
@register.inclusion_tag('zinnia/tags/dummy.html', takes_context=True)
def get_categories_tree(context, template='zinnia/tags/categories_tree.html'):
"""
Return the categories as a tree.
"""
return {'template': template,
'categories': Category.objects.all().annotate(
count_entries=Count('entries')),
'context_category': context.get('category')}
@register.inclusion_tag('zinnia/tags/dummy.html', takes_context=True)
def get_authors(context, template='zinnia/tags/authors.html'):
"""
Return the published authors.
"""
return {'template': template,
'authors': Author.published.all().annotate(
count_entries_published=Count('entries')),
'context_author': context.get('author')}
@register.inclusion_tag('zinnia/tags/dummy.html')
def get_recent_entries(number=5, template='zinnia/tags/entries_recent.html'):
"""
Return the most recent entries.
"""
return {'template': template,
'entries': Entry.published.all()[:number]}
@register.inclusion_tag('zinnia/tags/dummy.html')
def get_featured_entries(number=5,
template='zinnia/tags/entries_featured.html'):
"""
Return the featured entries.
"""
return {'template': template,
'entries': Entry.published.filter(featured=True)[:number]}
@register.inclusion_tag('zinnia/tags/dummy.html')
def get_draft_entries(number=5,
template='zinnia/tags/entries_draft.html'):
"""
Return the last draft entries.
"""
return {'template': template,
'entries': Entry.objects.filter(status=DRAFT)[:number]}
@register.inclusion_tag('zinnia/tags/dummy.html')
def get_random_entries(number=5, template='zinnia/tags/entries_random.html'):
"""
Return random entries.
"""
return {'template': template,
'entries': Entry.published.order_by('?')[:number]}
@register.inclusion_tag('zinnia/tags/dummy.html')
def get_popular_entries(number=5, template='zinnia/tags/entries_popular.html'):
"""
Return popular entries.
"""
return {'template': template,
'entries': Entry.published.filter(
comment_count__gt=0).order_by(
'-comment_count', '-publication_date')[:number]}
@register.inclusion_tag('zinnia/tags/dummy.html', takes_context=True)
def get_similar_entries(context, number=5,
template='zinnia/tags/entries_similar.html'):
"""
Return similar entries.
"""
entry = context.get('entry')
if not entry:
return {'template': template, 'entries': []}
vectors = EntryPublishedVectorBuilder()
entries = vectors.get_related(entry, number)
return {'template': template,
'entries': entries}
@register.inclusion_tag('zinnia/tags/dummy.html')
def get_archives_entries(template='zinnia/tags/entries_archives.html'):
"""
Return archives entries.
"""
return {'template': template,
'archives': Entry.published.datetimes(
'publication_date', 'month', order='DESC')}
@register.inclusion_tag('zinnia/tags/dummy.html')
def get_archives_entries_tree(
template='zinnia/tags/entries_archives_tree.html'):
"""
Return archives entries as a tree.
"""
return {'template': template,
'archives': Entry.published.datetimes(
'publication_date', 'day', order='ASC')}
@register.inclusion_tag('zinnia/tags/dummy.html', takes_context=True)
def get_calendar_entries(context, year=None, month=None,
template='zinnia/tags/entries_calendar.html'):
"""
Return an HTML calendar of entries.
"""
if not (year and month):
day_week_month = (context.get('day') or
context.get('week') or
context.get('month'))
publication_date = getattr(context.get('object'),
'publication_date', None)
if day_week_month:
current_month = day_week_month
elif publication_date:
if settings.USE_TZ:
publication_date = timezone.localtime(publication_date)
current_month = publication_date.date()
else:
today = timezone.now()
if settings.USE_TZ:
today = timezone.localtime(today)
current_month = today.date()
current_month = current_month.replace(day=1)
else:
current_month = date(year, month, 1)
dates = list(map(
lambda x: settings.USE_TZ and timezone.localtime(x).date() or x.date(),
Entry.published.datetimes('publication_date', 'month')))
if current_month not in dates:
dates.append(current_month)
dates.sort()
index = dates.index(current_month)
previous_month = index > 0 and dates[index - 1] or None
next_month = index != len(dates) - 1 and dates[index + 1] or None
calendar = Calendar()
return {'template': template,
'next_month': next_month,
'previous_month': previous_month,
'calendar': calendar.formatmonth(
current_month.year,
current_month.month,
previous_month=previous_month,
next_month=next_month)}
@register.inclusion_tag('zinnia/tags/dummy.html')
def get_recent_comments(number=5, template='zinnia/tags/comments_recent.html'):
"""
Return the most recent comments.
"""
    # Using map(smart_str, ...) fixes a bug related to issue #8554
entry_published_pks = map(smart_str,
Entry.published.values_list('id', flat=True))
content_type = ContentType.objects.get_for_model(Entry)
comments = get_comment_model().objects.filter(
Q(flags=None) | Q(flags__flag=CommentFlag.MODERATOR_APPROVAL),
content_type=content_type, object_pk__in=entry_published_pks,
is_public=True).order_by('-pk')[:number]
comments = comments.prefetch_related('content_object')
return {'template': template,
'comments': comments}
@register.inclusion_tag('zinnia/tags/dummy.html')
def get_recent_linkbacks(number=5,
template='zinnia/tags/linkbacks_recent.html'):
"""
Return the most recent linkbacks.
"""
entry_published_pks = map(smart_str,
Entry.published.values_list('id', flat=True))
content_type = ContentType.objects.get_for_model(Entry)
linkbacks = get_comment_model().objects.filter(
content_type=content_type,
object_pk__in=entry_published_pks,
flags__flag__in=[PINGBACK, TRACKBACK],
is_public=True).order_by('-pk')[:number]
linkbacks = linkbacks.prefetch_related('content_object')
return {'template': template,
'linkbacks': linkbacks}
@register.inclusion_tag('zinnia/tags/dummy.html', takes_context=True)
def zinnia_pagination(context, page, begin_pages=1, end_pages=1,
before_pages=2, after_pages=2,
template='zinnia/tags/pagination.html'):
"""
    Return a Digg-like pagination,
    by splitting a long list of pages into 3 blocks of pages.
"""
get_string = ''
for key, value in context['request'].GET.items():
if key != 'page':
get_string += '&%s=%s' % (key, value)
page_range = list(page.paginator.page_range)
begin = page_range[:begin_pages]
end = page_range[-end_pages:]
middle = page_range[max(page.number - before_pages - 1, 0):
page.number + after_pages]
if set(begin) & set(middle): # [1, 2, 3], [2, 3, 4], [...]
begin = sorted(set(begin + middle)) # [1, 2, 3, 4]
middle = []
elif begin[-1] + 1 == middle[0]: # [1, 2, 3], [4, 5, 6], [...]
begin += middle # [1, 2, 3, 4, 5, 6]
middle = []
elif middle[-1] + 1 == end[0]: # [...], [15, 16, 17], [18, 19, 20]
end = middle + end # [15, 16, 17, 18, 19, 20]
middle = []
elif set(middle) & set(end): # [...], [17, 18, 19], [18, 19, 20]
end = sorted(set(middle + end)) # [17, 18, 19, 20]
middle = []
if set(begin) & set(end): # [1, 2, 3], [...], [2, 3, 4]
begin = sorted(set(begin + end)) # [1, 2, 3, 4]
middle, end = [], []
elif begin[-1] + 1 == end[0]: # [1, 2, 3], [...], [4, 5, 6]
begin += end # [1, 2, 3, 4, 5, 6]
middle, end = [], []
return {'template': template,
'page': page,
'begin': begin,
'middle': middle,
'end': end,
'GET_string': get_string}
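# Worked example (illustrative): with page_range = [1..20], page.number = 10 and
# the defaults begin_pages=1, end_pages=1, before_pages=2, after_pages=2, the
# blocks come out as begin=[1], middle=[8, 9, 10, 11, 12], end=[20], which the
# template can render as "1 ... 8 9 10 11 12 ... 20".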
@register.inclusion_tag('zinnia/tags/dummy.html', takes_context=True)
def zinnia_breadcrumbs(context, root_name='',
template='zinnia/tags/breadcrumbs.html',):
"""
Return a breadcrumb for the application.
"""
path = context['request'].path
context_object = get_context_first_object(
context, ['object', 'category', 'tag', 'author'])
context_page = context.get('page_obj')
breadcrumbs = retrieve_breadcrumbs(
path, context_object, context_page, root_name)
return {'template': template,
'breadcrumbs': breadcrumbs}
@register.simple_tag(takes_context=True)
def zinnia_loop_template(context, default_template):
"""
    Return a template selected from its position within a loop
and the filtering context.
"""
matching, context_object = get_context_first_matching_object(
context,
['category', 'tag', 'author', 'pattern',
'year', 'month', 'week', 'day'])
context_positions = get_context_loop_positions(context)
templates = loop_template_list(
context_positions, context_object, matching,
default_template, ENTRY_LOOP_TEMPLATES)
return select_template(templates)
@register.simple_tag
def get_gravatar(email, size=80, rating='g', default=None,
protocol=PROTOCOL):
"""
Return url for a Gravatar.
"""
gravatar_protocols = {'http': 'http://www',
'https': 'https://secure'}
url = '%s.gravatar.com/avatar/%s' % (
gravatar_protocols[protocol],
md5(email.strip().lower().encode('utf-8')).hexdigest())
options = {'s': size, 'r': rating}
if default:
options['d'] = default
url = '%s?%s' % (url, urlencode(options))
    return url.replace('&', '&amp;')
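# Illustrative example: get_gravatar('user@example.com', size=120, default='mm')
# returns, with the https protocol, something like
#   https://secure.gravatar.com/avatar/<md5 of the lowercased email>?s=120&amp;r=g&amp;d=mm
# The '&' characters are escaped to '&amp;' because the tag output is meant to
# be inserted directly into HTML.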
@register.simple_tag
def get_tags():
"""
Return the published tags.
"""
return Tag.objects.usage_for_queryset(
Entry.published.all())
@register.inclusion_tag('zinnia/tags/dummy.html', takes_context=True)
def get_tag_cloud(context, steps=6, min_count=None,
template='zinnia/tags/tag_cloud.html'):
"""
Return a cloud of published tags.
"""
tags = Tag.objects.usage_for_queryset(
Entry.published.all(), counts=True,
min_count=min_count)
return {'template': template,
'tags': calculate_cloud(tags, steps),
'context_tag': context.get('tag')}
@register.filter(needs_autoescape=True)
@stringfilter
def widont(value, autoescape=None):
"""
Add an HTML non-breaking space between the final
two words of the string to avoid "widowed" words.
"""
esc = autoescape and conditional_escape or (lambda x: x)
def replace(matchobj):
        return '&nbsp;%s' % matchobj.group(1)
value = END_PUNCTUATION_WIDONT_REGEXP.sub(replace, esc(smart_str(value)))
value = WIDONT_REGEXP.sub(replace, value)
value = DOUBLE_SPACE_PUNCTUATION_WIDONT_REGEXP.sub(replace, value)
return mark_safe(value)
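# Illustrative example: {{ "Hello wonderful world"|widont }} renders as
# "Hello wonderful&nbsp;world", gluing the last two words together so the final
# word never wraps onto a line of its own.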
@register.filter
def week_number(date):
r"""
Return the Python week number of a date.
    The Django |date:"W" filter returns a value
    incompatible with the view implementation.
"""
week_number = date.strftime('%W')
if int(week_number) < 10:
week_number = week_number[-1]
return week_number
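# Illustrative example: date(2023, 1, 9).strftime('%W') is '02', so week_number
# returns '2', matching the non-zero-padded week numbers used by the views.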
@register.filter
def comment_admin_urlname(action):
"""
Return the admin URLs for the comment app used.
"""
comment = get_comment_model()
return 'admin:%s_%s_%s' % (
comment._meta.app_label, comment._meta.model_name,
action)
@register.filter
def user_admin_urlname(action):
"""
Return the admin URLs for the user app used.
"""
user = get_user_model()
return 'admin:%s_%s_%s' % (
user._meta.app_label, user._meta.model_name,
action)
@register.inclusion_tag('zinnia/tags/dummy.html')
def zinnia_statistics(template='zinnia/tags/statistics.html'):
"""
Return statistics on the content of Zinnia.
"""
content_type = ContentType.objects.get_for_model(Entry)
discussions = get_comment_model().objects.filter(
content_type=content_type)
entries = Entry.published
categories = Category.objects
tags = tags_published()
authors = Author.published
replies = discussions.filter(
flags=None, is_public=True)
pingbacks = discussions.filter(
flags__flag=PINGBACK, is_public=True)
trackbacks = discussions.filter(
flags__flag=TRACKBACK, is_public=True)
rejects = discussions.filter(is_public=False)
entries_count = entries.count()
replies_count = replies.count()
pingbacks_count = pingbacks.count()
trackbacks_count = trackbacks.count()
if entries_count:
first_entry = entries.order_by('publication_date')[0]
last_entry = entries.latest()
months_count = (last_entry.publication_date -
first_entry.publication_date).days / 31.0
entries_per_month = entries_count / (months_count or 1.0)
comments_per_entry = float(replies_count) / entries_count
linkbacks_per_entry = float(pingbacks_count + trackbacks_count) / \
entries_count
total_words_entry = 0
for e in entries.all():
total_words_entry += e.word_count
words_per_entry = float(total_words_entry) / entries_count
words_per_comment = 0.0
if replies_count:
total_words_comment = 0
for c in replies.all():
total_words_comment += len(c.comment.split())
words_per_comment = float(total_words_comment) / replies_count
else:
words_per_entry = words_per_comment = entries_per_month = \
comments_per_entry = linkbacks_per_entry = 0.0
return {'template': template,
'entries': entries_count,
'categories': categories.count(),
'tags': tags.count(),
'authors': authors.count(),
'comments': replies_count,
'pingbacks': pingbacks_count,
'trackbacks': trackbacks_count,
'rejects': rejects.count(),
'words_per_entry': words_per_entry,
'words_per_comment': words_per_comment,
'entries_per_month': entries_per_month,
'comments_per_entry': comments_per_entry,
'linkbacks_per_entry': linkbacks_per_entry}
|
import unittest2
import socket
from psdash.net import NetIOCounters
class TestNet(unittest2.TestCase):
def setUp(self):
self.io_counter = NetIOCounters()
def test_first_time_return(self):
self.assertEqual(self.io_counter.get(), None)
def test_one_update_gives_defaulted_rates(self):
self.io_counter.update()
name, c = self.io_counter.get().popitem()
self.assertEqual(c['rx_per_sec'], 0)
self.assertEqual(c['tx_per_sec'], 0)
def test_two_updates_gives_rates(self):
self.io_counter.update()
# make sure to actually use the network a bit
socket.getaddrinfo('example.org', 80)
self.io_counter.update()
        for c in self.io_counter.get().values():
if c['rx_per_sec'] > 0 and c['tx_per_sec'] > 0:
break
else:
self.fail("Didn't find any changed network interface")
|
import datetime
import os
DATETIME_FORMAT = '{:%m_%d_%Y_%H_%M_}'
DATETIME_TITLE_FORMAT = '{: %m %d %Y %H %M}'
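# For example, both formats applied to datetime(2016, 5, 3, 14, 30) yield
# '05_03_2016_14_30_' and ' 05 03 2016 14 30' respectively.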
CHART_TITLE_PREFIX = 'Sysbench TPS'
X_LABEL = 'Thread Count'
Y_LABEL = 'TPS'
# This variable controls the vertical lines inside the chart.
# 0 means no vertical lines.
Y_TICS = '100'
DEFAULT_ITERATIONS = '10'
class GnuplotInfo(object):
"""Gnuplot metadata."""
def __init__(self, gnuplot_data_filename,
entries_per_run,
run_uri,
y_max,
iterations=DEFAULT_ITERATIONS,
title=None):
"""Initialize GnuplotInfo object.
Args:
gnuplot_data_filename: filename of TPS data.
entries_per_run: Number of TPS values collected for each run.
run_uri: (string) run identifier.
y_max: maximum y value. Used for y-axis limit.
iterations: number of iterations.
title: (optional, string) Chart title.
"""
self.gnuplot_data_filename = gnuplot_data_filename
self._generate_filenames(run_uri)
self.x_interval = str(entries_per_run)
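    # Round the y-axis limit to the nearest hundred, e.g. y_max=480 -> '500'.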
self.y_height = str(int(100 * round(float(y_max) / 100)))
self.title = title
self.iterations = str(iterations)
def _generate_filenames(self, run_uri):
"""Sets filename (with path) of gnuplot input and chart.
Args:
run_uri: (string) run identifier.
"""
date_string = DATETIME_FORMAT.format(datetime.datetime.now())
date_title_string = DATETIME_TITLE_FORMAT.format(datetime.datetime.now())
self.chart_title = CHART_TITLE_PREFIX + date_title_string
identifier = date_string + run_uri + '_sysbench_run.png'
self.output_chart = os.path.join(
os.path.dirname(__file__), '..', '..', '..', 'charts',
identifier)
self.output_gnuplot_file = self.output_chart + '_gnuplot_input'
def create_file(self):
"""Generates a gnuplot info file.
Returns:
output_file (string): Name of gnuplot output file.
output_chart (string): Name of output chart file.
"""
color = '38761d'
# Titles for the data series
title = self.title or 'Cloud SQL Prod'
output_file = open(self.output_gnuplot_file, 'w')
output_file.write('set terminal pngcairo size 1500,800 '
'enhanced font "Verdana,12"\n')
output_file.write('set output "' + self.output_chart + '"\n')
output_file.write('set multiplot\n')
output_file.write('set grid\n')
output_file.write('set border 4095 ls 0 lc rgb \"black\"\n')
output_file.write('set title (\"' + self.chart_title +
'") font \"aerial, 14\" noenhanced\n')
output_file.write('set xlabel "' + X_LABEL + '"\n')
output_file.write('set ylabel "' + Y_LABEL + '"\n')
# If you want to move the legend to the top left, use this:
output_file.write('set key left top\n')
    if int(self.y_height) > 0:
output_file.write('y=' + self.y_height + '\n')
output_file.write('set yrange [0:y]\n')
output_file.write('set ytics ' + Y_TICS + '\n')
output_file.write('unset xtics\n')
output_file.write('thread=1\n')
output_file.write('x=0\n')
output_file.write('do for [t=1:' + self.iterations + ':+1] {\n')
output_file.write(
'\tset label (sprintf(\"%d\", thread)) at x+20, 0 offset -2\n')
output_file.write(
'\tset arrow from x,0 to x,y nohead ls 0 lc rgb \"blue\"\n')
# TODO: This code assumes thread count increases by 2 times the previous
# number. Future implementation should take a list of thread counts and
# properly handle that here.
output_file.write('\tthread=thread*2\n')
output_file.write('\tx=x+' + self.x_interval + '\n')
output_file.write('}\n')
# plotting data series
output_file.write('plot\\\n')
column = '1'
output_file.write('\"' + self.gnuplot_data_filename + '\" using ' + column +
' with points lc rgb \"#' + color + '\" title \"' + title
+ '\"')
    output_file.close()
    return self.output_gnuplot_file, self.output_chart
|
import asyncio
from collections import OrderedDict
import logging
from satel_integra.satel_integra import AlarmState
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
)
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import (
CONF_ARM_HOME_MODE,
CONF_DEVICE_PARTITIONS,
CONF_ZONE_NAME,
DATA_SATEL,
SIGNAL_PANEL_MESSAGE,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up for Satel Integra alarm panels."""
if not discovery_info:
return
configured_partitions = discovery_info[CONF_DEVICE_PARTITIONS]
controller = hass.data[DATA_SATEL]
devices = []
for partition_num, device_config_data in configured_partitions.items():
zone_name = device_config_data[CONF_ZONE_NAME]
arm_home_mode = device_config_data.get(CONF_ARM_HOME_MODE)
device = SatelIntegraAlarmPanel(
controller, zone_name, arm_home_mode, partition_num
)
devices.append(device)
async_add_entities(devices)
class SatelIntegraAlarmPanel(alarm.AlarmControlPanelEntity):
"""Representation of an AlarmDecoder-based alarm panel."""
def __init__(self, controller, name, arm_home_mode, partition_id):
"""Initialize the alarm panel."""
self._name = name
self._state = None
self._arm_home_mode = arm_home_mode
self._partition_id = partition_id
self._satel = controller
async def async_added_to_hass(self):
"""Update alarm status and register callbacks for future updates."""
_LOGGER.debug("Starts listening for panel messages")
self._update_alarm_status()
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_PANEL_MESSAGE, self._update_alarm_status
)
)
@callback
def _update_alarm_status(self):
"""Handle alarm status update."""
state = self._read_alarm_state()
_LOGGER.debug("Got status update, current status: %s", state)
if state != self._state:
self._state = state
self.async_write_ha_state()
else:
_LOGGER.debug("Ignoring alarm status message, same state")
def _read_alarm_state(self):
"""Read current status of the alarm and translate it into HA status."""
# Default - disarmed:
hass_alarm_status = STATE_ALARM_DISARMED
if not self._satel.connected:
return None
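        # The mapping below is ordered by priority: the first matching state
        # wins, so TRIGGERED takes precedence over ARMED and PENDING states.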
state_map = OrderedDict(
[
(AlarmState.TRIGGERED, STATE_ALARM_TRIGGERED),
(AlarmState.TRIGGERED_FIRE, STATE_ALARM_TRIGGERED),
(AlarmState.ENTRY_TIME, STATE_ALARM_PENDING),
(AlarmState.ARMED_MODE3, STATE_ALARM_ARMED_HOME),
(AlarmState.ARMED_MODE2, STATE_ALARM_ARMED_HOME),
(AlarmState.ARMED_MODE1, STATE_ALARM_ARMED_HOME),
(AlarmState.ARMED_MODE0, STATE_ALARM_ARMED_AWAY),
(AlarmState.EXIT_COUNTDOWN_OVER_10, STATE_ALARM_PENDING),
(AlarmState.EXIT_COUNTDOWN_UNDER_10, STATE_ALARM_PENDING),
]
)
_LOGGER.debug("State map of Satel: %s", self._satel.partition_states)
for satel_state, ha_state in state_map.items():
if (
satel_state in self._satel.partition_states
and self._partition_id in self._satel.partition_states[satel_state]
):
hass_alarm_status = ha_state
break
return hass_alarm_status
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def code_format(self):
"""Return the regex for code format or None if no code is required."""
return alarm.FORMAT_NUMBER
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
if not code:
_LOGGER.debug("Code was empty or None")
return
clear_alarm_necessary = self._state == STATE_ALARM_TRIGGERED
_LOGGER.debug("Disarming, self._state: %s", self._state)
await self._satel.disarm(code, [self._partition_id])
if clear_alarm_necessary:
# Wait 1s before clearing the alarm
await asyncio.sleep(1)
await self._satel.clear_alarm(code, [self._partition_id])
async def async_alarm_arm_away(self, code=None):
"""Send arm away command."""
_LOGGER.debug("Arming away")
if code:
await self._satel.arm(code, [self._partition_id])
async def async_alarm_arm_home(self, code=None):
"""Send arm home command."""
_LOGGER.debug("Arming home")
if code:
await self._satel.arm(code, [self._partition_id], self._arm_home_mode)
|
from collections import OrderedDict
from datetime import timedelta
import logging
import pytest
import voluptuous as vol
from homeassistant.const import ENTITY_MATCH_ALL, ENTITY_MATCH_NONE
import homeassistant.core as ha
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import discovery
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import AsyncMock, Mock, patch
from tests.common import (
MockConfigEntry,
MockEntity,
MockModule,
MockPlatform,
async_fire_time_changed,
mock_entity_platform,
mock_integration,
)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "test_domain"
async def test_setup_loads_platforms(hass):
"""Test the loading of the platforms."""
component_setup = Mock(return_value=True)
platform_setup = Mock(return_value=None)
mock_integration(hass, MockModule("test_component", setup=component_setup))
# mock the dependencies
mock_integration(hass, MockModule("mod2", dependencies=["test_component"]))
mock_entity_platform(hass, "test_domain.mod2", MockPlatform(platform_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
assert not component_setup.called
assert not platform_setup.called
component.setup({DOMAIN: {"platform": "mod2"}})
await hass.async_block_till_done()
assert component_setup.called
assert platform_setup.called
async def test_setup_recovers_when_setup_raises(hass):
"""Test the setup if exceptions are happening."""
platform1_setup = Mock(side_effect=Exception("Broken"))
platform2_setup = Mock(return_value=None)
mock_entity_platform(hass, "test_domain.mod1", MockPlatform(platform1_setup))
mock_entity_platform(hass, "test_domain.mod2", MockPlatform(platform2_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
assert not platform1_setup.called
assert not platform2_setup.called
component.setup(
OrderedDict(
[
(DOMAIN, {"platform": "mod1"}),
(f"{DOMAIN} 2", {"platform": "non_exist"}),
(f"{DOMAIN} 3", {"platform": "mod2"}),
]
)
)
await hass.async_block_till_done()
assert platform1_setup.called
assert platform2_setup.called
@patch(
"homeassistant.helpers.entity_component.EntityComponent.async_setup_platform",
)
@patch("homeassistant.setup.async_setup_component", return_value=True)
async def test_setup_does_discovery(mock_setup_component, mock_setup, hass):
"""Test setup for discovery."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
component.setup({})
discovery.load_platform(
hass, DOMAIN, "platform_test", {"msg": "discovery_info"}, {DOMAIN: {}}
)
await hass.async_block_till_done()
assert mock_setup.called
assert ("platform_test", {}, {"msg": "discovery_info"}) == mock_setup.call_args[0]
@patch("homeassistant.helpers.entity_platform.async_track_time_interval")
async def test_set_scan_interval_via_config(mock_track, hass):
"""Test the setting of the scan interval via configuration."""
def platform_setup(hass, config, add_entities, discovery_info=None):
"""Test the platform setup."""
add_entities([MockEntity(should_poll=True)])
mock_entity_platform(hass, "test_domain.platform", MockPlatform(platform_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
component.setup(
{DOMAIN: {"platform": "platform", "scan_interval": timedelta(seconds=30)}}
)
await hass.async_block_till_done()
assert mock_track.called
assert timedelta(seconds=30) == mock_track.call_args[0][2]
async def test_set_entity_namespace_via_config(hass):
"""Test setting an entity namespace."""
def platform_setup(hass, config, add_entities, discovery_info=None):
"""Test the platform setup."""
add_entities([MockEntity(name="beer"), MockEntity(name=None)])
platform = MockPlatform(platform_setup)
mock_entity_platform(hass, "test_domain.platform", platform)
component = EntityComponent(_LOGGER, DOMAIN, hass)
component.setup({DOMAIN: {"platform": "platform", "entity_namespace": "yummy"}})
await hass.async_block_till_done()
assert sorted(hass.states.async_entity_ids()) == [
"test_domain.yummy_beer",
"test_domain.yummy_unnamed_device",
]
async def test_extract_from_service_available_device(hass):
"""Test the extraction of entity from service and device is available."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities(
[
MockEntity(name="test_1"),
MockEntity(name="test_2", available=False),
MockEntity(name="test_3"),
MockEntity(name="test_4", available=False),
]
)
call_1 = ha.ServiceCall("test", "service", data={"entity_id": ENTITY_MATCH_ALL})
assert ["test_domain.test_1", "test_domain.test_3"] == sorted(
ent.entity_id for ent in (await component.async_extract_from_service(call_1))
)
call_2 = ha.ServiceCall(
"test",
"service",
data={"entity_id": ["test_domain.test_3", "test_domain.test_4"]},
)
assert ["test_domain.test_3"] == sorted(
ent.entity_id for ent in (await component.async_extract_from_service(call_2))
)
async def test_platform_not_ready(hass, legacy_patchable_time):
"""Test that we retry when platform not ready."""
platform1_setup = Mock(side_effect=[PlatformNotReady, PlatformNotReady, None])
mock_integration(hass, MockModule("mod1"))
mock_entity_platform(hass, "test_domain.mod1", MockPlatform(platform1_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_setup({DOMAIN: {"platform": "mod1"}})
await hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 1
assert "test_domain.mod1" not in hass.config.components
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow", return_value=utcnow):
# Should not trigger attempt 2
async_fire_time_changed(hass, utcnow + timedelta(seconds=29))
await hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 1
# Should trigger attempt 2
async_fire_time_changed(hass, utcnow + timedelta(seconds=30))
await hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 2
assert "test_domain.mod1" not in hass.config.components
# This should not trigger attempt 3
async_fire_time_changed(hass, utcnow + timedelta(seconds=59))
await hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 2
# Trigger attempt 3, which succeeds
async_fire_time_changed(hass, utcnow + timedelta(seconds=60))
await hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 3
assert "test_domain.mod1" in hass.config.components
async def test_extract_from_service_fails_if_no_entity_id(hass):
"""Test the extraction of everything from service."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities(
[MockEntity(name="test_1"), MockEntity(name="test_2")]
)
assert (
await component.async_extract_from_service(ha.ServiceCall("test", "service"))
== []
)
assert (
await component.async_extract_from_service(
ha.ServiceCall("test", "service", {"entity_id": ENTITY_MATCH_NONE})
)
== []
)
assert (
await component.async_extract_from_service(
ha.ServiceCall("test", "service", {"area_id": ENTITY_MATCH_NONE})
)
== []
)
async def test_extract_from_service_filter_out_non_existing_entities(hass):
"""Test the extraction of non existing entities from service."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities(
[MockEntity(name="test_1"), MockEntity(name="test_2")]
)
call = ha.ServiceCall(
"test",
"service",
{"entity_id": ["test_domain.test_2", "test_domain.non_exist"]},
)
assert ["test_domain.test_2"] == [
ent.entity_id for ent in await component.async_extract_from_service(call)
]
async def test_extract_from_service_no_group_expand(hass):
"""Test not expanding a group."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([MockEntity(entity_id="group.test_group")])
call = ha.ServiceCall("test", "service", {"entity_id": ["group.test_group"]})
extracted = await component.async_extract_from_service(call, expand_group=False)
assert len(extracted) == 1
assert extracted[0].entity_id == "group.test_group"
async def test_setup_dependencies_platform(hass):
"""Test we setup the dependencies of a platform.
We're explicitly testing that we process dependencies even if a component
with the same name has already been loaded.
"""
mock_integration(
hass, MockModule("test_component", dependencies=["test_component2"])
)
mock_integration(hass, MockModule("test_component2"))
mock_entity_platform(hass, "test_domain.test_component", MockPlatform())
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_setup({DOMAIN: {"platform": "test_component"}})
await hass.async_block_till_done()
assert "test_component" in hass.config.components
assert "test_component2" in hass.config.components
assert "test_domain.test_component" in hass.config.components
async def test_setup_entry(hass):
"""Test setup entry calls async_setup_entry on platform."""
mock_setup_entry = AsyncMock(return_value=True)
mock_entity_platform(
hass,
"test_domain.entry_domain",
MockPlatform(
async_setup_entry=mock_setup_entry, scan_interval=timedelta(seconds=5)
),
)
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain="entry_domain")
assert await component.async_setup_entry(entry)
assert len(mock_setup_entry.mock_calls) == 1
p_hass, p_entry, _ = mock_setup_entry.mock_calls[0][1]
assert p_hass is hass
assert p_entry is entry
assert component._platforms[entry.entry_id].scan_interval == timedelta(seconds=5)
async def test_setup_entry_platform_not_exist(hass):
"""Test setup entry fails if platform does not exist."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain="non_existing")
assert (await component.async_setup_entry(entry)) is False
async def test_setup_entry_fails_duplicate(hass):
"""Test we don't allow setting up a config entry twice."""
mock_setup_entry = AsyncMock(return_value=True)
mock_entity_platform(
hass,
"test_domain.entry_domain",
MockPlatform(async_setup_entry=mock_setup_entry),
)
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain="entry_domain")
assert await component.async_setup_entry(entry)
with pytest.raises(ValueError):
await component.async_setup_entry(entry)
async def test_unload_entry_resets_platform(hass):
"""Test unloading an entry removes all entities."""
mock_setup_entry = AsyncMock(return_value=True)
mock_entity_platform(
hass,
"test_domain.entry_domain",
MockPlatform(async_setup_entry=mock_setup_entry),
)
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain="entry_domain")
assert await component.async_setup_entry(entry)
assert len(mock_setup_entry.mock_calls) == 1
add_entities = mock_setup_entry.mock_calls[0][1][2]
add_entities([MockEntity()])
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids()) == 1
assert await component.async_unload_entry(entry)
assert len(hass.states.async_entity_ids()) == 0
async def test_unload_entry_fails_if_never_loaded(hass):
"""."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain="entry_domain")
with pytest.raises(ValueError):
await component.async_unload_entry(entry)
async def test_update_entity(hass):
"""Test that we can update an entity with the helper."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entity = MockEntity()
entity.async_write_ha_state = Mock()
entity.async_update_ha_state = AsyncMock(return_value=None)
await component.async_add_entities([entity])
# Called as part of async_add_entities
assert len(entity.async_write_ha_state.mock_calls) == 1
await hass.helpers.entity_component.async_update_entity(entity.entity_id)
assert len(entity.async_update_ha_state.mock_calls) == 1
assert entity.async_update_ha_state.mock_calls[-1][1][0] is True
async def test_set_service_race(hass):
"""Test race condition on setting service."""
exception = False
def async_loop_exception_handler(_, _2) -> None:
"""Handle all exception inside the core loop."""
nonlocal exception
exception = True
hass.loop.set_exception_handler(async_loop_exception_handler)
await async_setup_component(hass, "group", {})
component = EntityComponent(_LOGGER, DOMAIN, hass)
for _ in range(2):
hass.async_create_task(component.async_add_entities([MockEntity()]))
await hass.async_block_till_done()
assert not exception
async def test_extract_all_omit_entity_id(hass, caplog):
"""Test extract all with None and *."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities(
[MockEntity(name="test_1"), MockEntity(name="test_2")]
)
call = ha.ServiceCall("test", "service")
assert [] == sorted(
ent.entity_id for ent in await component.async_extract_from_service(call)
)
async def test_extract_all_use_match_all(hass, caplog):
"""Test extract all with None and *."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities(
[MockEntity(name="test_1"), MockEntity(name="test_2")]
)
call = ha.ServiceCall("test", "service", {"entity_id": "all"})
assert ["test_domain.test_1", "test_domain.test_2"] == sorted(
ent.entity_id for ent in await component.async_extract_from_service(call)
)
assert (
"Not passing an entity ID to a service to target all entities is deprecated"
) not in caplog.text
async def test_register_entity_service(hass):
"""Test not expanding a group."""
entity = MockEntity(entity_id=f"{DOMAIN}.entity")
calls = []
@ha.callback
def appender(**kwargs):
calls.append(kwargs)
entity.async_called_by_service = appender
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([entity])
component.async_register_entity_service(
"hello", {"some": str}, "async_called_by_service"
)
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN,
"hello",
{"entity_id": entity.entity_id, "invalid": "data"},
blocking=True,
)
assert len(calls) == 0
await hass.services.async_call(
DOMAIN, "hello", {"entity_id": entity.entity_id, "some": "data"}, blocking=True
)
assert len(calls) == 1
assert calls[0] == {"some": "data"}
await hass.services.async_call(
DOMAIN, "hello", {"entity_id": ENTITY_MATCH_ALL, "some": "data"}, blocking=True
)
assert len(calls) == 2
assert calls[1] == {"some": "data"}
await hass.services.async_call(
DOMAIN, "hello", {"entity_id": ENTITY_MATCH_NONE, "some": "data"}, blocking=True
)
assert len(calls) == 2
await hass.services.async_call(
DOMAIN, "hello", {"area_id": ENTITY_MATCH_NONE, "some": "data"}, blocking=True
)
assert len(calls) == 2
|
import os
import arrow
from jinja2 import Environment, FileSystemLoader, select_autoescape
from lemur.plugins.utils import get_plugin_option
loader = FileSystemLoader(searchpath=os.path.dirname(os.path.realpath(__file__)))
env = Environment(
loader=loader, # nosec: potentially dangerous types esc.
autoescape=select_autoescape(["html", "xml"]),
)
def human_time(time):
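    # Example: human_time("2023-05-01") renders as "Monday, May 1, 2023".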
return arrow.get(time).format("dddd, MMMM D, YYYY")
def interval(options):
return get_plugin_option("interval", options)
def unit(options):
return get_plugin_option("unit", options)
env.filters["time"] = human_time
env.filters["interval"] = interval
env.filters["unit"] = unit
|
import requests_mock
from homeassistant.components.starline import config_flow
TEST_APP_ID = "666"
TEST_APP_SECRET = "appsecret"
TEST_APP_CODE = "appcode"
TEST_APP_TOKEN = "apptoken"
TEST_APP_SLNET = "slnettoken"
TEST_APP_SLID = "slidtoken"
TEST_APP_UID = "123"
TEST_APP_USERNAME = "sluser"
TEST_APP_PASSWORD = "slpassword"
async def test_flow_works(hass):
"""Test that config flow works."""
with requests_mock.Mocker() as mock:
mock.get(
"https://id.starline.ru/apiV3/application/getCode/",
text='{"state": 1, "desc": {"code": "' + TEST_APP_CODE + '"}}',
)
mock.get(
"https://id.starline.ru/apiV3/application/getToken/",
text='{"state": 1, "desc": {"token": "' + TEST_APP_TOKEN + '"}}',
)
mock.post(
"https://id.starline.ru/apiV3/user/login/",
text='{"state": 1, "desc": {"user_token": "' + TEST_APP_SLID + '"}}',
)
mock.post(
"https://developer.starline.ru/json/v2/auth.slid",
text='{"code": 200, "user_id": "' + TEST_APP_UID + '"}',
cookies={"slnet": TEST_APP_SLNET},
)
mock.get(
"https://developer.starline.ru/json/v2/user/{}/user_info".format(
TEST_APP_UID
),
text='{"code": 200, "devices": [{"device_id": "123", "imei": "123", "alias": "123", "battery": "123", "ctemp": "123", "etemp": "123", "fw_version": "123", "gsm_lvl": "123", "phone": "123", "status": "1", "ts_activity": "123", "typename": "123", "balance": {}, "car_state": {}, "car_alr_state": {}, "functions": [], "position": {}}], "shared_devices": []}',
)
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "auth_app"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
config_flow.CONF_APP_ID: TEST_APP_ID,
config_flow.CONF_APP_SECRET: TEST_APP_SECRET,
},
)
assert result["type"] == "form"
assert result["step_id"] == "auth_user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
config_flow.CONF_USERNAME: TEST_APP_USERNAME,
config_flow.CONF_PASSWORD: TEST_APP_PASSWORD,
},
)
assert result["type"] == "create_entry"
assert result["title"] == f"Application {TEST_APP_ID}"
async def test_step_auth_app_code_fails(hass):
"""Test config flow works when app auth code fails."""
with requests_mock.Mocker() as mock:
mock.get(
"https://id.starline.ru/apiV3/application/getCode/", text='{"state": 0}}'
)
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
context={"source": "user"},
data={
config_flow.CONF_APP_ID: TEST_APP_ID,
config_flow.CONF_APP_SECRET: TEST_APP_SECRET,
},
)
assert result["type"] == "form"
assert result["step_id"] == "auth_app"
assert result["errors"] == {"base": "error_auth_app"}
async def test_step_auth_app_token_fails(hass):
"""Test config flow works when app auth token fails."""
with requests_mock.Mocker() as mock:
mock.get(
"https://id.starline.ru/apiV3/application/getCode/",
text='{"state": 1, "desc": {"code": "' + TEST_APP_CODE + '"}}',
)
mock.get(
"https://id.starline.ru/apiV3/application/getToken/", text='{"state": 0}'
)
result = await hass.config_entries.flow.async_init(
config_flow.DOMAIN,
context={"source": "user"},
data={
config_flow.CONF_APP_ID: TEST_APP_ID,
config_flow.CONF_APP_SECRET: TEST_APP_SECRET,
},
)
assert result["type"] == "form"
assert result["step_id"] == "auth_app"
assert result["errors"] == {"base": "error_auth_app"}
async def test_step_auth_user_fails(hass):
"""Test config flow works when user fails."""
with requests_mock.Mocker() as mock:
mock.post("https://id.starline.ru/apiV3/user/login/", text='{"state": 0}')
flow = config_flow.StarlineFlowHandler()
flow.hass = hass
result = await flow.async_step_auth_user(
user_input={
config_flow.CONF_USERNAME: TEST_APP_USERNAME,
config_flow.CONF_PASSWORD: TEST_APP_PASSWORD,
}
)
assert result["type"] == "form"
assert result["step_id"] == "auth_user"
assert result["errors"] == {"base": "error_auth_user"}
|
from datetime import timedelta
import logging
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_TRANSITION,
DOMAIN,
SUPPORT_BRIGHTNESS,
SUPPORT_TRANSITION,
LightEntity,
)
from . import DOMAIN as CASETA_DOMAIN, LutronCasetaDevice
_LOGGER = logging.getLogger(__name__)
def to_lutron_level(level):
"""Convert the given Home Assistant light level (0-255) to Lutron (0-100)."""
return int(round((level * 100) / 255))
def to_hass_level(level):
"""Convert the given Lutron (0-100) light level to Home Assistant (0-255)."""
return int((level * 255) // 100)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Lutron Caseta light platform.
Adds dimmers from the Caseta bridge associated with the config_entry as
light entities.
"""
entities = []
bridge = hass.data[CASETA_DOMAIN][config_entry.entry_id]
light_devices = bridge.get_devices_by_domain(DOMAIN)
for light_device in light_devices:
entity = LutronCasetaLight(light_device, bridge)
entities.append(entity)
async_add_entities(entities, True)
class LutronCasetaLight(LutronCasetaDevice, LightEntity):
"""Representation of a Lutron Light, including dimmable."""
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION
@property
def brightness(self):
"""Return the brightness of the light."""
return to_hass_level(self._device["current_state"])
async def _set_brightness(self, brightness, **kwargs):
args = {}
if ATTR_TRANSITION in kwargs:
args["fade_time"] = timedelta(seconds=kwargs[ATTR_TRANSITION])
await self._smartbridge.set_value(
self.device_id, to_lutron_level(brightness), **args
)
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
brightness = kwargs.pop(ATTR_BRIGHTNESS, 255)
await self._set_brightness(brightness, **kwargs)
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
await self._set_brightness(0, **kwargs)
@property
def is_on(self):
"""Return true if device is on."""
return self._device["current_state"] > 0
async def async_update(self):
"""Call when forcing a refresh of the device."""
self._device = self._smartbridge.get_device_by_id(self.device_id)
_LOGGER.debug(self._device)
|
import os
import platform
import threading
import time
from http.client import HTTPConnection
from distutils.spawn import find_executable
import pytest
from path import Path
from more_itertools import consume
import portend
import cherrypy
from cherrypy._cpcompat import HTTPSConnection
from cherrypy.lib import sessions
from cherrypy.lib import reprconf
from cherrypy.lib.httputil import response_codes
from cherrypy.test import helper
from cherrypy import _json as json
localDir = Path(__file__).dirname()
def http_methods_allowed(methods=['GET', 'HEAD']):
method = cherrypy.request.method.upper()
if method not in methods:
cherrypy.response.headers['Allow'] = ', '.join(methods)
raise cherrypy.HTTPError(405)
cherrypy.tools.allow = cherrypy.Tool('on_start_resource', http_methods_allowed)
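# Handlers opt in to this tool via the 'tools.allow.on' and 'tools.allow.methods'
# config keys (see the restricted() handler below, which only allows GET).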
def setup_server():
@cherrypy.config(**{
'tools.sessions.on': True,
'tools.sessions.storage_class': sessions.RamSession,
'tools.sessions.storage_path': localDir,
'tools.sessions.timeout': (1.0 / 60),
'tools.sessions.clean_freq': (1.0 / 60),
})
class Root:
@cherrypy.expose
def clear(self):
cherrypy.session.cache.clear()
@cherrypy.expose
@cherrypy.tools.json_out()
def data(self):
cherrypy.session['aha'] = 'foo'
return cherrypy.session._data
@cherrypy.expose
def testGen(self):
counter = cherrypy.session.get('counter', 0) + 1
cherrypy.session['counter'] = counter
yield str(counter)
@cherrypy.expose
def testStr(self):
counter = cherrypy.session.get('counter', 0) + 1
cherrypy.session['counter'] = counter
return str(counter)
@cherrypy.expose
@cherrypy.config(**{'tools.sessions.on': False})
def set_session_cls(self, new_cls_name):
new_cls = reprconf.attributes(new_cls_name)
cfg = {'tools.sessions.storage_class': new_cls}
self.__class__._cp_config.update(cfg)
if hasattr(cherrypy, 'session'):
del cherrypy.session
if new_cls.clean_thread:
new_cls.clean_thread.stop()
new_cls.clean_thread.unsubscribe()
del new_cls.clean_thread
@cherrypy.expose
def index(self):
sess = cherrypy.session
c = sess.get('counter', 0) + 1
time.sleep(0.01)
sess['counter'] = c
return str(c)
@cherrypy.expose
def keyin(self, key):
return str(key in cherrypy.session)
@cherrypy.expose
def delete(self):
cherrypy.session.delete()
sessions.expire()
return 'done'
@cherrypy.expose
def delkey(self, key):
del cherrypy.session[key]
return 'OK'
@cherrypy.expose
def redir_target(self):
return self._cp_config['tools.sessions.storage_class'].__name__
@cherrypy.expose
def iredir(self):
raise cherrypy.InternalRedirect('/redir_target')
@cherrypy.expose
@cherrypy.config(**{
'tools.allow.on': True,
'tools.allow.methods': ['GET'],
})
def restricted(self):
return cherrypy.request.method
@cherrypy.expose
def regen(self):
cherrypy.tools.sessions.regenerate()
return 'logged in'
@cherrypy.expose
def length(self):
return str(len(cherrypy.session))
@cherrypy.expose
@cherrypy.config(**{
'tools.sessions.path': '/session_cookie',
'tools.sessions.name': 'temp',
'tools.sessions.persistent': False,
})
def session_cookie(self):
# Must load() to start the clean thread.
cherrypy.session.load()
return cherrypy.session.id
cherrypy.tree.mount(Root())
class SessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
@classmethod
def teardown_class(cls):
"""Clean up sessions."""
super(cls, cls).teardown_class()
consume(
file.remove_p()
for file in localDir.listdir()
if file.basename().startswith(
sessions.FileSession.SESSION_PREFIX
)
)
def test_0_Session(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.RamSession')
self.getPage('/clear')
# Test that a normal request gets the same id in the cookies.
# Note: this wouldn't work if /data didn't load the session.
self.getPage('/data')
assert self.body == b'{"aha": "foo"}'
c = self.cookies[0]
self.getPage('/data', self.cookies)
        assert self.cookies[0] == c
self.getPage('/testStr')
assert self.body == b'1'
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is an 'expires' param
expected_cookie_keys = {'session_id', 'expires', 'Path', 'Max-Age'}
assert set(cookie_parts.keys()) == expected_cookie_keys
self.getPage('/testGen', self.cookies)
assert self.body == b'2'
self.getPage('/testStr', self.cookies)
assert self.body == b'3'
self.getPage('/data', self.cookies)
expected_data = {'counter': 3, 'aha': 'foo'}
assert json.decode(self.body.decode('utf-8')) == expected_data
self.getPage('/length', self.cookies)
assert self.body == b'2'
self.getPage('/delkey?key=counter', self.cookies)
assert self.status_code == 200
self.getPage('/set_session_cls/cherrypy.lib.sessions.FileSession')
self.getPage('/testStr')
assert self.body == b'1'
self.getPage('/testGen', self.cookies)
assert self.body == b'2'
self.getPage('/testStr', self.cookies)
assert self.body == b'3'
self.getPage('/delkey?key=counter', self.cookies)
assert self.status_code == 200
# Wait for the session.timeout (1 second)
time.sleep(2)
self.getPage('/')
assert self.body == b'1'
self.getPage('/length', self.cookies)
assert self.body == b'1'
# Test session __contains__
self.getPage('/keyin?key=counter', self.cookies)
assert self.body == b'True'
cookieset1 = self.cookies
# Make a new session and test __len__ again
self.getPage('/')
self.getPage('/length', self.cookies)
assert self.body == b'2'
# Test session delete
self.getPage('/delete', self.cookies)
assert self.body == b'done'
self.getPage('/delete', cookieset1)
assert self.body == b'done'
def f():
return [
x
for x in os.listdir(localDir)
if x.startswith('session-') and not x.endswith('.lock')
]
assert f() == []
# Wait for the cleanup thread to delete remaining session files
self.getPage('/')
assert f() != []
time.sleep(2)
assert f() == []
def test_1_Ram_Concurrency(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.RamSession')
self._test_Concurrency()
def test_2_File_Concurrency(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.FileSession')
self._test_Concurrency()
def _test_Concurrency(self):
client_thread_count = 5
request_count = 30
# Get initial cookie
self.getPage('/')
assert self.body == b'1'
cookies = self.cookies
data_dict = {}
errors = []
def request(index):
if self.scheme == 'https':
c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT))
else:
c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
for i in range(request_count):
c.putrequest('GET', '/')
for k, v in cookies:
c.putheader(k, v)
c.endheaders()
response = c.getresponse()
body = response.read()
if response.status != 200 or not body.isdigit():
errors.append((response.status, body))
else:
data_dict[index] = max(data_dict[index], int(body))
# Uncomment the following line to prove threads overlap.
# sys.stdout.write("%d " % index)
# Start <request_count> requests from each of
# <client_thread_count> concurrent clients
ts = []
for c in range(client_thread_count):
data_dict[c] = 0
t = threading.Thread(target=request, args=(c,))
ts.append(t)
t.start()
for t in ts:
t.join()
hitcount = max(data_dict.values())
expected = 1 + (client_thread_count * request_count)
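        # One initial request plus client_thread_count * request_count
        # follow-ups: 1 + 5 * 30 == 151 expected hits on the shared counter.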
for e in errors:
print(e)
assert len(errors) == 0
assert hitcount == expected
def test_3_Redirect(self):
# Start a new session
self.getPage('/testStr')
self.getPage('/iredir', self.cookies)
assert self.body == b'FileSession'
def test_4_File_deletion(self):
# Start a new session
self.getPage('/testStr')
# Delete the session file manually and retry.
id = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
path = os.path.join(localDir, 'session-' + id)
os.unlink(path)
self.getPage('/testStr', self.cookies)
def test_5_Error_paths(self):
self.getPage('/unknown/page')
self.assertErrorPage(404, "The path '/unknown/page' was not found.")
# Note: this path is *not* the same as above. The above
# takes a normal route through the session code; this one
# skips the session code's before_handler and only calls
# before_finalize (save) and on_end (close). So the session
# code has to survive calling save/close without init.
self.getPage('/restricted', self.cookies, method='POST')
self.assertErrorPage(405, response_codes[405][1])
def test_6_regenerate(self):
self.getPage('/testStr')
# grab the cookie ID
id1 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
self.getPage('/regen')
assert self.body == b'logged in'
id2 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
assert id1 != id2
self.getPage('/testStr')
# grab the cookie ID
id1 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
self.getPage('/testStr',
headers=[
('Cookie',
'session_id=maliciousid; '
'expires=Sat, 27 Oct 2017 04:18:28 GMT; Path=/;')])
id2 = self.cookies[0][1].split(';', 1)[0].split('=', 1)[1]
assert id1 != id2
assert id2 != 'maliciousid'
def test_7_session_cookies(self):
self.getPage('/set_session_cls/cherrypy.lib.sessions.RamSession')
self.getPage('/clear')
self.getPage('/session_cookie')
# grab the cookie ID
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is no 'expires' param
assert set(cookie_parts.keys()) == {'temp', 'Path'}
id1 = cookie_parts['temp']
assert list(sessions.RamSession.cache) == [id1]
# Send another request in the same "browser session".
self.getPage('/session_cookie', self.cookies)
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is no 'expires' param
assert set(cookie_parts.keys()) == {'temp', 'Path'}
assert self.body.decode('utf-8') == id1
assert list(sessions.RamSession.cache) == [id1]
# Simulate a browser close by just not sending the cookies
self.getPage('/session_cookie')
# grab the cookie ID
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(';')])
# Assert there is no 'expires' param
assert set(cookie_parts.keys()) == {'temp', 'Path'}
# Assert a new id has been generated...
id2 = cookie_parts['temp']
assert id1 != id2
assert set(sessions.RamSession.cache.keys()) == {id1, id2}
# Wait for the session.timeout on both sessions
time.sleep(2.5)
cache = list(sessions.RamSession.cache)
if cache:
if cache == [id2]:
self.fail('The second session did not time out.')
else:
                self.fail('Unknown session id in cache: %r' % (cache,))
def test_8_Ram_Cleanup(self):
def lock():
s1 = sessions.RamSession()
s1.acquire_lock()
time.sleep(1)
s1.release_lock()
t = threading.Thread(target=lock)
t.start()
start = time.time()
while not sessions.RamSession.locks and time.time() - start < 5:
time.sleep(0.01)
assert len(sessions.RamSession.locks) == 1, 'Lock not acquired'
s2 = sessions.RamSession()
s2.clean_up()
msg = 'Clean up should not remove active lock'
assert len(sessions.RamSession.locks) == 1, msg
t.join()
def is_memcached_present():
executable = find_executable('memcached')
return bool(executable)
@pytest.fixture(scope='session')
def memcached_server_present():
is_memcached_present() or pytest.skip('memcached not available')
@pytest.fixture()
def memcached_client_present():
pytest.importorskip('memcache')
@pytest.fixture(scope='session')
def memcached_instance(request, watcher_getter, memcached_server_present):
"""
Start up an instance of memcached.
"""
port = portend.find_available_local_port()
def is_occupied():
try:
portend.Checker().assert_free('localhost', port)
except Exception:
return True
return False
proc = watcher_getter(
name='memcached',
arguments=['-p', str(port)],
checker=is_occupied,
request=request,
)
return locals()
@pytest.fixture
def memcached_configured(
memcached_instance, monkeypatch,
memcached_client_present,
):
server = 'localhost:{port}'.format_map(memcached_instance)
monkeypatch.setattr(
sessions.MemcachedSession,
'servers',
[server],
)
@pytest.mark.skipif(
platform.system() == 'Windows',
reason='pytest-services helper does not work under Windows',
)
@pytest.mark.usefixtures('memcached_configured')
class MemcachedSessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_0_Session(self):
self.getPage(
'/set_session_cls/cherrypy.lib.sessions.MemcachedSession'
)
self.getPage('/testStr')
assert self.body == b'1'
self.getPage('/testGen', self.cookies)
assert self.body == b'2'
self.getPage('/testStr', self.cookies)
assert self.body == b'3'
self.getPage('/length', self.cookies)
self.assertErrorPage(500)
assert b'NotImplementedError' in self.body
self.getPage('/delkey?key=counter', self.cookies)
assert self.status_code == 200
# Wait for the session.timeout (1 second)
time.sleep(1.25)
self.getPage('/')
assert self.body == b'1'
# Test session __contains__
self.getPage('/keyin?key=counter', self.cookies)
assert self.body == b'True'
# Test session delete
self.getPage('/delete', self.cookies)
assert self.body == b'done'
def test_1_Concurrency(self):
client_thread_count = 5
request_count = 30
# Get initial cookie
self.getPage('/')
assert self.body == b'1'
cookies = self.cookies
data_dict = {}
def request(index):
for i in range(request_count):
self.getPage('/', cookies)
# Uncomment the following line to prove threads overlap.
# sys.stdout.write("%d " % index)
if not self.body.isdigit():
self.fail(self.body)
data_dict[index] = int(self.body)
# Start <request_count> concurrent requests from
# each of <client_thread_count> clients
ts = []
for c in range(client_thread_count):
data_dict[c] = 0
t = threading.Thread(target=request, args=(c,))
ts.append(t)
t.start()
for t in ts:
t.join()
hitcount = max(data_dict.values())
expected = 1 + (client_thread_count * request_count)
assert hitcount == expected
def test_3_Redirect(self):
# Start a new session
self.getPage('/testStr')
self.getPage('/iredir', self.cookies)
assert self.body == b'MemcachedSession'
def test_5_Error_paths(self):
self.getPage('/unknown/page')
self.assertErrorPage(
404, "The path '/unknown/page' was not found.")
# Note: this path is *not* the same as above. The above
# takes a normal route through the session code; this one
# skips the session code's before_handler and only calls
# before_finalize (save) and on_end (close). So the session
# code has to survive calling save/close without init.
self.getPage('/restricted', self.cookies, method='POST')
self.assertErrorPage(405, response_codes[405][1])
|
import logging
import voluptuous as vol
from homeassistant.components.switch import (
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
SwitchEntity,
)
from homeassistant.const import (
CONF_COMMAND_OFF,
CONF_COMMAND_ON,
CONF_COMMAND_STATE,
CONF_FRIENDLY_NAME,
CONF_SWITCHES,
CONF_VALUE_TEMPLATE,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.reload import setup_reload_service
from . import call_shell_with_timeout, check_output_or_log
from .const import CONF_COMMAND_TIMEOUT, DEFAULT_TIMEOUT, DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
SWITCH_SCHEMA = vol.Schema(
{
vol.Optional(CONF_COMMAND_OFF, default="true"): cv.string,
vol.Optional(CONF_COMMAND_ON, default="true"): cv.string,
vol.Optional(CONF_COMMAND_STATE): cv.string,
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_COMMAND_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_SWITCHES): cv.schema_with_slug_keys(SWITCH_SCHEMA)}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Find and return switches controlled by shell commands."""
setup_reload_service(hass, DOMAIN, PLATFORMS)
devices = config.get(CONF_SWITCHES, {})
switches = []
for object_id, device_config in devices.items():
value_template = device_config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
switches.append(
CommandSwitch(
hass,
object_id,
device_config.get(CONF_FRIENDLY_NAME, object_id),
device_config[CONF_COMMAND_ON],
device_config[CONF_COMMAND_OFF],
device_config.get(CONF_COMMAND_STATE),
value_template,
device_config[CONF_COMMAND_TIMEOUT],
)
)
if not switches:
_LOGGER.error("No switches added")
return False
add_entities(switches)
class CommandSwitch(SwitchEntity):
"""Representation a switch that can be toggled using shell commands."""
def __init__(
self,
hass,
object_id,
friendly_name,
command_on,
command_off,
command_state,
value_template,
timeout,
):
"""Initialize the switch."""
self._hass = hass
self.entity_id = ENTITY_ID_FORMAT.format(object_id)
self._name = friendly_name
self._state = False
self._command_on = command_on
self._command_off = command_off
self._command_state = command_state
self._value_template = value_template
self._timeout = timeout
def _switch(self, command):
"""Execute the actual commands."""
_LOGGER.info("Running command: %s", command)
success = call_shell_with_timeout(command, self._timeout) == 0
if not success:
_LOGGER.error("Command failed: %s", command)
return success
def _query_state_value(self, command):
"""Execute state command for return value."""
_LOGGER.info("Running state value command: %s", command)
return check_output_or_log(command, self._timeout)
def _query_state_code(self, command):
"""Execute state command for return code."""
_LOGGER.info("Running state code command: %s", command)
return (
call_shell_with_timeout(command, self._timeout, log_return_code=False) == 0
)
@property
def should_poll(self):
"""Only poll if we have state command."""
return self._command_state is not None
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def assumed_state(self):
"""Return true if we do optimistic updates."""
return self._command_state is None
def _query_state(self):
"""Query for state."""
if not self._command_state:
_LOGGER.error("No state command specified")
return
if self._value_template:
return self._query_state_value(self._command_state)
return self._query_state_code(self._command_state)
def update(self):
"""Update device state."""
if self._command_state:
payload = str(self._query_state())
if self._value_template:
payload = self._value_template.render_with_possible_json_value(payload)
self._state = payload.lower() == "true"
def turn_on(self, **kwargs):
"""Turn the device on."""
if self._switch(self._command_on) and not self._command_state:
self._state = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
if self._switch(self._command_off) and not self._command_state:
self._state = False
self.schedule_update_ha_state()
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from ntp import NtpCollector
##########################################################################
class TestNtpCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NtpCollector', {})
self.collector = NtpCollector(config, None)
def test_import(self):
self.assertTrue(NtpCollector)
@patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
ntpdate_data = Mock(
return_value=(self.getFixture('ntpdate').getvalue(), None))
collector_mock = patch.object(NtpCollector,
'run_command',
ntpdate_data)
collector_mock.start()
self.collector.collect()
collector_mock.stop()
metrics = {
'server.count': 4,
'offset.milliseconds': 0
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
    def test_should_work_with_real_data_and_custom_config(self, publish_mock):
config = get_collector_config('NtpCollector', {
'time_scale': 'seconds',
'precision': 3,
})
self.collector = NtpCollector(config, None)
ntpdate_data = Mock(
return_value=(self.getFixture('ntpdate').getvalue(), None))
collector_mock = patch.object(NtpCollector,
'run_command',
ntpdate_data)
collector_mock.start()
self.collector.collect()
collector_mock.stop()
metrics = {
'server.count': 4,
'offset.seconds': -0.000128
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_fail_gracefully(self, publish_mock):
ntpdate_data = Mock(return_value=('', None))
collector_mock = patch.object(NtpCollector,
'run_command',
ntpdate_data)
collector_mock.start()
self.collector.collect()
collector_mock.stop()
self.assertPublishedMany(publish_mock, {})
##########################################################################
if __name__ == "__main__":
unittest.main()
|
import yaml
def test_trivia_lists():
from redbot.cogs.trivia import get_core_lists
list_names = get_core_lists()
assert list_names
problem_lists = []
for l in list_names:
with l.open(encoding="utf-8") as f:
try:
dict_ = yaml.safe_load(f)
except yaml.error.YAMLError as e:
problem_lists.append((l.stem, "YAML error:\n{!s}".format(e)))
else:
for key in list(dict_.keys()):
if key == "CONFIG":
if not isinstance(dict_[key], dict):
problem_lists.append((l.stem, "CONFIG is not a dict"))
elif key == "AUTHOR":
if not isinstance(dict_[key], str):
problem_lists.append((l.stem, "AUTHOR is not a string"))
else:
if not isinstance(dict_[key], list):
problem_lists.append(
(l.stem, "The answers for '{}' are not a list".format(key))
)
if problem_lists:
msg = ""
for l in problem_lists:
msg += "{}: {}\n".format(l[0], l[1])
raise TypeError("The following lists contain errors:\n" + msg)
|
from collections import OrderedDict, defaultdict
import json
from typing import Dict
from .model import Config, Integration
BASE = """
\"\"\"Automatically generated by hassfest.
To update, run python3 -m script.hassfest
\"\"\"
# fmt: off
SSDP = {}
""".strip()
def sort_dict(value):
"""Sort a dictionary."""
return OrderedDict((key, value[key]) for key in sorted(value))
def generate_and_validate(integrations: Dict[str, Integration]):
"""Validate and generate ssdp data."""
data = defaultdict(list)
for domain in sorted(integrations):
integration = integrations[domain]
if not integration.manifest:
continue
ssdp = integration.manifest.get("ssdp")
if not ssdp:
continue
for matcher in ssdp:
data[domain].append(sort_dict(matcher))
return BASE.format(json.dumps(data, indent=4))
def validate(integrations: Dict[str, Integration], config: Config):
"""Validate ssdp file."""
ssdp_path = config.root / "homeassistant/generated/ssdp.py"
config.cache["ssdp"] = content = generate_and_validate(integrations)
if config.specific_integrations:
return
with open(str(ssdp_path)) as fp:
if fp.read().strip() != content:
config.add_error(
"ssdp",
"File ssdp.py is not up to date. Run python3 -m script.hassfest",
fixable=True,
)
return
def generate(integrations: Dict[str, Integration], config: Config):
"""Generate ssdp file."""
ssdp_path = config.root / "homeassistant/generated/ssdp.py"
with open(str(ssdp_path), "w") as fp:
fp.write(f"{config.cache['ssdp']}\n")
|
import logging
import re
import voluptuous as vol
from homeassistant.components import mqtt
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_CUSTOM_BYPASS,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.const import (
CONF_CODE,
CONF_DEVICE,
CONF_NAME,
CONF_UNIQUE_ID,
CONF_VALUE_TEMPLATE,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMED,
STATE_ALARM_DISARMING,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import (
ATTR_DISCOVERY_HASH,
CONF_COMMAND_TOPIC,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
DOMAIN,
PLATFORMS,
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
subscription,
)
from .debug_info import log_messages
from .discovery import MQTT_DISCOVERY_NEW, clear_discovery_hash
_LOGGER = logging.getLogger(__name__)
CONF_CODE_ARM_REQUIRED = "code_arm_required"
CONF_CODE_DISARM_REQUIRED = "code_disarm_required"
CONF_PAYLOAD_DISARM = "payload_disarm"
CONF_PAYLOAD_ARM_HOME = "payload_arm_home"
CONF_PAYLOAD_ARM_AWAY = "payload_arm_away"
CONF_PAYLOAD_ARM_NIGHT = "payload_arm_night"
CONF_PAYLOAD_ARM_CUSTOM_BYPASS = "payload_arm_custom_bypass"
CONF_COMMAND_TEMPLATE = "command_template"
DEFAULT_COMMAND_TEMPLATE = "{{action}}"
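# The default template passes the action payload (e.g. "ARM_HOME") through
# unchanged; a custom command_template can presumably combine the action with
# the code before it is published to the command topic.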
DEFAULT_ARM_NIGHT = "ARM_NIGHT"
DEFAULT_ARM_AWAY = "ARM_AWAY"
DEFAULT_ARM_HOME = "ARM_HOME"
DEFAULT_ARM_CUSTOM_BYPASS = "ARM_CUSTOM_BYPASS"
DEFAULT_DISARM = "DISARM"
DEFAULT_NAME = "MQTT Alarm"
PLATFORM_SCHEMA = (
mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_CODE): cv.string,
vol.Optional(CONF_CODE_ARM_REQUIRED, default=True): cv.boolean,
vol.Optional(CONF_CODE_DISARM_REQUIRED, default=True): cv.boolean,
vol.Optional(
CONF_COMMAND_TEMPLATE, default=DEFAULT_COMMAND_TEMPLATE
): cv.template,
vol.Required(CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PAYLOAD_ARM_AWAY, default=DEFAULT_ARM_AWAY): cv.string,
vol.Optional(CONF_PAYLOAD_ARM_HOME, default=DEFAULT_ARM_HOME): cv.string,
vol.Optional(CONF_PAYLOAD_ARM_NIGHT, default=DEFAULT_ARM_NIGHT): cv.string,
vol.Optional(
CONF_PAYLOAD_ARM_CUSTOM_BYPASS, default=DEFAULT_ARM_CUSTOM_BYPASS
): cv.string,
vol.Optional(CONF_PAYLOAD_DISARM, default=DEFAULT_DISARM): cv.string,
vol.Optional(CONF_RETAIN, default=mqtt.DEFAULT_RETAIN): cv.boolean,
vol.Required(CONF_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}
)
.extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
.extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
)
async def async_setup_platform(
hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
):
"""Set up MQTT alarm control panel through configuration.yaml."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
await _async_setup_entity(hass, config, async_add_entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT alarm control panel dynamically through MQTT discovery."""
async def async_discover(discovery_payload):
"""Discover and add an MQTT alarm control panel."""
discovery_data = discovery_payload.discovery_data
try:
config = PLATFORM_SCHEMA(discovery_payload)
await _async_setup_entity(
hass, config, async_add_entities, config_entry, discovery_data
)
except Exception:
clear_discovery_hash(hass, discovery_data[ATTR_DISCOVERY_HASH])
raise
async_dispatcher_connect(
hass, MQTT_DISCOVERY_NEW.format(alarm.DOMAIN, "mqtt"), async_discover
)
async def _async_setup_entity(
hass, config, async_add_entities, config_entry=None, discovery_data=None
):
"""Set up the MQTT Alarm Control Panel platform."""
async_add_entities([MqttAlarm(hass, config, config_entry, discovery_data)])
class MqttAlarm(
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
alarm.AlarmControlPanelEntity,
):
"""Representation of a MQTT alarm status."""
def __init__(self, hass, config, config_entry, discovery_data):
"""Init the MQTT Alarm Control Panel."""
self.hass = hass
self._state = None
self._unique_id = config.get(CONF_UNIQUE_ID)
self._sub_state = None
# Load config
self._setup_from_config(config)
device_config = config.get(CONF_DEVICE)
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_data, self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
async def async_added_to_hass(self):
"""Subscribe mqtt events."""
await super().async_added_to_hass()
await self._subscribe_topics()
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = PLATFORM_SCHEMA(discovery_payload)
self._setup_from_config(config)
await self.attributes_discovery_update(config)
await self.availability_discovery_update(config)
await self.device_info_discovery_update(config)
await self._subscribe_topics()
self.async_write_ha_state()
def _setup_from_config(self, config):
self._config = config
value_template = self._config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = self.hass
command_template = self._config[CONF_COMMAND_TEMPLATE]
command_template.hass = self.hass
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
@callback
@log_messages(self.hass, self.entity_id)
def message_received(msg):
"""Run when new MQTT message has been received."""
payload = msg.payload
value_template = self._config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
payload = value_template.async_render_with_possible_json_value(
msg.payload, self._state
)
if payload not in (
STATE_ALARM_DISARMED,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_PENDING,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMING,
STATE_ALARM_TRIGGERED,
):
_LOGGER.warning("Received unexpected payload: %s", msg.payload)
return
self._state = payload
self.async_write_ha_state()
self._sub_state = await subscription.async_subscribe_topics(
self.hass,
self._sub_state,
{
"state_topic": {
"topic": self._config[CONF_STATE_TOPIC],
"msg_callback": message_received,
"qos": self._config[CONF_QOS],
}
},
)
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._sub_state = await subscription.async_unsubscribe_topics(
self.hass, self._sub_state
)
await MqttAttributes.async_will_remove_from_hass(self)
await MqttAvailability.async_will_remove_from_hass(self)
await MqttDiscoveryUpdate.async_will_remove_from_hass(self)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._config[CONF_NAME]
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return (
SUPPORT_ALARM_ARM_HOME
| SUPPORT_ALARM_ARM_AWAY
| SUPPORT_ALARM_ARM_NIGHT
| SUPPORT_ALARM_ARM_CUSTOM_BYPASS
)
@property
def code_format(self):
"""Return one or more digits/characters."""
code = self._config.get(CONF_CODE)
if code is None:
return None
if isinstance(code, str) and re.search("^\\d+$", code):
return alarm.FORMAT_NUMBER
return alarm.FORMAT_TEXT
@property
def code_arm_required(self):
"""Whether the code is required for arm actions."""
code_required = self._config.get(CONF_CODE_ARM_REQUIRED)
return code_required
async def async_alarm_disarm(self, code=None):
"""Send disarm command.
This method is a coroutine.
"""
code_required = self._config[CONF_CODE_DISARM_REQUIRED]
if code_required and not self._validate_code(code, "disarming"):
return
payload = self._config[CONF_PAYLOAD_DISARM]
self._publish(code, payload)
async def async_alarm_arm_home(self, code=None):
"""Send arm home command.
This method is a coroutine.
"""
code_required = self._config[CONF_CODE_ARM_REQUIRED]
if code_required and not self._validate_code(code, "arming home"):
return
action = self._config[CONF_PAYLOAD_ARM_HOME]
self._publish(code, action)
async def async_alarm_arm_away(self, code=None):
"""Send arm away command.
This method is a coroutine.
"""
code_required = self._config[CONF_CODE_ARM_REQUIRED]
if code_required and not self._validate_code(code, "arming away"):
return
action = self._config[CONF_PAYLOAD_ARM_AWAY]
self._publish(code, action)
async def async_alarm_arm_night(self, code=None):
"""Send arm night command.
This method is a coroutine.
"""
code_required = self._config[CONF_CODE_ARM_REQUIRED]
if code_required and not self._validate_code(code, "arming night"):
return
action = self._config[CONF_PAYLOAD_ARM_NIGHT]
self._publish(code, action)
async def async_alarm_arm_custom_bypass(self, code=None):
"""Send arm custom bypass command.
This method is a coroutine.
"""
code_required = self._config[CONF_CODE_ARM_REQUIRED]
if code_required and not self._validate_code(code, "arming custom bypass"):
return
action = self._config[CONF_PAYLOAD_ARM_CUSTOM_BYPASS]
self._publish(code, action)
def _publish(self, code, action):
"""Publish via mqtt."""
command_template = self._config[CONF_COMMAND_TEMPLATE]
values = {"action": action, "code": code}
payload = command_template.async_render(**values, parse_result=False)
mqtt.async_publish(
self.hass,
self._config[CONF_COMMAND_TOPIC],
payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
def _validate_code(self, code, state):
"""Validate given code."""
conf_code = self._config.get(CONF_CODE)
check = conf_code is None or code == conf_code
if not check:
_LOGGER.warning("Wrong code entered for %s", state)
return check
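# Illustrative sketch (standalone, not part of the platform): how a command_template such
# as the default "{{action}}" turns the action/code pair passed to _publish() into the MQTT
# payload. Plain Jinja2 is used here instead of Home Assistant's Template helper.
if __name__ == "__main__":
    from jinja2 import Template

    example = Template('{"action": "{{ action }}", "code": "{{ code }}"}')
    print(example.render(action="ARM_AWAY", code="1234"))
    # -> {"action": "ARM_AWAY", "code": "1234"}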
|
import re
import requests_mock
from homeassistant.components.sigfox.sensor import (
API_URL,
CONF_API_LOGIN,
CONF_API_PASSWORD,
)
from homeassistant.setup import async_setup_component
TEST_API_LOGIN = "foo"
TEST_API_PASSWORD = "ebcd1234"
VALID_CONFIG = {
"sensor": {
"platform": "sigfox",
CONF_API_LOGIN: TEST_API_LOGIN,
CONF_API_PASSWORD: TEST_API_PASSWORD,
}
}
VALID_MESSAGE = """
{"data":[{
"time":1521879720,
"data":"7061796c6f6164",
"rinfos":[{"lat":"0.0","lng":"0.0"}],
"snr":"50.0"}]}
"""
async def test_invalid_credentials(hass):
"""Test for invalid credentials."""
with requests_mock.Mocker() as mock_req:
url = re.compile(API_URL + "devicetypes")
mock_req.get(url, text="{}", status_code=401)
assert await async_setup_component(hass, "sensor", VALID_CONFIG)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids()) == 0
async def test_valid_credentials(hass):
"""Test for valid credentials."""
with requests_mock.Mocker() as mock_req:
url1 = re.compile(API_URL + "devicetypes")
mock_req.get(url1, text='{"data":[{"id":"fake_type"}]}', status_code=200)
url2 = re.compile(API_URL + "devicetypes/fake_type/devices")
mock_req.get(url2, text='{"data":[{"id":"fake_id"}]}')
url3 = re.compile(API_URL + "devices/fake_id/messages*")
mock_req.get(url3, text=VALID_MESSAGE)
assert await async_setup_component(hass, "sensor", VALID_CONFIG)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids()) == 1
state = hass.states.get("sensor.sigfox_fake_id")
assert state.state == "payload"
assert state.attributes.get("snr") == "50.0"
|
import tensorflow as tf
from scipy.misc import imread, imresize
import numpy as np
# Quantize
use_quantized_graph = True
# Read image
img = imread("/home/zehao/Desktop/dog.png")
img = imresize(img, (224, 224, 3))
img = img.astype(np.float32)
img = np.expand_dims(img, 0)
# Preprocess
img = img / 255.
img = img - 0.5
img = img * 2.
# Graph
if use_quantized_graph:
graph_filename = "../mobilenet-model/with_placeholder/quantized_graph.pb"
else:
graph_filename = "../mobilenet-model/with_placeholder/frozen_graph.pb"
# Create labels dict from labels.txt
labels_file = "/home/zehao/Dataset/imagenet-data/labels.txt"
labels_dict = {}
with open(labels_file, 'r') as f:
for kv in [d.strip().split(':') for d in f]:
labels_dict[int(kv[0])] = kv[1]
# Create a graph def object to read the graph
with tf.gfile.GFile(graph_filename, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
# Construct the graph and import the graph from graphdef
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def)
# We define the input and output node we will feed in
input_node = graph.get_tensor_by_name('import/MobileNet/input_images:0')
output_node = graph.get_tensor_by_name('import/MobileNet/Predictions/Softmax:0')
with tf.Session() as sess:
predictions = sess.run(output_node, feed_dict={input_node: img})[0]
top_5_predictions = predictions.argsort()[-5:][::-1]
top_5_probabilities = predictions[top_5_predictions]
prediction_names = [labels_dict[i] for i in top_5_predictions]
    for i in range(len(prediction_names)):
        print('Prediction: %s, Probability: %s \n'
              % (prediction_names[i], top_5_probabilities[i]))
|
from datetime import timedelta
import logging
from typing import Optional
import voluptuous as vol
from homeassistant.components import mqtt, sensor
from homeassistant.components.sensor import DEVICE_CLASSES_SCHEMA
from homeassistant.const import (
CONF_DEVICE,
CONF_DEVICE_CLASS,
CONF_FORCE_UPDATE,
CONF_ICON,
CONF_NAME,
CONF_UNIQUE_ID,
CONF_UNIT_OF_MEASUREMENT,
CONF_VALUE_TEMPLATE,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.util import dt as dt_util
from . import (
ATTR_DISCOVERY_HASH,
CONF_QOS,
CONF_STATE_TOPIC,
DOMAIN,
PLATFORMS,
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
subscription,
)
from .debug_info import log_messages
from .discovery import MQTT_DISCOVERY_NEW, clear_discovery_hash
_LOGGER = logging.getLogger(__name__)
CONF_EXPIRE_AFTER = "expire_after"
DEFAULT_NAME = "MQTT Sensor"
DEFAULT_FORCE_UPDATE = False
PLATFORM_SCHEMA = (
mqtt.MQTT_RO_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_EXPIRE_AFTER): cv.positive_int,
vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
}
)
.extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
.extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
)
async def async_setup_platform(
hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
):
"""Set up MQTT sensors through configuration.yaml."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
await _async_setup_entity(hass, config, async_add_entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT sensors dynamically through MQTT discovery."""
async def async_discover_sensor(discovery_payload):
"""Discover and add a discovered MQTT sensor."""
discovery_data = discovery_payload.discovery_data
try:
config = PLATFORM_SCHEMA(discovery_payload)
await _async_setup_entity(
hass, config, async_add_entities, config_entry, discovery_data
)
except Exception:
clear_discovery_hash(hass, discovery_data[ATTR_DISCOVERY_HASH])
raise
async_dispatcher_connect(
hass, MQTT_DISCOVERY_NEW.format(sensor.DOMAIN, "mqtt"), async_discover_sensor
)
async def _async_setup_entity(
hass, config: ConfigType, async_add_entities, config_entry=None, discovery_data=None
):
"""Set up MQTT sensor."""
async_add_entities([MqttSensor(hass, config, config_entry, discovery_data)])
class MqttSensor(
MqttAttributes, MqttAvailability, MqttDiscoveryUpdate, MqttEntityDeviceInfo, Entity
):
"""Representation of a sensor that can be updated using MQTT."""
def __init__(self, hass, config, config_entry, discovery_data):
"""Initialize the sensor."""
self.hass = hass
self._unique_id = config.get(CONF_UNIQUE_ID)
self._state = None
self._sub_state = None
self._expiration_trigger = None
expire_after = config.get(CONF_EXPIRE_AFTER)
if expire_after is not None and expire_after > 0:
self._expired = True
else:
self._expired = None
# Load config
self._setup_from_config(config)
device_config = config.get(CONF_DEVICE)
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_data, self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
async def async_added_to_hass(self):
"""Subscribe to MQTT events."""
await super().async_added_to_hass()
await self._subscribe_topics()
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = PLATFORM_SCHEMA(discovery_payload)
self._setup_from_config(config)
await self.attributes_discovery_update(config)
await self.availability_discovery_update(config)
await self.device_info_discovery_update(config)
await self._subscribe_topics()
self.async_write_ha_state()
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
self._config = config
template = self._config.get(CONF_VALUE_TEMPLATE)
if template is not None:
template.hass = self.hass
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
@callback
@log_messages(self.hass, self.entity_id)
def message_received(msg):
"""Handle new MQTT messages."""
payload = msg.payload
# auto-expire enabled?
expire_after = self._config.get(CONF_EXPIRE_AFTER)
if expire_after is not None and expire_after > 0:
                # Receiving a message means the value is not expired, so clear the
                # flag and restart the expiration timer below.
self._expired = False
# Reset old trigger
if self._expiration_trigger:
self._expiration_trigger()
self._expiration_trigger = None
# Set new trigger
expiration_at = dt_util.utcnow() + timedelta(seconds=expire_after)
self._expiration_trigger = async_track_point_in_utc_time(
self.hass, self._value_is_expired, expiration_at
)
template = self._config.get(CONF_VALUE_TEMPLATE)
if template is not None:
payload = template.async_render_with_possible_json_value(
payload, self._state
)
self._state = payload
self.async_write_ha_state()
self._sub_state = await subscription.async_subscribe_topics(
self.hass,
self._sub_state,
{
"state_topic": {
"topic": self._config[CONF_STATE_TOPIC],
"msg_callback": message_received,
"qos": self._config[CONF_QOS],
}
},
)
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._sub_state = await subscription.async_unsubscribe_topics(
self.hass, self._sub_state
)
await MqttAttributes.async_will_remove_from_hass(self)
await MqttAvailability.async_will_remove_from_hass(self)
await MqttDiscoveryUpdate.async_will_remove_from_hass(self)
@callback
def _value_is_expired(self, *_):
"""Triggered when value is expired."""
self._expiration_trigger = None
self._expired = True
self.async_write_ha_state()
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._config[CONF_NAME]
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._config.get(CONF_UNIT_OF_MEASUREMENT)
@property
def force_update(self):
"""Force update."""
return self._config[CONF_FORCE_UPDATE]
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def icon(self):
"""Return the icon."""
return self._config.get(CONF_ICON)
@property
def device_class(self) -> Optional[str]:
"""Return the device class of the sensor."""
return self._config.get(CONF_DEVICE_CLASS)
@property
def available(self) -> bool:
"""Return true if the device is available and value has not expired."""
expire_after = self._config.get(CONF_EXPIRE_AFTER)
# pylint: disable=no-member
return MqttAvailability.available.fget(self) and (
expire_after is None or not self._expired
)
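# Standalone sketch of the expire_after bookkeeping above: a value received at time t0 with
# expire_after=120 (a hypothetical configuration value, in seconds) becomes stale once the
# clock passes t0 + 120 s, at which point the entity reports itself unavailable.
if __name__ == "__main__":
    from datetime import datetime, timedelta, timezone

    expire_after = 120  # seconds
    received_at = datetime.now(timezone.utc)
    expiration_at = received_at + timedelta(seconds=expire_after)
    print(f"value received at {received_at} expires at {expiration_at}")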
|
from typing import Tuple
from PIL import ImageDraw
def draw_box(
draw: ImageDraw,
box: Tuple[float, float, float, float],
img_width: int,
img_height: int,
text: str = "",
color: Tuple[int, int, int] = (255, 255, 0),
) -> None:
"""
    Draw a bounding box on an image.
The bounding box is defined by the tuple (y_min, x_min, y_max, x_max)
where the coordinates are floats in the range [0.0, 1.0] and
relative to the width and height of the image.
For example, if an image is 100 x 200 pixels (height x width) and the bounding
box is `(0.1, 0.2, 0.5, 0.9)`, the upper-left and bottom-right coordinates of
the bounding box will be `(40, 10)` to `(180, 50)` (in (x,y) coordinates).
"""
line_width = 3
font_height = 8
y_min, x_min, y_max, x_max = box
(left, right, top, bottom) = (
x_min * img_width,
x_max * img_width,
y_min * img_height,
y_max * img_height,
)
draw.line(
[(left, top), (left, bottom), (right, bottom), (right, top), (left, top)],
width=line_width,
fill=color,
)
if text:
draw.text(
(left + line_width, abs(top - line_width - font_height)), text, fill=color
)
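# Usage sketch (assumes Pillow is installed): draw a labeled box on a blank 200x100 image
# using the normalized (y_min, x_min, y_max, x_max) convention documented above.
if __name__ == "__main__":
    from PIL import Image

    image = Image.new("RGB", (200, 100))  # width x height
    draw_box(ImageDraw.Draw(image), (0.1, 0.2, 0.5, 0.9), image.width, image.height, text="person")
    image.save("/tmp/draw_box_example.png")  # hypothetical output path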
|
import os.path as op
import pytest as pytest
import numpy as np
from numpy.testing import assert_allclose
from mne.datasets.testing import data_path
from mne.io import read_raw_nirx, BaseRaw
from mne.preprocessing.nirs import optical_density
from mne.utils import _validate_type
from mne.datasets import testing
fname_nirx = op.join(data_path(download=False),
'NIRx', 'nirscout', 'nirx_15_2_recording_w_short')
@testing.requires_testing_data
def test_optical_density():
"""Test return type for optical density."""
raw = read_raw_nirx(fname_nirx, preload=False)
assert 'fnirs_cw_amplitude' in raw
assert 'fnirs_od' not in raw
raw = optical_density(raw)
_validate_type(raw, BaseRaw, 'raw')
assert 'fnirs_cw_amplitude' not in raw
assert 'fnirs_od' in raw
@testing.requires_testing_data
def test_optical_density_zeromean():
"""Test that optical density can process zero mean data."""
raw = read_raw_nirx(fname_nirx, preload=True)
raw._data[4] -= np.mean(raw._data[4])
with pytest.warns(RuntimeWarning, match='Negative'):
raw = optical_density(raw)
assert 'fnirs_od' in raw
@testing.requires_testing_data
def test_optical_density_manual():
"""Test optical density on known values."""
test_tol = 0.01
raw = read_raw_nirx(fname_nirx, preload=True)
# log(1) = 0
raw._data[4] = np.ones((145))
# log(0.5)/-1 = 0.69
# log(1.5)/-1 = -0.40
test_data = np.tile([0.5, 1.5], 73)[:145]
raw._data[5] = test_data
od = optical_density(raw)
assert_allclose(od.get_data([4]), 0.)
assert_allclose(od.get_data([5])[0, :2], [0.69, -0.4], atol=test_tol)
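# Standalone arithmetic check of the expected values above: optical density here amounts to
# -log(intensity / mean intensity), so with a channel mean near 1.0 the samples 0.5 and 1.5
# map to roughly 0.69 and -0.40 respectively.
if __name__ == '__main__':
    print(-np.log(np.array([0.5, 1.5]) / 1.0))  # ~[ 0.693 -0.405]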
|
import io
import os
import lxml
import pytest
from nikola import __main__
from .helper import append_config, cd, patch_config
from .test_demo_build import prepare_demo_site
from .test_empty_build import ( # NOQA
test_archive_exists,
test_avoid_double_slash_in_rss,
test_check_files,
)
def test_relative_links(build, output_dir):
"""Check that the links in a page are correct"""
test_path = os.path.join(output_dir, "about-nikola.html")
with io.open(test_path, "rb") as inf:
data = inf.read()
assert not any(
url.startswith("..")
for _, _, url, _ in lxml.html.iterlinks(data)
if url.endswith("css")
)
def test_index_in_sitemap(build, output_dir):
"""Test that the correct path is in sitemap, and not the wrong one."""
sitemap_path = os.path.join(output_dir, "sitemap.xml")
with io.open(sitemap_path, "r", encoding="utf8") as inf:
sitemap_data = inf.read()
assert "<loc>https://example.com/</loc>" not in sitemap_data
assert "<loc>https://example.com/blog/index.html</loc>" in sitemap_data
@pytest.fixture(scope="module")
def build(target_dir):
"""Fill the site with demo content and build it."""
prepare_demo_site(target_dir)
# Configure our pages to reside in the root
patch_config(
target_dir,
('("pages/*.txt", "pages", "page.tmpl"),', '("pages/*.txt", "", "page.tmpl"),'),
('("pages/*.rst", "pages", "page.tmpl"),', '("pages/*.rst", "", "page.tmpl"),'),
('# INDEX_PATH = ""', 'INDEX_PATH = "blog"'),
)
append_config(
target_dir,
"""
PRETTY_URLS = False
STRIP_INDEXES = False
""",
)
with cd(target_dir):
__main__.main(["build"])
|
import numpy as np
import unittest
from chainercv.utils import assert_is_image
from chainercv.utils import testing
@testing.parameterize(
{
'img': np.random.randint(0, 256, size=(3, 48, 64)),
'color': True, 'check_range': True, 'valid': True},
{
'img': np.random.randint(0, 256, size=(1, 48, 64)),
'color': True, 'check_range': True, 'valid': False},
{
'img': np.random.randint(0, 256, size=(4, 48, 64)),
'color': True, 'check_range': True, 'valid': False},
{
'img': np.ones((3, 48, 64)) * 256,
'color': True, 'check_range': True, 'valid': False},
{
'img': np.ones((3, 48, 64)) * -1,
'color': True, 'check_range': True, 'valid': False},
{
'img': np.ones((3, 48, 64)) * 256,
'color': True, 'check_range': False, 'valid': True},
{
'img': np.random.randint(0, 256, size=(1, 48, 64)),
'color': False, 'check_range': True, 'valid': True},
{
'img': np.random.randint(0, 256, size=(3, 48, 64)),
'color': False, 'check_range': True, 'valid': False},
{
'img': np.ones((1, 48, 64)) * 256,
'color': False, 'check_range': True, 'valid': False},
{
'img': np.ones((1, 48, 64)) * -1,
'color': False, 'check_range': True, 'valid': False},
{
'img': np.ones((1, 48, 64)) * 256,
'color': False, 'check_range': False, 'valid': True},
{
'img': (((0, 1), (2, 3)), ((4, 5), (6, 7)), ((8, 9), (10, 11))),
'color': True, 'check_range': True, 'valid': False},
)
class TestAssertIsImage(unittest.TestCase):
def test_assert_is_image(self):
if self.valid:
assert_is_image(self.img, self.color, self.check_range)
else:
with self.assertRaises(AssertionError):
assert_is_image(self.img, self.color, self.check_range)
testing.run_module(__name__, __file__)
|
from collections import OrderedDict
from copy import deepcopy
import logging
import json
import numpy as np
from .check import _check_pandas_installed, _check_preload, _validate_type
from ._logging import warn, verbose
from .numerics import object_size, object_hash
logger = logging.getLogger('mne') # one selection here used across mne-python
logger.propagate = False # don't propagate (in case of multiple imports)
class SizeMixin(object):
"""Estimate MNE object sizes."""
def __eq__(self, other):
"""Compare self to other.
Parameters
----------
other : object
The object to compare to.
Returns
-------
eq : bool
True if the two objects are equal.
"""
return isinstance(other, type(self)) and hash(self) == hash(other)
@property
def _size(self):
"""Estimate the object size."""
try:
size = object_size(self.info)
except Exception:
warn('Could not get size for self.info')
return -1
if hasattr(self, 'data'):
size += object_size(self.data)
elif hasattr(self, '_data'):
size += object_size(self._data)
return size
def __hash__(self):
"""Hash the object.
Returns
-------
hash : int
The hash
"""
from ..evoked import Evoked
from ..epochs import BaseEpochs
from ..io.base import BaseRaw
if isinstance(self, Evoked):
return object_hash(dict(info=self.info, data=self.data))
elif isinstance(self, (BaseEpochs, BaseRaw)):
_check_preload(self, "Hashing ")
return object_hash(dict(info=self.info, data=self._data))
else:
raise RuntimeError('Hashing unknown object type: %s' % type(self))
class GetEpochsMixin(object):
"""Class to add epoch selection and metadata to certain classes."""
def __getitem__(self, item):
"""Return an Epochs object with a copied subset of epochs.
Parameters
----------
item : slice, array-like, str, or list
See below for use cases.
Returns
-------
epochs : instance of Epochs
See below for use cases.
Notes
-----
Epochs can be accessed as ``epochs[...]`` in several ways:
1. **Integer or slice:** ``epochs[idx]`` will return an `~mne.Epochs`
object with a subset of epochs chosen by index (supports single
index and Python-style slicing).
2. **String:** ``epochs['name']`` will return an `~mne.Epochs` object
comprising only the epochs labeled ``'name'`` (i.e., epochs created
around events with the label ``'name'``).
If there are no epochs labeled ``'name'`` but there are epochs
labeled with /-separated tags (e.g. ``'name/left'``,
``'name/right'``), then ``epochs['name']`` will select the epochs
with labels that contain that tag (e.g., ``epochs['left']`` selects
epochs labeled ``'audio/left'`` and ``'visual/left'``, but not
``'audio_left'``).
If multiple tags are provided *as a single string* (e.g.,
``epochs['name_1/name_2']``), this selects epochs containing *all*
provided tags. For example, ``epochs['audio/left']`` selects
``'audio/left'`` and ``'audio/quiet/left'``, but not
``'audio/right'``. Note that tag-based selection is insensitive to
order: tags like ``'audio/left'`` and ``'left/audio'`` will be
treated the same way when selecting via tag.
3. **List of strings:** ``epochs[['name_1', 'name_2', ... ]]`` will
return an `~mne.Epochs` object comprising epochs that match *any* of
the provided names (i.e., the list of names is treated as an
inclusive-or condition). If *none* of the provided names match any
epoch labels, a ``KeyError`` will be raised.
If epoch labels are /-separated tags, then providing multiple tags
*as separate list entries* will likewise act as an inclusive-or
filter. For example, ``epochs[['audio', 'left']]`` would select
``'audio/left'``, ``'audio/right'``, and ``'visual/left'``, but not
``'visual/right'``.
4. **Pandas query:** ``epochs['pandas query']`` will return an
`~mne.Epochs` object with a subset of epochs (and matching
metadata) selected by the query called with
``self.metadata.eval``, e.g.::
epochs["col_a > 2 and col_b == 'foo'"]
would return all epochs whose associated ``col_a`` metadata was
greater than two, and whose ``col_b`` metadata was the string 'foo'.
Query-based indexing only works if Pandas is installed and
``self.metadata`` is a :class:`pandas.DataFrame`.
.. versionadded:: 0.16
"""
return self._getitem(item)
def _item_to_select(self, item):
if isinstance(item, str):
item = [item]
# Convert string to indices
if isinstance(item, (list, tuple)) and len(item) > 0 and \
isinstance(item[0], str):
select = self._keys_to_idx(item)
elif isinstance(item, slice):
select = item
else:
select = np.atleast_1d(item)
if len(select) == 0:
select = np.array([], int)
return select
def _getitem(self, item, reason='IGNORED', copy=True, drop_event_id=True,
select_data=True, return_indices=False):
"""
Select epochs from current object.
Parameters
----------
item: slice, array-like, str, or list
see `__getitem__` for details.
reason: str
entry in `drop_log` for unselected epochs
copy: bool
return a copy of the current object
drop_event_id: bool
remove non-existing event-ids after selection
select_data: bool
apply selection to data
(use `select_data=False` if subclasses do not have a
valid `_data` field, or data has already been subselected)
return_indices: bool
return the indices of selected epochs from the original object
in addition to the new `Epochs` objects
Returns
-------
`Epochs` or tuple(Epochs, np.ndarray) if `return_indices` is True
subset of epochs (and optionally array with kept epoch indices)
"""
data = self._data
del self._data
inst = self.copy() if copy else self
self._data = inst._data = data
del self
select = inst._item_to_select(item)
has_selection = hasattr(inst, 'selection')
if has_selection:
key_selection = inst.selection[select]
drop_log = list(inst.drop_log)
if reason is not None:
for k in np.setdiff1d(inst.selection, key_selection):
drop_log[k] = (reason,)
inst.drop_log = tuple(drop_log)
inst.selection = key_selection
del drop_log
inst.events = np.atleast_2d(inst.events[select])
if inst.metadata is not None:
pd = _check_pandas_installed(strict=False)
if pd:
metadata = inst.metadata.iloc[select]
if has_selection:
metadata.index = inst.selection
else:
metadata = np.array(inst.metadata, 'object')[select].tolist()
# will reset the index for us
GetEpochsMixin.metadata.fset(inst, metadata, verbose=False)
if inst.preload and select_data:
# ensure that each Epochs instance owns its own data so we can
# resize later if necessary
inst._data = np.require(inst._data[select], requirements=['O'])
if drop_event_id:
# update event id to reflect new content of inst
inst.event_id = {k: v for k, v in inst.event_id.items()
if v in inst.events[:, 2]}
if return_indices:
return inst, select
else:
return inst
def _keys_to_idx(self, keys):
"""Find entries in event dict."""
keys = keys if isinstance(keys, (list, tuple)) else [keys]
try:
# Assume it's a condition name
return np.where(np.any(
np.array([self.events[:, 2] == self.event_id[k]
for k in _hid_match(self.event_id, keys)]),
axis=0))[0]
except KeyError as err:
# Could we in principle use metadata with these Epochs and keys?
if (len(keys) != 1 or self.metadata is None):
# If not, raise original error
raise
msg = str(err.args[0]) # message for KeyError
pd = _check_pandas_installed(strict=False)
# See if the query can be done
if pd:
md = self.metadata if hasattr(self, '_metadata') else None
self._check_metadata(metadata=md)
try:
# Try metadata
mask = self.metadata.eval(keys[0], engine='python').values
except Exception as exp:
msg += (' The epochs.metadata Pandas query did not '
'yield any results: %s' % (exp.args[0],))
else:
return np.where(mask)[0]
else:
# If not, warn this might be a problem
msg += (' The epochs.metadata Pandas query could not '
'be performed, consider installing Pandas.')
raise KeyError(msg)
def __len__(self):
"""Return the number of epochs.
Returns
-------
n_epochs : int
The number of remaining epochs.
Notes
-----
This function only works if bad epochs have been dropped.
Examples
--------
This can be used as::
>>> epochs.drop_bad() # doctest: +SKIP
>>> len(epochs) # doctest: +SKIP
43
>>> len(epochs.events) # doctest: +SKIP
43
"""
from ..epochs import BaseEpochs
if isinstance(self, BaseEpochs) and not self._bad_dropped:
raise RuntimeError('Since bad epochs have not been dropped, the '
'length of the Epochs is not known. Load the '
'Epochs with preload=True, or call '
'Epochs.drop_bad(). To find the number '
'of events in the Epochs, use '
'len(Epochs.events).')
return len(self.events)
def __iter__(self):
"""Facilitate iteration over epochs.
This method resets the object iteration state to the first epoch.
Notes
-----
This enables the use of this Python pattern::
>>> for epoch in epochs: # doctest: +SKIP
>>> print(epoch) # doctest: +SKIP
Where ``epoch`` is given by successive outputs of
:meth:`mne.Epochs.next`.
"""
self._current = 0
return self
def __next__(self, return_event_id=False):
"""Iterate over epoch data.
Parameters
----------
return_event_id : bool
If True, return both the epoch data and an event_id.
Returns
-------
epoch : array of shape (n_channels, n_times)
The epoch data.
event_id : int
The event id. Only returned if ``return_event_id`` is ``True``.
"""
if self.preload:
if self._current >= len(self._data):
raise StopIteration # signal the end
epoch = self._data[self._current]
self._current += 1
else:
is_good = False
while not is_good:
if self._current >= len(self.events):
raise StopIteration # signal the end properly
epoch_noproj = self._get_epoch_from_raw(self._current)
epoch_noproj = self._detrend_offset_decim(epoch_noproj)
epoch = self._project_epoch(epoch_noproj)
self._current += 1
is_good, _ = self._is_good_epoch(epoch)
# If delayed-ssp mode, pass 'virgin' data after rejection decision.
if self._do_delayed_proj:
epoch = epoch_noproj
if not return_event_id:
return epoch
else:
return epoch, self.events[self._current - 1][-1]
next = __next__ # originally for Python2, now b/c public
def _check_metadata(self, metadata=None, reset_index=False):
"""Check metadata consistency."""
# reset_index=False will not copy!
if metadata is None:
return
else:
pd = _check_pandas_installed(strict=False)
if pd:
_validate_type(metadata, types=pd.DataFrame,
item_name='metadata')
if len(metadata) != len(self.events):
raise ValueError('metadata must have the same number of '
'rows (%d) as events (%d)'
% (len(metadata), len(self.events)))
if reset_index:
if hasattr(self, 'selection'):
# makes a copy
metadata = metadata.reset_index(drop=True)
metadata.index = self.selection
else:
metadata = deepcopy(metadata)
else:
_validate_type(metadata, types=list,
item_name='metadata')
if reset_index:
metadata = deepcopy(metadata)
return metadata
@property
def metadata(self):
"""Get the metadata."""
return self._metadata
@metadata.setter
@verbose
def metadata(self, metadata, verbose=None):
metadata = self._check_metadata(metadata, reset_index=True)
if metadata is not None:
if _check_pandas_installed(strict=False):
n_col = metadata.shape[1]
else:
n_col = len(metadata[0])
n_col = ' with %d columns' % n_col
else:
n_col = ''
if hasattr(self, '_metadata') and self._metadata is not None:
action = 'Removing' if metadata is None else 'Replacing'
action += ' existing'
else:
action = 'Not setting' if metadata is None else 'Adding'
logger.info('%s metadata%s' % (action, n_col))
self._metadata = metadata
def _prepare_write_metadata(metadata):
"""Convert metadata to JSON for saving."""
if metadata is not None:
        if not isinstance(metadata, list):  # pandas DataFrame
            metadata = metadata.to_json(orient='records')
        else:  # plain list of dicts
            metadata = json.dumps(metadata)
assert isinstance(metadata, str)
return metadata
def _prepare_read_metadata(metadata):
"""Convert saved metadata back from JSON."""
if metadata is not None:
pd = _check_pandas_installed(strict=False)
# use json.loads because this preserves ordering
# (which is necessary for round-trip equivalence)
metadata = json.loads(metadata, object_pairs_hook=OrderedDict)
assert isinstance(metadata, list)
if pd:
metadata = pd.DataFrame.from_records(metadata)
assert isinstance(metadata, pd.DataFrame)
return metadata
def _hid_match(event_id, keys):
"""Match event IDs using HID selection.
Parameters
----------
event_id : dict
The event ID dictionary.
keys : list | str
The event ID or subset (for HID), or list of such items.
Returns
-------
use_keys : list
The full keys that fit the selection criteria.
"""
# form the hierarchical event ID mapping
use_keys = []
for key in keys:
if not isinstance(key, str):
raise KeyError('keys must be strings, got %s (%s)'
% (type(key), key))
use_keys.extend(k for k in event_id.keys()
if set(key.split('/')).issubset(k.split('/')))
if len(use_keys) == 0:
raise KeyError('Event "{}" is not in Epochs. Event_ids must be one of '
'"{}"'.format(key, ', '.join(event_id.keys())))
use_keys = list(set(use_keys)) # deduplicate if necessary
return use_keys
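# Minimal illustration of the tag matching above (hypothetical event_id dict): a query
# selects every event name whose '/'-separated tags form a superset of the query's tags,
# regardless of tag order.
if __name__ == '__main__':
    demo_event_id = {'audio/left': 1, 'audio/right': 2, 'visual/left': 3}
    assert sorted(_hid_match(demo_event_id, ['left'])) == ['audio/left', 'visual/left']
    assert _hid_match(demo_event_id, ['left/audio']) == ['audio/left']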
class _FakeNoPandas(object): # noqa: D101
def __enter__(self): # noqa: D105
def _check(strict=True):
if strict:
raise RuntimeError('Pandas not installed')
else:
return False
import mne
self._old_check = _check_pandas_installed
mne.epochs._check_pandas_installed = _check
mne.utils.mixin._check_pandas_installed = _check
def __exit__(self, *args): # noqa: D105
import mne
mne.epochs._check_pandas_installed = self._old_check
mne.utils.mixin._check_pandas_installed = self._old_check
class ShiftTimeMixin(object):
"""Class for shift_time method (Epochs, Evoked, and DipoleFixed)."""
def shift_time(self, tshift, relative=True):
"""Shift time scale in epoched or evoked data.
Parameters
----------
tshift : float
The (absolute or relative) time shift in seconds. If ``relative``
is True, positive tshift increases the time value associated with
each sample, while negative tshift decreases it.
relative : bool
If True, increase or decrease time values by ``tshift`` seconds.
Otherwise, shift the time values such that the time of the first
sample equals ``tshift``.
Returns
-------
epochs : instance of Epochs
The modified Epochs instance.
Notes
-----
This method allows you to shift the *time* values associated with each
data sample by an arbitrary amount. It does *not* resample the signal
or change the *data* values in any way.
"""
from ..epochs import BaseEpochs
_check_preload(self, 'shift_time')
start = tshift + (self.times[0] if relative else 0.)
new_times = start + np.arange(len(self.times)) / self.info['sfreq']
if isinstance(self, BaseEpochs):
self._set_times(new_times)
else:
self.times = new_times
self._update_first_last()
return self
def _update_first_last(self):
"""Update self.first and self.last (sample indices)."""
self.first = int(round(self.times[0] * self.info['sfreq']))
self.last = len(self.times) + self.first - 1
|
import numpy as np
from ... import pick_types
from ...io import BaseRaw
from ...utils import _validate_type
from ...io.pick import _picks_to_idx
def temporal_derivative_distribution_repair(raw):
"""Apply temporal derivative distribution repair to data.
Applies temporal derivative distribution repair (TDDR) to data
:footcite:`FishburnEtAl2019`. This approach removes baseline shift
and spike artifacts without the need for any user-supplied parameters.
Parameters
----------
raw : instance of Raw
The raw data.
%(verbose)s
Returns
-------
raw : instance of Raw
Data with TDDR applied.
Notes
-----
There is a shorter alias ``mne.preprocessing.nirs.tddr`` that can be used
instead of this function (e.g. if line length is an issue).
References
----------
.. footbibliography::
"""
raw = raw.copy().load_data()
_validate_type(raw, BaseRaw, 'raw')
if not len(pick_types(raw.info, fnirs='fnirs_od')):
raise RuntimeError('TDDR should be run on optical density data.')
picks = _picks_to_idx(raw.info, 'fnirs_od', exclude=[])
for pick in picks:
raw._data[pick] = _TDDR(raw._data[pick], raw.info['sfreq'])
return raw
# provide a short alias
tddr = temporal_derivative_distribution_repair
# Taken from https://github.com/frankfishburn/TDDR/ (MIT license).
# With permission https://github.com/frankfishburn/TDDR/issues/1.
# The only modification is the name, scipy signal import and flake fixes.
def _TDDR(signal, sample_rate):
# This function is the reference implementation for the TDDR algorithm for
# motion correction of fNIRS data, as described in:
#
# Fishburn F.A., Ludlum R.S., Vaidya C.J., & Medvedev A.V. (2019).
# Temporal Derivative Distribution Repair (TDDR): A motion correction
# method for fNIRS. NeuroImage, 184, 171-179.
# https://doi.org/10.1016/j.neuroimage.2018.09.025
#
# Usage:
# signals_corrected = TDDR( signals , sample_rate );
#
# Inputs:
# signals: A [sample x channel] matrix of uncorrected optical density
# data
# sample_rate: A scalar reflecting the rate of acquisition in Hz
#
# Outputs:
# signals_corrected: A [sample x channel] matrix of corrected optical
# density data
from scipy.signal import butter, filtfilt
signal = np.array(signal)
if len(signal.shape) != 1:
for ch in range(signal.shape[1]):
signal[:, ch] = _TDDR(signal[:, ch], sample_rate)
return signal
# Preprocess: Separate high and low frequencies
filter_cutoff = .5
filter_order = 3
Fc = filter_cutoff * 2 / sample_rate
signal_mean = np.mean(signal)
signal -= signal_mean
if Fc < 1:
fb, fa = butter(filter_order, Fc)
signal_low = filtfilt(fb, fa, signal, padlen=0)
else:
signal_low = signal
signal_high = signal - signal_low
# Initialize
tune = 4.685
D = np.sqrt(np.finfo(signal.dtype).eps)
mu = np.inf
iter = 0
# Step 1. Compute temporal derivative of the signal
deriv = np.diff(signal_low)
# Step 2. Initialize observation weights
w = np.ones(deriv.shape)
# Step 3. Iterative estimation of robust weights
while iter < 50:
iter = iter + 1
mu0 = mu
# Step 3a. Estimate weighted mean
mu = np.sum(w * deriv) / np.sum(w)
# Step 3b. Calculate absolute residuals of estimate
dev = np.abs(deriv - mu)
# Step 3c. Robust estimate of standard deviation of the residuals
sigma = 1.4826 * np.median(dev)
# Step 3d. Scale deviations by standard deviation and tuning parameter
r = dev / (sigma * tune)
# Step 3e. Calculate new weights according to Tukey's biweight function
w = ((1 - r**2) * (r < 1)) ** 2
# Step 3f. Terminate if new estimate is within
# machine-precision of old estimate
if abs(mu - mu0) < D * max(abs(mu), abs(mu0)):
break
# Step 4. Apply robust weights to centered derivative
new_deriv = w * (deriv - mu)
# Step 5. Integrate corrected derivative
signal_low_corrected = np.cumsum(np.insert(new_deriv, 0, 0.0))
# Postprocess: Center the corrected signal
signal_low_corrected = signal_low_corrected - np.mean(signal_low_corrected)
# Postprocess: Merge back with uncorrected high frequency component
signal_corrected = signal_low_corrected + signal_high + signal_mean
return signal_corrected
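# Usage sketch on synthetic data (standalone, not part of the public API): a slow random
# walk with an artificial baseline shift at sample 500, corrected by the reference
# implementation above.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    synthetic = np.cumsum(rng.randn(1000)) * 1e-3
    synthetic[500:] += 0.5  # simulated motion / baseline-shift artifact
    corrected = _TDDR(synthetic, sample_rate=10.0)
    print(synthetic.std(), corrected.std())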
|
from django.conf import settings
from django.template.loader import select_template
from django.urls import NoReverseMatch, reverse
from django.utils.translation import get_language_from_request
from cms.models.pagemodel import Page
from rest_framework.serializers import CharField, BooleanField
from rest_auth import serializers
from shop.conf import app_settings
from shop.forms.auth import PasswordResetRequestForm
from rest_auth.serializers import LoginSerializer as DefaultLoginSerializer
class LoginSerializer(DefaultLoginSerializer):
stay_logged_in = BooleanField(required=False)
class PasswordResetRequestSerializer(serializers.PasswordResetSerializer):
password_reset_form_class = PasswordResetRequestForm
invalid_password_reset_confirm_url = '/cms-page_or_view_with__reverse_id=password-reset-confirm__does-not-exist/'
def save(self):
subject_template = select_template([
'{}/email/password-reset-subject.txt'.format(app_settings.APP_LABEL),
'shop/email/password-reset-subject.txt',
])
body_text_template = select_template([
'{}/email/password-reset-body.txt'.format(app_settings.APP_LABEL),
'shop/email/password-reset-body.txt',
])
body_html_template = select_template([
'{}/email/password-reset-body.html'.format(app_settings.APP_LABEL),
'shop/email/password-reset-body.html',
])
try:
page = Page.objects.select_related('node').get(reverse_id='password-reset-confirm', publisher_is_draft=False)
except Page.DoesNotExist:
try:
password_reset_confirm_url = reverse('password-reset-confirm')
except NoReverseMatch:
password_reset_confirm_url = self.invalid_password_reset_confirm_url
else:
language = get_language_from_request(self.context['request'])
password_reset_confirm_url = page.get_absolute_url(language)
opts = {
'use_https': self.context['request'].is_secure(),
'from_email': getattr(settings, 'DEFAULT_FROM_EMAIL'),
'request': self.context['request'],
'subject_template_name': subject_template.template.name,
'email_template_name': body_text_template.template.name,
'html_email_template_name': body_html_template.template.name,
'extra_email_context': {'password_reset_confirm_url': password_reset_confirm_url}
}
self.reset_form.save(**opts)
class PasswordResetConfirmSerializer(serializers.PasswordResetConfirmSerializer):
new_password1 = CharField(min_length=6, max_length=128)
new_password2 = CharField(min_length=6, max_length=128)
|
import json
from perfkitbenchmarker import resource
from perfkitbenchmarker.providers import aws
from perfkitbenchmarker.providers.aws import util
def GetAwsVpcEndpointClass(aws_service):
"""Returns the AwsVpcEndpoint class for the given service."""
return resource.GetResourceClass(
AwsVpcEndpoint, CLOUD=aws.CLOUD, AWS_SERVICE=aws_service)
def CreateEndpointService(aws_service, vpc):
"""Creates the named VPC endpoint in the given VPC.
Args:
aws_service: The AWS service to use.
vpc: The VPC to launch the endpoint service in.
Returns:
The resource.BaseResource of the endpoint service.
"""
service_class = GetAwsVpcEndpointClass(aws_service)
return service_class(vpc)
class AwsVpcEndpoint(resource.BaseResource):
"""An AWS Endpoint.
Attributes:
region: The AWS region of the VPC
vpc: The aws_network.AwsVpc object to make the connection in. The VPC does
not initially need an ID but does when Create() is called.
"""
REQUIRED_ATTRS = ['CLOUD', 'AWS_SERVICE']
RESOURCE_TYPE = 'AwsVpcEndpoint'
CLOUD = aws.CLOUD
def __init__(self, vpc):
super(AwsVpcEndpoint, self).__init__()
assert vpc, 'Must have a VPC object (does not require an id).'
self._vpc = vpc
self.region = self._vpc.region
assert self.region, 'VPC region must be set'
self._service_name = 'com.amazonaws.{}.{}'.format(self.region,
self.AWS_SERVICE)
    # The Create() method first queries to see if an endpoint is already defined.
self.id = None
@property
def vpc_id(self):
"""Returns the VPC id. Can be None."""
return self._vpc.id
@property
def endpoint_id(self):
"""Returns the endpoint id for the defined VPC."""
if not self.vpc_id:
# When creating an SDDC there will not be a VPC to have an endpoint
return None
ids = self._RunCommand(['describe-vpc-endpoints'] + util.AwsFilter({
'vpc-id': self.vpc_id,
'service-name': self._service_name
}) + ['--query', 'VpcEndpoints[].VpcEndpointId'])
if not ids:
# There is a VPC but no endpoint
return None
assert len(ids) == 1, 'More than 1 VPC endpoint found: {}'.format(ids)
return ids[0]
@property
def route_table_id(self):
"""Returns the route table id for the VPC.
Raises:
AssertionError: If no VPC is defined or if there are 0 or more than 1
routing tables found.
"""
assert self.vpc_id, 'No defined VPC id.'
table_ids = self._RunCommand(['describe-route-tables'] +
util.AwsFilter({'vpc-id': self.vpc_id}) +
['--query', 'RouteTables[].RouteTableId'])
assert len(table_ids) == 1, 'Only want 1 route table: {}'.format(table_ids)
return table_ids[0]
def _Create(self):
"""See base class.
Raises:
AssertionError: If no VPC is defined.
"""
assert self.vpc_id, 'No defined VPC id.'
self.id = self.endpoint_id
if self.id:
# Endpoint already created
return
create_response = self._RunCommand([
'create-vpc-endpoint', '--vpc-endpoint-type', 'Gateway', '--vpc-id',
self.vpc_id, '--service-name', self._service_name, '--route-table-ids',
self.route_table_id
])
self.id = create_response['VpcEndpoint']['VpcEndpointId']
def _PostCreate(self):
"""See base class."""
util.AddDefaultTags(self.id, self.region)
def _Exists(self):
"""See base class."""
return bool(self.endpoint_id)
def _Delete(self):
"""See base class."""
endpoint_id = self.id or self.endpoint_id
if endpoint_id:
self._RunCommand(
['delete-vpc-endpoints', '--vpc-endpoint-ids', endpoint_id])
def _RunCommand(self, cmds):
"""Runs the AWS ec2 command in the defined region.
Args:
cmds: List of AWS ec2 commands to run, example: ['describe-route-tables']
Returns:
Dict of the AWS response.
"""
cmd = util.AWS_PREFIX + ['ec2', '--region=%s' % self.region] + list(cmds)
stdout, _ = util.IssueRetryableCommand(cmd)
return json.loads(stdout)
class AwsVpcS3Endpoint(AwsVpcEndpoint):
"""An AWS VPC S3 Endpoint.
Attributes:
region: The AWS region of the VPC
vpc: The aws_network.AwsVpc object to make the connection in. The VPC does
not initially need an ID but does when Create() is called.
"""
AWS_SERVICE = 's3'
|
import threading
import re
import Queue
import diamond.collector
import diamond.metric
import collectd_network
ALIVE = True
class JCollectdCollector(diamond.collector.Collector):
def __init__(self, *args, **kwargs):
super(JCollectdCollector, self).__init__(*args, **kwargs)
self.listener_thread = None
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(JCollectdCollector, self).get_default_config()
config.update({
'path': 'jvm',
'listener_host': '127.0.0.1',
'listener_port': 25826,
})
return config
def collect(self):
if not self.listener_thread:
self.start_listener()
q = self.listener_thread.queue
while True:
try:
dp = q.get(False)
metric = self.make_metric(dp)
except Queue.Empty:
break
self.publish_metric(metric)
def start_listener(self):
self.listener_thread = ListenerThread(self.config['listener_host'],
self.config['listener_port'],
self.log)
self.listener_thread.start()
def stop_listener(self):
global ALIVE
ALIVE = False
self.listener_thread.join()
self.log.error('Listener thread is shut down.')
def make_metric(self, dp):
path = ".".join((dp.host, self.config['path'], dp.name))
if 'path_prefix' in self.config:
prefix = self.config['path_prefix']
if prefix:
path = ".".join((prefix, path))
if 'path_suffix' in self.config:
suffix = self.config['path_suffix']
if suffix:
path = ".".join((path, suffix))
if dp.is_counter:
metric_type = "COUNTER"
else:
metric_type = "GAUGE"
metric = diamond.metric.Metric(path, dp.value, dp.time,
metric_type=metric_type)
return metric
def __del__(self):
if self.listener_thread:
self.stop_listener()
class ListenerThread(threading.Thread):
def __init__(self, host, port, log, poll_interval=0.4):
super(ListenerThread, self).__init__()
self.name = 'JCollectdListener' # thread name
self.host = host
self.port = port
self.log = log
self.poll_interval = poll_interval
self.queue = Queue.Queue()
def run(self):
self.log.info('ListenerThread started on {}:{}(udp)'.format(
self.host, self.port))
rdr = collectd_network.Reader(self.host, self.port)
try:
while ALIVE:
try:
items = rdr.interpret(poll_interval=self.poll_interval)
self.send_to_collector(items)
except ValueError as e:
self.log.warn('Dropping bad packet: {}'.format(e))
except Exception as e:
self.log.error('caught exception: type={}, exc={}'.format(
type(e), e))
self.log.info('ListenerThread - stop')
def send_to_collector(self, items):
if items is None:
return
for item in items:
try:
metric = self.transform(item)
self.queue.put(metric)
except Queue.Full:
self.log.error('Queue to collector is FULL')
except Exception as e:
self.log.error('B00M! type={}, exception={}'.format(
type(e), e))
def transform(self, item):
parts = []
path = item.plugininstance
# extract jvm name from 'logstash-MemoryPool Eden Space'
if '-' in path:
(jvm, tail) = path.split('-', 1)
path = tail
else:
jvm = 'unnamed'
# add JVM name
parts.append(jvm)
# add mbean name (e.g. 'java_lang')
parts.append(item.plugin)
# get typed mbean: 'MemoryPool Eden Space'
if ' ' in path:
(mb_type, mb_name) = path.split(' ', 1)
parts.append(mb_type)
parts.append(mb_name)
else:
parts.append(path)
# add property name
parts.append(item.typeinstance)
# construct full path, from safe parts
name = '.'.join([sanitize_word(part) for part in parts])
if item[0][0] == 0:
is_counter = True
else:
is_counter = False
dp = Datapoint(item.host, item.time, name, item[0][1], is_counter)
return dp
def sanitize_word(s):
"""Remove non-alphanumerical characters from metric word.
And trim excessive underscores.
"""
    s = re.sub(r'[^\w-]+', '_', s)
s = re.sub('__+', '_', s)
return s.strip('_')
class Datapoint(object):
def __init__(self, host, time, name, value, is_counter):
self.host = host
self.time = time
self.name = name
self.value = value
self.is_counter = is_counter
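# Quick illustration of the metric-path cleanup above (standalone): runs of non-word
# characters collapse to single underscores and leading/trailing underscores are trimmed.
if __name__ == '__main__':
    assert sanitize_word('MemoryPool Eden Space!') == 'MemoryPool_Eden_Space'
    assert sanitize_word('__gc.count__') == 'gc_count'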
|
from datetime import timedelta
from requests.exceptions import HTTPError
from homeassistant.components.fritzbox.const import (
ATTR_STATE_DEVICE_LOCKED,
ATTR_STATE_LOCKED,
DOMAIN as FB_DOMAIN,
)
from homeassistant.components.sensor import DOMAIN
from homeassistant.const import (
ATTR_FRIENDLY_NAME,
ATTR_UNIT_OF_MEASUREMENT,
TEMP_CELSIUS,
)
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from . import MOCK_CONFIG, FritzDeviceSensorMock
from tests.async_mock import Mock
from tests.common import async_fire_time_changed
ENTITY_ID = f"{DOMAIN}.fake_name"
async def setup_fritzbox(hass: HomeAssistantType, config: dict):
"""Set up mock AVM Fritz!Box."""
assert await async_setup_component(hass, FB_DOMAIN, config)
await hass.async_block_till_done()
async def test_setup(hass: HomeAssistantType, fritz: Mock):
"""Test setup of platform."""
device = FritzDeviceSensorMock()
fritz().get_devices.return_value = [device]
await setup_fritzbox(hass, MOCK_CONFIG)
state = hass.states.get(ENTITY_ID)
assert state
assert state.state == "1.23"
assert state.attributes[ATTR_FRIENDLY_NAME] == "fake_name"
assert state.attributes[ATTR_STATE_DEVICE_LOCKED] == "fake_locked_device"
assert state.attributes[ATTR_STATE_LOCKED] == "fake_locked"
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == TEMP_CELSIUS
async def test_update(hass: HomeAssistantType, fritz: Mock):
"""Test update with error."""
device = FritzDeviceSensorMock()
fritz().get_devices.return_value = [device]
await setup_fritzbox(hass, MOCK_CONFIG)
assert device.update.call_count == 0
assert fritz().login.call_count == 1
next_update = dt_util.utcnow() + timedelta(seconds=200)
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert device.update.call_count == 1
assert fritz().login.call_count == 1
async def test_update_error(hass: HomeAssistantType, fritz: Mock):
"""Test update with error."""
device = FritzDeviceSensorMock()
device.update.side_effect = HTTPError("Boom")
fritz().get_devices.return_value = [device]
await setup_fritzbox(hass, MOCK_CONFIG)
assert device.update.call_count == 0
assert fritz().login.call_count == 1
next_update = dt_util.utcnow() + timedelta(seconds=200)
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert device.update.call_count == 1
assert fritz().login.call_count == 2
|
import typing
from pathlib import Path
import pandas as pd
import matchzoo
def load_data(
stage: str = 'train',
task: str = 'ranking',
return_classes: bool = False
) -> typing.Union[matchzoo.DataPack, typing.Tuple[matchzoo.DataPack, list]]:
"""
Load WikiQA data.
:param stage: One of `train`, `dev`, and `test`.
:param task: Could be one of `ranking`, `classification` or a
:class:`matchzoo.engine.BaseTask` instance.
:param return_classes: `True` to return classes for classification task,
`False` otherwise.
    :return: A DataPack unless `task` is `classification` and `return_classes`
is `True`: a tuple of `(DataPack, classes)` in that case.
Example:
>>> import matchzoo as mz
>>> stages = 'train', 'dev', 'test'
>>> tasks = 'ranking', 'classification'
>>> for stage in stages:
... for task in tasks:
... _ = mz.datasets.toy.load_data(stage, task)
"""
if stage not in ('train', 'dev', 'test'):
raise ValueError(f"{stage} is not a valid stage."
f"Must be one of `train`, `dev`, and `test`.")
if task == 'ranking':
task = matchzoo.tasks.Ranking()
if task == 'classification':
task = matchzoo.tasks.Classification()
path = Path(__file__).parent.joinpath(f'{stage}.csv')
data_pack = matchzoo.pack(pd.read_csv(path, index_col=0))
if isinstance(task, matchzoo.tasks.Ranking):
data_pack.relation['label'] = \
data_pack.relation['label'].astype('float32')
return data_pack
elif isinstance(task, matchzoo.tasks.Classification):
data_pack.relation['label'] = data_pack.relation['label'].astype(int)
data_pack = data_pack.one_hot_encode_label(num_classes=2)
if return_classes:
return data_pack, [False, True]
else:
return data_pack
else:
raise ValueError(f"{task} is not a valid task."
f"Must be one of `Ranking` and `Classification`.")
def load_embedding():
path = Path(__file__).parent.joinpath('embedding.2d.txt')
return matchzoo.embedding.load_from_file(path, mode='glove')
|
from pygal import Bar, Line
from pygal._compat import u
def test_basic_sparktext():
"""Test basic sparktext"""
chart = Line()
chart.add('_', [1, 5, 22, 13, 53])
assert chart.render_sparktext() == u('▁▁▃▂█')
def test_all_sparktext():
"""Test all character sparktext"""
chart = Line()
chart.add('_', range(8))
assert chart.render_sparktext() == u('▁▂▃▄▅▆▇█')
def test_shifted_sparktext():
"""Test relative_to option in sparktext"""
chart = Line()
chart.add('_', list(map(lambda x: x + 10000, range(8))))
assert chart.render_sparktext() == u('▁▂▃▄▅▆▇█')
assert chart.render_sparktext(relative_to=0) == u('▇▇▇▇▇▇▇█')
def test_another_sparktext():
"""Test that same data produces same sparktext"""
chart = Line()
chart.add('_', [0, 30, 55, 80, 33, 150])
assert chart.render_sparktext() == u('▁▂▃▄▂█')
assert chart.render_sparktext() == chart.render_sparktext()
chart2 = Bar()
chart2.add('_', [0, 30, 55, 80, 33, 150])
assert chart2.render_sparktext() == chart.render_sparktext()
def test_negative_and_float__sparktext():
"""Test negative values"""
"""Test negative values"""
chart = Line()
chart.add('_', [0.1, 0.2, 0.9, -0.5])
assert chart.render_sparktext() == u('▁▂█▁')
def test_no_data_sparktext():
"""Test no data sparktext"""
chart2 = Line()
chart2.add('_', [])
assert chart2.render_sparktext() == u('')
chart3 = Line()
assert chart3.render_sparktext() == u('')
def test_same_max_and_relative_values_sparktext():
"""Test flat sparktexts"""
chart = Line()
chart.add('_', [0, 0, 0, 0, 0])
assert chart.render_sparktext() == u('▁▁▁▁▁')
chart2 = Line()
chart2.add('_', [1, 1, 1, 1, 1])
assert chart2.render_sparktext(relative_to=1) == u('▁▁▁▁▁')
|
import os
import sys
try:
from cStringIO import StringIO # Python 2.x
except ImportError:
from io import StringIO # Python 3.x
# don't directly use code from this, though we do depend on the
# manifest.Depend data type
import roslib.manifest
import roslib.msgs
import roslib.names
import roslib.packages
import roslib.resources
# file extension
EXT = '.srv' # alias
SEP = '/' # e.g. std_msgs/String
# input/output deliminator
IODELIM = '---'
COMMENTCHAR = roslib.msgs.COMMENTCHAR
VERBOSE = False
# @return: True if msg-related scripts should print verbose output
def is_verbose():
return VERBOSE
# set whether msg-related scripts should print verbose output
def set_verbose(v):
global VERBOSE
VERBOSE = v
class SrvSpecException(Exception):
pass
# msg spec representation ##########################################
class SrvSpec(object):
def __init__(self, request, response, text, full_name='', short_name='', package=''):
self.request = request
self.response = response
self.text = text
self.full_name = full_name
self.short_name = short_name
self.package = package
def __eq__(self, other):
if not other or not isinstance(other, SrvSpec):
return False
return self.request == other.request and \
self.response == other.response and \
self.text == other.text and \
self.full_name == other.full_name and \
self.short_name == other.short_name and \
self.package == other.package
def __ne__(self, other):
if not other or not isinstance(other, SrvSpec):
return True
return not self.__eq__(other)
def __repr__(self):
return 'SrvSpec[%s, %s]' % (repr(self.request), repr(self.response))
# srv spec loading utilities ##########################################
# @internal
# predicate for filtering directory list. matches service (.srv) files
def _srv_filter(f):
return os.path.isfile(f) and f.endswith(EXT)
# also used by doxymaker
def list_srv_types(package, include_depends):
"""
list all services in the specified package
@param package: name of package to search
@type package: str
@param include_depends: if True, will also list services in package dependencies
@type include_depends: bool
@return: service type names
@rtype: [str]
"""
types = roslib.resources.list_package_resources(package, include_depends, 'srv', _srv_filter)
return [x[:-len(EXT)] for x in types]
def srv_file(package, type_):
"""
@param package: name of package .srv file is in
@type package: str
@param type_: type name of service
@type type_: str
@return: file path of .srv file in specified package
@rtype: str
"""
return roslib.packages.resource_file(package, 'srv', type_+EXT)
def get_pkg_srv_specs(package):
"""
    List all services that a package contains
    @param package: name of package to load services from
    @type package: str
    @return: list of service type names and specs for package, as well as a list
    of service names that could not be processed.
    @rtype: ([(str, SrvSpec)], [str])
"""
# almost identical to roslib.msgs.get_pkg_msg_specs
types = list_srv_types(package, False)
specs = [] # no fancy list comprehension as we want to show errors
failures = []
for t in types:
try:
spec = load_from_file(srv_file(package, t), package)
specs.append(spec)
except Exception:
failures.append(t)
sys.stderr.write('ERROR: unable to load %s\n' % (t))
return specs, failures
def load_from_string(text, package_context='', full_name='', short_name=''):
"""
    @param text: .srv text
@type text: str
@param package_context: context to use for msgTypeName, i.e. the package name,
or '' to use local naming convention.
@type package_context: str
    @return: service specification
    @rtype: SrvSpec
@raise roslib.MsgSpecException: if syntax errors or other problems are detected in file
"""
text_in = StringIO()
text_out = StringIO()
accum = text_in
for l in text.split('\n'):
l = l.split(COMMENTCHAR)[0].strip() # strip comments
if l.startswith(IODELIM): # lenient, by request
accum = text_out
else:
accum.write(l+'\n')
# create separate roslib.msgs objects for each half of file
msg_in = roslib.msgs.load_from_string(text_in.getvalue(), package_context, '%sRequest' % (full_name), '%sRequest' % (short_name))
msg_out = roslib.msgs.load_from_string(text_out.getvalue(), package_context, '%sResponse' % (full_name), '%sResponse' % (short_name))
return SrvSpec(msg_in, msg_out, text, full_name, short_name, package_context)
def load_from_file(file_name, package_context=''):
"""
Convert the .srv representation in the file to a SrvSpec instance.
@param file_name: name of file to load from
@type file_name: str
@param package_context: context to use for type name, i.e. the package name,
or '' to use local naming convention.
@type package_context: str
    @return: service type name and service specification
@rtype: (str, L{SrvSpec})
@raise SrvSpecException: if syntax errors or other problems are detected in file
"""
if VERBOSE:
if package_context:
sys.stdout.write('Load spec from %s into namespace [%s]\n' % (file_name, package_context))
else:
sys.stdout.write('Load spec from %s\n' % (file_name))
base_file_name = os.path.basename(file_name)
type_ = base_file_name[:-len(EXT)]
base_type_ = type_
# determine the type name
if package_context:
while package_context.endswith(SEP):
package_context = package_context[:-1] # strip message separators
type_ = '%s%s%s' % (package_context, SEP, type_)
if not roslib.names.is_legal_resource_name(type_):
raise SrvSpecException('%s: %s is not a legal service type name' % (file_name, type_))
f = open(file_name, 'r')
try:
text = f.read()
return (type_, load_from_string(text, package_context, type_, base_type_))
finally:
f.close()
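# --- Illustrative usage sketch (not part of the original module). It assumes
# roslib is importable; the package and field names are hypothetical. Shown to
# clarify how the '---' delimiter splits a .srv definition into request and
# response specs in load_from_string above.
if __name__ == '__main__':
    example_srv = 'int64 a\nint64 b\n---\nint64 sum\n'
    example_spec = load_from_string(example_srv,
                                    package_context='my_pkg',
                                    full_name='my_pkg/AddTwoInts',
                                    short_name='AddTwoInts')
    print(example_spec.request)   # fields before '---' (a, b)
    print(example_spec.response)  # fields after '---' (sum)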
|
import unittest
import plotly_express as px
class TestPlotlyExpress(unittest.TestCase):
def test_basic_scatter_plot(self):
gapminder = px.data.gapminder()
gapminder2007 = gapminder.query("year == 2007")
px.scatter(gapminder2007, x="gdpPercap", y="lifeExp")
def test_complex_scatter_plot(self):
gapminder = px.data.gapminder()
gapminder2007 = gapminder.query("year == 2007")
px.scatter(gapminder, x="gdpPercap", y="lifeExp",size="pop", size_max=60, color="continent", hover_name="country",
animation_frame="year", animation_group="country", log_x=True, range_x=[100,100000], range_y=[25,90],
labels=dict(pop="Population", gdpPercap="GDP per Capita", lifeExp="Life Expectancy"))
def test_choropleth_plot(self):
gapminder = px.data.gapminder()
gapminder2007 = gapminder.query("year == 2007")
px.choropleth(gapminder, locations="iso_alpha", color="lifeExp", hover_name="country", animation_frame="year",
color_continuous_scale=px.colors.sequential.Plasma, projection="natural earth")
def test_violin_plot(self):
tips = px.data.tips()
px.scatter(tips, x="total_bill", y="tip", color="smoker", trendline="ols", marginal_x="violin", marginal_y="box")
|
import re
import pytest
import requests_mock
from tests.common import load_fixture
from tests.components.light.conftest import mock_light_profiles # noqa
@pytest.fixture(name="requests_mock")
def requests_mock_fixture():
"""Fixture to provide a requests mocker."""
with requests_mock.mock() as mock:
# Note all devices have an id of 987652, but a different device_id.
# the device_id is used as our unique_id, but the id is what is sent
# to the APIs, which is why every mock uses that id.
# Mocks the response for authenticating
mock.post(
"https://oauth.ring.com/oauth/token", text=load_fixture("ring_oauth.json")
)
# Mocks the response for getting the login session
mock.post(
"https://api.ring.com/clients_api/session",
text=load_fixture("ring_session.json"),
)
# Mocks the response for getting all the devices
mock.get(
"https://api.ring.com/clients_api/ring_devices",
text=load_fixture("ring_devices.json"),
)
mock.get(
"https://api.ring.com/clients_api/dings/active",
text=load_fixture("ring_ding_active.json"),
)
# Mocks the response for getting the history of a device
mock.get(
re.compile(
r"https:\/\/api\.ring\.com\/clients_api\/doorbots\/\d+\/history"
),
text=load_fixture("ring_doorbots.json"),
)
# Mocks the response for getting the health of a device
mock.get(
re.compile(r"https:\/\/api\.ring\.com\/clients_api\/doorbots\/\d+\/health"),
text=load_fixture("ring_doorboot_health_attrs.json"),
)
# Mocks the response for getting a chimes health
mock.get(
re.compile(r"https:\/\/api\.ring\.com\/clients_api\/chimes\/\d+\/health"),
text=load_fixture("ring_chime_health_attrs.json"),
)
yield mock
|
import asyncio
import logging
import re
from abc import ABC
from collections import defaultdict
from typing import List, Tuple, Literal
import discord
from redbot.core.utils import AsyncIter
from redbot.core import Config, modlog, commands
from redbot.core.bot import Red
from redbot.core.i18n import Translator, cog_i18n
from redbot.core.utils._internal_utils import send_to_owners_with_prefix_replaced
from .events import Events
from .kickban import KickBanMixin
from .names import ModInfo
from .slowmode import Slowmode
from .settings import ModSettings
_ = T_ = Translator("Mod", __file__)
__version__ = "1.2.0"
class CompositeMetaClass(type(commands.Cog), type(ABC)):
"""
This allows the metaclass used for proper type detection to
coexist with discord.py's metaclass
"""
pass
@cog_i18n(_)
class Mod(
ModSettings,
Events,
KickBanMixin,
ModInfo,
Slowmode,
commands.Cog,
metaclass=CompositeMetaClass,
):
"""Moderation tools."""
default_global_settings = {"version": ""}
default_guild_settings = {
"mention_spam": {"ban": None, "kick": None, "warn": None, "strict": False},
"delete_repeats": -1,
"ignored": False,
"respect_hierarchy": True,
"delete_delay": -1,
"reinvite_on_unban": False,
"current_tempbans": [],
"dm_on_kickban": False,
"default_days": 0,
"default_tempban_duration": 60 * 60 * 24,
}
default_channel_settings = {"ignored": False}
default_member_settings = {"past_nicks": [], "perms_cache": {}, "banned_until": False}
default_user_settings = {"past_names": []}
def __init__(self, bot: Red):
super().__init__()
self.bot = bot
self.config = Config.get_conf(self, 4961522000, force_registration=True)
self.config.register_global(**self.default_global_settings)
self.config.register_guild(**self.default_guild_settings)
self.config.register_channel(**self.default_channel_settings)
self.config.register_member(**self.default_member_settings)
self.config.register_user(**self.default_user_settings)
self.cache: dict = {}
self.tban_expiry_task = self.bot.loop.create_task(self.check_tempban_expirations())
self.last_case: dict = defaultdict(dict)
self._ready = asyncio.Event()
async def red_delete_data_for_user(
self,
*,
requester: Literal["discord_deleted_user", "owner", "user", "user_strict"],
user_id: int,
):
if requester != "discord_deleted_user":
return
all_members = await self.config.all_members()
async for guild_id, guild_data in AsyncIter(all_members.items(), steps=100):
if user_id in guild_data:
await self.config.member_from_ids(guild_id, user_id).clear()
await self.config.user_from_id(user_id).clear()
guild_data = await self.config.all_guilds()
async for guild_id, guild_data in AsyncIter(guild_data.items(), steps=100):
if user_id in guild_data["current_tempbans"]:
async with self.config.guild_from_id(guild_id).current_tempbans() as tbs:
try:
tbs.remove(user_id)
except ValueError:
pass
# possible with a context switch between here and getting all guilds
async def initialize(self):
await self._maybe_update_config()
self._ready.set()
async def cog_before_invoke(self, ctx: commands.Context) -> None:
await self._ready.wait()
def cog_unload(self):
self.tban_expiry_task.cancel()
async def _maybe_update_config(self):
"""Maybe update `delete_delay` value set by Config prior to Mod 1.0.0."""
if not await self.config.version():
guild_dict = await self.config.all_guilds()
async for guild_id, info in AsyncIter(guild_dict.items(), steps=25):
delete_repeats = info.get("delete_repeats", False)
if delete_repeats:
val = 3
else:
val = -1
await self.config.guild_from_id(guild_id).delete_repeats.set(val)
await self.config.version.set("1.0.0") # set version of last update
if await self.config.version() < "1.1.0":
message_sent = False
async for e in AsyncIter((await self.config.all_channels()).values(), steps=25):
if e["ignored"] is not False:
msg = _(
"Ignored guilds and channels have been moved. "
"Please use `[p]moveignoredchannels` to migrate the old settings."
)
self.bot.loop.create_task(send_to_owners_with_prefix_replaced(self.bot, msg))
message_sent = True
break
if message_sent is False:
async for e in AsyncIter((await self.config.all_guilds()).values(), steps=25):
if e["ignored"] is not False:
msg = _(
"Ignored guilds and channels have been moved. "
"Please use `[p]moveignoredchannels` to migrate the old settings."
)
self.bot.loop.create_task(
send_to_owners_with_prefix_replaced(self.bot, msg)
)
break
await self.config.version.set("1.1.0")
if await self.config.version() < "1.2.0":
async for e in AsyncIter((await self.config.all_guilds()).values(), steps=25):
if e["delete_delay"] != -1:
msg = _(
"Delete delay settings have been moved. "
"Please use `[p]movedeletedelay` to migrate the old settings."
)
self.bot.loop.create_task(send_to_owners_with_prefix_replaced(self.bot, msg))
break
await self.config.version.set("1.2.0")
if await self.config.version() < "1.3.0":
guild_dict = await self.config.all_guilds()
async for guild_id in AsyncIter(guild_dict.keys(), steps=25):
async with self.config.guild_from_id(guild_id).all() as guild_data:
current_state = guild_data.pop("ban_mention_spam", False)
if current_state is not False:
if "mention_spam" not in guild_data:
guild_data["mention_spam"] = {}
guild_data["mention_spam"]["ban"] = current_state
await self.config.version.set("1.3.0")
@commands.command()
@commands.is_owner()
async def moveignoredchannels(self, ctx: commands.Context) -> None:
"""Move ignored channels and servers to core"""
all_guilds = await self.config.all_guilds()
all_channels = await self.config.all_channels()
for guild_id, settings in all_guilds.items():
await self.bot._config.guild_from_id(guild_id).ignored.set(settings["ignored"])
await self.config.guild_from_id(guild_id).ignored.clear()
for channel_id, settings in all_channels.items():
await self.bot._config.channel_from_id(channel_id).ignored.set(settings["ignored"])
await self.config.channel_from_id(channel_id).clear()
await ctx.send(_("Ignored channels and guilds restored."))
@commands.command()
@commands.is_owner()
async def movedeletedelay(self, ctx: commands.Context) -> None:
"""
Move deletedelay settings to core
"""
all_guilds = await self.config.all_guilds()
for guild_id, settings in all_guilds.items():
await self.bot._config.guild_from_id(guild_id).delete_delay.set(
settings["delete_delay"]
)
await self.config.guild_from_id(guild_id).delete_delay.clear()
await ctx.send(_("Delete delay settings restored."))
|
from collections import defaultdict
from datetime import datetime
from ipaddress import ip_address
import logging
from socket import gethostbyaddr, herror
from typing import List, Optional
from aiohttp.web import middleware
from aiohttp.web_exceptions import HTTPForbidden, HTTPUnauthorized
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.const import HTTP_BAD_REQUEST
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.util.yaml import dump
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
KEY_BANNED_IPS = "ha_banned_ips"
KEY_FAILED_LOGIN_ATTEMPTS = "ha_failed_login_attempts"
KEY_LOGIN_THRESHOLD = "ha_login_threshold"
NOTIFICATION_ID_BAN = "ip-ban"
NOTIFICATION_ID_LOGIN = "http-login"
IP_BANS_FILE = "ip_bans.yaml"
ATTR_BANNED_AT = "banned_at"
SCHEMA_IP_BAN_ENTRY = vol.Schema(
{vol.Optional("banned_at"): vol.Any(None, cv.datetime)}
)
@callback
def setup_bans(hass, app, login_threshold):
"""Create IP Ban middleware for the app."""
app.middlewares.append(ban_middleware)
app[KEY_FAILED_LOGIN_ATTEMPTS] = defaultdict(int)
app[KEY_LOGIN_THRESHOLD] = login_threshold
async def ban_startup(app):
"""Initialize bans when app starts up."""
app[KEY_BANNED_IPS] = await async_load_ip_bans_config(
hass, hass.config.path(IP_BANS_FILE)
)
app.on_startup.append(ban_startup)
@middleware
async def ban_middleware(request, handler):
"""IP Ban middleware."""
if KEY_BANNED_IPS not in request.app:
_LOGGER.error("IP Ban middleware loaded but banned IPs not loaded")
return await handler(request)
# Verify if IP is not banned
ip_address_ = ip_address(request.remote)
is_banned = any(
ip_ban.ip_address == ip_address_ for ip_ban in request.app[KEY_BANNED_IPS]
)
if is_banned:
raise HTTPForbidden()
try:
return await handler(request)
except HTTPUnauthorized:
await process_wrong_login(request)
raise
def log_invalid_auth(func):
"""Decorate function to handle invalid auth or failed login attempts."""
async def handle_req(view, request, *args, **kwargs):
"""Try to log failed login attempts if response status >= 400."""
resp = await func(view, request, *args, **kwargs)
if resp.status >= HTTP_BAD_REQUEST:
await process_wrong_login(request)
return resp
return handle_req
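# Illustrative sketch (not part of the original module): log_invalid_auth is
# meant to wrap a view handler so failed responses feed process_wrong_login.
# The ExampleLoginView class and its post handler are hypothetical.
#
#     class ExampleLoginView(HomeAssistantView):
#         @log_invalid_auth
#         async def post(self, request):
#             ...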
async def process_wrong_login(request):
"""Process a wrong login attempt.
Increase failed login attempts counter for remote IP address.
    Add an IP ban entry if the number of failed login attempts exceeds the threshold.
"""
hass = request.app["hass"]
remote_addr = ip_address(request.remote)
remote_host = request.remote
try:
remote_host, _, _ = await hass.async_add_executor_job(
gethostbyaddr, request.remote
)
except herror:
pass
msg = f"Login attempt or request with invalid authentication from {remote_host} ({remote_addr})"
user_agent = request.headers.get("user-agent")
if user_agent:
msg = f"{msg} ({user_agent})"
_LOGGER.warning(msg)
hass.components.persistent_notification.async_create(
msg, "Login attempt failed", NOTIFICATION_ID_LOGIN
)
# Check if ban middleware is loaded
if KEY_BANNED_IPS not in request.app or request.app[KEY_LOGIN_THRESHOLD] < 1:
return
request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr] += 1
# Supervisor IP should never be banned
if (
"hassio" in hass.config.components
and hass.components.hassio.get_supervisor_ip() == str(remote_addr)
):
return
if (
request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr]
>= request.app[KEY_LOGIN_THRESHOLD]
):
new_ban = IpBan(remote_addr)
request.app[KEY_BANNED_IPS].append(new_ban)
await hass.async_add_executor_job(
update_ip_bans_config, hass.config.path(IP_BANS_FILE), new_ban
)
_LOGGER.warning("Banned IP %s for too many login attempts", remote_addr)
hass.components.persistent_notification.async_create(
f"Too many login attempts from {remote_addr}",
"Banning IP address",
NOTIFICATION_ID_BAN,
)
async def process_success_login(request):
"""Process a success login attempt.
Reset failed login attempts counter for remote IP address.
No release IP address from banned list function, it can only be done by
manual modify ip bans config file.
"""
remote_addr = ip_address(request.remote)
# Check if ban middleware is loaded
if KEY_BANNED_IPS not in request.app or request.app[KEY_LOGIN_THRESHOLD] < 1:
return
if (
remote_addr in request.app[KEY_FAILED_LOGIN_ATTEMPTS]
and request.app[KEY_FAILED_LOGIN_ATTEMPTS][remote_addr] > 0
):
_LOGGER.debug(
"Login success, reset failed login attempts counter from %s", remote_addr
)
request.app[KEY_FAILED_LOGIN_ATTEMPTS].pop(remote_addr)
class IpBan:
"""Represents banned IP address."""
def __init__(self, ip_ban: str, banned_at: Optional[datetime] = None) -> None:
"""Initialize IP Ban object."""
self.ip_address = ip_address(ip_ban)
self.banned_at = banned_at or datetime.utcnow()
async def async_load_ip_bans_config(hass: HomeAssistant, path: str) -> List[IpBan]:
"""Load list of banned IPs from config file."""
ip_list: List[IpBan] = []
try:
list_ = await hass.async_add_executor_job(load_yaml_config_file, path)
except FileNotFoundError:
return ip_list
except HomeAssistantError as err:
_LOGGER.error("Unable to load %s: %s", path, str(err))
return ip_list
for ip_ban, ip_info in list_.items():
try:
ip_info = SCHEMA_IP_BAN_ENTRY(ip_info)
ip_list.append(IpBan(ip_ban, ip_info["banned_at"]))
except vol.Invalid as err:
_LOGGER.error("Failed to load IP ban %s: %s", ip_info, err)
continue
return ip_list
def update_ip_bans_config(path: str, ip_ban: IpBan) -> None:
"""Update config file with new banned IP address."""
with open(path, "a") as out:
ip_ = {
str(ip_ban.ip_address): {
ATTR_BANNED_AT: ip_ban.banned_at.strftime("%Y-%m-%dT%H:%M:%S")
}
}
out.write("\n")
out.write(dump(ip_))
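# Illustrative note (not part of the original module): after
# update_ip_bans_config runs for e.g. 192.168.1.100, ip_bans.yaml gains an
# entry shaped like SCHEMA_IP_BAN_ENTRY above (the address is hypothetical):
#
#   192.168.1.100:
#     banned_at: '2020-01-01T00:00:00'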
|
import logging
from kalliope.core.NeuronModule import NeuronModule, MissingParameterException, InvalidParameterException
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Neurotransmitter(NeuronModule):
def __init__(self, **kwargs):
super(Neurotransmitter, self).__init__(**kwargs)
# get parameters
self.from_answer_link = kwargs.get('from_answer_link', None)
self.default = kwargs.get('default', None)
self.direct_link = kwargs.get('direct_link', None)
self.is_api_call = kwargs.get('is_api_call', False)
self.answer = kwargs.get('answer', None)
# do some check
if self._is_parameters_ok():
if self.direct_link is not None:
logger.debug("Neurotransmitter directly call to the synapse name: %s" % self.direct_link)
self.run_synapse_by_name(self.direct_link, high_priority=True)
else:
if self.is_api_call:
if self.answer is not None:
self.callback(self.answer)
else:
self.is_waiting_for_answer = True
else:
# the user is using a from_answer_link, we call the stt to get an audio
self.get_audio_from_stt(callback=self.callback)
def callback(self, audio):
"""
The callback used by the STT module to get the linked synapse
        :param audio: the order returned by the STT engine
"""
logger.debug("Neurotransmitter, receiver audio from STT: %s" % audio)
# print self.links
# set a bool to know if we have found a valid answer
if audio is None:
self.run_synapse_by_name(self.default, high_priority=True, is_api_call=self.is_api_call)
else:
found = False
for el in self.from_answer_link:
for answer in el["answers"]:
if self.is_order_matching(audio, answer):
logger.debug("Neurotransmitter: match answer: %s" % answer)
self.run_synapse_by_name(synapse_name=el["synapse"],
user_order=audio,
synapse_order=answer,
high_priority=True,
is_api_call=self.is_api_call)
found = True
break
            if not found:  # the answer does not match any link, run the default synapse
self.run_synapse_by_name(self.default, high_priority=True, is_api_call=self.is_api_call)
def _is_parameters_ok(self):
"""
Check if received links are ok to perform operations
:return: true if the neuron is well configured, raise an exception otherwise
.. raises:: MissingParameterException, InvalidParameterException
"""
# with the neuron the user has the choice of a direct link that call another synapse,
# or a link with an answer caught from the STT engine
# we cannot use at the same time a direct redirection and a link with question
if self.direct_link is not None and self.from_answer_link is not None:
raise InvalidParameterException("neurotransmitter cannot be used with both direct_link and from_answer_link")
if self.direct_link is None and self.from_answer_link is None:
raise MissingParameterException("neurotransmitter must be used with direct_link or from_answer_link")
if self.from_answer_link is not None:
if self.default is None:
raise InvalidParameterException("default parameter is required and must contain a valid synapse name")
for el in self.from_answer_link:
if "synapse" not in el:
raise MissingParameterException("Links must contain a synapse name: %s" % el)
if "answers" not in el:
raise MissingParameterException("Links must contain answers: %s" % el)
return True
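# Illustrative sketch (not part of the original module): a neuron configuration
# that satisfies the checks in _is_parameters_ok above. Synapse names and
# answers are hypothetical.
#
#   - neurotransmitter:
#       from_answer_link:
#         - synapse: "confirm-synapse"
#           answers:
#             - "yes"
#             - "of course"
#         - synapse: "cancel-synapse"
#           answers:
#             - "no"
#       default: "default-synapse"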
|
from datetime import timedelta
import pytest
from speedtest import NoMatchedServers
from homeassistant import data_entry_flow
from homeassistant.components import speedtestdotnet
from homeassistant.components.speedtestdotnet.const import (
CONF_MANUAL,
CONF_SERVER_ID,
CONF_SERVER_NAME,
DOMAIN,
SENSOR_TYPES,
)
from homeassistant.const import CONF_MONITORED_CONDITIONS, CONF_SCAN_INTERVAL
from . import MOCK_SERVERS
from tests.async_mock import patch
from tests.common import MockConfigEntry
@pytest.fixture(name="mock_setup")
def mock_setup():
"""Mock entry setup."""
with patch(
"homeassistant.components.speedtestdotnet.async_setup_entry",
return_value=True,
):
yield
async def test_flow_works(hass, mock_setup):
"""Test user config."""
result = await hass.config_entries.flow.async_init(
speedtestdotnet.DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "SpeedTest"
async def test_import_fails(hass, mock_setup):
"""Test import step fails if server_id is not valid."""
with patch("speedtest.Speedtest") as mock_api:
mock_api.return_value.get_servers.side_effect = NoMatchedServers
result = await hass.config_entries.flow.async_init(
speedtestdotnet.DOMAIN,
context={"source": "import"},
data={
CONF_SERVER_ID: "223",
CONF_MANUAL: True,
CONF_SCAN_INTERVAL: timedelta(minutes=1),
CONF_MONITORED_CONDITIONS: list(SENSOR_TYPES),
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "wrong_server_id"
async def test_import_success(hass, mock_setup):
"""Test import step is successful if server_id is valid."""
with patch("speedtest.Speedtest"):
result = await hass.config_entries.flow.async_init(
speedtestdotnet.DOMAIN,
context={"source": "import"},
data={
CONF_SERVER_ID: "1",
CONF_MANUAL: True,
CONF_SCAN_INTERVAL: timedelta(minutes=1),
CONF_MONITORED_CONDITIONS: list(SENSOR_TYPES),
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "SpeedTest"
assert result["data"][CONF_SERVER_ID] == "1"
assert result["data"][CONF_MANUAL] is True
assert result["data"][CONF_SCAN_INTERVAL] == 1
async def test_options(hass):
"""Test updating options."""
entry = MockConfigEntry(
domain=DOMAIN,
title="SpeedTest",
data={},
options={},
)
entry.add_to_hass(hass)
with patch("speedtest.Speedtest") as mock_api:
mock_api.return_value.get_servers.return_value = MOCK_SERVERS
await hass.config_entries.async_setup(entry.entry_id)
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_SERVER_NAME: "Country1 - Sponsor1 - Server1",
CONF_SCAN_INTERVAL: 30,
CONF_MANUAL: False,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_SERVER_NAME: "Country1 - Sponsor1 - Server1",
CONF_SERVER_ID: "1",
CONF_SCAN_INTERVAL: 30,
CONF_MANUAL: False,
}
async def test_integration_already_configured(hass):
"""Test integration is already configured."""
entry = MockConfigEntry(
domain=DOMAIN,
data={},
options={},
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
speedtestdotnet.DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
|
from datetime import timedelta
from typing import TYPE_CHECKING, Any, Generator
from pytest import raises
from homeassistant.components.switcher_kis import (
CONF_AUTO_OFF,
DATA_DEVICE,
DOMAIN,
SERVICE_SET_AUTO_OFF_NAME,
SERVICE_SET_AUTO_OFF_SCHEMA,
SIGNAL_SWITCHER_DEVICE_UPDATE,
)
from homeassistant.const import CONF_ENTITY_ID
from homeassistant.core import Context, callback
from homeassistant.exceptions import Unauthorized, UnknownUser
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.setup import async_setup_component
from homeassistant.util import dt
from .consts import (
DUMMY_AUTO_OFF_SET,
DUMMY_DEVICE_ID,
DUMMY_DEVICE_NAME,
DUMMY_DEVICE_STATE,
DUMMY_ELECTRIC_CURRENT,
DUMMY_IP_ADDRESS,
DUMMY_MAC_ADDRESS,
DUMMY_PHONE_ID,
DUMMY_POWER_CONSUMPTION,
DUMMY_REMAINING_TIME,
MANDATORY_CONFIGURATION,
SWITCH_ENTITY_ID,
)
from tests.common import async_fire_time_changed, async_mock_service
if TYPE_CHECKING:
from aioswitcher.devices import SwitcherV2Device
from tests.common import MockUser
async def test_failed_config(
hass: HomeAssistantType, mock_failed_bridge: Generator[None, Any, None]
) -> None:
"""Test failed configuration."""
assert await async_setup_component(hass, DOMAIN, MANDATORY_CONFIGURATION) is False
async def test_minimal_config(
hass: HomeAssistantType, mock_bridge: Generator[None, Any, None]
) -> None:
"""Test setup with configuration minimal entries."""
assert await async_setup_component(hass, DOMAIN, MANDATORY_CONFIGURATION)
async def test_discovery_data_bucket(
hass: HomeAssistantType, mock_bridge: Generator[None, Any, None]
) -> None:
"""Test the event send with the updated device."""
assert await async_setup_component(hass, DOMAIN, MANDATORY_CONFIGURATION)
await hass.async_block_till_done()
device = hass.data[DOMAIN].get(DATA_DEVICE)
assert device.device_id == DUMMY_DEVICE_ID
assert device.ip_addr == DUMMY_IP_ADDRESS
assert device.mac_addr == DUMMY_MAC_ADDRESS
assert device.name == DUMMY_DEVICE_NAME
assert device.state == DUMMY_DEVICE_STATE
assert device.remaining_time == DUMMY_REMAINING_TIME
assert device.auto_off_set == DUMMY_AUTO_OFF_SET
assert device.power_consumption == DUMMY_POWER_CONSUMPTION
assert device.electric_current == DUMMY_ELECTRIC_CURRENT
assert device.phone_id == DUMMY_PHONE_ID
async def test_set_auto_off_service(
hass: HomeAssistantType,
mock_bridge: Generator[None, Any, None],
mock_api: Generator[None, Any, None],
hass_owner_user: "MockUser",
hass_read_only_user: "MockUser",
) -> None:
"""Test the set_auto_off service."""
assert await async_setup_component(hass, DOMAIN, MANDATORY_CONFIGURATION)
await hass.async_block_till_done()
assert hass.services.has_service(DOMAIN, SERVICE_SET_AUTO_OFF_NAME)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_AUTO_OFF_NAME,
{CONF_ENTITY_ID: SWITCH_ENTITY_ID, CONF_AUTO_OFF: DUMMY_AUTO_OFF_SET},
blocking=True,
context=Context(user_id=hass_owner_user.id),
)
with raises(Unauthorized) as unauthorized_read_only_exc:
await hass.services.async_call(
DOMAIN,
SERVICE_SET_AUTO_OFF_NAME,
{CONF_ENTITY_ID: SWITCH_ENTITY_ID, CONF_AUTO_OFF: DUMMY_AUTO_OFF_SET},
blocking=True,
context=Context(user_id=hass_read_only_user.id),
)
assert unauthorized_read_only_exc.type is Unauthorized
with raises(Unauthorized) as unauthorized_wrong_entity_exc:
await hass.services.async_call(
DOMAIN,
SERVICE_SET_AUTO_OFF_NAME,
{
CONF_ENTITY_ID: "light.not_related_entity",
CONF_AUTO_OFF: DUMMY_AUTO_OFF_SET,
},
blocking=True,
context=Context(user_id=hass_owner_user.id),
)
assert unauthorized_wrong_entity_exc.type is Unauthorized
with raises(UnknownUser) as unknown_user_exc:
await hass.services.async_call(
DOMAIN,
SERVICE_SET_AUTO_OFF_NAME,
{CONF_ENTITY_ID: SWITCH_ENTITY_ID, CONF_AUTO_OFF: DUMMY_AUTO_OFF_SET},
blocking=True,
context=Context(user_id="not_real_user"),
)
assert unknown_user_exc.type is UnknownUser
service_calls = async_mock_service(
hass, DOMAIN, SERVICE_SET_AUTO_OFF_NAME, SERVICE_SET_AUTO_OFF_SCHEMA
)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_AUTO_OFF_NAME,
{CONF_ENTITY_ID: SWITCH_ENTITY_ID, CONF_AUTO_OFF: DUMMY_AUTO_OFF_SET},
)
await hass.async_block_till_done()
assert len(service_calls) == 1
assert str(service_calls[0].data[CONF_AUTO_OFF]) == DUMMY_AUTO_OFF_SET.lstrip("0")
async def test_signal_dispatcher(
hass: HomeAssistantType, mock_bridge: Generator[None, Any, None]
) -> None:
"""Test signal dispatcher dispatching device updates every 4 seconds."""
assert await async_setup_component(hass, DOMAIN, MANDATORY_CONFIGURATION)
await hass.async_block_till_done()
@callback
def verify_update_data(device: "SwitcherV2Device") -> None:
"""Use as callback for signal dispatcher."""
pass
async_dispatcher_connect(hass, SIGNAL_SWITCHER_DEVICE_UPDATE, verify_update_data)
async_fire_time_changed(hass, dt.utcnow() + timedelta(seconds=5))
|
import logging
import pywink
from homeassistant.components.water_heater import (
ATTR_TEMPERATURE,
STATE_ECO,
STATE_ELECTRIC,
STATE_GAS,
STATE_HEAT_PUMP,
STATE_HIGH_DEMAND,
STATE_PERFORMANCE,
SUPPORT_AWAY_MODE,
SUPPORT_OPERATION_MODE,
SUPPORT_TARGET_TEMPERATURE,
WaterHeaterEntity,
)
from homeassistant.const import STATE_OFF, STATE_UNKNOWN, TEMP_CELSIUS
from . import DOMAIN, WinkDevice
_LOGGER = logging.getLogger(__name__)
SUPPORT_FLAGS_HEATER = (
SUPPORT_TARGET_TEMPERATURE | SUPPORT_OPERATION_MODE | SUPPORT_AWAY_MODE
)
ATTR_RHEEM_TYPE = "rheem_type"
ATTR_VACATION_MODE = "vacation_mode"
HA_STATE_TO_WINK = {
STATE_ECO: "eco",
STATE_ELECTRIC: "electric_only",
STATE_GAS: "gas",
STATE_HEAT_PUMP: "heat_pump",
STATE_HIGH_DEMAND: "high_demand",
STATE_OFF: "off",
STATE_PERFORMANCE: "performance",
}
WINK_STATE_TO_HA = {value: key for key, value in HA_STATE_TO_WINK.items()}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Wink water heater devices."""
for water_heater in pywink.get_water_heaters():
_id = water_heater.object_id() + water_heater.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkWaterHeater(water_heater, hass)])
class WinkWaterHeater(WinkDevice, WaterHeaterEntity):
"""Representation of a Wink water heater."""
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS_HEATER
@property
def temperature_unit(self):
"""Return the unit of measurement."""
# The Wink API always returns temp in Celsius
return TEMP_CELSIUS
@property
def device_state_attributes(self):
"""Return the optional device state attributes."""
data = {}
data[ATTR_VACATION_MODE] = self.wink.vacation_mode_enabled()
data[ATTR_RHEEM_TYPE] = self.wink.rheem_type()
return data
@property
def current_operation(self):
"""
        Return current operation, one of the following:
        ["eco", "performance", "heat_pump",
        "high_demand", "electric_only", "gas"]
"""
if not self.wink.is_on():
current_op = STATE_OFF
else:
current_op = WINK_STATE_TO_HA.get(self.wink.current_mode())
if current_op is None:
current_op = STATE_UNKNOWN
return current_op
@property
def operation_list(self):
"""List of available operation modes."""
op_list = ["off"]
modes = self.wink.modes()
for mode in modes:
if mode == "aux":
continue
ha_mode = WINK_STATE_TO_HA.get(mode)
if ha_mode is not None:
op_list.append(ha_mode)
else:
error = (
"Invalid operation mode mapping. "
f"{mode} doesn't map. Please report this."
)
_LOGGER.error(error)
return op_list
def set_temperature(self, **kwargs):
"""Set new target temperature."""
target_temp = kwargs.get(ATTR_TEMPERATURE)
self.wink.set_temperature(target_temp)
def set_operation_mode(self, operation_mode):
"""Set operation mode."""
op_mode_to_set = HA_STATE_TO_WINK.get(operation_mode)
self.wink.set_operation_mode(op_mode_to_set)
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self.wink.current_set_point()
def turn_away_mode_on(self):
"""Turn away on."""
self.wink.set_vacation_mode(True)
def turn_away_mode_off(self):
"""Turn away off."""
self.wink.set_vacation_mode(False)
@property
def min_temp(self):
"""Return the minimum temperature."""
return self.wink.min_set_point()
@property
def max_temp(self):
"""Return the maximum temperature."""
return self.wink.max_set_point()
|
import json
import pytest
from lemur.tests.factories import UserFactory, RoleFactory
from lemur.users.views import * # noqa
from .vectors import (
VALID_ADMIN_API_TOKEN,
VALID_ADMIN_HEADER_TOKEN,
VALID_USER_HEADER_TOKEN,
)
def test_user_input_schema(client):
from lemur.users.schemas import UserInputSchema
input_data = {
"username": "example",
"password": "1233432",
"email": "[email protected]",
}
data, errors = UserInputSchema().load(input_data)
assert not errors
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
("", 401),
],
)
def test_user_get(client, token, status):
assert (
client.get(api.url_for(Users, user_id=1), headers=token).status_code == status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_user_post_(client, token, status):
assert (
client.post(api.url_for(Users, user_id=1), data={}, headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 403),
(VALID_ADMIN_HEADER_TOKEN, 400),
(VALID_ADMIN_API_TOKEN, 400),
("", 401),
],
)
def test_user_put(client, token, status):
assert (
client.put(api.url_for(Users, user_id=1), data={}, headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_user_delete(client, token, status):
assert (
client.delete(api.url_for(Users, user_id=1), headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_user_patch(client, token, status):
assert (
client.patch(api.url_for(Users, user_id=1), data={}, headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 403),
(VALID_ADMIN_HEADER_TOKEN, 400),
(VALID_ADMIN_API_TOKEN, 400),
("", 401),
],
)
def test_user_list_post_(client, token, status):
assert (
client.post(api.url_for(UsersList), data={}, headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
("", 401),
],
)
def test_user_list_get(client, token, status):
assert client.get(api.url_for(UsersList), headers=token).status_code == status
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_user_list_delete(client, token, status):
assert client.delete(api.url_for(UsersList), headers=token).status_code == status
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_user_list_patch(client, token, status):
assert (
client.patch(api.url_for(UsersList), data={}, headers=token).status_code
== status
)
def test_sensitive_filter(client):
resp = client.get(
api.url_for(UsersList) + "?filter=password;a", headers=VALID_ADMIN_HEADER_TOKEN
)
assert "'password' is not sortable or filterable" in resp.json["message"]
def test_sensitive_sort(client):
resp = client.get(
api.url_for(UsersList) + "?sortBy=password&sortDir=asc",
headers=VALID_ADMIN_HEADER_TOKEN,
)
assert "'password' is not sortable or filterable" in resp.json["message"]
def test_user_role_changes(client, session):
user = UserFactory()
role1 = RoleFactory()
role2 = RoleFactory()
session.flush()
data = {
"active": True,
"id": user.id,
"username": user.username,
"email": user.email,
"roles": [{"id": role1.id}, {"id": role2.id}],
}
# PUT two roles
resp = client.put(
api.url_for(Users, user_id=user.id),
data=json.dumps(data),
headers=VALID_ADMIN_HEADER_TOKEN,
)
assert resp.status_code == 200
assert len(resp.json["roles"]) == 2
assert set(user.roles) == {role1, role2}
# Remove one role and PUT again
del data["roles"][1]
resp = client.put(
api.url_for(Users, user_id=user.id),
data=json.dumps(data),
headers=VALID_ADMIN_HEADER_TOKEN,
)
assert resp.status_code == 200
assert len(resp.json["roles"]) == 1
assert set(user.roles) == {role1}
|
from unittest.mock import patch
import iaqualink
import pytest
from homeassistant.components.iaqualink import config_flow
from tests.common import MockConfigEntry, mock_coro
DATA = {"username": "[email protected]", "password": "pass"}
@pytest.mark.parametrize("step", ["import", "user"])
async def test_already_configured(hass, step):
"""Test config flow when iaqualink component is already setup."""
MockConfigEntry(domain="iaqualink", data=DATA).add_to_hass(hass)
flow = config_flow.AqualinkFlowHandler()
flow.hass = hass
flow.context = {}
fname = f"async_step_{step}"
func = getattr(flow, fname)
result = await func(DATA)
assert result["type"] == "abort"
@pytest.mark.parametrize("step", ["import", "user"])
async def test_without_config(hass, step):
"""Test with no configuration."""
flow = config_flow.AqualinkFlowHandler()
flow.hass = hass
flow.context = {}
fname = f"async_step_{step}"
func = getattr(flow, fname)
result = await func()
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {}
@pytest.mark.parametrize("step", ["import", "user"])
async def test_with_invalid_credentials(hass, step):
"""Test config flow with invalid username and/or password."""
flow = config_flow.AqualinkFlowHandler()
flow.hass = hass
fname = f"async_step_{step}"
func = getattr(flow, fname)
with patch(
"iaqualink.AqualinkClient.login", side_effect=iaqualink.AqualinkLoginException
):
result = await func(DATA)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert result["errors"] == {"base": "cannot_connect"}
@pytest.mark.parametrize("step", ["import", "user"])
async def test_with_existing_config(hass, step):
"""Test with existing configuration."""
flow = config_flow.AqualinkFlowHandler()
flow.hass = hass
flow.context = {}
fname = f"async_step_{step}"
func = getattr(flow, fname)
with patch("iaqualink.AqualinkClient.login", return_value=mock_coro(None)):
result = await func(DATA)
assert result["type"] == "create_entry"
assert result["title"] == DATA["username"]
assert result["data"] == DATA
|
from tqdm import tqdm
from matchzoo.data_pack import DataPack
from matchzoo.engine.base_preprocessor import BasePreprocessor
from .chain_transform import chain_transform
from .build_vocab_unit import build_vocab_unit
from . import units
tqdm.pandas()
class DSSMPreprocessor(BasePreprocessor):
"""DSSM Model preprocessor."""
def __init__(self, with_word_hashing: bool = True):
"""
DSSM Model preprocessor.
        The word hashing step can eat up a lot of memory. To work around
        this problem, set `with_word_hashing` to `False` and use a
:class:`matchzoo.DynamicDataGenerator` with a
:class:`matchzoo.preprocessor.units.WordHashing`.
:param with_word_hashing: Include a word hashing step if `True`.
Example:
>>> import matchzoo as mz
>>> train_data = mz.datasets.toy.load_data()
>>> test_data = mz.datasets.toy.load_data(stage='test')
>>> dssm_preprocessor = mz.preprocessors.DSSMPreprocessor()
>>> train_data_processed = dssm_preprocessor.fit_transform(
... train_data, verbose=0
... )
>>> type(train_data_processed)
<class 'matchzoo.data_pack.data_pack.DataPack'>
>>> test_data_transformed = dssm_preprocessor.transform(test_data,
... verbose=0)
>>> type(test_data_transformed)
<class 'matchzoo.data_pack.data_pack.DataPack'>
"""
super().__init__()
self._with_word_hashing = with_word_hashing
def fit(self, data_pack: DataPack, verbose: int = 1):
"""
Fit pre-processing context for transformation.
:param verbose: Verbosity.
:param data_pack: data_pack to be preprocessed.
:return: class:`DSSMPreprocessor` instance.
"""
func = chain_transform(self._default_units())
data_pack = data_pack.apply_on_text(func, verbose=verbose)
vocab_unit = build_vocab_unit(data_pack, verbose=verbose)
self._context['vocab_unit'] = vocab_unit
vocab_size = len(vocab_unit.state['term_index'])
self._context['vocab_size'] = vocab_size
self._context['embedding_input_dim'] = vocab_size
self._context['input_shapes'] = [(vocab_size,), (vocab_size,)]
return self
def transform(self, data_pack: DataPack, verbose: int = 1) -> DataPack:
"""
Apply transformation on data, create `tri-letter` representation.
:param data_pack: Inputs to be preprocessed.
:param verbose: Verbosity.
:return: Transformed data as :class:`DataPack` object.
"""
data_pack = data_pack.copy()
units_ = self._default_units()
if self._with_word_hashing:
term_index = self._context['vocab_unit'].state['term_index']
units_.append(units.WordHashing(term_index))
func = chain_transform(units_)
data_pack.apply_on_text(func, inplace=True, verbose=verbose)
return data_pack
@classmethod
def _default_units(cls) -> list:
"""Prepare needed process units."""
return [
units.Tokenize(),
units.Lowercase(),
units.PuncRemoval(),
units.StopRemoval(),
units.NgramLetter(),
]
@property
def with_word_hashing(self):
"""`with_word_hashing` getter."""
return self._with_word_hashing
@with_word_hashing.setter
def with_word_hashing(self, value):
"""`with_word_hashing` setter."""
self._with_word_hashing = value
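# Illustrative sketch (not part of the original module): deferring word hashing
# to data generation time, as the class docstring suggests. The exact
# DynamicDataGenerator arguments are assumptions and have not been checked
# against the matchzoo API.
#
#     import matchzoo as mz
#     preprocessor = mz.preprocessors.DSSMPreprocessor(with_word_hashing=False)
#     train_processed = preprocessor.fit_transform(train_data, verbose=0)
#     term_index = preprocessor.context['vocab_unit'].state['term_index']
#     hashing = mz.preprocessors.units.WordHashing(term_index)
#     data_gen = mz.DynamicDataGenerator(hashing.transform,
#                                        data_pack=train_processed)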
|
from time import sleep
import mock
from behave import then
from behave import when
from itest_utils import get_service_connection_string
from itest_utils import update_context_marathon_config
from marathon.exceptions import MarathonHttpError
from paasta_tools import marathon_tools
from paasta_tools import mesos_maintenance
from paasta_tools import setup_marathon_job
from paasta_tools.autoscaling.autoscaling_service_lib import (
set_instances_for_marathon_service,
)
from paasta_tools.marathon_tools import MarathonServiceConfig
from paasta_tools.utils import decompose_job_id
from paasta_tools.utils import SystemPaastaConfig
def run_setup_marathon_job_no_apps_found(context):
update_context_marathon_config(context)
with mock.patch.object(
SystemPaastaConfig, "get_zk_hosts", autospec=True, return_value=context.zk_hosts
), mock.patch(
"paasta_tools.setup_marathon_job.parse_args", autospec=True
) as mock_parse_args, mock.patch.object(
MarathonServiceConfig,
"format_marathon_app_dict",
autospec=True,
return_value=context.marathon_complete_config,
), mock.patch(
"paasta_tools.setup_marathon_job.monitoring_tools.send_event", autospec=True
), mock.patch(
"paasta_tools.setup_marathon_job.marathon_tools.get_all_marathon_apps",
autospec=True,
return_value=[],
):
mock_parse_args.return_value = mock.Mock(
verbose=True,
soa_dir=context.soa_dir,
service_instance_list=[context.job_id],
)
try:
setup_marathon_job.main()
except SystemExit:
pass
def run_setup_marathon_job(context):
update_context_marathon_config(context)
with mock.patch.object(
SystemPaastaConfig, "get_zk_hosts", autospec=True, return_value=context.zk_hosts
), mock.patch(
"paasta_tools.setup_marathon_job.parse_args", autospec=True
) as mock_parse_args, mock.patch.object(
MarathonServiceConfig,
"format_marathon_app_dict",
autospec=True,
return_value=context.marathon_complete_config,
), mock.patch(
"paasta_tools.setup_marathon_job.monitoring_tools.send_event", autospec=True
):
mock_parse_args.return_value = mock.Mock(
verbose=True,
soa_dir=context.soa_dir,
service_instance_list=[context.job_id],
)
try:
setup_marathon_job.main()
except (SystemExit, MarathonHttpError):
pass
@when("we set up an app to use zookeeper scaling with {number:d} max instances")
def setup_zookeeper(context, number):
context.max_instances = number
@when('we create a marathon app called "{job_id}" with {number:d} instance(s)')
def create_app_with_instances(context, job_id, number):
create_app_with_instances_constraints(context, job_id, number, str(None))
@when(
'we create a marathon app called "{job_id}" with {number:d} instance(s) with no apps found running'
)
def create_app_with_instances_with_race(context, job_id, number):
create_app_with_instances_constraints(
context, job_id, number, str(None), no_apps_running=True
)
@when(
'we create a marathon app called "{job_id}" with {number:d} instance(s) and constraints {constraints}'
)
def create_app_with_instances_constraints(
context, job_id, number, constraints, no_apps_running=False
):
set_number_instances(context, number)
context.job_id = job_id
(service, instance, _, __) = decompose_job_id(job_id)
context.service = service
context.instance = instance
context.zk_hosts = "%s/mesos-testcluster" % get_service_connection_string(
"zookeeper"
)
context.constraints = constraints
update_context_marathon_config(context)
context.app_id = context.marathon_complete_config["id"]
context.new_id = (
context.app_id
) # for compatibility with bounces_steps.there_are_num_which_tasks
if no_apps_running:
run_setup_marathon_job_no_apps_found(context)
else:
run_setup_marathon_job(context)
@when("we set the number of instances to {number:d}")
def set_number_instances(context, number):
context.instances = number
@when("we run setup_marathon_job until it has {number:d} task(s)")
def run_until_number_tasks(context, number):
for _ in range(20):
with mock.patch(
"paasta_tools.mesos_maintenance.load_credentials", autospec=True
) as mock_load_credentials:
mock_load_credentials.side_effect = mesos_maintenance.load_credentials(
mesos_secrets="/etc/mesos-slave-secret"
)
run_setup_marathon_job(context)
sleep(0.5)
if context.current_client.get_app(context.app_id).instances == number:
return
assert context.current_client.get_app(context.app_id).instances == number
@when(
'we set the instance count in zookeeper for service "{service}" instance "{instance}" to {number:d}'
)
def zookeeper_scale_job(context, service, instance, number):
with mock.patch.object(
SystemPaastaConfig, "get_zk_hosts", autospec=True, return_value=context.zk_hosts
):
set_instances_for_marathon_service(
service, instance, number, soa_dir=context.soa_dir
)
@then("we should see it in the list of apps on shard {shard_number:d}")
@then("we should see it in the list of apps")
def see_it_in_list(context, shard_number=None):
full_list = []
if shard_number is None:
for client in context.marathon_clients.get_all_clients():
full_list.extend(marathon_tools.list_all_marathon_app_ids(client))
else:
full_list.extend(
marathon_tools.list_all_marathon_app_ids(
context.marathon_clients.current[shard_number]
)
)
assert context.app_id in full_list, (context.app_id, full_list)
@then("we should not see it in the list of apps on shard {shard_number:d}")
@then("we should not see it in the list of apps")
def not_see_it_in_list(context, shard_number=None):
full_list = []
if shard_number is None:
for client in context.marathon_clients.get_all_clients():
full_list.extend(marathon_tools.list_all_marathon_app_ids(client))
else:
full_list.extend(
marathon_tools.list_all_marathon_app_ids(
context.marathon_clients.current[shard_number]
)
)
assert context.app_id not in full_list
@then("we can run get_app")
def can_run_get_app(context):
assert context.current_client.get_app(context.app_id)
@then("we should see the number of instances become {number:d}")
def assert_instances_equals(context, number):
attempts = 0
while attempts < 10:
try:
assert context.current_client.get_app(context.app_id).instances == number
return
except AssertionError:
attempts += 1
sleep(5)
assert context.current_client.get_app(context.app_id).instances == number
@when("we mark a host it is running on as at-risk")
def mark_host_running_on_at_risk(context):
app = context.current_client.get_app(context.new_id)
tasks = app.tasks
host = tasks[0].host
mark_host_at_risk(context, host)
@when('we mark the host "{host}" as at-risk')
def mark_host_at_risk(context, host):
start = mesos_maintenance.datetime_to_nanoseconds(mesos_maintenance.now())
duration = mesos_maintenance.parse_timedelta("1h")
with mock.patch(
"paasta_tools.mesos_maintenance.get_principal", autospec=True
) as mock_get_principal, mock.patch(
"paasta_tools.mesos_maintenance.get_secret", autospec=True
) as mock_get_secret:
credentials = mesos_maintenance.load_credentials(
mesos_secrets="/etc/mesos-slave-secret"
)
mock_get_principal.return_value = credentials.principal
mock_get_secret.return_value = credentials.secret
mesos_maintenance.drain([host], start, duration)
context.at_risk_host = host
@then("there should be {number:d} tasks on that at-risk host")
def tasks_on_that_at_risk_host_drained(context, number):
tasks_on_host_drained(context, number, context.at_risk_host)
@then('there should be {number:d} tasks on the host "{host}"')
def tasks_on_host_drained(context, number, host):
app_id = context.new_id
tasks = context.current_client.list_tasks(app_id)
count = 0
for task in tasks:
if task.host == host:
count += 1
assert count == number
|
from homeassistant.components.water_heater import (
STATE_ECO,
STATE_OFF,
STATE_ON,
SUPPORT_OPERATION_MODE,
WaterHeaterEntity,
)
from homeassistant.const import TEMP_CELSIUS
from . import DATA_HIVE, DOMAIN, HiveEntity, refresh_system
SUPPORT_FLAGS_HEATER = SUPPORT_OPERATION_MODE
HIVE_TO_HASS_STATE = {"SCHEDULE": STATE_ECO, "ON": STATE_ON, "OFF": STATE_OFF}
HASS_TO_HIVE_STATE = {STATE_ECO: "SCHEDULE", STATE_ON: "ON", STATE_OFF: "OFF"}
SUPPORT_WATER_HEATER = [STATE_ECO, STATE_ON, STATE_OFF]
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Hive water heater devices."""
if discovery_info is None:
return
session = hass.data.get(DATA_HIVE)
devs = []
for dev in discovery_info:
devs.append(HiveWaterHeater(session, dev))
add_entities(devs)
class HiveWaterHeater(HiveEntity, WaterHeaterEntity):
"""Hive Water Heater Device."""
@property
def unique_id(self):
"""Return unique ID of entity."""
return self._unique_id
@property
def device_info(self):
"""Return device information."""
return {"identifiers": {(DOMAIN, self.unique_id)}, "name": self.name}
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS_HEATER
@property
def name(self):
"""Return the name of the water heater."""
if self.node_name is None:
self.node_name = "Hot Water"
return self.node_name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_operation(self):
"""Return current operation."""
return HIVE_TO_HASS_STATE[self.session.hotwater.get_mode(self.node_id)]
@property
def operation_list(self):
"""List of available operation modes."""
return SUPPORT_WATER_HEATER
@refresh_system
def set_operation_mode(self, operation_mode):
"""Set operation mode."""
new_mode = HASS_TO_HIVE_STATE[operation_mode]
self.session.hotwater.set_mode(self.node_id, new_mode)
def update(self):
"""Update all Node data from Hive."""
self.session.core.update_data(self.node_id)
|
import sys
if sys.platform.startswith('win'):
import ctypes
from ctypes import windll
from ctypes.wintypes import (BOOL, DOUBLE, DWORD, HBITMAP, HDC, HGDIOBJ, # noqa
HWND, INT, LPARAM, LONG, UINT, WORD) # noqa
SRCCOPY = 13369376
DIB_RGB_COLORS = BI_RGB = 0
class RECT(ctypes.Structure):
_fields_ = [('left', ctypes.c_long),
('top', ctypes.c_long),
('right', ctypes.c_long),
('bottom', ctypes.c_long)]
class BITMAPINFOHEADER(ctypes.Structure):
_fields_ = [('biSize', DWORD), ('biWidth', LONG), ('biHeight', LONG),
('biPlanes', WORD), ('biBitCount', WORD),
('biCompression', DWORD), ('biSizeImage', DWORD),
('biXPelsPerMeter', LONG), ('biYPelsPerMeter', LONG),
('biClrUsed', DWORD), ('biClrImportant', DWORD)]
class BITMAPINFO(ctypes.Structure):
_fields_ = [('bmiHeader', BITMAPINFOHEADER), ('bmiColors', DWORD * 3)]
# Function shorthands
GetClientRect = windll.user32.GetClientRect
GetWindowRect = windll.user32.GetWindowRect
PrintWindow = windll.user32.PrintWindow
GetWindowThreadProcessId = windll.user32.GetWindowThreadProcessId
IsWindowVisible = windll.user32.IsWindowVisible
EnumWindows = windll.user32.EnumWindows
EnumWindowsProc = ctypes.WINFUNCTYPE(ctypes.c_bool,
ctypes.POINTER(ctypes.c_int),
ctypes.POINTER(ctypes.c_int))
GetWindowDC = windll.user32.GetWindowDC
CreateCompatibleDC = windll.gdi32.CreateCompatibleDC
CreateCompatibleBitmap = windll.gdi32.CreateCompatibleBitmap
SelectObject = windll.gdi32.SelectObject
BitBlt = windll.gdi32.BitBlt
DeleteObject = windll.gdi32.DeleteObject
GetDIBits = windll.gdi32.GetDIBits
# Arg types
windll.user32.GetWindowDC.argtypes = [HWND]
windll.gdi32.CreateCompatibleDC.argtypes = [HDC]
windll.gdi32.CreateCompatibleBitmap.argtypes = [HDC, INT, INT]
windll.gdi32.SelectObject.argtypes = [HDC, HGDIOBJ]
windll.gdi32.BitBlt.argtypes = [HDC, INT, INT, INT, INT, HDC, INT, INT, DWORD]
windll.gdi32.DeleteObject.argtypes = [HGDIOBJ]
windll.gdi32.GetDIBits.argtypes = [HDC, HBITMAP, UINT, UINT, ctypes.c_void_p,
ctypes.POINTER(BITMAPINFO), UINT]
# Return types
windll.user32.GetWindowDC.restypes = HDC
windll.gdi32.CreateCompatibleDC.restypes = HDC
windll.gdi32.CreateCompatibleBitmap.restypes = HBITMAP
windll.gdi32.SelectObject.restypes = HGDIOBJ
windll.gdi32.BitBlt.restypes = BOOL
windll.gdi32.GetDIBits.restypes = INT
windll.gdi32.DeleteObject.restypes = BOOL
def win_for_pid(pid):
""" Get the windows-handle for the first visible window of the
process with the given id.
"""
handles = []
def called_for_each_win(hwnd, lParam):
if not IsWindowVisible(hwnd):
return True
            # get the process id from the window handle
            p_id = ctypes.c_int()
            GetWindowThreadProcessId(hwnd, ctypes.byref(p_id))
if p_id.value == pid:
handles.append(hwnd)
return False
return True
EnumWindows(EnumWindowsProc(called_for_each_win), 0)
if handles:
return handles[0]
else:
return None
def screenshot(pid, client=True):
""" Grab a screenshot of the first visible window of the process
with the given id. If client is True, no Window decoration is shown.
This code is derived from https://github.com/BoboTiG/python-mss
"""
# Get handle
hwnd = win_for_pid(pid)
# Get window dimensions
rect = RECT()
if client:
GetClientRect(hwnd, ctypes.byref(rect))
else:
GetWindowRect(hwnd, ctypes.byref(rect))
left, right, top, bottom = rect.left, rect.right, rect.top, rect.bottom
w, h = right - left, bottom - top
hwndDC = saveDC = bmp = None
try:
# Get device contexts
hwndDC = GetWindowDC(hwnd)
saveDC = CreateCompatibleDC(hwndDC)
# Get bitmap
bmp = CreateCompatibleBitmap(hwndDC, w, h)
SelectObject(saveDC, bmp)
if client:
PrintWindow(hwnd, saveDC, 1) # todo: result is never used??
else:
PrintWindow(hwnd, saveDC, 0)
# Init bitmap info
        # We grab the image as 32 bits per pixel (BGRX) so rows are word
        # aligned with no stride padding, then convert to RGB below
buffer_len = h * w * 4
bmi = BITMAPINFO()
bmi.bmiHeader.biSize = ctypes.sizeof(BITMAPINFOHEADER)
bmi.bmiHeader.biWidth = w
        bmi.bmiHeader.biHeight = -h  # negative height means a top-down DIB (rows start at the top)
bmi.bmiHeader.biPlanes = 1 # Always 1
bmi.bmiHeader.biBitCount = 32
bmi.bmiHeader.biCompression = BI_RGB
# Blit
image = ctypes.create_string_buffer(buffer_len)
bits = windll.gdi32.GetDIBits(saveDC, bmp, 0, h, image, bmi, DIB_RGB_COLORS)
assert bits == h
        # Convert pixel values: BGRX to RGB
image2 = ctypes.create_string_buffer(h*w*3)
image2[0::3] = image[2::4]
image2[1::3] = image[1::4]
image2[2::3] = image[0::4]
return bytes(image2), (w, h, 3)
finally:
        # Clean up: release the window DC, delete the compatible DC and the bitmap
        if bmp:
            DeleteObject(bmp)
        if saveDC:
            DeleteDC(saveDC)
        if hwndDC:
            ReleaseDC(hwnd, hwndDC)
if __name__ == '__main__':
    im, shape = screenshot(5144, True)
    from flexx.util import icon
    png = icon.write_png(im, shape)
    with open('C:\\Users\\Almar\\test.png', 'wb') as f:
        f.write(png)
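# To eyeball the raw buffer without flexx, Pillow can be used instead
# (illustrative; assumes Pillow is installed):
#     from PIL import Image
#     Image.frombytes('RGB', shape[:2], im).save('test.png')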
|
from homeassistant.components.zwave import const, sensor
import homeassistant.const
from tests.mock.zwave import MockEntityValues, MockNode, MockValue, value_changed
def test_get_device_detects_none(mock_openzwave):
"""Test get_device returns None."""
node = MockNode()
value = MockValue(data=0, node=node)
values = MockEntityValues(primary=value)
device = sensor.get_device(node=node, values=values, node_config={})
assert device is None
def test_get_device_detects_alarmsensor(mock_openzwave):
"""Test get_device returns a Z-Wave alarmsensor."""
node = MockNode(
command_classes=[const.COMMAND_CLASS_ALARM, const.COMMAND_CLASS_SENSOR_ALARM]
)
value = MockValue(data=0, node=node)
values = MockEntityValues(primary=value)
device = sensor.get_device(node=node, values=values, node_config={})
assert isinstance(device, sensor.ZWaveAlarmSensor)
def test_get_device_detects_multilevelsensor(mock_openzwave):
"""Test get_device returns a Z-Wave multilevel sensor."""
node = MockNode(
command_classes=[
const.COMMAND_CLASS_SENSOR_MULTILEVEL,
const.COMMAND_CLASS_METER,
]
)
value = MockValue(data=0, node=node)
values = MockEntityValues(primary=value)
device = sensor.get_device(node=node, values=values, node_config={})
assert isinstance(device, sensor.ZWaveMultilevelSensor)
assert device.force_update
def test_get_device_detects_multilevel_meter(mock_openzwave):
"""Test get_device returns a Z-Wave multilevel sensor."""
node = MockNode(command_classes=[const.COMMAND_CLASS_METER])
value = MockValue(data=0, node=node, type=const.TYPE_DECIMAL)
values = MockEntityValues(primary=value)
device = sensor.get_device(node=node, values=values, node_config={})
assert isinstance(device, sensor.ZWaveMultilevelSensor)
def test_get_device_detects_battery_sensor(mock_openzwave):
"""Test get_device returns a Z-Wave battery sensor."""
node = MockNode(command_classes=[const.COMMAND_CLASS_BATTERY])
value = MockValue(
data=0,
node=node,
type=const.TYPE_DECIMAL,
command_class=const.COMMAND_CLASS_BATTERY,
)
values = MockEntityValues(primary=value)
device = sensor.get_device(node=node, values=values, node_config={})
assert isinstance(device, sensor.ZWaveBatterySensor)
assert device.device_class == homeassistant.const.DEVICE_CLASS_BATTERY
def test_multilevelsensor_value_changed_temp_fahrenheit(mock_openzwave):
"""Test value changed for Z-Wave multilevel sensor for temperature."""
node = MockNode(
command_classes=[
const.COMMAND_CLASS_SENSOR_MULTILEVEL,
const.COMMAND_CLASS_METER,
]
)
value = MockValue(data=190.95555, units="F", node=node)
values = MockEntityValues(primary=value)
device = sensor.get_device(node=node, values=values, node_config={})
assert device.state == 191.0
assert device.unit_of_measurement == homeassistant.const.TEMP_FAHRENHEIT
value.data = 197.95555
value_changed(value)
assert device.state == 198.0
def test_multilevelsensor_value_changed_temp_celsius(mock_openzwave):
"""Test value changed for Z-Wave multilevel sensor for temperature."""
node = MockNode(
command_classes=[
const.COMMAND_CLASS_SENSOR_MULTILEVEL,
const.COMMAND_CLASS_METER,
]
)
value = MockValue(data=38.85555, units="C", node=node)
values = MockEntityValues(primary=value)
device = sensor.get_device(node=node, values=values, node_config={})
assert device.state == 38.9
assert device.unit_of_measurement == homeassistant.const.TEMP_CELSIUS
value.data = 37.95555
value_changed(value)
assert device.state == 38.0
def test_multilevelsensor_value_changed_other_units(mock_openzwave):
"""Test value changed for Z-Wave multilevel sensor for other units."""
node = MockNode(
command_classes=[
const.COMMAND_CLASS_SENSOR_MULTILEVEL,
const.COMMAND_CLASS_METER,
]
)
value = MockValue(
data=190.95555, units=homeassistant.const.ENERGY_KILO_WATT_HOUR, node=node
)
values = MockEntityValues(primary=value)
device = sensor.get_device(node=node, values=values, node_config={})
assert device.state == 190.96
assert device.unit_of_measurement == homeassistant.const.ENERGY_KILO_WATT_HOUR
value.data = 197.95555
value_changed(value)
assert device.state == 197.96
def test_multilevelsensor_value_changed_integer(mock_openzwave):
"""Test value changed for Z-Wave multilevel sensor for other units."""
node = MockNode(
command_classes=[
const.COMMAND_CLASS_SENSOR_MULTILEVEL,
const.COMMAND_CLASS_METER,
]
)
value = MockValue(data=5, units="counts", node=node)
values = MockEntityValues(primary=value)
device = sensor.get_device(node=node, values=values, node_config={})
assert device.state == 5
assert device.unit_of_measurement == "counts"
value.data = 6
value_changed(value)
assert device.state == 6
def test_alarm_sensor_value_changed(mock_openzwave):
"""Test value changed for Z-Wave sensor."""
node = MockNode(
command_classes=[const.COMMAND_CLASS_ALARM, const.COMMAND_CLASS_SENSOR_ALARM]
)
value = MockValue(data=12.34, node=node, units=homeassistant.const.PERCENTAGE)
values = MockEntityValues(primary=value)
device = sensor.get_device(node=node, values=values, node_config={})
assert device.state == 12.34
assert device.unit_of_measurement == homeassistant.const.PERCENTAGE
value.data = 45.67
value_changed(value)
assert device.state == 45.67
|
import copy
import glob
import os
import os.path as op
import shutil
import pathlib
import numpy as np
from numpy.testing import assert_equal
import pytest
from matplotlib import pyplot as plt
from mne import Epochs, read_events, read_evokeds
from mne.io import read_raw_fif
from mne.datasets import testing
from mne.report import Report, open_report, _ReportScraper
from mne.utils import (requires_nibabel, Bunch,
run_tests_if_main, requires_h5py)
from mne.viz import plot_alignment
from mne.io.write import DATE_NONE
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
report_dir = op.join(data_dir, 'MEG', 'sample')
raw_fname = op.join(report_dir, 'sample_audvis_trunc_raw.fif')
ms_fname = op.join(data_dir, 'SSS', 'test_move_anon_raw.fif')
event_fname = op.join(report_dir, 'sample_audvis_trunc_raw-eve.fif')
cov_fname = op.join(report_dir, 'sample_audvis_trunc-cov.fif')
proj_fname = op.join(report_dir, 'sample_audvis_ecg-proj.fif')
fwd_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
trans_fname = op.join(report_dir, 'sample_audvis_trunc-trans.fif')
inv_fname = op.join(report_dir,
'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
mri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz')
base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests',
'data'))
evoked_fname = op.join(base_dir, 'test-ave.fif')
def _get_example_figures():
"""Create two example figures."""
fig1 = plt.plot([1, 2], [1, 2])[0].figure
fig2 = plt.plot([3, 4], [3, 4])[0].figure
return [fig1, fig2]
@pytest.mark.slowtest
@testing.requires_testing_data
def test_render_report(renderer, tmpdir):
"""Test rendering -*.fif files for mne report."""
tempdir = str(tmpdir)
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
raw_fname_new_bids = op.join(tempdir, 'temp_meg.fif')
ms_fname_new = op.join(tempdir, 'temp_ms_raw.fif')
event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
proj_fname_new = op.join(tempdir, 'temp_ecg-proj.fif')
fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
for a, b in [[raw_fname, raw_fname_new],
[raw_fname, raw_fname_new_bids],
[ms_fname, ms_fname_new],
[event_fname, event_fname_new],
[cov_fname, cov_fname_new],
[proj_fname, proj_fname_new],
[fwd_fname, fwd_fname_new],
[inv_fname, inv_fname_new]]:
shutil.copyfile(a, b)
# create and add -epo.fif and -ave.fif files
epochs_fname = op.join(tempdir, 'temp-epo.fif')
evoked_fname = op.join(tempdir, 'temp-ave.fif')
# Speed it up by picking channels
raw = read_raw_fif(raw_fname_new, preload=True)
raw.pick_channels(['MEG 0111', 'MEG 0121', 'EEG 001', 'EEG 002'])
raw.del_proj()
raw.set_eeg_reference(projection=True)
epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2)
epochs.save(epochs_fname, overwrite=True)
# This can take forever (stall Travis), so let's make it fast
# Also, make sure crop range is wide enough to avoid rendering bug
evoked = epochs.average().crop(0.1, 0.2)
evoked.save(evoked_fname)
report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
projs=True)
with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
report.parse_folder(data_path=tempdir, on_error='raise')
assert repr(report)
# Check correct paths and filenames
fnames = glob.glob(op.join(tempdir, '*.fif'))
for fname in fnames:
assert (op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert (''.join(report.html).find(op.basename(fname)) != -1)
assert_equal(len(report.fnames), len(fnames))
assert_equal(len(report.html), len(report.fnames))
assert_equal(len(report.fnames), len(report))
# Check saving functionality
report.data_path = tempdir
fname = op.join(tempdir, 'report.html')
report.save(fname=fname, open_browser=False)
assert (op.isfile(fname))
with open(fname, 'rb') as fid:
html = fid.read().decode('utf-8')
assert '(MaxShield on)' in html
# Projectors in Raw.info
assert '<h4>SSP Projectors</h4>' in html
# Projectors in `proj_fname_new`
assert f'SSP Projectors: {op.basename(proj_fname_new)}' in html
# Evoked in `evoked_fname`
assert f'Evoked: {op.basename(evoked_fname)} ({evoked.comment})' in html
assert 'Topomap (ch_type =' in html
assert f'Evoked: {op.basename(evoked_fname)} (GFPs)' in html
assert_equal(len(report.html), len(fnames))
assert_equal(len(report.html), len(report.fnames))
# Check saving same report to new filename
report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
assert (op.isfile(op.join(tempdir, 'report2.html')))
# Check overwriting file
report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
overwrite=True)
assert (op.isfile(op.join(tempdir, 'report.html')))
# Check pattern matching with multiple patterns
pattern = ['*raw.fif', '*eve.fif']
with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
report.parse_folder(data_path=tempdir, pattern=pattern)
assert (repr(report))
    fnames = glob.glob(op.join(tempdir, '*raw.fif')) + \
        glob.glob(op.join(tempdir, '*eve.fif'))
for fname in fnames:
assert (op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert (''.join(report.html).find(op.basename(fname)) != -1)
pytest.raises(ValueError, Report, image_format='foo')
pytest.raises(ValueError, Report, image_format=None)
# SVG rendering
report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir,
image_format='svg')
tempdir = pathlib.Path(tempdir) # test using pathlib.Path
with pytest.warns(RuntimeWarning, match='Cannot render MRI'):
report.parse_folder(data_path=tempdir, on_error='raise')
# ndarray support smoke test
report.add_figs_to_section(np.zeros((2, 3, 3)), 'caption', 'section')
with pytest.raises(TypeError, match='figure must be a'):
report.add_figs_to_section('foo', 'caption', 'section')
with pytest.raises(TypeError, match='figure must be a'):
report.add_figs_to_section(['foo'], 'caption', 'section')
@testing.requires_testing_data
def test_report_raw_psd_and_date(tmpdir):
"""Test report raw PSD and DATE_NONE functionality."""
with pytest.raises(TypeError, match='dict'):
Report(raw_psd='foo')
tempdir = str(tmpdir)
raw = read_raw_fif(raw_fname).crop(0, 1.).load_data()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
raw.save(raw_fname_new)
report = Report(raw_psd=True)
report.parse_folder(data_path=tempdir, render_bem=False,
on_error='raise')
assert isinstance(report.html, list)
assert 'PSD' in ''.join(report.html)
assert 'GMT' in ''.join(report.html)
# test new anonymize functionality
report = Report()
raw.anonymize()
raw.save(raw_fname_new, overwrite=True)
report.parse_folder(data_path=tempdir, render_bem=False,
on_error='raise')
assert isinstance(report.html, list)
assert 'GMT' in ''.join(report.html)
# DATE_NONE functionality
report = Report()
# old style (pre 0.20) date anonymization
raw.info['meas_date'] = None
for key in ('file_id', 'meas_id'):
value = raw.info.get(key)
if value is not None:
assert 'msecs' not in value
value['secs'] = DATE_NONE[0]
value['usecs'] = DATE_NONE[1]
raw.save(raw_fname_new, overwrite=True)
report.parse_folder(data_path=tempdir, render_bem=False,
on_error='raise')
assert isinstance(report.html, list)
assert 'GMT' not in ''.join(report.html)
@testing.requires_testing_data
def test_render_add_sections(renderer, tmpdir):
"""Test adding figures/images to section."""
tempdir = str(tmpdir)
report = Report(subjects_dir=subjects_dir)
# Check add_figs_to_section functionality
fig = plt.plot([1, 2], [1, 2])[0].figure
report.add_figs_to_section(figs=fig, # test non-list input
captions=['evoked response'], scale=1.2,
image_format='svg')
pytest.raises(ValueError, report.add_figs_to_section, figs=[fig, fig],
captions='H')
pytest.raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=0, image_format='svg')
pytest.raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=1e-10, image_format='svg')
# need to recreate because calls above change size
fig = plt.plot([1, 2], [1, 2])[0].figure
# Check add_images_to_section with png
img_fname = op.join(tempdir, 'testimage.png')
fig.savefig(img_fname)
report.add_images_to_section(fnames=[img_fname],
captions=['evoked response'])
report.add_images_to_section(fnames=[img_fname],
captions=['evoked response'])
pytest.raises(ValueError, report.add_images_to_section,
fnames=[img_fname, img_fname], captions='H')
pytest.raises(ValueError, report.add_images_to_section,
fnames=['foobar.xxx'], captions='H')
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(-0.2, 0.0))
fig = plot_alignment(evoked.info, trans_fname, subject='sample',
subjects_dir=subjects_dir)
report.add_figs_to_section(figs=fig, # test non-list input
captions='random image', scale=1.2)
assert (repr(report))
fname = op.join(str(tmpdir), 'test.html')
report.save(fname, open_browser=False)
with open(fname, 'r') as fid:
html = fid.read()
assert html.count('<li class="report_custom"') == 8 # several
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_nibabel()
def test_render_mri(renderer, tmpdir):
"""Test rendering MRI for mne report."""
tempdir = str(tmpdir)
trans_fname_new = op.join(tempdir, 'temp-trans.fif')
for a, b in [[trans_fname, trans_fname_new]]:
shutil.copyfile(a, b)
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
report.parse_folder(data_path=tempdir, mri_decim=30, pattern='*')
fname = op.join(tempdir, 'report.html')
report.save(fname, open_browser=False)
with open(fname, 'r') as fid:
html = fid.read()
assert html.count('<li class="bem"') == 2 # left and content
assert repr(report)
report.add_bem_to_section('sample', caption='extra', section='foo',
subjects_dir=subjects_dir, decim=30)
report.save(fname, open_browser=False, overwrite=True)
with open(fname, 'r') as fid:
html = fid.read()
assert 'report_report' not in html
assert html.count('<li class="report_foo"') == 2
@testing.requires_testing_data
@requires_nibabel()
def test_render_mri_without_bem(tmpdir):
"""Test rendering MRI without BEM for mne report."""
tempdir = str(tmpdir)
os.mkdir(op.join(tempdir, 'sample'))
os.mkdir(op.join(tempdir, 'sample', 'mri'))
shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz'))
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=tempdir)
with pytest.raises(RuntimeError, match='No matching files found'):
report.parse_folder(tempdir, render_bem=False)
with pytest.warns(RuntimeWarning, match='No BEM surfaces found'):
report.parse_folder(tempdir, render_bem=True, mri_decim=20)
assert 'bem' in report.fnames
report.save(op.join(tempdir, 'report.html'), open_browser=False)
@testing.requires_testing_data
@requires_nibabel()
def test_add_htmls_to_section():
"""Test adding html str to mne report."""
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
html = '<b>MNE-Python is AWESOME</b>'
caption, section = 'html', 'html_section'
report.add_htmls_to_section(html, caption, section)
idx = report._sectionlabels.index('report_' + section)
html_compare = report.html[idx]
assert (html in html_compare)
assert (repr(report))
def test_add_slider_to_section(tmpdir):
"""Test adding a slider with a series of images to mne report."""
tempdir = str(tmpdir)
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
section = 'slider_section'
figs = _get_example_figures()
report.add_slider_to_section(figs, section=section, title='my title')
assert report.fnames[0] == 'my title-#-report_slider_section-#-custom'
report.save(op.join(tempdir, 'report.html'), open_browser=False)
pytest.raises(NotImplementedError, report.add_slider_to_section,
[figs, figs])
pytest.raises(ValueError, report.add_slider_to_section, figs, ['wug'])
pytest.raises(TypeError, report.add_slider_to_section, figs, 'wug')
# need at least 2
pytest.raises(ValueError, report.add_slider_to_section, figs[:1], 'wug')
# Smoke test that SVG w/unicode can be added
report = Report()
fig, ax = plt.subplots()
ax.set_xlabel('µ')
report.add_slider_to_section([fig] * 2, image_format='svg')
def test_validate_input():
"""Test Report input validation."""
report = Report()
items = ['a', 'b', 'c']
captions = ['Letter A', 'Letter B', 'Letter C']
section = 'ABCs'
comments = ['First letter of the alphabet.',
'Second letter of the alphabet',
'Third letter of the alphabet']
pytest.raises(ValueError, report._validate_input, items, captions[:-1],
section, comments=None)
pytest.raises(ValueError, report._validate_input, items, captions, section,
comments=comments[:-1])
values = report._validate_input(items, captions, section, comments=None)
items_new, captions_new, comments_new = values
assert_equal(len(comments_new), len(items))
@requires_h5py
def test_open_report(tmpdir):
"""Test the open_report function."""
tempdir = str(tmpdir)
hdf5 = op.join(tempdir, 'report.h5')
# Test creating a new report through the open_report function
fig1 = _get_example_figures()[0]
with open_report(hdf5, subjects_dir=subjects_dir) as report:
assert report.subjects_dir == subjects_dir
assert report._fname == hdf5
report.add_figs_to_section(figs=fig1, captions=['evoked response'])
# Exiting the context block should have triggered saving to HDF5
assert op.exists(hdf5)
# Load the HDF5 version of the report and check equivalence
report2 = open_report(hdf5)
assert report2._fname == hdf5
assert report2.subjects_dir == report.subjects_dir
assert report2.html == report.html
assert report2.__getstate__() == report.__getstate__()
assert '_fname' not in report2.__getstate__()
# Check parameters when loading a report
pytest.raises(ValueError, open_report, hdf5, foo='bar') # non-existing
pytest.raises(ValueError, open_report, hdf5, subjects_dir='foo')
open_report(hdf5, subjects_dir=subjects_dir) # This should work
# Check that the context manager doesn't swallow exceptions
with pytest.raises(ZeroDivisionError):
with open_report(hdf5, subjects_dir=subjects_dir) as report:
1 / 0
def test_remove():
"""Test removing figures from a report."""
r = Report()
fig1, fig2 = _get_example_figures()
r.add_figs_to_section(fig1, 'figure1', 'mysection')
r.add_slider_to_section([fig1, fig2], title='figure1',
section='othersection')
r.add_figs_to_section(fig2, 'figure1', 'mysection')
r.add_figs_to_section(fig2, 'figure2', 'mysection')
# Test removal by caption
r2 = copy.deepcopy(r)
removed_index = r2.remove(caption='figure1')
assert removed_index == 2
assert len(r2.html) == 3
assert r2.html[0] == r.html[0]
assert r2.html[1] == r.html[1]
assert r2.html[2] == r.html[3]
# Test restricting to section
r2 = copy.deepcopy(r)
removed_index = r2.remove(caption='figure1', section='othersection')
assert removed_index == 1
assert len(r2.html) == 3
assert r2.html[0] == r.html[0]
assert r2.html[1] == r.html[2]
assert r2.html[2] == r.html[3]
# Test removal of empty sections
r2 = copy.deepcopy(r)
r2.remove(caption='figure1', section='othersection')
assert r2.sections == ['mysection']
assert r2._sectionvars == {'mysection': 'report_mysection'}
def test_add_or_replace():
"""Test replacing existing figures in a report."""
r = Report()
fig1, fig2 = _get_example_figures()
r.add_figs_to_section(fig1, 'duplicate', 'mysection')
r.add_figs_to_section(fig1, 'duplicate', 'mysection')
r.add_figs_to_section(fig1, 'duplicate', 'othersection')
r.add_figs_to_section(fig2, 'nonduplicate', 'mysection')
# By default, replace=False, so all figures should be there
assert len(r.html) == 4
old_r = copy.deepcopy(r)
# Re-add fig1 with replace=True, it should overwrite the last occurrence of
# fig1 in section 'mysection'.
r.add_figs_to_section(fig2, 'duplicate', 'mysection', replace=True)
assert len(r.html) == 4
assert r.html[1] != old_r.html[1] # This figure should have changed
# All other figures should be the same
assert r.html[0] == old_r.html[0]
assert r.html[2] == old_r.html[2]
assert r.html[3] == old_r.html[3]
def test_scraper(tmpdir):
"""Test report scraping."""
r = Report()
fig1, fig2 = _get_example_figures()
r.add_figs_to_section(fig1, 'a', 'mysection')
r.add_figs_to_section(fig2, 'b', 'mysection')
# Mock a Sphinx + sphinx_gallery config
app = Bunch(builder=Bunch(srcdir=str(tmpdir),
outdir=op.join(str(tmpdir), '_build', 'html')))
scraper = _ReportScraper()
scraper.app = app
gallery_conf = dict(src_dir=app.builder.srcdir, builder_name='html')
img_fname = op.join(app.builder.srcdir, 'auto_examples', 'images',
'sg_img.png')
target_file = op.join(app.builder.srcdir, 'auto_examples', 'sg.py')
os.makedirs(op.dirname(img_fname))
os.makedirs(app.builder.outdir)
block_vars = dict(image_path_iterator=(img for img in [img_fname]),
example_globals=dict(a=1), target_file=target_file)
# Nothing yet
block = None
rst = scraper(block, block_vars, gallery_conf)
assert rst == ''
# Still nothing
block_vars['example_globals']['r'] = r
rst = scraper(block, block_vars, gallery_conf)
# Once it's saved, add it
assert rst == ''
fname = op.join(str(tmpdir), 'my_html.html')
r.save(fname, open_browser=False)
rst = scraper(block, block_vars, gallery_conf)
out_html = op.join(app.builder.outdir, 'auto_examples', 'my_html.html')
assert not op.isfile(out_html)
os.makedirs(op.join(app.builder.outdir, 'auto_examples'))
scraper.copyfiles()
assert op.isfile(out_html)
assert rst.count('"') == 6
assert "<iframe" in rst
assert op.isfile(img_fname.replace('png', 'svg'))
@testing.requires_testing_data
@pytest.mark.parametrize('split_naming', ('neuromag', 'bids',))
def test_split_files(tmpdir, split_naming):
"""Test that in the case of split files, we only parse the first."""
raw = read_raw_fif(raw_fname)
split_size = '7MB' # Should produce 3 files
buffer_size_sec = 1 # Tiny buffer so it's smaller than the split size
raw.save(op.join(tmpdir, 'raw_meg.fif'), split_size=split_size,
split_naming=split_naming, buffer_size_sec=buffer_size_sec)
report = Report()
report.parse_folder(tmpdir, render_bem=False)
assert len(report.fnames) == 1
run_tests_if_main()
|
import unittest
import sys
from collections import namedtuple
from functools import reduce
from operator import add
from functional.util import (
is_namedtuple,
lazy_parallelize,
split_every,
pack,
unpack,
compute_partition_size,
)
Data = namedtuple("Tuple", "x y")
class TestUtil(unittest.TestCase):
def test_is_namedtuple(self):
self.assertTrue(is_namedtuple(Data(1, 2)))
self.assertFalse(is_namedtuple((1, 2, 3)))
self.assertFalse(is_namedtuple([1, 2, 3]))
self.assertFalse(is_namedtuple(1))
# Skipping tests on pypy because of https://github.com/uqfoundation/dill/issues/73
@unittest.skipIf(
"__pypy__" in sys.builtin_module_names, "Skip parallel tests on pypy"
)
def test_lazy_parallelize(self):
self.assertListEqual(
list(range(10)), reduce(add, lazy_parallelize(lambda x: x, range(10)))
)
self.assertListEqual(
list(range(10)),
list(
reduce(add, lazy_parallelize(lambda x: x, range(10), processes=10000))
),
)
def f():
yield 0
self.assertListEqual([[0]], list(lazy_parallelize(lambda x: x, f())))
def test_split_every(self):
result = iter([1, 2, 3, 4])
self.assertListEqual(list(split_every(2, result)), [[1, 2], [3, 4]])
result = iter([1, 2, 3, 4, 5])
self.assertListEqual(list(split_every(2, result)), [[1, 2], [3, 4], [5]])
# Skipping tests on pypy because of https://github.com/uqfoundation/dill/issues/73
@unittest.skipIf(
"__pypy__" in sys.builtin_module_names, "Skip parallel tests on pypy"
)
def test_pack_unpack(self):
packed = pack(map, [lambda x: x * 2, range(4)])
self.assertListEqual(unpack(packed), [0, 2, 4, 6])
def test_compute_partition_size(self):
result = compute_partition_size([0, 1, 2], 2)
self.assertEqual(result, 2)
result = compute_partition_size([0, 1, 2, 3], 2)
self.assertEqual(result, 2)
result = compute_partition_size(iter([0, 1, 2, 3]), 2)
self.assertEqual(result, 1)
|
import os
from ..bem import fit_sphere_to_headshape
from ..io import read_raw_fif
from ..utils import logger, verbose, warn
def _mxwarn(msg):
"""Warn about a bug."""
warn('Possible MaxFilter bug: %s, more info: '
'http://imaging.mrc-cbu.cam.ac.uk/meg/maxbugs' % msg)
@verbose
def apply_maxfilter(in_fname, out_fname, origin=None, frame='device',
bad=None, autobad='off', skip=None, force=False,
st=False, st_buflen=16.0, st_corr=0.96, mv_trans=None,
mv_comp=False, mv_headpos=False, mv_hp=None,
mv_hpistep=None, mv_hpisubt=None, mv_hpicons=True,
linefreq=None, cal=None, ctc=None, mx_args='',
overwrite=True, verbose=None):
"""Apply NeuroMag MaxFilter to raw data.
Needs Maxfilter license, maxfilter has to be in PATH.
Parameters
----------
in_fname : str
Input file name.
out_fname : str
Output file name.
origin : array-like or str
Head origin in mm. If None it will be estimated from headshape points.
frame : str ('device' or 'head')
Coordinate frame for head center.
bad : str, list (or None)
List of static bad channels. Can be a list with channel names, or a
string with channels (names or logical channel numbers).
autobad : str ('on', 'off', 'n')
Sets automated bad channel detection on or off.
skip : str or a list of float-tuples (or None)
Skips raw data sequences, time intervals pairs in sec,
e.g.: 0 30 120 150.
force : bool
Ignore program warnings.
st : bool
Apply the time-domain MaxST extension.
st_buflen : float
MaxSt buffer length in sec (disabled if st is False).
st_corr : float
MaxSt subspace correlation limit (disabled if st is False).
mv_trans : str (filename or 'default') (or None)
Transforms the data into the coil definitions of in_fname, or into the
default frame (None: don't use option).
mv_comp : bool (or 'inter')
Estimates and compensates head movements in continuous raw data.
mv_headpos : bool
Estimates and stores head position parameters, but does not compensate
movements (disabled if mv_comp is False).
mv_hp : str (or None)
Stores head position data in an ascii file
(disabled if mv_comp is False).
mv_hpistep : float (or None)
Sets head position update interval in ms (disabled if mv_comp is
False).
mv_hpisubt : str ('amp', 'base', 'off') (or None)
Subtracts hpi signals: sine amplitudes, amp + baseline, or switch off
(disabled if mv_comp is False).
mv_hpicons : bool
Check initial consistency isotrak vs hpifit
(disabled if mv_comp is False).
linefreq : int (50, 60) (or None)
Sets the basic line interference frequency (50 or 60 Hz)
(None: do not use line filter).
cal : str
Path to calibration file.
ctc : str
Path to Cross-talk compensation file.
mx_args : str
Additional command line arguments to pass to MaxFilter.
overwrite : bool
Overwrite output file if it already exists.
%(verbose)s
Returns
-------
    origin : str
Head origin in selected coordinate frame.
"""
# check for possible maxfilter bugs
if mv_trans is not None and mv_comp:
_mxwarn("Don't use '-trans' with head-movement compensation "
"'-movecomp'")
if autobad != 'off' and (mv_headpos or mv_comp):
_mxwarn("Don't use '-autobad' with head-position estimation "
"'-headpos' or movement compensation '-movecomp'")
if st and autobad != 'off':
_mxwarn("Don't use '-autobad' with '-st' option")
# determine the head origin if necessary
if origin is None:
logger.info('Estimating head origin from headshape points..')
raw = read_raw_fif(in_fname)
r, o_head, o_dev = fit_sphere_to_headshape(raw.info, units='mm')
raw.close()
logger.info('[done]')
if frame == 'head':
origin = o_head
elif frame == 'device':
origin = o_dev
else:
raise RuntimeError('invalid frame for origin')
if not isinstance(origin, str):
origin = '%0.1f %0.1f %0.1f' % (origin[0], origin[1], origin[2])
# format command
cmd = ('maxfilter -f %s -o %s -frame %s -origin %s '
% (in_fname, out_fname, frame, origin))
if bad is not None:
# format the channels
if not isinstance(bad, list):
bad = bad.split()
bad = map(str, bad)
bad_logic = [ch[3:] if ch.startswith('MEG') else ch for ch in bad]
bad_str = ' '.join(bad_logic)
cmd += '-bad %s ' % bad_str
cmd += '-autobad %s ' % autobad
if skip is not None:
if isinstance(skip, list):
skip = ' '.join(['%0.3f %0.3f' % (s[0], s[1]) for s in skip])
cmd += '-skip %s ' % skip
if force:
cmd += '-force '
if st:
cmd += '-st '
cmd += ' %d ' % st_buflen
cmd += '-corr %0.4f ' % st_corr
if mv_trans is not None:
cmd += '-trans %s ' % mv_trans
if mv_comp:
cmd += '-movecomp '
if mv_comp == 'inter':
cmd += ' inter '
if mv_headpos:
cmd += '-headpos '
if mv_hp is not None:
cmd += '-hp %s ' % mv_hp
if mv_hpisubt is not None:
        cmd += '-hpisubt %s ' % mv_hpisubt
if mv_hpicons:
cmd += '-hpicons '
if linefreq is not None:
cmd += '-linefreq %d ' % linefreq
if cal is not None:
cmd += '-cal %s ' % cal
if ctc is not None:
cmd += '-ctc %s ' % ctc
cmd += mx_args
if overwrite and os.path.exists(out_fname):
os.remove(out_fname)
logger.info('Running MaxFilter: %s ' % cmd)
if os.getenv('_MNE_MAXFILTER_TEST', '') != 'true': # fake maxfilter
st = os.system(cmd)
else:
print(cmd) # we can check the output
st = 0
if st != 0:
raise RuntimeError('MaxFilter returned non-zero exit status %d' % st)
logger.info('[done]')
return origin
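# Usage sketch (hypothetical file names; a licensed MaxFilter binary must be on
# the PATH). A call like
#     apply_maxfilter('raw.fif', 'raw_sss.fif', frame='head',
#                     st=True, st_buflen=10.0, st_corr=0.98)
# assembles and runs a command roughly of the form
#     maxfilter -f raw.fif -o raw_sss.fif -frame head -origin <x y z> \
#         -autobad off -st 10 -corr 0.9800
# and returns the origin string that was used.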
|
from pymyq.const import (
DEVICE_FAMILY as MYQ_DEVICE_FAMILY,
DEVICE_FAMILY_GATEWAY as MYQ_DEVICE_FAMILY_GATEWAY,
DEVICE_STATE as MYQ_DEVICE_STATE,
DEVICE_STATE_ONLINE as MYQ_DEVICE_STATE_ONLINE,
KNOWN_MODELS,
MANUFACTURER,
)
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
BinarySensorEntity,
)
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, MYQ_COORDINATOR, MYQ_GATEWAY
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up MyQ binary sensors."""
data = hass.data[DOMAIN][config_entry.entry_id]
myq = data[MYQ_GATEWAY]
coordinator = data[MYQ_COORDINATOR]
entities = []
for device in myq.devices.values():
if device.device_json[MYQ_DEVICE_FAMILY] == MYQ_DEVICE_FAMILY_GATEWAY:
entities.append(MyQBinarySensorEntity(coordinator, device))
async_add_entities(entities, True)
class MyQBinarySensorEntity(CoordinatorEntity, BinarySensorEntity):
"""Representation of a MyQ gateway."""
def __init__(self, coordinator, device):
"""Initialize with API object, device id."""
super().__init__(coordinator)
self._device = device
@property
def device_class(self):
"""We track connectivity for gateways."""
return DEVICE_CLASS_CONNECTIVITY
@property
def name(self):
"""Return the name of the garage door if any."""
return f"{self._device.name} MyQ Gateway"
@property
def is_on(self):
"""Return if the device is online."""
if not self.coordinator.last_update_success:
return False
        # Not all devices report online, so assume True if it's missing
return self._device.device_json[MYQ_DEVICE_STATE].get(
MYQ_DEVICE_STATE_ONLINE, True
)
@property
def available(self) -> bool:
"""Entity is always available."""
return True
@property
def unique_id(self):
"""Return a unique, Home Assistant friendly identifier for this entity."""
return self._device.device_id
@property
def device_info(self):
"""Return the device_info of the device."""
device_info = {
"identifiers": {(DOMAIN, self._device.device_id)},
"name": self.name,
"manufacturer": MANUFACTURER,
"sw_version": self._device.firmware_version,
}
model = KNOWN_MODELS.get(self._device.device_id[2:4])
if model:
device_info["model"] = model
return device_info
|
from .std import tqdm, trange
from .gui import tqdm as tqdm_gui # TODO: remove in v5.0.0
from .gui import trange as tgrange # TODO: remove in v5.0.0
from ._tqdm_pandas import tqdm_pandas
from .cli import main # TODO: remove in v5.0.0
from ._monitor import TMonitor, TqdmSynchronisationWarning
from ._version import __version__ # NOQA
from .std import TqdmTypeError, TqdmKeyError, TqdmWarning, \
TqdmDeprecationWarning, TqdmExperimentalWarning, \
TqdmMonitorWarning
__all__ = ['tqdm', 'tqdm_gui', 'trange', 'tgrange', 'tqdm_pandas',
'tqdm_notebook', 'tnrange', 'main', 'TMonitor',
'TqdmTypeError', 'TqdmKeyError',
'TqdmWarning', 'TqdmDeprecationWarning',
'TqdmExperimentalWarning',
'TqdmMonitorWarning', 'TqdmSynchronisationWarning',
'__version__']
def tqdm_notebook(*args, **kwargs): # pragma: no cover
"""See tqdm.notebook.tqdm for full documentation"""
from .notebook import tqdm as _tqdm_notebook
from warnings import warn
warn("This function will be removed in tqdm==5.0.0\n"
"Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`",
TqdmDeprecationWarning, stacklevel=2)
return _tqdm_notebook(*args, **kwargs)
def tnrange(*args, **kwargs): # pragma: no cover
"""
A shortcut for `tqdm.notebook.tqdm(xrange(*args), **kwargs)`.
On Python3+, `range` is used instead of `xrange`.
"""
from .notebook import trange as _tnrange
from warnings import warn
warn("Please use `tqdm.notebook.trange` instead of `tqdm.tnrange`",
TqdmDeprecationWarning, stacklevel=2)
return _tnrange(*args, **kwargs)
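# Migration sketch: the deprecation warnings above point at the non-deprecated
# entry points, e.g.
#     from tqdm.notebook import tqdm, trange  # instead of tqdm.tqdm_notebook / tqdm.tnrange
#     for _ in trange(10):
#         pass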
|
from mock import patch, sentinel, call
from arctic.scripts.arctic_fsck import main
from ...util import run_as_main
def test_main():
with patch('arctic.scripts.arctic_fsck.Arctic') as Arctic, \
patch('arctic.scripts.arctic_fsck.get_mongodb_uri') as get_mongodb_uri, \
patch('arctic.scripts.arctic_fsck.do_db_auth') as do_db_auth:
run_as_main(main, '--host', '%s:%s' % (sentinel.host, sentinel.port),
'-v', '--library', 'sentinel.library', 'lib2', '-f')
get_mongodb_uri.assert_called_once_with('sentinel.host:sentinel.port')
Arctic.assert_called_once_with(get_mongodb_uri.return_value)
assert do_db_auth.call_args_list == [call('%s:%s' % (sentinel.host, sentinel.port),
Arctic.return_value._conn,
'arctic_sentinel'),
call('%s:%s' % (sentinel.host, sentinel.port),
Arctic.return_value._conn,
'arctic')]
assert Arctic.return_value.__getitem__.return_value._fsck.call_args_list == [call(False),
call(False), ]
def test_main_dry_run():
with patch('arctic.scripts.arctic_fsck.Arctic') as Arctic, \
patch('arctic.scripts.arctic_fsck.get_mongodb_uri') as get_mongodb_uri, \
patch('arctic.scripts.arctic_fsck.do_db_auth') as do_db_auth:
run_as_main(main, '--host', '%s:%s' % (sentinel.host, sentinel.port),
'-v', '--library', 'sentinel.library', 'sentinel.lib2')
get_mongodb_uri.assert_called_once_with('sentinel.host:sentinel.port')
Arctic.assert_called_once_with(get_mongodb_uri.return_value)
assert do_db_auth.call_count == 0
assert Arctic.return_value.__getitem__.return_value._fsck.call_args_list == [call(True),
call(True), ]
|
import datetime
import logging
import os
import signal
import subprocess
import time
import socket
import re
import shlex
from pkg_resources import resource_string
from .reader import JMeterReader
from ..Console import Plugin as ConsolePlugin
from ..Console import screen as ConsoleScreen
from ...common.interfaces import AggregateResultListener, AbstractInfoWidget, GeneratorPlugin
logger = logging.getLogger(__name__)
class Plugin(GeneratorPlugin):
""" JMeter tank plugin """
SECTION = 'jmeter'
SHUTDOWN_TEST = 'Shutdown'
STOP_TEST_NOW = 'Stop Test'
DISCOVER_PORT_PATTERN = r'Waiting for possible .* message on port (?P<port>\d+)'
def __init__(self, core, cfg, name):
super(Plugin, self).__init__(core, cfg, name)
self.args = None
self.original_jmx = None
self.jtl_file = None
self.ext_log = None
self.ext_levels = ['none', 'errors', 'all']
self.ext_log_file = None
self.jmx = None
self.user_args = None
self.jmeter_path = None
self.jmeter_ver = None
self.jmeter_log = None
self.start_time = time.time()
self.jmeter_buffer_size = None
self.jmeter_udp_port = None
self.shutdown_timeout = None
@staticmethod
def get_key():
return __file__
def get_available_options(self):
return [
"jmx", "args", "jmeter_path", "buffer_size", "buffered_seconds",
"exclude_markers", "shutdown_timeout"
]
def configure(self):
self.original_jmx = self.get_option("jmx")
self.core.add_artifact_file(self.original_jmx, True)
self.jtl_file = self.core.mkstemp('.jtl', 'jmeter_')
self.core.add_artifact_file(self.jtl_file)
self.user_args = self.get_option("args", '')
self.jmeter_path = self.get_option('jmeter_path')
self.jmeter_log = self.core.mkstemp('.log', 'jmeter_')
self.jmeter_ver = self.get_option('jmeter_ver')
self.ext_log = self.get_option('extended_log', self.get_option('ext_log'))
if self.ext_log != 'none':
self.ext_log_file = self.core.mkstemp('.jtl', 'jmeter_ext_')
self.core.add_artifact_file(self.ext_log_file)
self.core.add_artifact_file(self.jmeter_log, True)
self.exclude_markers = set(self.get_option('exclude_markers', []))
self.jmx = self.__add_jmeter_components(
self.original_jmx, self.jtl_file, self.get_option('variables'))
self.core.add_artifact_file(self.jmx)
jmeter_stderr_file = self.core.mkstemp(".log", "jmeter_stdout_stderr_")
self.core.add_artifact_file(jmeter_stderr_file)
self.process_stderr = open(jmeter_stderr_file, 'w')
self.shutdown_timeout = self.get_option('shutdown_timeout', 3)
self.affinity = self.get_option('affinity', '')
def get_reader(self):
if self.reader is None:
self.reader = JMeterReader(self.jtl_file)
return self.reader
def get_stats_reader(self):
if self.stats_reader is None:
self.stats_reader = self.reader.stats_reader
return self.stats_reader
def prepare_test(self):
self.args = [
self.jmeter_path, "-n", "-t", self.jmx, '-j', self.jmeter_log,
'-Jjmeter.save.saveservice.default_delimiter=\\t',
'-Jjmeter.save.saveservice.connect_time=true'
]
self.args += shlex.split(self.user_args)
if self.affinity:
self.core.__setup_affinity(self.affinity, args=self.args)
try:
console = self.core.get_plugin_of_type(ConsolePlugin)
except Exception as ex:
logger.debug("Console not found: %s", ex)
console = None
if console:
widget = JMeterInfoWidget(self)
console.add_info_widget(widget)
self.core.job.aggregator.add_result_listener(widget)
def start_test(self):
logger.info(
"Starting %s with arguments: %s", self.jmeter_path, self.args)
try:
self.process = subprocess.Popen(
self.args,
executable=self.jmeter_path,
preexec_fn=os.setsid,
close_fds=True,
stdout=self.process_stderr,
stderr=self.process_stderr)
except OSError:
logger.debug(
"Unable to start JMeter process. Args: %s, Executable: %s",
self.args,
self.jmeter_path,
exc_info=True)
raise RuntimeError(
"Unable to access to JMeter executable file or it does not exist: %s"
% self.jmeter_path)
self.start_time = time.time()
self.jmeter_udp_port = self.__discover_jmeter_udp_port()
def is_test_finished(self):
retcode = self.process.poll()
aggregator = self.core.job.aggregator
if not aggregator.reader.jmeter_finished and retcode is not None:
logger.info(
"JMeter process finished with exit code: %s, waiting for aggregator",
retcode)
self.retries = 0
aggregator.reader.jmeter_finished = True
return -1
elif aggregator.reader.jmeter_finished is True:
if aggregator.reader.agg_finished:
self.reader.close()
return retcode
else:
logger.info("Waiting for aggregator to finish")
return -1
else:
return -1
def end_test(self, retcode):
if self.process:
gracefully_shutdown = self.__graceful_shutdown()
if not gracefully_shutdown:
self.__kill_jmeter()
if self.process_stderr:
self.process_stderr.close()
self.core.add_artifact_file(self.jmeter_log)
self.reader.close()
return retcode
def __discover_jmeter_udp_port(self):
"""Searching for line in jmeter.log such as
Waiting for possible shutdown message on port 4445
"""
r = re.compile(self.DISCOVER_PORT_PATTERN)
with open(self.process_stderr.name, 'r') as f:
cnt = 0
while self.process.pid and cnt < 10:
line = f.readline()
m = r.match(line)
if m is None:
cnt += 1
time.sleep(1)
else:
port = int(m.group('port'))
return port
else:
logger.warning('JMeter UDP port wasn\'t discovered')
return None
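    # For reference, __discover_jmeter_udp_port relies on DISCOVER_PORT_PATTERN
    # matching a JMeter stdout line such as
    #     "Waiting for possible Shutdown message on port 4445"
    # e.g. (illustrative):
    #     m = re.match(Plugin.DISCOVER_PORT_PATTERN,
    #                  'Waiting for possible Shutdown message on port 4445')
    #     assert int(m.group('port')) == 4445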
def __kill_jmeter(self):
logger.info(
"Terminating jmeter process group with PID %s",
self.process.pid)
try:
os.killpg(self.process.pid, signal.SIGTERM)
except OSError as exc:
logger.debug("Seems JMeter exited itself: %s", exc)
# Utils.log_stdout_stderr(logger, self.process.stdout, self.process.stderr, "jmeter")
def __add_jmeter_components(self, jmx, jtl, variables):
""" Genius idea by Alexey Lavrenyuk """
logger.debug("Original JMX: %s", os.path.realpath(jmx))
with open(jmx, 'r') as src_jmx:
source_lines = src_jmx.readlines()
try:
            # In newer JMeter versions (e.g. 3.2) the WorkBench plugin checkbox is enabled by default.
            # That breaks the Yandex.Tank injection and raises an XML parse exception.
closing = source_lines.pop(-1)
if "WorkBenchGui" in source_lines[-5]:
logger.info("WorkBench checkbox enabled...bypassing")
last_string_count = 6
else:
last_string_count = 2
while last_string_count > 0:
closing = source_lines.pop(-1) + closing
last_string_count -= 1
logger.debug("Closing statement: %s", closing)
except Exception as exc:
raise RuntimeError("Failed to find the end of JMX XML: %s" % exc)
udv_tpl = resource_string(__name__, 'config/jmeter_var_template.xml').decode('utf8')
udv_set = []
for var_name, var_value in variables.items():
udv_set.append(udv_tpl % (var_name, var_name, var_value))
udv = "\n".join(udv_set)
if self.jmeter_ver >= 2.13:
save_connect = '<connectTime>true</connectTime>'
else:
save_connect = ''
if self.ext_log in ['errors', 'all']:
level_map = {'errors': 'true', 'all': 'false'}
tpl_resource = 'jmeter_writer_ext.xml'
tpl_args = {
'jtl': self.jtl_file,
'udv': udv,
'ext_log': self.ext_log_file,
'ext_level': level_map[self.ext_log],
'save_connect': save_connect
}
else:
tpl_resource = 'jmeter_writer.xml'
tpl_args = {
'jtl': self.jtl_file,
'udv': udv,
'save_connect': save_connect
}
tpl = resource_string(__name__, 'config/' + tpl_resource).decode('utf8')
try:
new_jmx = self.core.mkstemp(
'.jmx', 'modified_', os.path.dirname(os.path.realpath(jmx)))
except OSError as exc:
logger.debug("Can't create modified jmx near original: %s", exc)
new_jmx = self.core.mkstemp('.jmx', 'modified_')
logger.debug("Modified JMX: %s", new_jmx)
with open(new_jmx, "w") as fh:
fh.write(''.join(source_lines))
fh.write(tpl % tpl_args)
fh.write(closing)
return new_jmx
def __graceful_shutdown(self):
if self.jmeter_udp_port is None:
return False
shutdown_test_started = time.time()
while time.time() - shutdown_test_started < self.shutdown_timeout:
self.__send_udp_message(self.SHUTDOWN_TEST)
if self.process.poll() is not None:
return True
else:
time.sleep(1)
self.log.info('Graceful shutdown failed after %s' % str(time.time() - shutdown_test_started))
stop_test_started = time.time()
while time.time() - stop_test_started < self.shutdown_timeout:
self.__send_udp_message(self.STOP_TEST_NOW)
if self.process.poll() is not None:
return True
else:
time.sleep(1)
self.log.info('Graceful stop failed after {}'.format(time.time() - stop_test_started))
return False
def __send_udp_message(self, message):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(message.encode('utf8'), ('localhost', self.jmeter_udp_port))
class JMeterInfoWidget(AbstractInfoWidget, AggregateResultListener):
""" Right panel widget with JMeter test info """
def __init__(self, jmeter):
AbstractInfoWidget.__init__(self)
self.krutilka = ConsoleScreen.krutilka()
self.jmeter = jmeter
self.active_threads = 0
self.RPS = 0
def get_index(self):
return 0
def on_aggregated_data(self, data, stats):
self.active_threads = stats['metrics']['instances']
self.RPS = data['overall']['interval_real']['len']
def render(self, screen):
jmeter = " JMeter Test %s" % next(self.krutilka)
space = screen.right_panel_width - len(jmeter) - 1
        left_spaces = space // 2
        right_spaces = space // 2
dur_seconds = int(time.time()) - int(self.jmeter.start_time)
duration = str(datetime.timedelta(seconds=dur_seconds))
template = screen.markup.BG_MAGENTA + '~' * left_spaces + jmeter + ' '
template += '~' * right_spaces + screen.markup.RESET + "\n"
template += " Test Plan: %s\n"
template += " Duration: %s\n"
template += "Active Threads: %s\n"
template += " Responses/s: %s"
data = (
os.path.basename(self.jmeter.original_jmx), duration,
self.active_threads, self.RPS)
return template % data
|
import json
import os
import unittest
from absl import flags
from absl.testing import parameterized
import mock
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_benchmarks import netperf_benchmark
FLAGS = flags.FLAGS
FLAGS.mark_as_parsed()
class NetperfBenchmarkTestCase(parameterized.TestCase, unittest.TestCase):
maxDiff = None
def setUp(self):
super(NetperfBenchmarkTestCase, self).setUp()
# Load data
path = os.path.join(os.path.dirname(__file__),
'..', 'data',
'netperf_results.json')
with open(path) as fp:
stdouts = ['\n'.join(i) for i in json.load(fp)]
self.expected_stdout = [json.dumps(([stdout], [''], [0]))
for stdout in stdouts]
p = mock.patch(vm_util.__name__ + '.ShouldRunOnExternalIpAddress')
self.should_run_external = p.start()
self.addCleanup(p.stop)
p = mock.patch(vm_util.__name__ + '.ShouldRunOnInternalIpAddress')
self.should_run_internal = p.start()
self.addCleanup(p.stop)
FLAGS.netperf_enable_histograms = False
def _ConfigureIpTypes(self, run_external=True, run_internal=True):
self.should_run_external.return_value = run_external
self.should_run_internal.return_value = run_internal
def testHistogramStatsCalculator(self):
histogram = {1: 5, 2: 10, 5: 5}
stats = netperf_benchmark._HistogramStatsCalculator(
histogram, [0, 20, 30, 74, 80, 100])
self.assertEqual(stats['p0'], 1)
self.assertEqual(stats['p20'], 1)
self.assertEqual(stats['p30'], 2)
self.assertEqual(stats['p74'], 2)
self.assertEqual(stats['p80'], 5)
self.assertEqual(stats['p100'], 5)
self.assertLessEqual(abs(stats['stddev'] - 1.538), 0.001)
def testExternalAndInternal(self):
self._ConfigureIpTypes()
vm_spec = mock.MagicMock(spec=benchmark_spec.BenchmarkSpec)
vm_spec.vms = [mock.MagicMock(), mock.MagicMock()]
vm_spec.vms[0].RobustRemoteCommand.side_effect = [
(i, '') for i in self.expected_stdout]
result = netperf_benchmark.Run(vm_spec)
tps = 'transactions_per_second'
mbps = 'Mbits/sec'
self.assertListEqual(
[('TCP_RR_Transaction_Rate', 1405.5, tps),
('TCP_RR_Latency_p50', 683.0, 'us'),
('TCP_RR_Latency_p90', 735.0, 'us'),
('TCP_RR_Latency_p99', 841.0, 'us'),
('TCP_RR_Latency_min', 600.0, 'us'),
('TCP_RR_Latency_max', 900.0, 'us'),
('TCP_RR_Latency_stddev', 783.80, 'us'),
('TCP_RR_Transaction_Rate', 3545.77, tps),
('TCP_RR_Latency_p50', 274.0, 'us'),
('TCP_RR_Latency_p90', 309.0, 'us'),
('TCP_RR_Latency_p99', 371.0, 'us'),
('TCP_RR_Latency_min', 200.0, 'us'),
('TCP_RR_Latency_max', 400.0, 'us'),
('TCP_RR_Latency_stddev', 189.82, 'us'),
('TCP_CRR_Transaction_Rate', 343.35, tps),
('TCP_CRR_Latency_p50', 2048.0, 'us'),
('TCP_CRR_Latency_p90', 2372.0, 'us'),
('TCP_CRR_Latency_p99', 30029.0, 'us'),
('TCP_CRR_Latency_min', 2000.0, 'us'),
('TCP_CRR_Latency_max', 35000.0, 'us'),
('TCP_CRR_Latency_stddev', 8147.88, 'us'),
('TCP_CRR_Transaction_Rate', 1078.07, tps),
('TCP_CRR_Latency_p50', 871.0, 'us'),
('TCP_CRR_Latency_p90', 996.0, 'us'),
('TCP_CRR_Latency_p99', 2224.0, 'us'),
('TCP_CRR_Latency_min', 800.0, 'us'),
('TCP_CRR_Latency_max', 2500.0, 'us'),
('TCP_CRR_Latency_stddev', 551.07, 'us'),
('TCP_STREAM_Throughput', 1187.94, mbps),
('TCP_STREAM_Throughput', 1973.37, 'Mbits/sec'),
('UDP_RR_Transaction_Rate', 1359.71, tps),
('UDP_RR_Latency_p50', 700.0, 'us'),
('UDP_RR_Latency_p90', 757.0, 'us'),
('UDP_RR_Latency_p99', 891.0, 'us'),
('UDP_RR_Latency_min', 600.0, 'us'),
('UDP_RR_Latency_max', 1000.0, 'us'),
('UDP_RR_Latency_stddev', 808.44, 'us'),
('UDP_RR_Transaction_Rate', 3313.49, tps),
('UDP_RR_Latency_p50', 295.0, 'us'),
('UDP_RR_Latency_p90', 330.0, 'us'),
('UDP_RR_Latency_p99', 406.0, 'us'),
('UDP_RR_Latency_min', 200.0, 'us'),
('UDP_RR_Latency_max', 500.0, 'us'),
('UDP_RR_Latency_stddev', 214.64, 'us'),
('UDP_STREAM_Throughput', 1102.42, mbps),
('UDP_STREAM_Throughput', 1802.72, 'Mbits/sec'),
],
[i[:3] for i in result])
external_meta = {'ip_type': 'external'}
internal_meta = {'ip_type': 'internal'}
expected_meta = (([external_meta] * 7 + [internal_meta] * 7) * 2 +
[external_meta, internal_meta] +
[external_meta] * 7 +
[internal_meta] * 7)
for i, meta in enumerate(expected_meta):
self.assertIsInstance(result[i][3], dict)
self.assertDictContainsSubset(meta, result[i][3])
@parameterized.named_parameters(
('no_times_up',
'MIGRATED TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to '
'10.0.0.137 () port 20157 AF_INET : histogram\nrecv_response_timed_n: no'
' response received. errno 110 counter 0\n'),
('has_times_up',
'MIGRATED TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 0 AF_INET to '
'10.0.0.172 () port 20169 AF_INET : histogram\ncatcher: timer popped '
'with times_up != 0\nrecv_response_timed_n: no response received. errno '
'4 counter -1\n'))
def testParseNetperfOutputError(self, output):
with self.assertRaises(
errors.Benchmarks.KnownIntermittentError) as e:
netperf_benchmark.ParseNetperfOutput(output, {}, 'fake_benchmark_name',
False)
self.assertIn('Failed to parse stdout', str(e.exception))
if __name__ == '__main__':
unittest.main()
|
import diamond.collector
import os
class UDPCollector(diamond.collector.Collector):
PROC = [
'/proc/net/snmp'
]
def process_config(self):
super(UDPCollector, self).process_config()
if self.config['allowed_names'] is None:
self.config['allowed_names'] = []
def get_default_config_help(self):
config_help = super(UDPCollector, self).get_default_config_help()
config_help.update({
'allowed_names': 'list of entries to collect, empty to collect all',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(UDPCollector, self).get_default_config()
config.update({
'path': 'udp',
'allowed_names': 'InDatagrams, NoPorts, InErrors, ' +
'OutDatagrams, RcvbufErrors, SndbufErrors'
})
return config
def collect(self):
metrics = {}
for filepath in self.PROC:
if not os.access(filepath, os.R_OK):
self.log.error('Permission to access %s denied', filepath)
continue
header = ''
data = ''
            # Seek the file for the lines that start with Udp
file = open(filepath)
if not file:
self.log.error('Failed to open %s', filepath)
continue
while True:
line = file.readline()
# Reached EOF?
if len(line) == 0:
break
# Line has metrics?
if line.startswith("Udp"):
header = line
data = file.readline()
break
file.close()
# No data from the file?
if header == '' or data == '':
self.log.error('%s has no lines with Udp', filepath)
continue
header = header.split()
data = data.split()
for i in xrange(1, len(header)):
metrics[header[i]] = data[i]
for metric_name in metrics.keys():
if ((len(self.config['allowed_names']) > 0 and
metric_name not in self.config['allowed_names'])):
continue
value = metrics[metric_name]
value = self.derivative(metric_name, long(value))
# Publish the metric
self.publish(metric_name, value, 0)
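# For reference, the Udp rows parsed above from /proc/net/snmp look roughly like
# (the exact field set varies by kernel version):
#     Udp: InDatagrams NoPorts InErrors OutDatagrams RcvbufErrors SndbufErrors
#     Udp: 123456 10 0 654321 0 0
# which is why a header row and a data row are read and zipped index-by-index.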
|
from copy import copy
from .lalr_analysis import Shift, Reduce
from .. import Token
from ..exceptions import UnexpectedToken
class ParserPuppet(object):
"""ParserPuppet gives you advanced control over error handling when parsing with LALR.
For a simpler, more streamlined interface, see the ``on_error`` argument to ``Lark.parse()``.
"""
def __init__(self, parser, parser_state, lexer_state):
self.parser = parser
self.parser_state = parser_state
self.lexer_state = lexer_state
def feed_token(self, token):
"""Feed the parser with a token, and advance it to the next state, as if it received it from the lexer.
Note that ``token`` has to be an instance of ``Token``.
"""
return self.parser_state.feed_token(token, token.type == '$END')
def __copy__(self):
"""Create a new puppet with a separate state.
Calls to feed_token() won't affect the old puppet, and vice-versa.
"""
return type(self)(
self.parser,
copy(self.parser_state),
copy(self.lexer_state),
)
def copy(self):
return copy(self)
def __eq__(self, other):
if not isinstance(other, ParserPuppet):
return False
return self.parser_state == other.parser_state and self.lexer_state == other.lexer_state
def as_immutable(self):
p = copy(self)
return ImmutableParserPuppet(p.parser, p.parser_state, p.lexer_state)
def pretty(self):
"""Print the output of ``choices()`` in a way that's easier to read."""
out = ["Puppet choices:"]
for k, v in self.choices().items():
out.append('\t- %s -> %s' % (k, v))
out.append('stack size: %s' % len(self.parser_state.state_stack))
return '\n'.join(out)
def choices(self):
"""Returns a dictionary of token types, matched to their action in the parser.
Only returns token types that are accepted by the current state.
Updated by ``feed_token()``.
"""
return self.parser_state.parse_conf.parse_table.states[self.parser_state.position]
def accepts(self):
accepts = set()
for t in self.choices():
if t.isupper(): # is terminal?
new_puppet = copy(self)
try:
new_puppet.feed_token(Token(t, ''))
except UnexpectedToken:
pass
else:
accepts.add(t)
return accepts
def resume_parse(self):
"""Resume parsing from the current puppet state."""
return self.parser.parse_from_state(self.parser_state)
class ImmutableParserPuppet(ParserPuppet):
result = None
def __hash__(self):
return hash((self.parser_state, self.lexer_state))
def feed_token(self, token):
c = copy(self)
c.result = ParserPuppet.feed_token(c, token)
return c
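# Usage sketch for ``on_error`` recovery (illustrative grammar/terminal names;
# assumes a lark release of this era where the raised UnexpectedToken exposes a
# ``puppet`` attribute):
#     from lark import Lark, Token
#     parser = Lark(grammar, parser='lalr')
#     def on_error(e):
#         if 'SEMICOLON' in e.puppet.accepts():
#             e.puppet.feed_token(Token('SEMICOLON', ';'))
#             return True  # error handled, resume parsing
#         return False
#     tree = parser.parse(text, on_error=on_error)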
|
from datetime import timedelta
import logging
from adguardhome import AdGuardHomeConnectionError, AdGuardHomeError
from homeassistant.components.adguard import AdGuardHomeDeviceEntity
from homeassistant.components.adguard.const import (
DATA_ADGUARD_CLIENT,
DATA_ADGUARD_VERION,
DOMAIN,
)
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.typing import HomeAssistantType
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=10)
PARALLEL_UPDATES = 1
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up AdGuard Home switch based on a config entry."""
adguard = hass.data[DOMAIN][DATA_ADGUARD_CLIENT]
try:
version = await adguard.version()
except AdGuardHomeConnectionError as exception:
raise PlatformNotReady from exception
hass.data[DOMAIN][DATA_ADGUARD_VERION] = version
switches = [
AdGuardHomeProtectionSwitch(adguard),
AdGuardHomeFilteringSwitch(adguard),
AdGuardHomeParentalSwitch(adguard),
AdGuardHomeSafeBrowsingSwitch(adguard),
AdGuardHomeSafeSearchSwitch(adguard),
AdGuardHomeQueryLogSwitch(adguard),
]
async_add_entities(switches, True)
class AdGuardHomeSwitch(AdGuardHomeDeviceEntity, SwitchEntity):
    """Defines an AdGuard Home switch."""
def __init__(
self, adguard, name: str, icon: str, key: str, enabled_default: bool = True
):
"""Initialize AdGuard Home switch."""
self._state = False
self._key = key
super().__init__(adguard, name, icon, enabled_default)
@property
    def unique_id(self) -> str:
        """Return the unique ID for this switch."""
return "_".join(
[DOMAIN, self.adguard.host, str(self.adguard.port), "switch", self._key]
)
@property
def is_on(self) -> bool:
"""Return the state of the switch."""
return self._state
async def async_turn_off(self, **kwargs) -> None:
"""Turn off the switch."""
try:
await self._adguard_turn_off()
except AdGuardHomeError:
_LOGGER.error("An error occurred while turning off AdGuard Home switch")
self._available = False
async def _adguard_turn_off(self) -> None:
"""Turn off the switch."""
raise NotImplementedError()
async def async_turn_on(self, **kwargs) -> None:
"""Turn on the switch."""
try:
await self._adguard_turn_on()
except AdGuardHomeError:
_LOGGER.error("An error occurred while turning on AdGuard Home switch")
self._available = False
async def _adguard_turn_on(self) -> None:
"""Turn on the switch."""
raise NotImplementedError()
class AdGuardHomeProtectionSwitch(AdGuardHomeSwitch):
"""Defines a AdGuard Home protection switch."""
def __init__(self, adguard) -> None:
"""Initialize AdGuard Home switch."""
super().__init__(
adguard, "AdGuard Protection", "mdi:shield-check", "protection"
)
async def _adguard_turn_off(self) -> None:
"""Turn off the switch."""
await self.adguard.disable_protection()
async def _adguard_turn_on(self) -> None:
"""Turn on the switch."""
await self.adguard.enable_protection()
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
self._state = await self.adguard.protection_enabled()
class AdGuardHomeParentalSwitch(AdGuardHomeSwitch):
"""Defines a AdGuard Home parental control switch."""
def __init__(self, adguard) -> None:
"""Initialize AdGuard Home switch."""
super().__init__(
adguard, "AdGuard Parental Control", "mdi:shield-check", "parental"
)
async def _adguard_turn_off(self) -> None:
"""Turn off the switch."""
await self.adguard.parental.disable()
async def _adguard_turn_on(self) -> None:
"""Turn on the switch."""
await self.adguard.parental.enable()
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
self._state = await self.adguard.parental.enabled()
class AdGuardHomeSafeSearchSwitch(AdGuardHomeSwitch):
"""Defines a AdGuard Home safe search switch."""
def __init__(self, adguard) -> None:
"""Initialize AdGuard Home switch."""
super().__init__(
adguard, "AdGuard Safe Search", "mdi:shield-check", "safesearch"
)
async def _adguard_turn_off(self) -> None:
"""Turn off the switch."""
await self.adguard.safesearch.disable()
async def _adguard_turn_on(self) -> None:
"""Turn on the switch."""
await self.adguard.safesearch.enable()
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
self._state = await self.adguard.safesearch.enabled()
class AdGuardHomeSafeBrowsingSwitch(AdGuardHomeSwitch):
"""Defines a AdGuard Home safe search switch."""
def __init__(self, adguard) -> None:
"""Initialize AdGuard Home switch."""
super().__init__(
adguard, "AdGuard Safe Browsing", "mdi:shield-check", "safebrowsing"
)
async def _adguard_turn_off(self) -> None:
"""Turn off the switch."""
await self.adguard.safebrowsing.disable()
async def _adguard_turn_on(self) -> None:
"""Turn on the switch."""
await self.adguard.safebrowsing.enable()
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
self._state = await self.adguard.safebrowsing.enabled()
class AdGuardHomeFilteringSwitch(AdGuardHomeSwitch):
"""Defines a AdGuard Home filtering switch."""
def __init__(self, adguard) -> None:
"""Initialize AdGuard Home switch."""
super().__init__(adguard, "AdGuard Filtering", "mdi:shield-check", "filtering")
async def _adguard_turn_off(self) -> None:
"""Turn off the switch."""
await self.adguard.filtering.disable()
async def _adguard_turn_on(self) -> None:
"""Turn on the switch."""
await self.adguard.filtering.enable()
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
self._state = await self.adguard.filtering.enabled()
class AdGuardHomeQueryLogSwitch(AdGuardHomeSwitch):
"""Defines a AdGuard Home query log switch."""
def __init__(self, adguard) -> None:
"""Initialize AdGuard Home switch."""
super().__init__(
adguard,
"AdGuard Query Log",
"mdi:shield-check",
"querylog",
enabled_default=False,
)
async def _adguard_turn_off(self) -> None:
"""Turn off the switch."""
await self.adguard.querylog.disable()
async def _adguard_turn_on(self) -> None:
"""Turn on the switch."""
await self.adguard.querylog.enable()
async def _adguard_update(self) -> None:
"""Update AdGuard Home entity."""
self._state = await self.adguard.querylog.enabled()
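# Hedged sketch (not part of the integration): the template-method pattern used
# above makes adding another toggle a matter of overriding three coroutines.
# The ``example`` attribute on the AdGuard Home client below is hypothetical
# and only illustrates the shape of a new subclass; it is not registered in
# async_setup_entry.
class AdGuardHomeExampleSwitch(AdGuardHomeSwitch):
    """Defines a hypothetical AdGuard Home switch for illustration."""
    def __init__(self, adguard) -> None:
        """Initialize the hypothetical AdGuard Home switch."""
        super().__init__(adguard, "AdGuard Example", "mdi:shield-check", "example")
    async def _adguard_turn_off(self) -> None:
        """Turn off the switch (hypothetical client call)."""
        await self.adguard.example.disable()
    async def _adguard_turn_on(self) -> None:
        """Turn on the switch (hypothetical client call)."""
        await self.adguard.example.enable()
    async def _adguard_update(self) -> None:
        """Update AdGuard Home entity (hypothetical client call)."""
        self._state = await self.adguard.example.enabled()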
|
from collections import OrderedDict
import fnmatch
import re
from typing import Any, Dict, Optional, Pattern
from homeassistant.core import split_entity_id
class EntityValues:
"""Class to store entity id based values."""
def __init__(
self,
exact: Optional[Dict] = None,
domain: Optional[Dict] = None,
glob: Optional[Dict] = None,
) -> None:
"""Initialize an EntityConfigDict."""
self._cache: Dict[str, Dict] = {}
self._exact = exact
self._domain = domain
if glob is None:
compiled: Optional[Dict[Pattern[str], Any]] = None
else:
compiled = OrderedDict()
for key, value in glob.items():
compiled[re.compile(fnmatch.translate(key))] = value
self._glob = compiled
def get(self, entity_id: str) -> Dict:
"""Get config for an entity id."""
if entity_id in self._cache:
return self._cache[entity_id]
domain, _ = split_entity_id(entity_id)
result = self._cache[entity_id] = {}
if self._domain is not None and domain in self._domain:
result.update(self._domain[domain])
if self._glob is not None:
for pattern, values in self._glob.items():
if pattern.match(entity_id):
result.update(values)
if self._exact is not None and entity_id in self._exact:
result.update(self._exact[entity_id])
return result
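# Hedged usage sketch (illustration only, not part of the original module).
# It shows the precedence implemented by ``get()``: domain defaults are
# overridden by glob matches, which are in turn overridden by exact entries.
if __name__ == "__main__":
    values = EntityValues(
        exact={"light.kitchen": {"icon": "mdi:ceiling-light"}},
        domain={"light": {"icon": "mdi:lightbulb", "hidden": False}},
        glob={"light.kitchen*": {"hidden": True}},
    )
    # exact wins for the icon; the glob match still contributes "hidden"
    assert values.get("light.kitchen") == {
        "icon": "mdi:ceiling-light",
        "hidden": True,
    }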
|
import datetime
import logging
import os
import tempfile
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import sample
from perfkitbenchmarker import spark_service
BENCHMARK_NAME = 'spark'
BENCHMARK_CONFIG = """
spark:
description: Run a jar on a spark cluster.
spark_service:
service_type: managed
worker_group:
vm_spec:
GCP:
machine_type: n1-standard-4
boot_disk_size: 500
AWS:
machine_type: m4.xlarge
zone: us-east-1a
vm_count: 2
"""
# The default classname lives in the Spark example jar that is already on the
# cluster, so no jar file needs to be uploaded.
DEFAULT_CLASSNAME = 'org.apache.spark.examples.SparkPi'
flags.DEFINE_string('spark_jarfile', None,
                    'Path to the jar file to submit. If None, use the Spark '
                    'example jar on the cluster.')
flags.DEFINE_string('spark_classname', DEFAULT_CLASSNAME,
'Classname to be used')
flags.DEFINE_bool('spark_print_stdout', True, 'Print the standard '
'output of the job')
flags.DEFINE_list('spark_job_arguments', [], 'Arguments to be passed '
'to the class given by spark_classname')
flags.DEFINE_enum('spark_job_type', spark_service.SPARK_JOB_TYPE,
[spark_service.SPARK_JOB_TYPE, spark_service.HADOOP_JOB_TYPE],
'Type of the job to submit.')
FLAGS = flags.FLAGS
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def Prepare(benchmark_spec):
pass
def Run(benchmark_spec):
"""Executes the given jar on the specified Spark cluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
spark_cluster = benchmark_spec.spark_service
jar_start = datetime.datetime.now()
stdout_path = None
results = []
jarfile = (FLAGS.spark_jarfile or
spark_cluster.GetExampleJar(spark_service.SPARK_JOB_TYPE))
try:
if FLAGS.spark_print_stdout:
# We need to get a name for a temporary file, so we create
# a file, then close it, and use that path name.
stdout_file = tempfile.NamedTemporaryFile(suffix='.stdout',
prefix='spark_benchmark',
delete=False)
stdout_path = stdout_file.name
stdout_file.close()
stats = spark_cluster.SubmitJob(jarfile,
FLAGS.spark_classname,
job_arguments=FLAGS.spark_job_arguments,
job_stdout_file=stdout_path,
job_type=FLAGS.spark_job_type)
if not stats[spark_service.SUCCESS]:
raise Exception('Class {0} from jar {1} did not run'.format(
FLAGS.spark_classname, jarfile))
jar_end = datetime.datetime.now()
if stdout_path:
with open(stdout_path, 'r') as f:
        logging.info('The output of the job is %s', f.read())
metadata = spark_cluster.GetMetadata()
metadata.update({'jarfile': jarfile,
'class': FLAGS.spark_classname,
'job_arguments': str(FLAGS.spark_job_arguments),
'print_stdout': str(FLAGS.spark_print_stdout)})
results.append(sample.Sample('wall_time',
(jar_end - jar_start).total_seconds(),
'seconds', metadata))
if spark_service.RUNTIME in stats:
results.append(sample.Sample('runtime',
stats[spark_service.RUNTIME],
'seconds', metadata))
if spark_service.WAITING in stats:
results.append(sample.Sample('pending_time',
stats[spark_service.WAITING],
'seconds', metadata))
if not spark_cluster.user_managed:
create_time = (spark_cluster.resource_ready_time -
spark_cluster.create_start_time)
results.append(sample.Sample('cluster_create_time', create_time,
'seconds', metadata))
finally:
if stdout_path and os.path.isfile(stdout_path):
os.remove(stdout_path)
return results
def Cleanup(benchmark_spec):
pass
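# Hedged usage sketch (illustration only): a typical invocation of this
# benchmark through the PerfKitBenchmarker CLI. Flags other than the ones
# defined in this module are assumptions, not verified:
#
#     ./pkb.py --benchmarks=spark --cloud=GCP \
#         --spark_classname=org.apache.spark.examples.SparkPi \
#         --spark_job_arguments=1000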
|
from __future__ import division
import numpy as np
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.fpn import FPN
def _random_array(xp, shape):
return xp.array(
np.random.uniform(-1, 1, size=shape), dtype=np.float32)
class DummyExtractor(chainer.Link):
mean = _random_array(np, (3, 1, 1))
def forward(self, x):
n, _, h, w = x.shape
return [
chainer.Variable(_random_array(self.xp, (n, 16, h // 2, w // 2))),
chainer.Variable(_random_array(self.xp, (n, 32, h // 4, w // 4))),
chainer.Variable(_random_array(self.xp, (n, 64, h // 8, w // 8))),
]
class TestFPN(unittest.TestCase):
def setUp(self):
self.link = FPN(
base=DummyExtractor(),
n_base_output=3,
scales=(1 / 2, 1 / 4, 1 / 8))
def test_mean(self):
np.testing.assert_equal(self.link.mean, self.link.base.mean)
def _check_call(self):
x = _random_array(self.link.xp, (2, 3, 32, 32))
hs = self.link(x)
self.assertEqual(len(hs), 3)
for l in range(len(hs)):
self.assertIsInstance(hs[l], chainer.Variable)
self.assertIsInstance(hs[l].array, self.link.xp.ndarray)
self.assertEqual(hs[l].shape, (2, 256, 16 >> l, 16 >> l))
def test_call_cpu(self):
self._check_call()
@attr.gpu
def test_call_gpu(self):
self.link.to_gpu()
self._check_call()
testing.run_module(__name__, __file__)
|
import os
import tempfile
from contextlib import contextmanager
from sqlalchemy import types
from cryptography.fernet import Fernet, MultiFernet
from flask import current_app
@contextmanager
def mktempfile():
with tempfile.NamedTemporaryFile(delete=False) as f:
name = f.name
try:
yield name
finally:
try:
os.unlink(name)
except OSError as e:
current_app.logger.debug("No file {0}".format(name))
@contextmanager
def mktemppath():
try:
path = os.path.join(
tempfile._get_default_tempdir(), next(tempfile._get_candidate_names())
)
yield path
finally:
try:
os.unlink(path)
except OSError as e:
current_app.logger.debug("No file {0}".format(path))
def get_keys():
"""
Gets the encryption keys.
This supports multiple keys to facilitate key rotation. The first
key in the list is used to encrypt. Decryption is attempted with
each key in succession.
:return:
"""
# when running lemur create_config, this code needs to work despite
# the fact that there is not a current_app with a config at that point
keys = current_app.config.get("LEMUR_ENCRYPTION_KEYS", [])
# this function is expected to return a list of keys, but we want
# to let people just specify a single key
if not isinstance(keys, list):
keys = [keys]
# make sure there is no accidental whitespace
keys = [key.strip() for key in keys]
return keys
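# Hedged illustration (not part of the original module): ``get_keys`` accepts
# either a single key or a list in the Lemur config. The values below are
# placeholders, not real Fernet keys:
#
#     LEMUR_ENCRYPTION_KEYS = "<current-fernet-key>"
#     # or, when rotating keys, newest key first:
#     LEMUR_ENCRYPTION_KEYS = ["<new-fernet-key>", "<old-fernet-key>"]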
class Vault(types.TypeDecorator):
"""
A custom SQLAlchemy column type that transparently handles encryption.
This uses the MultiFernet from the cryptography package to facilitate
key rotation. That class handles encryption and signing.
Fernet uses AES in CBC mode with 128-bit keys and PKCS7 padding. It
uses HMAC-SHA256 for ciphertext authentication. Initialization
vectors are generated using os.urandom().
"""
# required by SQLAlchemy. defines the underlying column type
impl = types.LargeBinary
def process_bind_param(self, value, dialect):
"""
Encrypt values on the way into the database.
MultiFernet.encrypt uses the first key in the list.
"""
# we assume that the user's keys are already Fernet keys (32 byte
# keys that have been base64 encoded).
self.keys = [Fernet(key) for key in get_keys()]
if not value:
return
# ensure bytes for fernet
if isinstance(value, str):
value = value.encode("utf-8")
return MultiFernet(self.keys).encrypt(value)
def process_result_value(self, value, dialect):
"""
Decrypt values on the way out of the database.
MultiFernet tries each key until one works.
"""
# we assume that the user's keys are already Fernet keys (32 byte
# keys that have been base64 encoded).
self.keys = [Fernet(key) for key in get_keys()]
        # if there is no value there is nothing to decrypt; this also covers
        # the case where the column is null
if not value:
return
return MultiFernet(self.keys).decrypt(value).decode("utf8")
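# Hedged illustration (not part of the original module): the MultiFernet
# rotation behaviour that Vault relies on. The first key encrypts; decryption
# is attempted with each key in order, so old ciphertext stays readable.
if __name__ == "__main__":
    old = Fernet(Fernet.generate_key())
    token = MultiFernet([old]).encrypt(b"secret")
    new = Fernet(Fernet.generate_key())
    rotated = MultiFernet([new, old])  # newest key first, old key kept for reads
    assert rotated.decrypt(token) == b"secret"
    assert rotated.decrypt(rotated.encrypt(b"secret")) == b"secret"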
|
from homeassistant.components.abode.const import DOMAIN as ABODE_DOMAIN
from homeassistant.components.camera import DOMAIN as CAMERA_DOMAIN
from homeassistant.const import ATTR_ENTITY_ID, STATE_IDLE
from .common import setup_platform
from tests.async_mock import patch
async def test_entity_registry(hass):
"""Tests that the devices are registered in the entity registry."""
await setup_platform(hass, CAMERA_DOMAIN)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entry = entity_registry.async_get("camera.test_cam")
assert entry.unique_id == "d0a3a1c316891ceb00c20118aae2a133"
async def test_attributes(hass):
"""Test the camera attributes are correct."""
await setup_platform(hass, CAMERA_DOMAIN)
state = hass.states.get("camera.test_cam")
assert state.state == STATE_IDLE
async def test_capture_image(hass):
"""Test the camera capture image service."""
await setup_platform(hass, CAMERA_DOMAIN)
with patch("abodepy.AbodeCamera.capture") as mock_capture:
await hass.services.async_call(
ABODE_DOMAIN,
"capture_image",
{ATTR_ENTITY_ID: "camera.test_cam"},
blocking=True,
)
await hass.async_block_till_done()
mock_capture.assert_called_once()
async def test_camera_on(hass):
"""Test the camera turn on service."""
await setup_platform(hass, CAMERA_DOMAIN)
with patch("abodepy.AbodeCamera.privacy_mode") as mock_capture:
await hass.services.async_call(
CAMERA_DOMAIN,
"turn_on",
{ATTR_ENTITY_ID: "camera.test_cam"},
blocking=True,
)
await hass.async_block_till_done()
mock_capture.assert_called_once_with(False)
async def test_camera_off(hass):
"""Test the camera turn off service."""
await setup_platform(hass, CAMERA_DOMAIN)
with patch("abodepy.AbodeCamera.privacy_mode") as mock_capture:
await hass.services.async_call(
CAMERA_DOMAIN,
"turn_off",
{ATTR_ENTITY_ID: "camera.test_cam"},
blocking=True,
)
await hass.async_block_till_done()
mock_capture.assert_called_once_with(True)
|
from datetime import timedelta
from aiounifi.api import SOURCE_DATA, SOURCE_EVENT
from aiounifi.events import (
ACCESS_POINT_UPGRADED,
GATEWAY_UPGRADED,
SWITCH_UPGRADED,
WIRED_CLIENT_CONNECTED,
WIRELESS_CLIENT_CONNECTED,
WIRELESS_CLIENT_ROAM,
WIRELESS_CLIENT_ROAMRADIO,
WIRELESS_GUEST_CONNECTED,
WIRELESS_GUEST_ROAM,
WIRELESS_GUEST_ROAMRADIO,
)
from homeassistant.components.device_tracker import DOMAIN
from homeassistant.components.device_tracker.config_entry import ScannerEntity
from homeassistant.components.device_tracker.const import SOURCE_TYPE_ROUTER
from homeassistant.core import callback
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.event import async_track_point_in_utc_time
import homeassistant.util.dt as dt_util
from .const import ATTR_MANUFACTURER, DOMAIN as UNIFI_DOMAIN
from .unifi_client import UniFiClient
from .unifi_entity_base import UniFiBase
CLIENT_TRACKER = "client"
DEVICE_TRACKER = "device"
CLIENT_CONNECTED_ATTRIBUTES = [
"_is_guest_by_uap",
"ap_mac",
"authorized",
"essid",
"ip",
"is_11r",
"is_guest",
"noted",
"qos_policy_applied",
"radio",
"radio_proto",
"vlan",
]
CLIENT_STATIC_ATTRIBUTES = [
"hostname",
"mac",
"name",
"oui",
]
DEVICE_UPGRADED = (ACCESS_POINT_UPGRADED, GATEWAY_UPGRADED, SWITCH_UPGRADED)
WIRED_CONNECTION = (WIRED_CLIENT_CONNECTED,)
WIRELESS_CONNECTION = (
WIRELESS_CLIENT_CONNECTED,
WIRELESS_CLIENT_ROAM,
WIRELESS_CLIENT_ROAMRADIO,
WIRELESS_GUEST_CONNECTED,
WIRELESS_GUEST_ROAM,
WIRELESS_GUEST_ROAMRADIO,
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up device tracker for UniFi component."""
controller = hass.data[UNIFI_DOMAIN][config_entry.entry_id]
controller.entities[DOMAIN] = {CLIENT_TRACKER: set(), DEVICE_TRACKER: set()}
@callback
def items_added(
clients: set = controller.api.clients, devices: set = controller.api.devices
) -> None:
"""Update the values of the controller."""
if controller.option_track_clients:
add_client_entities(controller, async_add_entities, clients)
if controller.option_track_devices:
add_device_entities(controller, async_add_entities, devices)
for signal in (controller.signal_update, controller.signal_options_update):
controller.listeners.append(async_dispatcher_connect(hass, signal, items_added))
items_added()
@callback
def add_client_entities(controller, async_add_entities, clients):
"""Add new client tracker entities from the controller."""
trackers = []
for mac in clients:
if mac in controller.entities[DOMAIN][UniFiClientTracker.TYPE]:
continue
client = controller.api.clients[mac]
if mac not in controller.wireless_clients:
if not controller.option_track_wired_clients:
continue
elif (
client.essid
and controller.option_ssid_filter
and client.essid not in controller.option_ssid_filter
):
continue
trackers.append(UniFiClientTracker(client, controller))
if trackers:
async_add_entities(trackers)
@callback
def add_device_entities(controller, async_add_entities, devices):
"""Add new device tracker entities from the controller."""
trackers = []
for mac in devices:
if mac in controller.entities[DOMAIN][UniFiDeviceTracker.TYPE]:
continue
device = controller.api.devices[mac]
trackers.append(UniFiDeviceTracker(device, controller))
if trackers:
async_add_entities(trackers)
class UniFiClientTracker(UniFiClient, ScannerEntity):
"""Representation of a network client."""
DOMAIN = DOMAIN
TYPE = CLIENT_TRACKER
def __init__(self, client, controller):
"""Set up tracked client."""
super().__init__(client, controller)
self.schedule_update = False
self.cancel_scheduled_update = None
self._is_connected = False
if client.last_seen:
self._is_connected = (
self.is_wired == client.is_wired
and dt_util.utcnow()
- dt_util.utc_from_timestamp(float(client.last_seen))
< controller.option_detection_time
)
if self._is_connected:
self.schedule_update = True
async def async_will_remove_from_hass(self) -> None:
"""Disconnect object when removed."""
if self.cancel_scheduled_update:
self.cancel_scheduled_update()
await super().async_will_remove_from_hass()
@callback
def async_update_callback(self) -> None:
"""Update the clients state."""
if self.client.last_updated == SOURCE_EVENT:
if (self.is_wired and self.client.event.event in WIRED_CONNECTION) or (
not self.is_wired and self.client.event.event in WIRELESS_CONNECTION
):
self._is_connected = True
self.schedule_update = False
if self.cancel_scheduled_update:
self.cancel_scheduled_update()
self.cancel_scheduled_update = None
# Ignore extra scheduled update from wired bug
elif not self.cancel_scheduled_update:
self.schedule_update = True
elif not self.client.event and self.client.last_updated == SOURCE_DATA:
if self.is_wired == self.client.is_wired:
self._is_connected = True
self.schedule_update = True
if self.schedule_update:
self.schedule_update = False
if self.cancel_scheduled_update:
self.cancel_scheduled_update()
self.cancel_scheduled_update = async_track_point_in_utc_time(
self.hass,
self._make_disconnected,
dt_util.utcnow() + self.controller.option_detection_time,
)
super().async_update_callback()
@callback
def _make_disconnected(self, _):
"""Mark client as disconnected."""
self._is_connected = False
self.cancel_scheduled_update = None
self.async_write_ha_state()
@property
def is_connected(self):
"""Return true if the client is connected to the network."""
if (
not self.is_wired
and self.client.essid
and self.controller.option_ssid_filter
and self.client.essid not in self.controller.option_ssid_filter
):
return False
return self._is_connected
@property
def source_type(self):
"""Return the source type of the client."""
return SOURCE_TYPE_ROUTER
@property
def unique_id(self) -> str:
"""Return a unique identifier for this client."""
return f"{self.client.mac}-{self.controller.site}"
@property
def device_state_attributes(self):
"""Return the client state attributes."""
attributes = {"is_wired": self.is_wired}
if self.is_connected:
for variable in CLIENT_CONNECTED_ATTRIBUTES:
if variable in self.client.raw:
attributes[variable] = self.client.raw[variable]
for variable in CLIENT_STATIC_ATTRIBUTES:
if variable in self.client.raw:
attributes[variable] = self.client.raw[variable]
return attributes
async def options_updated(self) -> None:
"""Config entry options are updated, remove entity if option is disabled."""
if not self.controller.option_track_clients:
await self.remove_item({self.client.mac})
elif self.is_wired:
if not self.controller.option_track_wired_clients:
await self.remove_item({self.client.mac})
elif (
self.controller.option_ssid_filter
and self.client.essid not in self.controller.option_ssid_filter
):
await self.remove_item({self.client.mac})
class UniFiDeviceTracker(UniFiBase, ScannerEntity):
"""Representation of a network infrastructure device."""
DOMAIN = DOMAIN
TYPE = DEVICE_TRACKER
def __init__(self, device, controller):
"""Set up tracked device."""
super().__init__(device, controller)
self._is_connected = device.state == 1
self.cancel_scheduled_update = None
@property
def device(self):
"""Wrap item."""
return self._item
async def async_will_remove_from_hass(self) -> None:
"""Disconnect device object when removed."""
if self.cancel_scheduled_update:
self.cancel_scheduled_update()
await super().async_will_remove_from_hass()
@callback
def async_update_callback(self):
"""Update the devices' state."""
if self.device.last_updated == SOURCE_DATA:
self._is_connected = True
if self.cancel_scheduled_update:
self.cancel_scheduled_update()
self.cancel_scheduled_update = async_track_point_in_utc_time(
self.hass,
self._no_heartbeat,
dt_util.utcnow() + timedelta(seconds=self.device.next_interval + 60),
)
elif (
self.device.last_updated == SOURCE_EVENT
and self.device.event.event in DEVICE_UPGRADED
):
self.hass.async_create_task(self.async_update_device_registry())
return
super().async_update_callback()
@callback
def _no_heartbeat(self, _):
"""No heart beat by device."""
self._is_connected = False
self.cancel_scheduled_update = None
self.async_write_ha_state()
@property
def is_connected(self):
"""Return true if the device is connected to the network."""
return self._is_connected
@property
def source_type(self):
"""Return the source type of the device."""
return SOURCE_TYPE_ROUTER
@property
def name(self) -> str:
"""Return the name of the device."""
return self.device.name or self.device.model
@property
def unique_id(self) -> str:
"""Return a unique identifier for this device."""
return self.device.mac
@property
def available(self) -> bool:
"""Return if controller is available."""
return not self.device.disabled and self.controller.available
@property
def device_info(self):
"""Return a device description for device registry."""
info = {
"connections": {(CONNECTION_NETWORK_MAC, self.device.mac)},
"manufacturer": ATTR_MANUFACTURER,
"model": self.device.model,
"sw_version": self.device.version,
}
if self.device.name:
info["name"] = self.device.name
return info
async def async_update_device_registry(self) -> None:
"""Update device registry."""
device_registry = await self.hass.helpers.device_registry.async_get_registry()
device_registry.async_get_or_create(
config_entry_id=self.controller.config_entry.entry_id, **self.device_info
)
@property
def device_state_attributes(self):
"""Return the device state attributes."""
if self.device.state == 0:
return {}
attributes = {}
if self.device.has_fan:
attributes["fan_level"] = self.device.fan_level
if self.device.overheating:
attributes["overheating"] = self.device.overheating
if self.device.upgradable:
attributes["upgradable"] = self.device.upgradable
return attributes
async def options_updated(self) -> None:
"""Config entry options are updated, remove entity if option is disabled."""
if not self.controller.option_track_devices:
await self.remove_item({self.device.mac})
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import codecs
import contextlib
import copy
import os
import re
import subprocess
import sys
import tempfile
import unittest
from absl import app
from absl import flags
from absl._enum_module import enum
from absl.testing import _bazelize_command
from absl.testing import absltest
from absl.testing import flagsaver
from absl.tests import app_test_helper
import mock
import six
FLAGS = flags.FLAGS
# six.StringIO best reflects the normal behavior of stdout for both py2 and 3.
mock_stdio_type = six.StringIO
_newline_regex = re.compile('(\r\n)|\r')
@contextlib.contextmanager
def patch_main_module_docstring(docstring):
old_doc = sys.modules['__main__'].__doc__
sys.modules['__main__'].__doc__ = docstring
yield
sys.modules['__main__'].__doc__ = old_doc
def _normalize_newlines(s):
  return _newline_regex.sub('\n', s)
class UnitTests(absltest.TestCase):
def test_install_exception_handler(self):
with self.assertRaises(TypeError):
app.install_exception_handler(1)
def test_usage(self):
with mock.patch.object(
sys, 'stderr', new=mock_stdio_type()) as mock_stderr:
app.usage()
self.assertIn(__doc__, mock_stderr.getvalue())
# Assert that flags are written to stderr.
self.assertIn('\n --[no]helpfull:', mock_stderr.getvalue())
def test_usage_shorthelp(self):
with mock.patch.object(
sys, 'stderr', new=mock_stdio_type()) as mock_stderr:
app.usage(shorthelp=True)
# Assert that flags are NOT written to stderr.
self.assertNotIn(' --', mock_stderr.getvalue())
def test_usage_writeto_stderr(self):
with mock.patch.object(
sys, 'stdout', new=mock_stdio_type()) as mock_stdout:
app.usage(writeto_stdout=True)
self.assertIn(__doc__, mock_stdout.getvalue())
def test_usage_detailed_error(self):
with mock.patch.object(
sys, 'stderr', new=mock_stdio_type()) as mock_stderr:
app.usage(detailed_error='BAZBAZ')
self.assertIn('BAZBAZ', mock_stderr.getvalue())
def test_usage_exitcode(self):
# The test environment may not have the correct output encoding,
# and we can't really change it once we've started the test,
# so we have to replace it with one that understands unicode.
if six.PY2:
stderr = codecs.getwriter('utf8')(sys.stderr)
else:
stderr = sys.stderr
with mock.patch.object(sys, 'stderr', new=stderr):
try:
app.usage(exitcode=2)
        self.fail('app.usage(exitcode=2) should raise SystemExit')
except SystemExit as e:
self.assertEqual(2, e.code)
def test_usage_expands_docstring(self):
with patch_main_module_docstring('Name: %s, %%s'):
with mock.patch.object(
sys, 'stderr', new=mock_stdio_type()) as mock_stderr:
app.usage()
self.assertIn('Name: {}, %s'.format(sys.argv[0]),
mock_stderr.getvalue())
def test_usage_does_not_expand_bad_docstring(self):
with patch_main_module_docstring('Name: %s, %%s, %@'):
with mock.patch.object(
sys, 'stderr', new=mock_stdio_type()) as mock_stderr:
app.usage()
self.assertIn('Name: %s, %%s, %@', mock_stderr.getvalue())
@flagsaver.flagsaver
def test_register_and_parse_flags_with_usage_exits_on_only_check_args(self):
done = app._register_and_parse_flags_with_usage.done
try:
app._register_and_parse_flags_with_usage.done = False
with self.assertRaises(SystemExit):
app._register_and_parse_flags_with_usage(
argv=['./program', '--only_check_args'])
finally:
app._register_and_parse_flags_with_usage.done = done
def test_register_and_parse_flags_with_usage_exits_on_second_run(self):
with self.assertRaises(SystemError):
app._register_and_parse_flags_with_usage()
class FunctionalTests(absltest.TestCase):
"""Functional tests that use runs app_test_helper."""
helper_type = 'pure_python'
def run_helper(self, expect_success,
expected_stdout_substring=None, expected_stderr_substring=None,
arguments=(),
env_overrides=None):
env = os.environ.copy()
env['APP_TEST_HELPER_TYPE'] = self.helper_type
env['PYTHONIOENCODING'] = 'utf8'
if env_overrides:
env.update(env_overrides)
helper = 'absl/tests/app_test_helper_{}'.format(self.helper_type)
process = subprocess.Popen(
[_bazelize_command.get_executable_path(helper)] + list(arguments),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env, universal_newlines=False)
stdout, stderr = process.communicate()
    # In Python 2, we can't control the encoding used by universal_newlines
    # mode, which can cause UnicodeDecodeErrors when subprocess tries to
    # convert the bytes to unicode, so we have to decode it manually.
stdout = _normalize_newlines(stdout.decode('utf8'))
stderr = _normalize_newlines(stderr.decode('utf8'))
message = (u'Command: {command}\n'
'Exit Code: {exitcode}\n'
'===== stdout =====\n{stdout}'
'===== stderr =====\n{stderr}'
'=================='.format(
command=' '.join([helper] + list(arguments)),
exitcode=process.returncode,
stdout=stdout or '<no output>\n',
stderr=stderr or '<no output>\n'))
if expect_success:
self.assertEqual(0, process.returncode, msg=message)
else:
self.assertNotEqual(0, process.returncode, msg=message)
if expected_stdout_substring:
self.assertIn(expected_stdout_substring, stdout, message)
if expected_stderr_substring:
self.assertIn(expected_stderr_substring, stderr, message)
return process.returncode, stdout, stderr
def test_help(self):
_, _, stderr = self.run_helper(
False,
arguments=['--help'],
expected_stdout_substring=app_test_helper.__doc__)
self.assertNotIn('--', stderr)
def test_helpfull_basic(self):
self.run_helper(
False,
arguments=['--helpfull'],
# --logtostderr is from absl.logging module.
expected_stdout_substring='--[no]logtostderr')
def test_helpfull_unicode_flag_help(self):
_, stdout, _ = self.run_helper(
False,
arguments=['--helpfull'],
expected_stdout_substring='str_flag_with_unicode_args')
self.assertIn(u'smile:\U0001F604', stdout)
if six.PY2:
# Default values get repr'd, which causes unicode strings to incorrectly
# render with their escaped values.
self.assertIn(repr(u'thumb:\U0001F44D'), stdout)
else:
# In Python 3, the repr() of a unicode string isn't escaped.
self.assertIn(u'thumb:\U0001F44D', stdout)
def test_helpshort(self):
_, _, stderr = self.run_helper(
False,
arguments=['--helpshort'],
expected_stdout_substring=app_test_helper.__doc__)
self.assertNotIn('--', stderr)
def test_custom_main(self):
self.run_helper(
True,
env_overrides={'APP_TEST_CUSTOM_MAIN_FUNC': 'custom_main'},
expected_stdout_substring='Function called: custom_main.')
def test_custom_argv(self):
self.run_helper(
True,
expected_stdout_substring='argv: ./program pos_arg1',
env_overrides={
'APP_TEST_CUSTOM_ARGV': './program --noraise_exception pos_arg1',
'APP_TEST_PRINT_ARGV': '1',
})
def test_gwq_status_file_on_exception(self):
if self.helper_type == 'pure_python':
# Pure python binary does not write to GWQ Status.
return
tmpdir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
self.run_helper(
False,
arguments=['--raise_exception'],
env_overrides={'GOOGLE_STATUS_DIR': tmpdir})
with open(os.path.join(tmpdir, 'STATUS')) as status_file:
self.assertIn('MyException:', status_file.read())
@unittest.skipIf(six.PY2,
'By default, faulthandler is only available in Python 3.')
def test_faulthandler_dumps_stack_on_sigsegv(self):
return_code, _, _ = self.run_helper(
False,
expected_stderr_substring='app_test_helper.py", line',
arguments=['--faulthandler_sigsegv'])
# sigsegv returns 3 on Windows, and -11 on LINUX/macOS.
expected_return_code = 3 if os.name == 'nt' else -11
self.assertEqual(expected_return_code, return_code)
def test_top_level_exception(self):
self.run_helper(
False,
arguments=['--raise_exception'],
expected_stderr_substring='MyException')
def test_only_check_args(self):
self.run_helper(
True,
arguments=['--only_check_args', '--raise_exception'])
def test_only_check_args_failure(self):
self.run_helper(
False,
arguments=['--only_check_args', '--banana'],
expected_stderr_substring='FATAL Flags parsing error')
def test_usage_error(self):
exitcode, _, _ = self.run_helper(
False,
arguments=['--raise_usage_error'],
expected_stderr_substring=app_test_helper.__doc__)
self.assertEqual(1, exitcode)
def test_usage_error_exitcode(self):
exitcode, _, _ = self.run_helper(
False,
arguments=['--raise_usage_error', '--usage_error_exitcode=88'],
expected_stderr_substring=app_test_helper.__doc__)
self.assertEqual(88, exitcode)
def test_exception_handler(self):
exception_handler_messages = (
'MyExceptionHandler: first\nMyExceptionHandler: second\n')
self.run_helper(
False,
arguments=['--raise_exception'],
expected_stdout_substring=exception_handler_messages)
def test_exception_handler_not_called(self):
_, _, stdout = self.run_helper(True)
self.assertNotIn('MyExceptionHandler', stdout)
def test_print_init_callbacks(self):
_, stdout, _ = self.run_helper(
expect_success=True, arguments=['--print_init_callbacks'])
self.assertIn('before app.run', stdout)
self.assertIn('during real_main', stdout)
class FlagDeepCopyTest(absltest.TestCase):
"""Make sure absl flags are copy.deepcopy() compatible."""
def test_deepcopyable(self):
copy.deepcopy(FLAGS)
# Nothing to assert
class FlagValuesExternalizationTest(absltest.TestCase):
"""Test to make sure FLAGS can be serialized out and parsed back in."""
@flagsaver.flagsaver
def test_nohelp_doesnt_show_help(self):
with self.assertRaisesWithPredicateMatch(SystemExit,
lambda e: e.code == 1):
app.run(
len,
argv=[
'./program', '--nohelp', '--helpshort=false', '--helpfull=0',
'--helpxml=f'
])
@flagsaver.flagsaver
def test_serialize_roundtrip(self):
# Use the global 'FLAGS' as the source, to ensure all the framework defined
# flags will go through the round trip process.
flags.DEFINE_string('testflag', 'testval', 'help', flag_values=FLAGS)
flags.DEFINE_multi_enum('test_multi_enum_flag',
['x', 'y'], ['x', 'y', 'z'],
'Multi enum help.',
flag_values=FLAGS)
class Fruit(enum.Enum):
APPLE = 1
ORANGE = 2
TOMATO = 3
flags.DEFINE_multi_enum_class('test_multi_enum_class_flag',
['APPLE', 'TOMATO'], Fruit,
'Fruit help.',
flag_values=FLAGS)
new_flag_values = flags.FlagValues()
new_flag_values.append_flag_values(FLAGS)
FLAGS.testflag = 'roundtrip_me'
FLAGS.test_multi_enum_flag = ['y', 'z']
FLAGS.test_multi_enum_class_flag = [Fruit.ORANGE, Fruit.APPLE]
argv = ['binary_name'] + FLAGS.flags_into_string().splitlines()
self.assertNotEqual(new_flag_values['testflag'], FLAGS.testflag)
self.assertNotEqual(new_flag_values['test_multi_enum_flag'],
FLAGS.test_multi_enum_flag)
self.assertNotEqual(new_flag_values['test_multi_enum_class_flag'],
FLAGS.test_multi_enum_class_flag)
new_flag_values(argv)
self.assertEqual(new_flag_values.testflag, FLAGS.testflag)
self.assertEqual(new_flag_values.test_multi_enum_flag,
FLAGS.test_multi_enum_flag)
self.assertEqual(new_flag_values.test_multi_enum_class_flag,
FLAGS.test_multi_enum_class_flag)
del FLAGS.testflag
del FLAGS.test_multi_enum_flag
del FLAGS.test_multi_enum_class_flag
if __name__ == '__main__':
absltest.main()
|
import numpy as np
from ...utils import warn
class _LinkViewer(object):
"""Class to link multiple Brain objects."""
def __init__(self, brains, time=True, camera=False, colorbar=True,
picking=False):
self.brains = brains
self.leader = self.brains[0] # select a brain as leader
# check time infos
times = [brain._times for brain in brains]
if time and not all(np.allclose(x, times[0]) for x in times):
warn('stc.times do not match, not linking time')
time = False
if camera:
self.link_cameras()
if time:
# link time sliders
self.link_sliders(
name="time",
callback=self.set_time_point,
event_type="always"
)
# link playback speed sliders
self.link_sliders(
name="playback_speed",
callback=self.set_playback_speed,
event_type="always"
)
# link toggle to start/pause playback
for brain in self.brains:
brain.actions["play"].triggered.disconnect()
brain.actions["play"].triggered.connect(
self.toggle_playback)
# link time course canvas
def _time_func(*args, **kwargs):
for brain in self.brains:
brain.callbacks["time"](*args, **kwargs)
for brain in self.brains:
if brain.show_traces:
brain.mpl_canvas.time_func = _time_func
if picking:
def _func_add(*args, **kwargs):
for brain in self.brains:
brain._add_vertex_glyph2(*args, **kwargs)
brain.plotter.update()
def _func_remove(*args, **kwargs):
for brain in self.brains:
brain._remove_vertex_glyph2(*args, **kwargs)
# save initial picked points
initial_points = dict()
for hemi in ('lh', 'rh'):
initial_points[hemi] = set()
for brain in self.brains:
initial_points[hemi] |= \
set(brain.picked_points[hemi])
# link the viewers
for brain in self.brains:
brain.clear_glyphs()
brain._add_vertex_glyph2 = brain._add_vertex_glyph
brain._add_vertex_glyph = _func_add
brain._remove_vertex_glyph2 = brain._remove_vertex_glyph
brain._remove_vertex_glyph = _func_remove
# link the initial points
for hemi in initial_points.keys():
if hemi in brain._layered_meshes:
mesh = brain._layered_meshes[hemi]._polydata
for vertex_id in initial_points[hemi]:
self.leader._add_vertex_glyph(hemi, mesh, vertex_id)
if colorbar:
fmin = self.leader._data["fmin"]
fmid = self.leader._data["fmid"]
fmax = self.leader._data["fmax"]
for brain in self.brains:
brain.callbacks["fmin"](fmin)
brain.callbacks["fmid"](fmid)
brain.callbacks["fmax"](fmax)
for slider_name in ('fmin', 'fmid', 'fmax'):
func = getattr(self, "set_" + slider_name)
self.link_sliders(
name=slider_name,
callback=func,
event_type="always"
)
def set_fmin(self, value):
for brain in self.brains:
brain.callbacks["fmin"](value)
def set_fmid(self, value):
for brain in self.brains:
brain.callbacks["fmid"](value)
def set_fmax(self, value):
for brain in self.brains:
brain.callbacks["fmax"](value)
def set_time_point(self, value):
for brain in self.brains:
brain.callbacks["time"](value, update_widget=True)
def set_playback_speed(self, value):
for brain in self.brains:
brain.callbacks["playback_speed"](value, update_widget=True)
def toggle_playback(self):
value = self.leader.callbacks["time"].slider_rep.GetValue()
# synchronize starting points before playback
self.set_time_point(value)
for brain in self.brains:
brain.toggle_playback()
def link_sliders(self, name, callback, event_type):
from ..backends._pyvista import _update_slider_callback
for brain in self.brains:
slider = brain.sliders[name]
if slider is not None:
_update_slider_callback(
slider=slider,
callback=callback,
event_type=event_type
)
def link_cameras(self):
from ..backends._pyvista import _add_camera_callback
def _update_camera(vtk_picker, event):
for brain in self.brains:
brain.plotter.update()
camera = self.leader.plotter.camera
_add_camera_callback(camera, _update_camera)
for brain in self.brains:
for renderer in brain.plotter.renderers:
renderer.camera = camera
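# Hedged usage sketch (illustration only): link two existing Brain instances so
# that time, camera and colorbar changes on one are mirrored on the other.
# ``brain_a`` and ``brain_b`` are assumed to be Brain objects created elsewhere.
#
#     viewer = _LinkViewer([brain_a, brain_b], time=True, camera=True,
#                          colorbar=True, picking=False)
#     viewer.set_time_point(10)  # both brains jump to the same time point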
|
from typing import List
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.components.homeassistant.triggers import (
numeric_state as numeric_state_trigger,
state as state_trigger,
)
from homeassistant.const import (
ATTR_SUPPORTED_FEATURES,
CONF_ABOVE,
CONF_BELOW,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_PLATFORM,
CONF_TYPE,
CONF_VALUE_TEMPLATE,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType
from . import (
DOMAIN,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
SUPPORT_SET_TILT_POSITION,
)
POSITION_TRIGGER_TYPES = {"position", "tilt_position"}
STATE_TRIGGER_TYPES = {"opened", "closed", "opening", "closing"}
POSITION_TRIGGER_SCHEMA = vol.All(
TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(POSITION_TRIGGER_TYPES),
vol.Optional(CONF_ABOVE): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
vol.Optional(CONF_BELOW): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
}
),
cv.has_at_least_one_key(CONF_BELOW, CONF_ABOVE),
)
STATE_TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(STATE_TRIGGER_TYPES),
}
)
TRIGGER_SCHEMA = vol.Any(POSITION_TRIGGER_SCHEMA, STATE_TRIGGER_SCHEMA)
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device triggers for Cover devices."""
registry = await entity_registry.async_get_registry(hass)
triggers = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
state = hass.states.get(entry.entity_id)
if not state or ATTR_SUPPORTED_FEATURES not in state.attributes:
continue
supported_features = state.attributes[ATTR_SUPPORTED_FEATURES]
supports_open_close = supported_features & (SUPPORT_OPEN | SUPPORT_CLOSE)
# Add triggers for each entity that belongs to this integration
if supports_open_close:
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "opened",
}
)
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "closed",
}
)
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "opening",
}
)
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "closing",
}
)
if supported_features & SUPPORT_SET_POSITION:
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "position",
}
)
if supported_features & SUPPORT_SET_TILT_POSITION:
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "tilt_position",
}
)
return triggers
async def async_get_trigger_capabilities(hass: HomeAssistant, config: dict) -> dict:
"""List trigger capabilities."""
    if config[CONF_TYPE] not in POSITION_TRIGGER_TYPES:
return {}
return {
"extra_fields": vol.Schema(
{
vol.Optional(CONF_ABOVE, default=0): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
vol.Optional(CONF_BELOW, default=100): vol.All(
vol.Coerce(int), vol.Range(min=0, max=100)
),
}
)
}
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
config = TRIGGER_SCHEMA(config)
if config[CONF_TYPE] in STATE_TRIGGER_TYPES:
if config[CONF_TYPE] == "opened":
to_state = STATE_OPEN
elif config[CONF_TYPE] == "closed":
to_state = STATE_CLOSED
elif config[CONF_TYPE] == "opening":
to_state = STATE_OPENING
elif config[CONF_TYPE] == "closing":
to_state = STATE_CLOSING
state_config = {
CONF_PLATFORM: "state",
CONF_ENTITY_ID: config[CONF_ENTITY_ID],
state_trigger.CONF_TO: to_state,
}
state_config = state_trigger.TRIGGER_SCHEMA(state_config)
return await state_trigger.async_attach_trigger(
hass, state_config, action, automation_info, platform_type="device"
)
if config[CONF_TYPE] == "position":
position = "current_position"
if config[CONF_TYPE] == "tilt_position":
position = "current_tilt_position"
min_pos = config.get(CONF_ABOVE, -1)
max_pos = config.get(CONF_BELOW, 101)
value_template = f"{{{{ state.attributes.{position} }}}}"
numeric_state_config = {
CONF_PLATFORM: "numeric_state",
CONF_ENTITY_ID: config[CONF_ENTITY_ID],
CONF_BELOW: max_pos,
CONF_ABOVE: min_pos,
CONF_VALUE_TEMPLATE: value_template,
}
numeric_state_config = numeric_state_trigger.TRIGGER_SCHEMA(numeric_state_config)
return await numeric_state_trigger.async_attach_trigger(
hass, numeric_state_config, action, automation_info, platform_type="device"
)
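# Hedged illustration (not part of the component): a position trigger config of
# the shape accepted by TRIGGER_SCHEMA above. The device and entity ids are
# placeholders.
#
#     {
#         "platform": "device",
#         "domain": "cover",
#         "device_id": "abc123",
#         "entity_id": "cover.garage_door",
#         "type": "position",
#         "above": 50,
#     }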
|
import asyncio
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Tuple
from aioesphomeapi import (
COMPONENT_TYPE_TO_INFO,
BinarySensorInfo,
CameraInfo,
ClimateInfo,
CoverInfo,
DeviceInfo,
EntityInfo,
EntityState,
FanInfo,
LightInfo,
SensorInfo,
SwitchInfo,
TextSensorInfo,
UserService,
)
import attr
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.storage import Store
from homeassistant.helpers.typing import HomeAssistantType
if TYPE_CHECKING:
from . import APIClient
DATA_KEY = "esphome"
SAVE_DELAY = 120
# Mapping from ESPHome info type to HA platform
INFO_TYPE_TO_PLATFORM = {
BinarySensorInfo: "binary_sensor",
CameraInfo: "camera",
ClimateInfo: "climate",
CoverInfo: "cover",
FanInfo: "fan",
LightInfo: "light",
SensorInfo: "sensor",
SwitchInfo: "switch",
TextSensorInfo: "sensor",
}
@attr.s
class RuntimeEntryData:
"""Store runtime data for esphome config entries."""
entry_id: str = attr.ib()
client: "APIClient" = attr.ib()
store: Store = attr.ib()
reconnect_task: Optional[asyncio.Task] = attr.ib(default=None)
state: Dict[str, Dict[str, Any]] = attr.ib(factory=dict)
info: Dict[str, Dict[str, Any]] = attr.ib(factory=dict)
    # A second store of EntityInfo objects, mirroring ``info``.
    # This is necessary when an entity is being removed: HA requires some
    # static info (unique_id, maybe others) to remain accessible during removal.
    # If an entity can't find anything in ``info``, it will look for info here.
old_info: Dict[str, Dict[str, Any]] = attr.ib(factory=dict)
services: Dict[int, "UserService"] = attr.ib(factory=dict)
available: bool = attr.ib(default=False)
device_info: Optional[DeviceInfo] = attr.ib(default=None)
cleanup_callbacks: List[Callable[[], None]] = attr.ib(factory=list)
disconnect_callbacks: List[Callable[[], None]] = attr.ib(factory=list)
loaded_platforms: Set[str] = attr.ib(factory=set)
platform_load_lock: asyncio.Lock = attr.ib(factory=asyncio.Lock)
@callback
def async_update_entity(
self, hass: HomeAssistantType, component_key: str, key: int
) -> None:
"""Schedule the update of an entity."""
signal = f"esphome_{self.entry_id}_update_{component_key}_{key}"
async_dispatcher_send(hass, signal)
@callback
def async_remove_entity(
self, hass: HomeAssistantType, component_key: str, key: int
) -> None:
"""Schedule the removal of an entity."""
signal = f"esphome_{self.entry_id}_remove_{component_key}_{key}"
async_dispatcher_send(hass, signal)
async def _ensure_platforms_loaded(
self, hass: HomeAssistantType, entry: ConfigEntry, platforms: Set[str]
):
async with self.platform_load_lock:
needed = platforms - self.loaded_platforms
tasks = []
for platform in needed:
tasks.append(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
if tasks:
await asyncio.wait(tasks)
self.loaded_platforms |= needed
async def async_update_static_infos(
self, hass: HomeAssistantType, entry: ConfigEntry, infos: List[EntityInfo]
) -> None:
"""Distribute an update of static infos to all platforms."""
# First, load all platforms
needed_platforms = set()
for info in infos:
for info_type, platform in INFO_TYPE_TO_PLATFORM.items():
if isinstance(info, info_type):
needed_platforms.add(platform)
break
await self._ensure_platforms_loaded(hass, entry, needed_platforms)
# Then send dispatcher event
signal = f"esphome_{self.entry_id}_on_list"
async_dispatcher_send(hass, signal, infos)
@callback
def async_update_state(self, hass: HomeAssistantType, state: EntityState) -> None:
"""Distribute an update of state information to all platforms."""
signal = f"esphome_{self.entry_id}_on_state"
async_dispatcher_send(hass, signal, state)
@callback
def async_update_device_state(self, hass: HomeAssistantType) -> None:
"""Distribute an update of a core device state like availability."""
signal = f"esphome_{self.entry_id}_on_device_update"
async_dispatcher_send(hass, signal)
async def async_load_from_store(self) -> Tuple[List[EntityInfo], List[UserService]]:
"""Load the retained data from store and return de-serialized data."""
restored = await self.store.async_load()
if restored is None:
return [], []
self.device_info = _attr_obj_from_dict(
DeviceInfo, **restored.pop("device_info")
)
infos = []
for comp_type, restored_infos in restored.items():
if comp_type not in COMPONENT_TYPE_TO_INFO:
continue
for info in restored_infos:
cls = COMPONENT_TYPE_TO_INFO[comp_type]
infos.append(_attr_obj_from_dict(cls, **info))
services = []
for service in restored.get("services", []):
services.append(UserService.from_dict(service))
return infos, services
async def async_save_to_store(self) -> None:
"""Generate dynamic data to store and save it to the filesystem."""
store_data = {"device_info": attr.asdict(self.device_info), "services": []}
for comp_type, infos in self.info.items():
store_data[comp_type] = [attr.asdict(info) for info in infos.values()]
for service in self.services.values():
store_data["services"].append(service.to_dict())
self.store.async_delay_save(lambda: store_data, SAVE_DELAY)
def _attr_obj_from_dict(cls, **kwargs):
return cls(**{key: kwargs[key] for key in attr.fields_dict(cls) if key in kwargs})
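# Hedged illustration (not part of the original module): _attr_obj_from_dict
# drops keys that the attrs class does not know about, so data stored by an
# older version still deserializes after fields are removed.
if __name__ == "__main__":
    @attr.s
    class _Demo:
        name: str = attr.ib()
        key: int = attr.ib(default=0)
    restored = {"name": "kitchen_light", "key": 3, "removed_field": True}
    assert _attr_obj_from_dict(_Demo, **restored) == _Demo(name="kitchen_light", key=3)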
|
from aiohttp import web
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.data_validator import RequestDataValidator
from tests.async_mock import Mock
async def get_client(aiohttp_client, validator):
"""Generate a client that hits a view decorated with validator."""
app = web.Application()
app["hass"] = Mock(is_stopping=False)
class TestView(HomeAssistantView):
url = "/"
name = "test"
requires_auth = False
@validator
async def post(self, request, data):
"""Test method."""
return b""
TestView().register(app, app.router)
client = await aiohttp_client(app)
return client
async def test_validator(aiohttp_client):
"""Test the validator."""
client = await get_client(
aiohttp_client, RequestDataValidator(vol.Schema({vol.Required("test"): str}))
)
resp = await client.post("/", json={"test": "bla"})
assert resp.status == 200
resp = await client.post("/", json={"test": 100})
assert resp.status == 400
resp = await client.post("/")
assert resp.status == 400
async def test_validator_allow_empty(aiohttp_client):
"""Test the validator with empty data."""
client = await get_client(
aiohttp_client,
RequestDataValidator(
vol.Schema(
{
# Although we allow empty, our schema should still be able
# to validate an empty dict.
vol.Optional("test"): str
}
),
allow_empty=True,
),
)
resp = await client.post("/", json={"test": "bla"})
assert resp.status == 200
resp = await client.post("/", json={"test": 100})
assert resp.status == 400
resp = await client.post("/")
assert resp.status == 200
|
import voluptuous as vol
from homeassistant.components import rpi_pfio
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import CONF_NAME, DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
CONF_INVERT_LOGIC = "invert_logic"
CONF_PORTS = "ports"
CONF_SETTLE_TIME = "settle_time"
DEFAULT_INVERT_LOGIC = False
DEFAULT_SETTLE_TIME = 20
PORT_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_SETTLE_TIME, default=DEFAULT_SETTLE_TIME): cv.positive_int,
vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_PORTS, default={}): vol.Schema({cv.positive_int: PORT_SCHEMA})}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the PiFace Digital Input devices."""
binary_sensors = []
ports = config.get(CONF_PORTS)
for port, port_entity in ports.items():
name = port_entity.get(CONF_NAME)
settle_time = port_entity[CONF_SETTLE_TIME] / 1000
invert_logic = port_entity[CONF_INVERT_LOGIC]
binary_sensors.append(
RPiPFIOBinarySensor(hass, port, name, settle_time, invert_logic)
)
add_entities(binary_sensors, True)
rpi_pfio.activate_listener(hass)
class RPiPFIOBinarySensor(BinarySensorEntity):
"""Represent a binary sensor that a PiFace Digital Input."""
def __init__(self, hass, port, name, settle_time, invert_logic):
"""Initialize the RPi binary sensor."""
self._port = port
self._name = name or DEVICE_DEFAULT_NAME
self._invert_logic = invert_logic
self._state = None
def read_pfio(port):
"""Read state from PFIO."""
self._state = rpi_pfio.read_input(self._port)
self.schedule_update_ha_state()
rpi_pfio.edge_detect(hass, self._port, read_pfio, settle_time)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return the state of the entity."""
return self._state != self._invert_logic
def update(self):
"""Update the PFIO state."""
self._state = rpi_pfio.read_input(self._port)
|
import sys
from os import chdir, getcwd
from os.path import join, basename
from tempfile import mkdtemp
from unittest import TestCase
from uuid import uuid4 as uuid
from shutil import rmtree
from shlex import split
from contextlib import contextmanager, nested
from textwrap import dedent
from mock import patch
from git import Repo
from git.cmd import Git
from gitsweep.inspector import Inspector
from gitsweep.deleter import Deleter
from gitsweep.cli import CommandLine
@contextmanager
def cwd_bounce(dir):
"""
    Temporarily change to a directory, then change back when the context exits.
    ``dir`` is the directory to change to. When the context manager exits it
    changes back to the original working directory.
    The original working directory is yielded, so it is available as the
    context manager's assignment target.
"""
original_dir = getcwd()
try:
chdir(dir)
yield original_dir
finally:
chdir(original_dir)
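# Hedged usage sketch (illustration only): ``cwd_bounce`` restores the working
# directory even if the body raises.
#
#     before = getcwd()
#     with cwd_bounce(mkdtemp()) as original:
#         assert original == before   # the previous directory is yielded
#     assert getcwd() == before       # and restored on exit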
class GitSweepTestCase(TestCase):
"""
    Sets up a Git repository and provides some commands to manipulate it.
"""
def setUp(self):
"""
Sets up the Git repository for testing.
        The following will be available after :py:meth:`setUp()` runs.
self.repodir
The absolute filename of the Git repository
self.repo
A ``git.Repo`` object for self.repodir
        This will create the root commit in the test repository automatically.
"""
super(GitSweepTestCase, self).setUp()
repodir = mkdtemp()
self.repodir = repodir
self.repo = Repo.init(repodir)
rootcommit_filename = join(repodir, 'rootcommit')
with open(rootcommit_filename, 'w') as fh:
fh.write('')
self.repo.index.add([basename(rootcommit_filename)])
self.repo.index.commit('Root commit')
# Cache the remote per test
self._remote = None
# Keep track of cloned repositories that track self.repo
self._clone_dirs = []
def tearDown(self):
"""
Remove any created repositories.
"""
rmtree(self.repodir)
for clone in self._clone_dirs:
rmtree(clone)
def assertResults(self, expected, actual):
"""
Assert that output matches expected argument.
"""
expected = dedent(expected).strip()
actual = actual.strip()
self.assertEqual(expected, actual)
def command(self, command):
"""
Runs the Git command in self.repo
"""
args = split(command)
cmd = Git(self.repodir)
cmd.execute(args)
@property
def remote(self):
"""
Clones the test case's repository and tracks it as a remote.
Returns a ``git.Repo`` object.
"""
if not self._remote:
clonedir = mkdtemp()
self._clone_dirs.append(clonedir)
self._remote = Repo.clone(self.repo, clonedir)
# Update in case the remote has changed
self._remote.remotes[0].pull()
return self._remote
def graph(self):
"""
Prints a graph of the git log.
This is used for testing and debugging only.
"""
sys.stdout.write(Git(self.repodir).execute(
['git', 'log', '--graph', '--oneline']))
def make_commit(self):
"""
Makes a random commit in the current branch.
"""
fragment = uuid().hex[:8]
filename = join(self.repodir, fragment)
with open(filename, 'w') as fh:
fh.write(uuid().hex)
self.repo.index.add([basename(filename)])
self.repo.index.commit('Adding {0}'.format(basename(filename)))
class InspectorTestCase(TestCase):
"""
Creates an Inspector object for testing.
"""
def setUp(self):
super(InspectorTestCase, self).setUp()
self._inspector = None
@property
def inspector(self):
"""
Return and optionally create an Inspector from self.remote.
"""
if not self._inspector:
self._inspector = Inspector(self.remote)
return self._inspector
def merged_refs(self, refobjs=False):
"""
Get a list of branch names from merged refs from self.inspector.
By default, it returns a list of branch names. You can return the
actual ``git.RemoteRef`` objects by passing ``refobjs=True``.
"""
refs = self.inspector.merged_refs()
if refobjs:
return refs
return [i.remote_head for i in refs]
class DeleterTestCase(TestCase):
"""
Creates a Deleter object for testing.
"""
def setUp(self):
super(DeleterTestCase, self).setUp()
self._deleter = None
@property
def deleter(self):
"""
Return and optionally create a Deleter from self.remote.
"""
if not self._deleter:
self._deleter = Deleter(self.remote)
return self._deleter
class CommandTestCase(GitSweepTestCase, InspectorTestCase, DeleterTestCase):
"""
Used to test the command-line interface.
"""
def setUp(self):
super(CommandTestCase, self).setUp()
self._commandline = None
self._original_dir = getcwd()
# Change the working directory to our clone
chdir(self.remote.working_dir)
def tearDown(self):
"""
Change back to the original directory.
"""
chdir(self._original_dir)
@property
def cli(self):
"""
Return and optionally create a CommandLine object.
"""
if not self._commandline:
self._commandline = CommandLine([])
return self._commandline
def gscommand(self, command):
"""
Runs the command with the given args.
"""
args = split(command)
self.cli.args = args[1:]
patches = (
patch.object(sys, 'stdout'),
patch.object(sys, 'stderr'))
with nested(*patches):
stdout = sys.stdout
stderr = sys.stderr
try:
self.cli.run()
except SystemExit as se:
pass
stdout = ''.join([i[0][0] for i in stdout.write.call_args_list])
stderr = ''.join([i[0][0] for i in stderr.write.call_args_list])
return (se.code, stdout, stderr)
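    # Illustrative call pattern for gscommand (the command string is an
    # assumed example):
    #
    #     (code, stdout, stderr) = self.gscommand('git-sweep preview')
    #
    # stdout and stderr are reassembled from the mocked sys.stdout/sys.stderr
    # write() calls, and code is the SystemExit code raised by the CLI run.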
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import tensornetwork as tn
from tensornetwork.matrixproductstates.infinite_mps import InfiniteMPS
import tensorflow as tf
from jax.config import config
@pytest.fixture(
name="backend_dtype_values",
params=[('numpy', np.float64), ('numpy', np.complex128),
('tensorflow', np.float64), ('tensorflow', np.complex128),
('pytorch', np.float64), ('jax', np.float64)])
def backend_dtype(request):
return request.param
def get_random_np(shape, dtype, seed=0):
np.random.seed(seed) #get the same tensors every time you call this function
if dtype is np.complex64:
return np.random.randn(*shape).astype(
np.float32) + 1j * np.random.randn(*shape).astype(np.float32)
if dtype is np.complex128:
return np.random.randn(*shape).astype(
np.float64) + 1j * np.random.randn(*shape).astype(np.float64)
return np.random.randn(*shape).astype(dtype)
@pytest.mark.parametrize("N, pos", [(10, -1), (10, 10)])
def test_infinite_mps_init(backend, N, pos):
D, d = 10, 2
tensors = [np.random.randn(2, d, D)] + [
np.random.randn(D, d, D) for _ in range(N - 2)
] + [np.random.randn(D, d, 1)]
with pytest.raises(ValueError):
InfiniteMPS(tensors, center_position=pos, backend=backend)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
def test_TMeigs(dtype):
D, d, N = 10, 2, 10
imps = InfiniteMPS.random(
d=[d] * N, D=[D] * (N + 1), dtype=dtype, backend='numpy')
eta, l = imps.transfer_matrix_eigs('r')
l2 = imps.unit_cell_transfer_operator('r', l)
np.testing.assert_allclose(eta * l, l2)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("direction", ['left', 'right'])
def test_unitcell_transfer_operator(dtype, direction):
D, d, N = 10, 2, 10
imps = InfiniteMPS.random(
d=[d] * N, D=[D] * (N + 1), dtype=dtype, backend='numpy')
m = imps.backend.randn((D, D), dtype=dtype, seed=10)
res1 = imps.unit_cell_transfer_operator(direction, m)
sites = range(len(imps))
if direction == 'right':
sites = reversed(sites)
for site in sites:
m = imps.apply_transfer_operator(site, direction, m)
np.testing.assert_allclose(m, res1)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
def test_InfiniteMPS_canonicalize(dtype):
D, d, N = 10, 2, 4
imps = InfiniteMPS.random(
d=[d] * N, D=[D] * (N + 1), dtype=dtype, backend='numpy')
imps.canonicalize()
assert imps.check_canonical() < 1E-12
|
import enum
import logging
from typing import List
import bellows.zigbee.application
from zigpy.config import CONF_DEVICE_PATH # noqa: F401 # pylint: disable=unused-import
import zigpy_cc.zigbee.application
import zigpy_deconz.zigbee.application
import zigpy_xbee.zigbee.application
import zigpy_zigate.zigbee.application
import zigpy_znp.zigbee.application
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR
from homeassistant.components.climate import DOMAIN as CLIMATE
from homeassistant.components.cover import DOMAIN as COVER
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.components.fan import DOMAIN as FAN
from homeassistant.components.light import DOMAIN as LIGHT
from homeassistant.components.lock import DOMAIN as LOCK
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.switch import DOMAIN as SWITCH
from .typing import CALLABLE_T
ATTR_ARGS = "args"
ATTR_ATTRIBUTE = "attribute"
ATTR_ATTRIBUTE_ID = "attribute_id"
ATTR_ATTRIBUTE_NAME = "attribute_name"
ATTR_AVAILABLE = "available"
ATTR_CLUSTER_ID = "cluster_id"
ATTR_CLUSTER_TYPE = "cluster_type"
ATTR_COMMAND = "command"
ATTR_COMMAND_TYPE = "command_type"
ATTR_DEVICE_IEEE = "device_ieee"
ATTR_DEVICE_TYPE = "device_type"
ATTR_ENDPOINTS = "endpoints"
ATTR_ENDPOINT_NAMES = "endpoint_names"
ATTR_ENDPOINT_ID = "endpoint_id"
ATTR_IEEE = "ieee"
ATTR_IN_CLUSTERS = "in_clusters"
ATTR_LAST_SEEN = "last_seen"
ATTR_LEVEL = "level"
ATTR_LQI = "lqi"
ATTR_MANUFACTURER = "manufacturer"
ATTR_MANUFACTURER_CODE = "manufacturer_code"
ATTR_MEMBERS = "members"
ATTR_MODEL = "model"
ATTR_NAME = "name"
ATTR_NEIGHBORS = "neighbors"
ATTR_NODE_DESCRIPTOR = "node_descriptor"
ATTR_NWK = "nwk"
ATTR_OUT_CLUSTERS = "out_clusters"
ATTR_POWER_SOURCE = "power_source"
ATTR_PROFILE_ID = "profile_id"
ATTR_QUIRK_APPLIED = "quirk_applied"
ATTR_QUIRK_CLASS = "quirk_class"
ATTR_RSSI = "rssi"
ATTR_SIGNATURE = "signature"
ATTR_TYPE = "type"
ATTR_UNIQUE_ID = "unique_id"
ATTR_VALUE = "value"
ATTR_WARNING_DEVICE_DURATION = "duration"
ATTR_WARNING_DEVICE_MODE = "mode"
ATTR_WARNING_DEVICE_STROBE = "strobe"
ATTR_WARNING_DEVICE_STROBE_DUTY_CYCLE = "duty_cycle"
ATTR_WARNING_DEVICE_STROBE_INTENSITY = "intensity"
BAUD_RATES = [2400, 4800, 9600, 14400, 19200, 38400, 57600, 115200, 128000, 256000]
BINDINGS = "bindings"
CHANNEL_ACCELEROMETER = "accelerometer"
CHANNEL_ANALOG_INPUT = "analog_input"
CHANNEL_ATTRIBUTE = "attribute"
CHANNEL_BASIC = "basic"
CHANNEL_COLOR = "light_color"
CHANNEL_COVER = "window_covering"
CHANNEL_DOORLOCK = "door_lock"
CHANNEL_ELECTRICAL_MEASUREMENT = "electrical_measurement"
CHANNEL_EVENT_RELAY = "event_relay"
CHANNEL_FAN = "fan"
CHANNEL_HUMIDITY = "humidity"
CHANNEL_IAS_WD = "ias_wd"
CHANNEL_IDENTIFY = "identify"
CHANNEL_ILLUMINANCE = "illuminance"
CHANNEL_LEVEL = ATTR_LEVEL
CHANNEL_MULTISTATE_INPUT = "multistate_input"
CHANNEL_OCCUPANCY = "occupancy"
CHANNEL_ON_OFF = "on_off"
CHANNEL_POWER_CONFIGURATION = "power"
CHANNEL_PRESSURE = "pressure"
CHANNEL_SHADE = "shade"
CHANNEL_SMARTENERGY_METERING = "smartenergy_metering"
CHANNEL_TEMPERATURE = "temperature"
CHANNEL_THERMOSTAT = "thermostat"
CHANNEL_ZDO = "zdo"
CHANNEL_ZONE = ZONE = "ias_zone"
CLUSTER_COMMAND_SERVER = "server"
CLUSTER_COMMANDS_CLIENT = "client_commands"
CLUSTER_COMMANDS_SERVER = "server_commands"
CLUSTER_TYPE_IN = "in"
CLUSTER_TYPE_OUT = "out"
COMPONENTS = (
BINARY_SENSOR,
CLIMATE,
COVER,
DEVICE_TRACKER,
FAN,
LIGHT,
LOCK,
SENSOR,
SWITCH,
)
CONF_BAUDRATE = "baudrate"
CONF_DATABASE = "database_path"
CONF_DEVICE_CONFIG = "device_config"
CONF_ENABLE_QUIRKS = "enable_quirks"
CONF_FLOWCONTROL = "flow_control"
CONF_RADIO_TYPE = "radio_type"
CONF_USB_PATH = "usb_path"
CONF_ZIGPY = "zigpy_config"
DATA_DEVICE_CONFIG = "zha_device_config"
DATA_ZHA = "zha"
DATA_ZHA_CONFIG = "config"
DATA_ZHA_BRIDGE_ID = "zha_bridge_id"
DATA_ZHA_CORE_EVENTS = "zha_core_events"
DATA_ZHA_DISPATCHERS = "zha_dispatchers"
DATA_ZHA_GATEWAY = "zha_gateway"
DATA_ZHA_PLATFORM_LOADED = "platform_loaded"
DEBUG_COMP_BELLOWS = "bellows"
DEBUG_COMP_ZHA = "homeassistant.components.zha"
DEBUG_COMP_ZIGPY = "zigpy"
DEBUG_COMP_ZIGPY_CC = "zigpy_cc"
DEBUG_COMP_ZIGPY_DECONZ = "zigpy_deconz"
DEBUG_COMP_ZIGPY_XBEE = "zigpy_xbee"
DEBUG_COMP_ZIGPY_ZIGATE = "zigpy_zigate"
DEBUG_LEVEL_CURRENT = "current"
DEBUG_LEVEL_ORIGINAL = "original"
DEBUG_LEVELS = {
DEBUG_COMP_BELLOWS: logging.DEBUG,
DEBUG_COMP_ZHA: logging.DEBUG,
DEBUG_COMP_ZIGPY: logging.DEBUG,
DEBUG_COMP_ZIGPY_CC: logging.DEBUG,
DEBUG_COMP_ZIGPY_DECONZ: logging.DEBUG,
DEBUG_COMP_ZIGPY_XBEE: logging.DEBUG,
DEBUG_COMP_ZIGPY_ZIGATE: logging.DEBUG,
}
DEBUG_RELAY_LOGGERS = [DEBUG_COMP_ZHA, DEBUG_COMP_ZIGPY]
DEFAULT_RADIO_TYPE = "ezsp"
DEFAULT_BAUDRATE = 57600
DEFAULT_DATABASE_NAME = "zigbee.db"
DISCOVERY_KEY = "zha_discovery_info"
DOMAIN = "zha"
GROUP_ID = "group_id"
GROUP_IDS = "group_ids"
GROUP_NAME = "group_name"
MFG_CLUSTER_ID_START = 0xFC00
POWER_MAINS_POWERED = "Mains"
POWER_BATTERY_OR_UNKNOWN = "Battery or Unknown"
class RadioType(enum.Enum):
"""Possible options for radio type."""
znp = (
"ZNP = Texas Instruments Z-Stack ZNP protocol: CC253x, CC26x2, CC13x2",
zigpy_znp.zigbee.application.ControllerApplication,
)
ezsp = (
"EZSP = Silicon Labs EmberZNet protocol: Elelabs, HUSBZB-1, Telegesis",
bellows.zigbee.application.ControllerApplication,
)
deconz = (
"deCONZ = dresden elektronik deCONZ protocol: ConBee I/II, RaspBee I/II",
zigpy_deconz.zigbee.application.ControllerApplication,
)
ti_cc = (
"Legacy TI_CC = Texas Instruments Z-Stack ZNP protocol: CC253x, CC26x2, CC13x2",
zigpy_cc.zigbee.application.ControllerApplication,
)
zigate = (
"ZiGate = ZiGate Zigbee radios: PiZiGate, ZiGate USB-TTL, ZiGate WiFi",
zigpy_zigate.zigbee.application.ControllerApplication,
)
xbee = (
"XBee = Digi XBee Zigbee radios: Digi XBee Series 2, 2C, 3",
zigpy_xbee.zigbee.application.ControllerApplication,
)
@classmethod
def list(cls) -> List[str]:
"""Return a list of descriptions."""
return [e.description for e in RadioType]
@classmethod
def get_by_description(cls, description: str) -> str:
"""Get radio by description."""
for radio in cls:
if radio.description == description:
return radio.name
raise ValueError
def __init__(self, description: str, controller_cls: CALLABLE_T):
"""Init instance."""
self._desc = description
self._ctrl_cls = controller_cls
@property
def controller(self) -> CALLABLE_T:
"""Return controller class."""
return self._ctrl_cls
@property
def description(self) -> str:
"""Return radio type description."""
return self._desc
REPORT_CONFIG_MAX_INT = 900
REPORT_CONFIG_MAX_INT_BATTERY_SAVE = 10800
REPORT_CONFIG_MIN_INT = 30
REPORT_CONFIG_MIN_INT_ASAP = 1
REPORT_CONFIG_MIN_INT_IMMEDIATE = 0
REPORT_CONFIG_MIN_INT_OP = 5
REPORT_CONFIG_MIN_INT_BATTERY_SAVE = 3600
REPORT_CONFIG_RPT_CHANGE = 1
REPORT_CONFIG_DEFAULT = (
REPORT_CONFIG_MIN_INT,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_ASAP = (
REPORT_CONFIG_MIN_INT_ASAP,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_BATTERY_SAVE = (
REPORT_CONFIG_MIN_INT_BATTERY_SAVE,
REPORT_CONFIG_MAX_INT_BATTERY_SAVE,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_IMMEDIATE = (
REPORT_CONFIG_MIN_INT_IMMEDIATE,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
REPORT_CONFIG_OP = (
REPORT_CONFIG_MIN_INT_OP,
REPORT_CONFIG_MAX_INT,
REPORT_CONFIG_RPT_CHANGE,
)
SENSOR_ACCELERATION = "acceleration"
SENSOR_BATTERY = "battery"
SENSOR_ELECTRICAL_MEASUREMENT = CHANNEL_ELECTRICAL_MEASUREMENT
SENSOR_GENERIC = "generic"
SENSOR_HUMIDITY = CHANNEL_HUMIDITY
SENSOR_ILLUMINANCE = CHANNEL_ILLUMINANCE
SENSOR_METERING = "metering"
SENSOR_OCCUPANCY = CHANNEL_OCCUPANCY
SENSOR_OPENING = "opening"
SENSOR_PRESSURE = CHANNEL_PRESSURE
SENSOR_TEMPERATURE = CHANNEL_TEMPERATURE
SENSOR_TYPE = "sensor_type"
SIGNAL_ADD_ENTITIES = "zha_add_new_entities"
SIGNAL_ATTR_UPDATED = "attribute_updated"
SIGNAL_AVAILABLE = "available"
SIGNAL_MOVE_LEVEL = "move_level"
SIGNAL_REMOVE = "remove"
SIGNAL_SET_LEVEL = "set_level"
SIGNAL_STATE_ATTR = "update_state_attribute"
SIGNAL_UPDATE_DEVICE = "{}_zha_update_device"
SIGNAL_GROUP_ENTITY_REMOVED = "group_entity_removed"
SIGNAL_GROUP_MEMBERSHIP_CHANGE = "group_membership_change"
UNKNOWN = "unknown"
UNKNOWN_MANUFACTURER = "unk_manufacturer"
UNKNOWN_MODEL = "unk_model"
WARNING_DEVICE_MODE_STOP = 0
WARNING_DEVICE_MODE_BURGLAR = 1
WARNING_DEVICE_MODE_FIRE = 2
WARNING_DEVICE_MODE_EMERGENCY = 3
WARNING_DEVICE_MODE_POLICE_PANIC = 4
WARNING_DEVICE_MODE_FIRE_PANIC = 5
WARNING_DEVICE_MODE_EMERGENCY_PANIC = 6
WARNING_DEVICE_STROBE_NO = 0
WARNING_DEVICE_STROBE_YES = 1
WARNING_DEVICE_SOUND_LOW = 0
WARNING_DEVICE_SOUND_MEDIUM = 1
WARNING_DEVICE_SOUND_HIGH = 2
WARNING_DEVICE_SOUND_VERY_HIGH = 3
WARNING_DEVICE_STROBE_LOW = 0x00
WARNING_DEVICE_STROBE_MEDIUM = 0x01
WARNING_DEVICE_STROBE_HIGH = 0x02
WARNING_DEVICE_STROBE_VERY_HIGH = 0x03
WARNING_DEVICE_SQUAWK_MODE_ARMED = 0
WARNING_DEVICE_SQUAWK_MODE_DISARMED = 1
ZHA_DISCOVERY_NEW = "zha_discovery_new_{}"
ZHA_GW_MSG = "zha_gateway_message"
ZHA_GW_MSG_DEVICE_FULL_INIT = "device_fully_initialized"
ZHA_GW_MSG_DEVICE_INFO = "device_info"
ZHA_GW_MSG_DEVICE_JOINED = "device_joined"
ZHA_GW_MSG_DEVICE_REMOVED = "device_removed"
ZHA_GW_MSG_GROUP_ADDED = "group_added"
ZHA_GW_MSG_GROUP_INFO = "group_info"
ZHA_GW_MSG_GROUP_MEMBER_ADDED = "group_member_added"
ZHA_GW_MSG_GROUP_MEMBER_REMOVED = "group_member_removed"
ZHA_GW_MSG_GROUP_REMOVED = "group_removed"
ZHA_GW_MSG_LOG_ENTRY = "log_entry"
ZHA_GW_MSG_LOG_OUTPUT = "log_output"
ZHA_GW_MSG_RAW_INIT = "raw_device_initialized"
EFFECT_BLINK = 0x00
EFFECT_BREATHE = 0x01
EFFECT_OKAY = 0x02
EFFECT_DEFAULT_VARIANT = 0x00
|
from .abstract_owm_sensor import AbstractOpenWeatherMapSensor
from .const import (
ATTR_API_THIS_DAY_FORECAST,
DOMAIN,
ENTRY_FORECAST_COORDINATOR,
ENTRY_NAME,
ENTRY_WEATHER_COORDINATOR,
FORECAST_MONITORED_CONDITIONS,
FORECAST_SENSOR_TYPES,
MONITORED_CONDITIONS,
WEATHER_SENSOR_TYPES,
)
from .forecast_update_coordinator import ForecastUpdateCoordinator
from .weather_update_coordinator import WeatherUpdateCoordinator
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up OpenWeatherMap sensor entities based on a config entry."""
domain_data = hass.data[DOMAIN][config_entry.entry_id]
name = domain_data[ENTRY_NAME]
weather_coordinator = domain_data[ENTRY_WEATHER_COORDINATOR]
forecast_coordinator = domain_data[ENTRY_FORECAST_COORDINATOR]
weather_sensor_types = WEATHER_SENSOR_TYPES
forecast_sensor_types = FORECAST_SENSOR_TYPES
entities = []
for sensor_type in MONITORED_CONDITIONS:
unique_id = f"{config_entry.unique_id}-{sensor_type}"
entities.append(
OpenWeatherMapSensor(
name,
unique_id,
sensor_type,
weather_sensor_types[sensor_type],
weather_coordinator,
)
)
for sensor_type in FORECAST_MONITORED_CONDITIONS:
unique_id = f"{config_entry.unique_id}-forecast-{sensor_type}"
entities.append(
OpenWeatherMapForecastSensor(
f"{name} Forecast",
unique_id,
sensor_type,
forecast_sensor_types[sensor_type],
forecast_coordinator,
)
)
async_add_entities(entities)
class OpenWeatherMapSensor(AbstractOpenWeatherMapSensor):
"""Implementation of an OpenWeatherMap sensor."""
def __init__(
self,
name,
unique_id,
sensor_type,
sensor_configuration,
weather_coordinator: WeatherUpdateCoordinator,
):
"""Initialize the sensor."""
super().__init__(
name, unique_id, sensor_type, sensor_configuration, weather_coordinator
)
self._weather_coordinator = weather_coordinator
@property
def state(self):
"""Return the state of the device."""
return self._weather_coordinator.data.get(self._sensor_type, None)
class OpenWeatherMapForecastSensor(AbstractOpenWeatherMapSensor):
"""Implementation of an OpenWeatherMap this day forecast sensor."""
def __init__(
self,
name,
unique_id,
sensor_type,
sensor_configuration,
forecast_coordinator: ForecastUpdateCoordinator,
):
"""Initialize the sensor."""
super().__init__(
name, unique_id, sensor_type, sensor_configuration, forecast_coordinator
)
self._forecast_coordinator = forecast_coordinator
@property
def state(self):
"""Return the state of the device."""
return self._forecast_coordinator.data[ATTR_API_THIS_DAY_FORECAST].get(
self._sensor_type, None
)
|
from pscript import RawJS
from flexx import flx
# Associate assets needed by this app.
flx.assets.associate_asset(__name__, "http://code.jquery.com/jquery-1.10.2.js")
flx.assets.associate_asset(__name__, "http://code.jquery.com/ui/1.11.4/jquery-ui.js")
flx.assets.associate_asset(__name__,
"http://code.jquery.com/ui/1.11.4/themes/smoothness/jquery-ui.css")
class DatePicker(flx.Widget):
def _create_dom(self):
global window
node = window.document.createElement('input')
RawJS('$')(node).datepicker()
return node
class Example(flx.Widget):
def init(self):
with flx.FormLayout():
self.start = DatePicker(title='Start date')
self.end = DatePicker(title='End date')
flx.Widget(flex=1)
if __name__ == '__main__':
m = flx.launch(Example, 'app')
flx.run()
|
from unittest.mock import patch
import pytest
import zigpy.profiles.zha
import zigpy.zcl.clusters.closures as closures
import zigpy.zcl.clusters.general as general
import zigpy.zcl.foundation as zcl_f
from homeassistant.components.lock import DOMAIN
from homeassistant.const import STATE_LOCKED, STATE_UNAVAILABLE, STATE_UNLOCKED
from .common import async_enable_traffic, find_entity_id, send_attributes_report
from tests.common import mock_coro
LOCK_DOOR = 0
UNLOCK_DOOR = 1
@pytest.fixture
async def lock(hass, zigpy_device_mock, zha_device_joined_restored):
"""Lock cluster fixture."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [closures.DoorLock.cluster_id, general.Basic.cluster_id],
"out_clusters": [],
"device_type": zigpy.profiles.zha.DeviceType.DOOR_LOCK,
}
},
)
zha_device = await zha_device_joined_restored(zigpy_device)
return zha_device, zigpy_device.endpoints[1].door_lock
async def test_lock(hass, lock):
"""Test zha lock platform."""
zha_device, cluster = lock
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
assert entity_id is not None
assert hass.states.get(entity_id).state == STATE_UNLOCKED
await async_enable_traffic(hass, [zha_device], enabled=False)
# test that the lock was created and that it is unavailable
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and device
await async_enable_traffic(hass, [zha_device])
# test that the state has changed from unavailable to unlocked
assert hass.states.get(entity_id).state == STATE_UNLOCKED
# set state to locked
await send_attributes_report(hass, cluster, {1: 0, 0: 1, 2: 2})
assert hass.states.get(entity_id).state == STATE_LOCKED
# set state to unlocked
await send_attributes_report(hass, cluster, {1: 0, 0: 2, 2: 3})
assert hass.states.get(entity_id).state == STATE_UNLOCKED
# lock from HA
await async_lock(hass, cluster, entity_id)
# unlock from HA
await async_unlock(hass, cluster, entity_id)
async def async_lock(hass, cluster, entity_id):
"""Test lock functionality from hass."""
with patch(
"zigpy.zcl.Cluster.request", return_value=mock_coro([zcl_f.Status.SUCCESS])
):
# lock via UI
await hass.services.async_call(
DOMAIN, "lock", {"entity_id": entity_id}, blocking=True
)
assert cluster.request.call_count == 1
assert cluster.request.call_args[0][0] is False
assert cluster.request.call_args[0][1] == LOCK_DOOR
async def async_unlock(hass, cluster, entity_id):
"""Test lock functionality from hass."""
with patch(
"zigpy.zcl.Cluster.request", return_value=mock_coro([zcl_f.Status.SUCCESS])
):
        # unlock via UI
await hass.services.async_call(
DOMAIN, "unlock", {"entity_id": entity_id}, blocking=True
)
assert cluster.request.call_count == 1
assert cluster.request.call_args[0][0] is False
assert cluster.request.call_args[0][1] == UNLOCK_DOOR
|
from absl import flags
from perfkitbenchmarker.linux_benchmarks import coremark_benchmark
FLAGS = flags.FLAGS
BENCHMARK_NAME = coremark_benchmark.BENCHMARK_NAME
BENCHMARK_CONFIG = coremark_benchmark.BENCHMARK_CONFIG
GetConfig = coremark_benchmark.GetConfig
def Prepare(benchmark_spec):
"""Installs coremark on the target VM under Cygwin."""
vm = benchmark_spec.vms[0]
vm.InstallCygwin(packages=['wget', 'gcc-core', 'tar', 'make'])
vm.Install('coremark')
coremark_benchmark.PrepareCoremark(vm.RemoteCommandCygwin)
def Run(benchmark_spec):
"""Runs coremark on the VM under Cygwin.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects with the benchmark results.
"""
vm = benchmark_spec.vms[0]
return coremark_benchmark.RunCoremark(vm.RemoteCommandCygwin,
vm.NumCpusForBenchmark())
def Cleanup(benchmark_spec):
"""Cleans up coremark on the target VM."""
vm = benchmark_spec.vms[0]
coremark_benchmark.CleanupCoremark(vm.RemoteCommandCygwin)
|
from math import log10
from pygal._compat import to_str
from pygal.graph.graph import Graph
from pygal.util import alter, cached_property, decorate, safe_enumerate
from pygal.view import ReverseView, View
class Dot(Graph):
"""Dot graph class"""
def dot(self, serie, r_max):
"""Draw a dot line"""
serie_node = self.svg.serie(serie)
view_values = list(map(self.view, serie.points))
for i, value in safe_enumerate(serie.values):
x, y = view_values[i]
if self.logarithmic:
log10min = log10(self._min) - 1
log10max = log10(self._max or 1)
if value != 0:
size = r_max * ((log10(abs(value)) - log10min) /
(log10max - log10min))
else:
size = 0
else:
size = r_max * (abs(value) / (self._max or 1))
metadata = serie.metadata.get(i)
dots = decorate(
self.svg, self.svg.node(serie_node['plot'], class_="dots"),
metadata
)
alter(
self.svg.node(
dots,
'circle',
cx=x,
cy=y,
r=size,
class_='dot reactive tooltip-trigger' +
(' negative' if value < 0 else '')
), metadata
)
val = self._format(serie, i)
self._tooltip_data(
dots, val, x, y, 'centered', self._get_x_label(i)
)
self._static_value(serie_node, val, x, y, metadata)
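    # Worked example of the radius scaling above (illustrative numbers only):
    # with r_max=20, _min=1 and _max=1000 on a logarithmic scale, log10min is
    # log10(1) - 1 = -1 and log10max is log10(1000) = 3, so a value of 10 gives
    # size = 20 * ((1 - (-1)) / (3 - (-1))) = 10, while the linear branch would
    # give size = 20 * (10 / 1000) = 0.2.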
def _compute(self):
"""Compute y min and max and y scale and set labels"""
x_len = self._len
y_len = self._order
self._box.xmax = x_len
self._box.ymax = y_len
self._x_pos = [n / 2 for n in range(1, 2 * x_len, 2)]
self._y_pos = [n / 2 for n in reversed(range(1, 2 * y_len, 2))]
for j, serie in enumerate(self.series):
serie.points = [(self._x_pos[i], self._y_pos[j])
for i in range(x_len)]
def _compute_y_labels(self):
self._y_labels = list(
zip(
self.y_labels and map(to_str, self.y_labels) or [
serie.title['title']
if isinstance(serie.title, dict) else serie.title or ''
for serie in self.series
], self._y_pos
)
)
def _set_view(self):
"""Assign a view to current graph"""
view_class = ReverseView if self.inverse_y_axis else View
self.view = view_class(
self.width - self.margin_box.x, self.height - self.margin_box.y,
self._box
)
@cached_property
def _values(self):
"""Getter for series values (flattened)"""
return [abs(val) for val in super(Dot, self)._values if val != 0]
@cached_property
def _max(self):
"""Getter for the maximum series value"""
return (
self.range[1] if (self.range and self.range[1] is not None) else
(max(map(abs, self._values)) if self._values else None)
)
def _plot(self):
"""Plot all dots for series"""
r_max = min(
self.view.x(1) - self.view.x(0),
(self.view.y(0) or 0) - self.view.y(1)
) / (2 * 1.05)
for serie in self.series:
self.dot(serie, r_max)
|
import urwid
class FibonacciWalker(urwid.ListWalker):
"""ListWalker-compatible class for browsing fibonacci set.
positions returned are (value at position-1, value at position) tuples.
"""
def __init__(self):
self.focus = (0,1)
self.numeric_layout = NumericLayout()
def _get_at_pos(self, pos):
"""Return a widget and the position passed."""
return urwid.Text("%d"%pos[1], layout=self.numeric_layout), pos
def get_focus(self):
return self._get_at_pos(self.focus)
def set_focus(self, focus):
self.focus = focus
self._modified()
def get_next(self, start_from):
a, b = start_from
focus = b, a+b
return self._get_at_pos(focus)
def get_prev(self, start_from):
a, b = start_from
focus = b-a, a
return self._get_at_pos(focus)
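# Illustrative walk from the initial focus (0, 1): get_next yields positions
# (1, 1), (1, 2), (2, 3), (3, 5), ... while get_prev walks the pairs backwards,
# so the ListBox can scroll through Fibonacci numbers in both directions
# without ever materialising the whole sequence.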
def main():
palette = [
('body','black','dark cyan', 'standout'),
('foot','light gray', 'black'),
('key','light cyan', 'black', 'underline'),
('title', 'white', 'black',),
]
footer_text = [
('title', "Fibonacci Set Viewer"), " ",
('key', "UP"), ", ", ('key', "DOWN"), ", ",
('key', "PAGE UP"), " and ", ('key', "PAGE DOWN"),
" move view ",
('key', "Q"), " exits",
]
def exit_on_q(input):
if input in ('q', 'Q'):
raise urwid.ExitMainLoop()
listbox = urwid.ListBox(FibonacciWalker())
footer = urwid.AttrMap(urwid.Text(footer_text), 'foot')
view = urwid.Frame(urwid.AttrWrap(listbox, 'body'), footer=footer)
loop = urwid.MainLoop(view, palette, unhandled_input=exit_on_q)
loop.run()
class NumericLayout(urwid.TextLayout):
"""
TextLayout class for bottom-right aligned numbers
"""
def layout( self, text, width, align, wrap ):
"""
Return layout structure for right justified numbers.
"""
lt = len(text)
r = lt % width # remaining segment not full width wide
if r:
linestarts = range( r, lt, width )
return [
# right-align the remaining segment on 1st line
[(width-r,None),(r, 0, r)]
# fill the rest of the lines
] + [[(width, x, x+width)] for x in linestarts]
else:
linestarts = range( 0, lt, width )
return [[(width, x, x+width)] for x in linestarts]
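# Worked example for NumericLayout.layout (illustrative input): for text
# "12345" with width=3, lt=5 and r=2, so the first line is right-aligned as
# [(1, None), (2, 0, 2)] ("12" padded by one column), followed by the full
# line [(3, 2, 5)] ("345"); when the length is a multiple of width, only
# full-width segments are returned.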
if __name__=="__main__":
main()
|
from typing import Any, Callable, List
import pyvera as veraApi
from homeassistant.components.cover import (
ATTR_POSITION,
DOMAIN as PLATFORM_DOMAIN,
ENTITY_ID_FORMAT,
CoverEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from . import VeraDevice
from .common import ControllerData, get_controller_data
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up the sensor config entry."""
controller_data = get_controller_data(hass, entry)
async_add_entities(
[
VeraCover(device, controller_data)
for device in controller_data.devices.get(PLATFORM_DOMAIN)
]
)
class VeraCover(VeraDevice[veraApi.VeraCurtain], CoverEntity):
"""Representation a Vera Cover."""
def __init__(
self, vera_device: veraApi.VeraCurtain, controller_data: ControllerData
):
"""Initialize the Vera device."""
VeraDevice.__init__(self, vera_device, controller_data)
self.entity_id = ENTITY_ID_FORMAT.format(self.vera_id)
@property
def current_cover_position(self) -> int:
"""
Return current position of cover.
0 is closed, 100 is fully open.
"""
position = self.vera_device.get_level()
if position <= 5:
return 0
if position >= 95:
return 100
return position
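    # Illustrative mapping of raw Vera levels given the 5/95 thresholds above:
    # a raw level of 3 is reported as 0 (closed), 97 as 100 (fully open), and
    # 50 passes through unchanged.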
def set_cover_position(self, **kwargs) -> None:
"""Move the cover to a specific position."""
self.vera_device.set_level(kwargs.get(ATTR_POSITION))
self.schedule_update_ha_state()
@property
def is_closed(self) -> bool:
"""Return if the cover is closed."""
if self.current_cover_position is not None:
return self.current_cover_position == 0
def open_cover(self, **kwargs: Any) -> None:
"""Open the cover."""
self.vera_device.open()
self.schedule_update_ha_state()
def close_cover(self, **kwargs: Any) -> None:
"""Close the cover."""
self.vera_device.close()
self.schedule_update_ha_state()
def stop_cover(self, **kwargs: Any) -> None:
"""Stop the cover."""
self.vera_device.stop()
self.schedule_update_ha_state()
|
from django.utils.translation import gettext_lazy as _
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.plugin_base import TransparentContainer
from shop.cascade.plugin_base import ShopPluginBase
class ShopExtendableMixin:
"""
    Add this mixin class to the list of ``model_mixins`` in the plugin class wishing to use extensions.
"""
@property
def left_extension(self):
if self.child_plugin_instances is None:
return
result = [cp for cp in self.child_plugin_instances if cp.plugin_type == 'ShopLeftExtension']
if result:
return result[0]
@property
def right_extension(self):
if self.child_plugin_instances is None:
return
result = [cp for cp in self.child_plugin_instances if cp.plugin_type == 'ShopRightExtension']
if result:
return result[0]
class LeftRightExtensionMixin:
"""
Plugin classes wishing to use extensions shall inherit from this class.
"""
@classmethod
def get_child_classes(cls, slot, page, instance=None):
child_classes = ['ShopLeftExtension', 'ShopRightExtension', None]
# allow only one left and one right extension
for child in instance.get_children():
child_classes.remove(child.plugin_type)
return child_classes
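    # Illustrative outcome: if the instance already has a ShopLeftExtension
    # child, only ShopRightExtension (plus None) remains allowed, so at most
    # one extension of each side can be added.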
class ShopLeftExtension(TransparentContainer, ShopPluginBase):
name = _("Left Extension")
require_parent = True
parent_classes = ('ShopCartPlugin', 'ShopOrderViewsPlugin')
allow_children = True
render_template = 'cascade/generic/wrapper.html'
plugin_pool.register_plugin(ShopLeftExtension)
class ShopRightExtension(TransparentContainer, ShopPluginBase):
name = _("Right Extension")
require_parent = True
parent_classes = ('ShopCartPlugin', 'ShopOrderViewsPlugin')
allow_children = True
render_template = 'cascade/generic/wrapper.html'
plugin_pool.register_plugin(ShopRightExtension)
|
import os
from slacker import Slacker
def list_slack():
"""List channels & users in slack."""
try:
token = os.environ['SLACK_TOKEN']
slack = Slacker(token)
# Get channel list
response = slack.channels.list()
channels = response.body['channels']
for channel in channels:
print(channel['id'], channel['name'])
# if not channel['is_archived']:
# slack.channels.join(channel['name'])
print()
# Get users list
response = slack.users.list()
users = response.body['members']
for user in users:
if not user['deleted']:
print(user['id'], user['name'], user['is_admin'], user[
'is_owner'])
print()
except KeyError as ex:
print('Environment variable %s not set.' % str(ex))
if __name__ == '__main__':
list_slack()
|
from homeassistant.components.cover import ATTR_CURRENT_POSITION
from homeassistant.components.ozw.cover import VALUE_SELECTED_ID
from .common import setup_ozw
VALUE_ID = "Value"
async def test_cover(hass, cover_data, sent_messages, cover_msg):
"""Test setting up config entry."""
receive_message = await setup_ozw(hass, fixture=cover_data)
# Test loaded
state = hass.states.get("cover.roller_shutter_3_instance_1_level")
assert state is not None
assert state.state == "closed"
assert state.attributes[ATTR_CURRENT_POSITION] == 0
# Test opening
await hass.services.async_call(
"cover",
"open_cover",
{"entity_id": "cover.roller_shutter_3_instance_1_level"},
blocking=True,
)
assert len(sent_messages) == 1
msg = sent_messages[0]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 99, "ValueIDKey": 625573905}
# Feedback on state
cover_msg.decode()
cover_msg.payload["Value"] = 99
cover_msg.encode()
receive_message(cover_msg)
await hass.async_block_till_done()
state = hass.states.get("cover.roller_shutter_3_instance_1_level")
assert state is not None
assert state.state == "open"
assert state.attributes[ATTR_CURRENT_POSITION] == 100
# Test closing
await hass.services.async_call(
"cover",
"close_cover",
{"entity_id": "cover.roller_shutter_3_instance_1_level"},
blocking=True,
)
assert len(sent_messages) == 2
msg = sent_messages[1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 0, "ValueIDKey": 625573905}
# Test setting position
await hass.services.async_call(
"cover",
"set_cover_position",
{"entity_id": "cover.roller_shutter_3_instance_1_level", "position": 50},
blocking=True,
)
assert len(sent_messages) == 3
msg = sent_messages[2]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 50, "ValueIDKey": 625573905}
# Test converting position to zwave range for position > 0
await hass.services.async_call(
"cover",
"set_cover_position",
{"entity_id": "cover.roller_shutter_3_instance_1_level", "position": 100},
blocking=True,
)
assert len(sent_messages) == 4
msg = sent_messages[3]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 99, "ValueIDKey": 625573905}
# Test converting position to zwave range for position = 0
await hass.services.async_call(
"cover",
"set_cover_position",
{"entity_id": "cover.roller_shutter_3_instance_1_level", "position": 0},
blocking=True,
)
assert len(sent_messages) == 5
msg = sent_messages[4]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 0, "ValueIDKey": 625573905}
async def test_barrier(hass, cover_gdo_data, sent_messages, cover_gdo_msg):
"""Test setting up config entry."""
receive_message = await setup_ozw(hass, fixture=cover_gdo_data)
# Test loaded
state = hass.states.get("cover.gd00z_4_barrier_state")
assert state is not None
assert state.state == "closed"
# Test opening
await hass.services.async_call(
"cover",
"open_cover",
{"entity_id": "cover.gd00z_4_barrier_state"},
blocking=True,
)
assert len(sent_messages) == 1
msg = sent_messages[0]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 4, "ValueIDKey": 281475083239444}
# Feedback on state
cover_gdo_msg.decode()
cover_gdo_msg.payload[VALUE_ID][VALUE_SELECTED_ID] = 4
cover_gdo_msg.encode()
receive_message(cover_gdo_msg)
await hass.async_block_till_done()
state = hass.states.get("cover.gd00z_4_barrier_state")
assert state is not None
assert state.state == "open"
# Test closing
await hass.services.async_call(
"cover",
"close_cover",
{"entity_id": "cover.gd00z_4_barrier_state"},
blocking=True,
)
assert len(sent_messages) == 2
msg = sent_messages[1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": 0, "ValueIDKey": 281475083239444}
|
import pytest
from homeassistant.components.owntracks import helper
from tests.async_mock import patch
@pytest.fixture(name="nacl_imported")
def mock_nacl_imported():
"""Mock a successful import."""
with patch("homeassistant.components.owntracks.helper.nacl"):
yield
@pytest.fixture(name="nacl_not_imported")
def mock_nacl_not_imported():
"""Mock non successful import."""
with patch("homeassistant.components.owntracks.helper.nacl", new=None):
yield
def test_supports_encryption(nacl_imported):
"""Test if env supports encryption."""
assert helper.supports_encryption()
def test_supports_encryption_failed(nacl_not_imported):
"""Test if env does not support encryption."""
assert not helper.supports_encryption()
|
import unittest
import numpy as np
from chainer import testing
from chainercv.transforms import translate_point
class TestTranslatePoint(unittest.TestCase):
def test_translate_point_ndarray(self):
point = np.random.uniform(
low=0., high=32., size=(3, 10, 2))
out = translate_point(point, y_offset=3, x_offset=5)
expected = np.empty_like(point)
expected[:, :, 0] = point[:, :, 0] + 3
expected[:, :, 1] = point[:, :, 1] + 5
np.testing.assert_equal(out, expected)
def test_translate_point_list(self):
point = [
np.random.uniform(low=0., high=32., size=(12, 2)),
np.random.uniform(low=0., high=32., size=(10, 2))
]
out = translate_point(point, y_offset=3, x_offset=5)
for i, pnt in enumerate(point):
expected = np.empty_like(pnt)
expected[:, 0] = pnt[:, 0] + 3
expected[:, 1] = pnt[:, 1] + 5
np.testing.assert_equal(out[i], expected)
testing.run_module(__name__, __file__)
|
import asyncio
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST
from homeassistant.core import HomeAssistant
from .const import ALL_PLATFORMS, DOMAIN, UNDO_UPDATE_LISTENER
from .gateway import async_setup_entry_gw
CONFIG_SCHEMA = vol.Schema({DOMAIN: vol.Schema({})}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Plugwise platform."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Plugwise components from a config entry."""
if entry.data.get(CONF_HOST):
return await async_setup_entry_gw(hass, entry)
# PLACEHOLDER USB entry setup
return False
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in ALL_PLATFORMS
]
)
)
hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENER]()
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
import logging
from threading import Thread
from kalliope.core import SignalModule
from kalliope.core import Utils
from kalliope.signals.mqtt_subscriber.MqttClient import MqttClient
from kalliope.signals.mqtt_subscriber.models import Broker, Topic
CLIENT_ID = "kalliope"
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Mqtt_subscriber(SignalModule, Thread):
def __init__(self, **kwargs):
super(Mqtt_subscriber, self).__init__(**kwargs)
        Thread.__init__(self, name="Mqtt_subscriber")
        Utils.print_info('[Mqtt_subscriber] Starting manager')
        # variables
self.list_synapses_with_mqtt = list(super(Mqtt_subscriber, self).get_list_synapse())
self.broker_ip = None
self.topic = None
self.json_message = False
def run(self):
logger.debug("[Mqtt_subscriber] Starting Mqtt_subscriber")
        # group signals by broker IP, then by topic within each broker, and attach the names of the synapses to run to each topic
list_broker_to_instantiate = self.get_list_broker_to_instantiate(self.list_synapses_with_mqtt)
# now instantiate a MQTT client for each broker object
self.instantiate_mqtt_client(list_broker_to_instantiate)
@staticmethod
def check_parameters(parameters):
"""
        Overridden method.
        Receives a dict of parameters from a mqtt_subscriber signal.
:param parameters: dict of mqtt_signal_parameters
:return: True if parameters are valid
"""
# check mandatory parameters
mandatory_parameters = ["broker_ip", "topic"]
if not all(key in parameters for key in mandatory_parameters):
return False
return True
@staticmethod
def get_list_broker_to_instantiate(list_synapse_with_mqtt_subscriber):
"""
        Return a list of Broker objects built from the given list of synapses.
        :param list_synapse_with_mqtt_subscriber: list of Synapse objects
:return: list of Broker
"""
returned_list_of_broker = list()
for synapse in list_synapse_with_mqtt_subscriber:
for signal in synapse.signals:
# check if the broker exist in the list
if not any(x.broker_ip == signal.parameters["broker_ip"] for x in returned_list_of_broker):
logger.debug("[Mqtt_subscriber] Create new broker: %s" % signal.parameters["broker_ip"])
# create a new broker object
new_broker = Broker()
new_broker.build_from_signal_dict(signal.parameters)
# add the current topic
logger.debug("[Mqtt_subscriber] Add new topic to broker %s: %s" % (new_broker.broker_ip,
signal.parameters["topic"]))
new_topic = Topic()
new_topic.name = signal.parameters["topic"]
if "is_json" in signal.parameters:
logger.debug("[Mqtt_subscriber] Message for the topic %s will be json converted"
% new_topic.name)
new_topic.is_json = bool(signal.parameters["is_json"])
else:
new_topic.is_json = False
# add the current synapse to the topic
new_topic.synapses = list()
new_topic.synapses.append(synapse)
new_broker.topics.append(new_topic)
logger.debug("[Mqtt_subscriber] Add new synapse to topic %s :%s" % (new_topic.name, synapse.name))
returned_list_of_broker.append(new_broker)
else:
# the broker exist. get it from the list of broker
broker_to_edit = next((broker for broker in returned_list_of_broker
if signal.parameters["broker_ip"] == broker.broker_ip))
# check if the topic already exist
if not any(topic.name == signal.parameters["topic"] for topic in broker_to_edit.topics):
new_topic = Topic()
new_topic.name = signal.parameters["topic"]
if "is_json" in signal.parameters:
logger.debug("[Mqtt_subscriber] Message for the topic %s will be json converted"
% new_topic.name)
new_topic.is_json = bool(signal.parameters["is_json"])
else:
new_topic.is_json = False
logger.debug("[Mqtt_subscriber] Add new topic to existing broker "
"%s: %s" % (broker_to_edit.broker_ip, signal.parameters["topic"]))
# add the current synapse to the topic
logger.debug("[Mqtt_subscriber] Add new synapse "
"to topic %s :%s" % (new_topic.name, synapse.name))
new_topic.synapses = list()
new_topic.synapses.append(synapse)
# add the topic to the broker
broker_to_edit.topics.append(new_topic)
else:
# the topic already exist, get it from the list
topic_to_edit = next((topic for topic in broker_to_edit.topics
if topic.name == signal.parameters["topic"]))
# add the synapse
logger.debug("[Mqtt_subscriber] Add synapse %s to existing topic %s "
"in existing broker %s" % (synapse.name,
topic_to_edit.name,
broker_to_edit.broker_ip))
topic_to_edit.synapses.append(synapse)
return returned_list_of_broker
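    # Illustrative grouping (hypothetical broker and topics): two synapses whose
    # signals both target broker 192.168.0.5, one on topic "home/kitchen" and
    # one on topic "home/garage", yield a single Broker carrying two Topic
    # objects, each listing the synapse(s) to run when a message arrives.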
def instantiate_mqtt_client(self, list_broker_to_instantiate):
"""
Instantiate a MqttClient thread for each broker
:param list_broker_to_instantiate: list of broker to run
"""
for broker in list_broker_to_instantiate:
mqtt_client = MqttClient(broker=broker, brain=self.brain)
mqtt_client.start()
|
from flask import Blueprint
from flask_restful import reqparse, Api
from lemur.common.schema import validate_schema
from lemur.common.utils import paginated_parser
from lemur.auth.service import AuthenticatedResource
from lemur.logs.schemas import logs_output_schema
from lemur.logs import service
mod = Blueprint("logs", __name__)
api = Api(mod)
class LogsList(AuthenticatedResource):
""" Defines the 'logs' endpoint """
def __init__(self):
self.reqparse = reqparse.RequestParser()
super(LogsList, self).__init__()
@validate_schema(None, logs_output_schema)
def get(self):
"""
.. http:get:: /logs
The current log list
**Example request**:
.. sourcecode:: http
GET /logs HTTP/1.1
Host: example.com
Accept: application/json, text/javascript
**Example response**:
.. sourcecode:: http
HTTP/1.1 200 OK
Vary: Accept
Content-Type: text/javascript
{
"items": [
]
"total": 2
}
:query sortBy: field to sort on
:query sortDir: asc or desc
:query page: int default is 1
:query filter: key value pair format is k;v
:query count: count number default is 10
:reqheader Authorization: OAuth token to authenticate
:statuscode 200: no error
"""
parser = paginated_parser.copy()
parser.add_argument("owner", type=str, location="args")
parser.add_argument("id", type=str, location="args")
args = parser.parse_args()
return service.render(args)
api.add_resource(LogsList, "/logs", endpoint="logs")
|