filename | text |
---|---|
the-stack_0_4882 | #!/usr/bin/env python3
import os
import subprocess
import pypact as pp
import matplotlib.pyplot as plt
do_collapse = True
show_plot = True
group = 709
inventory = [('Fe', 1.0)]
# files file
def createfiles():
nuclear_data_base = os.getenv('NUCLEAR_DATA', os.path.join(os.sep, 'opt', 'fispact', 'nuclear_data'))
ff = pp.FilesFile(base_dir=nuclear_data_base)
ff.setXS('TENDL2015')
ff.setFissionYield('GEFY52')
ff.setProbTab('TENDL2015')
ff.setDecay('DECAY')
ff.setRegulatory('DECAY')
ff.setGammaAbsorb('DECAY')
for invalid in ff.invalidpaths():
print("FilesFile:: missing file: {}".format(invalid))
return ff
# input file
def createinput():
id = pp.InputData()
id.overwriteExisting()
id.enableJSON()
id.approxGammaSpectrum()
if do_collapse:
id.readXSData(group)
id.readDecayData()
id.enableSystemMonitor(False)
id.enableHalflifeInOutput()
id.enableHazardsInOutput()
id.setProjectile(pp.PROJECTILE_NEUTRON)
id.enableInitialInventoryInOutput()
id.setLogLevel(pp.LOG_SEVERITY_ERROR)
id.setAtomsThreshold(1.0e-3)
id.setDensity(7.875)
id.setMass(1.0e-3)
for e, r in inventory:
id.addElement(e, percentage=r*100.0)
id.addIrradiation(300.0, 1.1e15)
id.addCooling(10.0)
id.addCooling(100.0)
id.addCooling(1000.0)
id.addCooling(10000.0)
id.addCooling(100000.0)
id.validate()
return id
# fluxes file
def createflux():
# set an (almost) monoenergetic flux peaked at 14 MeV using the 709-group structure
flux = pp.FluxesFile(name="14 MeV (almost) monoenergetic", norm=1.0)
flux.setGroup(group)
flux.setValue(12.0e6, 0.1)
flux.setValue(13.0e6, 0.4)
flux.setValue(14.0e6, 1.0)
flux.validate()
return flux
# perform analysis on the output
def analyse(output):
# plot the final inventory ignoring the initial elements
elements = {}
ignore_elements = list(map(list, zip(*inventory)))[0]
if len(output) == 0:
print("No valid inventory output, exiting")
return
for n in output[-1].nuclides:
if n.element not in ignore_elements:
if n.element in elements:
elements[n.element] += n.grams
else:
elements[n.element] = n.grams
total_grams = sum([g for e, g in elements.items()])
for e, g in elements.items():
print("{} {:.2f}%".format(e, g*100.0/total_grams))
# we must rescale the values
elements[e] = g/total_grams
labels, values = list(zip(*(list(elements.items()))))
if show_plot:
plt.pie(list(values), labels=list(labels), autopct='%2.2f%%', shadow=False)
plt.show()
# main script
input = createinput()
files = createfiles()
fluxes = createflux()
output = pp.compute(input, files, fluxes)
analyse(output)
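# Note (summary added for illustration): judging from the calls above,
# pp.compute() returns an inventory output that behaves like a list of time
# steps, each exposing a `nuclides` list whose entries carry `element` and
# `grams` attributes -- this is what analyse() iterates over.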
|
the-stack_0_4883 | #
# * The source code in this file is developed independently by NEC Corporation.
#
# # NLCPy License #
#
# Copyright (c) 2020-2021 NEC Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither NEC Corporation nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import division, absolute_import, print_function
from numpy.testing import assert_array_almost_equal
import numpy as np
import nlcpy as ny
def test_me_case_1():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ans_np = np.cov(np_a)
ans_ny = ny.cov(ny_a)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_2():
np_a = np.array([-2.1, -1, 4.3])
ny_a = ny.array([-2.1, -1, 4.3])
ans_np = np.cov(np_a)
ans_ny = ny.cov(ny_a)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_3():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
np_y = np.array([2, 1, 1, 8, 9, 4, 3, 5, 7])
ny_y = ny.array([2, 1, 1, 8, 9, 4, 3, 5, 7])
ans_np = np.cov(np_a, np_y)
ans_ny = ny.cov(ny_a, ny_y)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_4():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ans_np = np.cov(np_a.T, rowvar=False)
ans_ny = ny.cov(ny_a.T, rowvar=False)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_5():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ans_np = np.cov(np_a.T, rowvar=True)
ans_ny = ny.cov(ny_a.T, rowvar=True)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_6():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ans_np = np.cov(np_a, bias=False)
ans_ny = ny.cov(ny_a, bias=False)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_7():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ans_np = np.cov(np_a, bias=True)
ans_ny = ny.cov(ny_a, bias=True)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_8():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ans_np = np.cov(np_a, ddof=None)
ans_ny = ny.cov(ny_a, ddof=None)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_9():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ans_np = np.cov(np_a, ddof=0)
ans_ny = ny.cov(ny_a, ddof=0)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_10():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ans_np = np.cov(np_a, ddof=1)
ans_ny = ny.cov(ny_a, ddof=1)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_11():
np_a = np.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ny_a = ny.array([[1, 2, 1, 9, 10, 3, 2, 6, 7],
[2, 1, 8, 3, 7, 5, 10, 7, 2]])
ans_np = np.cov(np_a, ddof=2)
ans_ny = ny.cov(ny_a, ddof=2)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_12():
np_a = np.array([[10, 5, 2, 4, 9, 3, 2], [10, 2, 8, 3, 7, 4, 1]])
np_y = np.array([1, 2, 2, 1, 1, 1, 1])
ny_a = ny.array([[10, 5, 2, 4, 9, 3, 2], [10, 2, 8, 3, 7, 4, 1]])
ny_y = ny.array([1, 2, 2, 1, 1, 1, 1])
ans_np = np.cov(np_a, fweights=np_y)
ans_ny = ny.cov(ny_a, fweights=ny_y)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_13():
np_a = np.array([[10, 5, 2, 4, 9, 3, 2], [10, 2, 8, 3, 7, 4, 1]])
ny_a = ny.array([[10, 5, 2, 4, 9, 3, 2], [10, 2, 8, 3, 7, 4, 1]])
ans_np = np.cov(np_a, aweights=None)
ans_ny = ny.cov(ny_a, aweights=None)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
def test_me_case_14():
np_a = np.array([[10, 5, 2, 4, 9, 3, 2], [10, 2, 8, 3, 7, 4, 1]])
ny_a = ny.array([[10, 5, 2, 4, 9, 3, 2], [10, 2, 8, 3, 7, 4, 1]])
np_w = np.array([0.1, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1])
ny_w = ny.array([0.1, 0.2, 0.2, 0.2, 0.1, 0.1, 0.1])
ans_np = np.cov(np_a, aweights=np_w)
ans_ny = ny.cov(ny_a, aweights=ny_w)
print("numpy={} nlcpy={}".format(ans_np, ans_ny))
assert_array_almost_equal(ans_np, ans_ny.get())
|
the-stack_0_4886 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__authors__ = ["Katharina Eggensperger", "Matthias Feurer"]
__contact__ = "automl.org"
from collections import OrderedDict
from itertools import product
from io import StringIO
import sys
import pyparsing
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import (
CategoricalHyperparameter,
UniformIntegerHyperparameter,
UniformFloatHyperparameter,
NumericalHyperparameter,
Constant,
IntegerHyperparameter,
NormalIntegerHyperparameter,
NormalFloatHyperparameter,
)
from ConfigSpace.conditions import (
EqualsCondition,
NotEqualsCondition,
InCondition,
AndConjunction,
OrConjunction,
ConditionComponent,
)
from ConfigSpace.forbidden import (
ForbiddenEqualsClause,
ForbiddenAndConjunction,
ForbiddenInClause,
AbstractForbiddenComponent,
MultipleValueForbiddenClause,
)
# Build pyparsing expressions for params
pp_param_name = pyparsing.Word(
pyparsing.alphanums + "_" + "-" + "@" + "." + ":" + ";" + "\\" + "/" + "?" + "!"
+ "$" + "%" + "&" + "*" + "+" + "<" + ">")
pp_digits = "0123456789"
pp_plusorminus = pyparsing.Literal('+') | pyparsing.Literal('-')
pp_int = pyparsing.Combine(pyparsing.Optional(pp_plusorminus) + pyparsing.Word(pp_digits))
pp_float = pyparsing.Combine(
pyparsing.Optional(pp_plusorminus) + pyparsing.Optional(pp_int) + "." + pp_int
)
pp_eorE = pyparsing.Literal('e') | pyparsing.Literal('E')
pp_floatorint = pp_float | pp_int
pp_e_notation = pyparsing.Combine(pp_floatorint + pp_eorE + pp_int)
pp_number = pp_e_notation | pp_float | pp_int
pp_numberorname = pp_number | pp_param_name
pp_il = pyparsing.Word("il")
pp_choices = pp_param_name + pyparsing.Optional(pyparsing.OneOrMore("," + pp_param_name))
pp_cont_param = pp_param_name + "[" + pp_number + "," + pp_number + "]" + \
"[" + pp_number + "]" + pyparsing.Optional(pp_il)
pp_cat_param = pp_param_name + "{" + pp_choices + "}" + "[" + pp_param_name + "]"
pp_condition = pp_param_name + "|" + pp_param_name + "in" + "{" + pp_choices + "}"
pp_forbidden_clause = "{" + pp_param_name + "=" + pp_numberorname + \
pyparsing.Optional(pyparsing.OneOrMore("," + pp_param_name + "=" + pp_numberorname)) + "}"
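# Illustrative examples of the pcs lines these expressions match (assumed
# names/values, consistent with the templates used by the build_* functions below):
#   continuous:  learning_rate [0.001, 1.0] [0.1]l   -> pp_cont_param
#   categorical: kernel {linear, poly, rbf} [rbf]    -> pp_cat_param
#   condition:   kernel | use_svm in {yes}           -> pp_condition
#   forbidden:   {kernel=poly, use_svm=no}           -> pp_forbidden_clause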
def build_categorical(param):
if param.probabilities is not None:
raise ValueError('The pcs format does not support categorical hyperparameters with '
'assigned weights/probabilities (for hyperparameter %s)' % param.name)
cat_template = "%s {%s} [%s]"
return cat_template % (param.name,
", ".join([str(value) for value in param.choices]),
str(param.default_value))
def build_constant(param):
constant_template = "%s {%s} [%s]"
return constant_template % (param.name, param.value, param.value)
def build_continuous(param):
if type(param) in (NormalIntegerHyperparameter,
NormalFloatHyperparameter):
param = param.to_uniform()
float_template = "%s%s [%s, %s] [%s]"
int_template = "%s%s [%d, %d] [%d]i"
if param.log:
float_template += "l"
int_template += "l"
if param.q is not None:
q_prefix = "Q%d_" % (int(param.q),)
else:
q_prefix = ""
default_value = param.default_value
if isinstance(param, IntegerHyperparameter):
default_value = int(default_value)
return int_template % (q_prefix, param.name, param.lower,
param.upper, default_value)
else:
return float_template % (q_prefix, param.name, str(param.lower),
str(param.upper), str(default_value))
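# For example (hypothetical parameter, shown for illustration): a
# UniformFloatHyperparameter named "lr" with lower=0.001, upper=1.0,
# default_value=0.1 and log=True is rendered as "lr [0.001, 1.0] [0.1]l";
# with q=2 the line additionally gains a "Q2_" prefix.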
def build_condition(condition):
if not isinstance(condition, ConditionComponent):
raise TypeError("build_condition must be called with an instance of "
"'%s', got '%s'" %
(ConditionComponent, type(condition)))
# Check if SMAC can handle the condition
if isinstance(condition, OrConjunction):
raise NotImplementedError("SMAC cannot handle OR conditions: %s" %
(condition))
if isinstance(condition, NotEqualsCondition):
raise NotImplementedError("SMAC cannot handle != conditions: %s" %
(condition))
# Now handle the conditions SMAC can handle
condition_template = "%s | %s in {%s}"
if isinstance(condition, AndConjunction):
return '\n'.join([
build_condition(cond) for cond in condition.components
])
elif isinstance(condition, InCondition):
return condition_template % (condition.child.name,
condition.parent.name,
", ".join(condition.values))
elif isinstance(condition, EqualsCondition):
return condition_template % (condition.child.name,
condition.parent.name,
condition.value)
else:
raise NotImplementedError(condition)
def build_forbidden(clause):
if not isinstance(clause, AbstractForbiddenComponent):
raise TypeError("build_forbidden must be called with an instance of "
"'%s', got '%s'" %
(AbstractForbiddenComponent, type(clause)))
if not isinstance(clause, (ForbiddenEqualsClause, ForbiddenAndConjunction)):
raise NotImplementedError("SMAC cannot handle '%s' of type %s" %
(str(clause), type(clause)))
retval = StringIO()
retval.write("{")
# Really simple because everything is an AND-conjunction of equals
# conditions
dlcs = clause.get_descendant_literal_clauses()
for dlc in dlcs:
if retval.tell() > 1:
retval.write(", ")
retval.write("%s=%s" % (dlc.hyperparameter.name, dlc.value))
retval.write("}")
retval.seek(0)
return retval.getvalue()
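# Illustrative output (assumed clause): a ForbiddenAndConjunction of
# ForbiddenEqualsClause(kernel, "poly") and ForbiddenEqualsClause(use_svm, "no")
# is written as "{kernel=poly, use_svm=no}".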
def read(pcs_string, debug=False):
"""
Read in a :py:class:`~ConfigSpace.configuration_space.ConfigurationSpace`
definition from a pcs file.
Example
-------
.. testsetup:: pcs_test
from ConfigSpace import ConfigurationSpace
import ConfigSpace.hyperparameters as CSH
from ConfigSpace.read_and_write import pcs
cs = ConfigurationSpace()
cs.add_hyperparameter(CSH.CategoricalHyperparameter('a', choices=[1, 2, 3]))
with open('configspace.pcs', 'w') as f:
f.write(pcs.write(cs))
.. doctest:: pcs_test
>>> from ConfigSpace.read_and_write import pcs
>>> with open('configspace.pcs', 'r') as fh:
... deserialized_conf = pcs.read(fh)
Parameters
----------
pcs_string : str
ConfigSpace definition in pcs format
debug : bool
Provides debug information. Defaults to False.
Returns
-------
:py:class:`~ConfigSpace.configuration_space.ConfigurationSpace`
The deserialized ConfigurationSpace object
"""
configuration_space = ConfigurationSpace()
conditions = []
forbidden = []
# some statistics
ct = 0
cont_ct = 0
cat_ct = 0
line_ct = 0
for line in pcs_string:
line_ct += 1
if "#" in line:
# It contains a comment
pos = line.find("#")
line = line[:pos]
# Remove quotes and whitespaces at beginning and end
line = line.replace('"', "").replace("'", "")
line = line.strip()
if "|" in line:
# It's a condition
try:
c = pp_condition.parseString(line)
conditions.append(c)
except pyparsing.ParseException:
raise NotImplementedError("Could not parse condition: %s" % line)
continue
if "}" not in line and "]" not in line:
continue
if line.startswith("{") and line.endswith("}"):
forbidden.append(line)
continue
if len(line.strip()) == 0:
continue
ct += 1
param = None
create = {"int": UniformIntegerHyperparameter,
"float": UniformFloatHyperparameter,
"categorical": CategoricalHyperparameter}
try:
param_list = pp_cont_param.parseString(line)
il = param_list[9:]
if len(il) > 0:
il = il[0]
param_list = param_list[:9]
name = param_list[0]
lower = float(param_list[2])
upper = float(param_list[4])
paramtype = "int" if "i" in il else "float"
log = True if "l" in il else False
default_value = float(param_list[7])
param = create[paramtype](name=name, lower=lower, upper=upper,
q=None, log=log, default_value=default_value)
cont_ct += 1
except pyparsing.ParseException:
pass
try:
param_list = pp_cat_param.parseString(line)
name = param_list[0]
choices = [c for c in param_list[2:-4:2]]
default_value = param_list[-2]
param = create["categorical"](name=name, choices=choices,
default_value=default_value)
cat_ct += 1
except pyparsing.ParseException:
pass
if param is None:
raise NotImplementedError("Could not parse: %s" % line)
configuration_space.add_hyperparameter(param)
for clause in forbidden:
# TODO test this properly!
# TODO Add a try/catch here!
# noinspection PyUnusedLocal
param_list = pp_forbidden_clause.parseString(clause)
tmp_list = []
clause_list = []
for value in param_list[1:]:
if len(tmp_list) < 3:
tmp_list.append(value)
else:
# So far, only equals is supported by SMAC
if tmp_list[1] == '=':
# TODO maybe add a check if the hyperparameter is
# actually in the configuration space
clause_list.append(ForbiddenEqualsClause(
configuration_space.get_hyperparameter(tmp_list[0]),
tmp_list[2]))
else:
raise NotImplementedError()
tmp_list = []
configuration_space.add_forbidden_clause(ForbiddenAndConjunction(
*clause_list))
# Now handle conditions
# If there are two conditions for one child, these two conditions are an
# AND-conjunction of conditions, thus we have to connect them
conditions_per_child = OrderedDict()
for condition in conditions:
child_name = condition[0]
if child_name not in conditions_per_child:
conditions_per_child[child_name] = list()
conditions_per_child[child_name].append(condition)
for child_name in conditions_per_child:
condition_objects = []
for condition in conditions_per_child[child_name]:
child = configuration_space.get_hyperparameter(child_name)
parent_name = condition[2]
parent = configuration_space.get_hyperparameter(parent_name)
restrictions = condition[5:-1:2]
# TODO: cast the type of the restriction!
if len(restrictions) == 1:
condition = EqualsCondition(child, parent, restrictions[0])
else:
condition = InCondition(child, parent, values=restrictions)
condition_objects.append(condition)
# Now we have all condition objects for this child, so we can build a
# giant AND-conjunction of them (if number of conditions >= 2)!
if len(condition_objects) > 1:
and_conjunction = AndConjunction(*condition_objects)
configuration_space.add_condition(and_conjunction)
else:
configuration_space.add_condition(condition_objects[0])
return configuration_space
def write(configuration_space):
"""
Create a string representation of a
:class:`~ConfigSpace.configuration_space.ConfigurationSpace` in pcs format.
This string can be written to file.
Example
-------
.. doctest::
>>> import ConfigSpace as CS
>>> import ConfigSpace.hyperparameters as CSH
>>> from ConfigSpace.read_and_write import pcs
>>> cs = CS.ConfigurationSpace()
>>> cs.add_hyperparameter(CSH.CategoricalHyperparameter('a', choices=[1, 2, 3]))
a, Type: Categorical, Choices: {1, 2, 3}, Default: 1
<BLANKLINE>
>>> with open('configspace.pcs', 'w') as fh:
... fh.write(pcs.write(cs))
15
Parameters
----------
configuration_space : :py:class:`~ConfigSpace.configuration_space.ConfigurationSpace`
a configuration space
Returns
-------
str
The string representation of the configuration space
"""
if not isinstance(configuration_space, ConfigurationSpace):
raise TypeError("pcs_parser.write expects an instance of %s, "
"you provided '%s'" % (ConfigurationSpace, type(configuration_space)))
param_lines = StringIO()
condition_lines = StringIO()
forbidden_lines = []
for hyperparameter in configuration_space.get_hyperparameters():
# Check if the hyperparameter names are valid SMAC names!
try:
pp_param_name.parseString(hyperparameter.name)
except pyparsing.ParseException:
raise ValueError(
"Illegal hyperparameter name for SMAC: %s" % hyperparameter.name)
# First build params
if param_lines.tell() > 0:
param_lines.write("\n")
if isinstance(hyperparameter, NumericalHyperparameter):
param_lines.write(build_continuous(hyperparameter))
elif isinstance(hyperparameter, CategoricalHyperparameter):
param_lines.write(build_categorical(hyperparameter))
elif isinstance(hyperparameter, Constant):
param_lines.write(build_constant(hyperparameter))
else:
raise TypeError("Unknown type: %s (%s)" % (
type(hyperparameter), hyperparameter))
for condition in configuration_space.get_conditions():
if condition_lines.tell() > 0:
condition_lines.write("\n")
condition_lines.write(build_condition(condition))
for forbidden_clause in configuration_space.get_forbiddens():
# Convert in-statement into two or more equals statements
dlcs = forbidden_clause.get_descendant_literal_clauses()
# First, get all in statements and convert them to equal statements
in_statements = []
other_statements = []
for dlc in dlcs:
if isinstance(dlc, MultipleValueForbiddenClause):
if not isinstance(dlc, ForbiddenInClause):
raise ValueError("SMAC cannot handle this forbidden "
"clause: %s" % dlc)
in_statements.append(
[ForbiddenEqualsClause(dlc.hyperparameter, value)
for value in dlc.values])
else:
other_statements.append(dlc)
# Second, create the product of all elements in the IN statements,
# create a ForbiddenAnd and add all ForbiddenEquals
if len(in_statements) > 0:
for i, p in enumerate(product(*in_statements)):
all_forbidden_clauses = list(p) + other_statements
f = ForbiddenAndConjunction(*all_forbidden_clauses)
forbidden_lines.append(build_forbidden(f))
else:
forbidden_lines.append(build_forbidden(forbidden_clause))
if condition_lines.tell() > 0:
condition_lines.seek(0)
param_lines.write("\n\n")
for line in condition_lines:
param_lines.write(line)
if len(forbidden_lines) > 0:
forbidden_lines.sort()
param_lines.write("\n\n")
for line in forbidden_lines:
param_lines.write(line)
param_lines.write("\n")
# Check if the default configuration is a valid configuration!
param_lines.seek(0)
return param_lines.getvalue()
if __name__ == "__main__":
fh = open(sys.argv[1])
orig_pcs = fh.readlines()
sp = read(orig_pcs, debug=True)
created_pcs = write(sp).split("\n")
print("============== Writing Results")
print("#Lines: ", len(created_pcs))
print("#LostLines: ", len(orig_pcs) - len(created_pcs))
diff = ["%s\n" % i for i in created_pcs if i not in " ".join(orig_pcs)]
print("Identical Lines: ", len(created_pcs) - len(diff))
print()
print("Up to 10 random different lines (of %d):" % len(diff))
print("".join(diff[:10]))
|
the-stack_0_4888 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2014 Germain Z. <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# Add vi/vim-like modes to WeeChat.
#
import csv
import os
import re
import subprocess
from StringIO import StringIO
import time
import weechat
# Script info.
# ============
SCRIPT_NAME = "vimode"
SCRIPT_AUTHOR = "GermainZ <[email protected]>"
SCRIPT_VERSION = "0.5"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = ("Add vi/vim-like modes and keybindings to WeeChat.")
# Global variables.
# =================
# General.
# --------
# Halp! Halp! Halp!
GITHUB_BASE = "https://github.com/GermainZ/weechat-vimode/blob/master/"
README_URL = GITHUB_BASE + "README.md"
FAQ_KEYBINDINGS = GITHUB_BASE + "FAQ#problematic-key-bindings.md"
FAQ_ESC = GITHUB_BASE + "FAQ.md#esc-key-not-being-detected-instantly"
# Holds the text of the command-line mode (currently only Ex commands ":").
cmd_text = ""
# Mode we're in. One of INSERT, NORMAL or REPLACE.
mode = "INSERT"
# Holds normal commands (e.g. "dd").
vi_buffer = ""
# See `cb_key_combo_default()`.
esc_pressed = 0
# See `cb_key_pressed()`.
last_signal_time = 0
# See `start_catching_keys()` for more info.
catching_keys_data = {'amount': 0}
# Used for ; and , to store the last f/F/t/T motion.
last_search_motion = {'motion': None, 'data': None}
# Script options.
vimode_settings = {'no_warn': ("off", "don't warn about problematic "
"keybindings and tmux/screen")}
# Regex patterns.
# ---------------
WHITESPACE = re.compile(r"\s")
IS_KEYWORD = re.compile(r"[a-zA-Z0-9_@À-ÿ]")
REGEX_MOTION_LOWERCASE_W = re.compile(r"\b\S|(?<=\s)\S")
REGEX_MOTION_UPPERCASE_W = re.compile(r"(?<=\s)\S")
REGEX_MOTION_UPPERCASE_E = re.compile(r"\S(?!\S)")
REGEX_MOTION_UPPERCASE_B = REGEX_MOTION_UPPERCASE_E
REGEX_MOTION_G_UPPERCASE_E = REGEX_MOTION_UPPERCASE_W
REGEX_MOTION_CARRET = re.compile(r"\S")
REGEX_INT = r"[0-9]"
# Regex used to detect problematic keybindings.
# For example: meta-wmeta-s is bound by default to ``/window swap``.
# If the user pressed Esc-w, WeeChat will detect it as meta-w and will not
# send any signal to `cb_key_combo_default()` just yet, since it's the
# beginning of a known key combo.
# Instead, `cb_key_combo_default()` will receive the Esc-ws signal, which
# becomes "ws" after removing the Esc part, and won't know how to handle it.
REGEX_PROBLEMATIC_KEYBINDINGS = re.compile(r"meta-\w(meta|ctrl)")
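# Illustrative check (not part of the original script): the default binding
# "meta-wmeta-s" matches this pattern and would be unbound by
# ``/vimode bind_keys``, while a plain "meta-w" does not match.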
# Vi commands.
# ------------
# See Also: `cb_exec_cmd()`.
VI_COMMANDS = {'h': "/help",
'qall': "/exit",
'q': "/close",
'w': "/save",
'set': "/set",
'bp': "/buffer -1",
'bn': "/buffer +1",
'bd': "/close",
'b#': "/input jump_last_buffer_displayed",
'b': "/buffer",
'sp': "/window splith",
'vsp': "/window splitv"}
# Vi operators.
# -------------
# Each operator must have a corresponding function, called "operator_X" where
# X is the operator. For example: `operator_c()`.
VI_OPERATORS = ["c", "d", "y"]
# Vi motions.
# -----------
# Vi motions. Each motion must have a corresponding function, called
# "motion_X" where X is the motion (e.g. `motion_w()`).
# See Also: `SPECIAL_CHARS`.
VI_MOTIONS = ["w", "e", "b", "^", "$", "h", "l", "W", "E", "B", "f", "F", "t",
"T", "ge", "gE", "0"]
# Special characters for motions. The corresponding function's name is
# converted before calling. For example, "^" will call `motion_carret` instead
# of `motion_^` (which isn't allowed because of illegal characters).
SPECIAL_CHARS = {'^': "carret",
'$': "dollar"}
# Methods for vi operators, motions and key bindings.
# ===================================================
# Documented base examples:
# -------------------------
def operator_base(buf, input_line, pos1, pos2, overwrite):
"""Operator method example.
Args:
buf (str): pointer to the current WeeChat buffer.
input_line (str): the content of the input line.
pos1 (int): the starting position of the motion.
pos2 (int): the ending position of the motion.
overwrite (bool, optional): whether the character at the cursor's new
position should be overwritten or not (for inclusive motions).
Defaults to False.
Notes:
Should be called "operator_X", where X is the operator, and defined in
`VI_OPERATORS`.
Must perform actions (e.g. modifying the input line) on its own,
using the WeeChat API.
See Also:
For additional examples, see `operator_d()` and
`operator_y()`.
"""
# Get start and end positions.
start = min(pos1, pos2)
end = max(pos1, pos2)
# Print the text the operator should go over.
weechat.prnt("", "Selection: %s" % input_line[start:end])
def motion_base(input_line, cur, count):
"""Motion method example.
Args:
input_line (str): the content of the input line.
cur (int): the position of the cursor.
count (int): the amount of times to multiply or iterate the action.
Returns:
A tuple containing three values:
int: the new position of the cursor.
bool: True if the motion is inclusive, False otherwise.
bool: True if the motion is catching, False otherwise.
See `start_catching_keys()` for more info on catching motions.
Notes:
Should be called "motion_X", where X is the motion, and defined in
`VI_MOTIONS`.
Must not modify the input line directly.
See Also:
For additional examples, see `motion_w()` (normal motion) and
`motion_f()` (catching motion).
"""
# Find (relative to cur) position of next number.
pos = get_pos(input_line, REGEX_INT, cur, True, count)
# Return the new (absolute) cursor position.
# This motion is exclusive and not catching, so both flags are False.
return cur + pos, False, False
def key_base(buf, input_line, cur, count):
"""Key method example.
Args:
buf (str): pointer to the current WeeChat buffer.
input_line (str): the content of the input line.
cur (int): the position of the cursor.
count (int): the amount of times to multiply or iterate the action.
Notes:
Should be called `key_X`, where X represents the key(s), and defined
in `VI_KEYS`.
Must perform actions on its own (using the WeeChat API).
See Also:
For additional examples, see `key_a()` (normal key) and
`key_r()` (catching key).
"""
# Key was pressed. Go to Insert mode (similar to "i").
set_mode("INSERT")
# Operators:
# ----------
def operator_d(buf, input_line, pos1, pos2, overwrite=False):
"""Delete text from `pos1` to `pos2` from the input line.
If `overwrite` is set to True, the character at the cursor's new position
is removed as well (the motion is inclusive).
See Also:
`operator_base()`.
"""
start = min(pos1, pos2)
end = max(pos1, pos2)
if overwrite:
end += 1
input_line = list(input_line)
del input_line[start:end]
input_line = "".join(input_line)
weechat.buffer_set(buf, "input", input_line)
set_cur(buf, input_line, pos1)
def operator_c(buf, input_line, pos1, pos2, overwrite=False):
"""Delete text from `pos1` to `pos2` from the input and enter Insert mode.
If `overwrite` is set to True, the character at the cursor's new position
is removed as well (the motion is inclusive.)
See Also:
`operator_base()`.
"""
"""
operator_d(buf, input_line, pos1, pos2, overwrite)
set_mode("INSERT")
def operator_y(buf, input_line, pos1, pos2, _):
"""Yank text from `pos1` to `pos2` from the input line.
See Also:
`operator_base()`.
"""
start = min(pos1, pos2)
end = max(pos1, pos2)
proc = subprocess.Popen(["xclip", "-selection", "c"],
stdin=subprocess.PIPE)
proc.communicate(input=input_line[start:end])
# Motions:
# --------
def motion_0(input_line, cur, count):
"""Go to the first character of the line.
See Also:
`motion_base()`.
"""
return 0, False, False
def motion_w(input_line, cur, count):
"""Go `count` words forward and return position.
See Also:
`motion_base()`.
"""
pos = get_pos(input_line, REGEX_MOTION_LOWERCASE_W, cur, True, count)
if pos == -1:
return len(input_line), False, False
return cur + pos, False, False
def motion_W(input_line, cur, count):
"""Go `count` WORDS forward and return position.
See Also:
`motion_base()`.
"""
pos = get_pos(input_line, REGEX_MOTION_UPPERCASE_W, cur, True, count)
if pos == -1:
return len(input_line), False, False
return cur + pos, False, False
def motion_e(input_line, cur, count):
"""Go to the end of `count` words and return position.
See Also:
`motion_base()`.
"""
for _ in range(max(1, count)):
found = False
pos = cur
for pos in range(cur + 1, len(input_line) - 1):
# Whitespace, keep going.
if WHITESPACE.match(input_line[pos]):
pass
# End of sequence made from 'iskeyword' characters only,
# or end of sequence made from non 'iskeyword' characters only.
elif ((IS_KEYWORD.match(input_line[pos]) and
(not IS_KEYWORD.match(input_line[pos + 1]) or
WHITESPACE.match(input_line[pos + 1]))) or
(not IS_KEYWORD.match(input_line[pos]) and
(IS_KEYWORD.match(input_line[pos + 1]) or
WHITESPACE.match(input_line[pos + 1])))):
found = True
cur = pos
break
# We're at the character before the last and we still found nothing.
# Go to the last character.
if not found:
cur = pos + 1
return cur, True, False
def motion_E(input_line, cur, count):
"""Go to the end of `count` WORDS and return cusor position.
See Also:
`motion_base()`.
"""
pos = get_pos(input_line, REGEX_MOTION_UPPERCASE_E, cur, True, count)
if pos == -1:
return len(input_line), False, False
return cur + pos, True, False
def motion_b(input_line, cur, count):
"""Go `count` words backwards and return position.
See Also:
`motion_base()`.
"""
# "b" is just "e" on inverted data (e.g. "olleH" instead of "Hello").
pos_inv = motion_e(input_line[::-1], len(input_line) - cur - 1, count)[0]
pos = len(input_line) - pos_inv - 1
return pos, True, False
def motion_B(input_line, cur, count):
"""Go `count` WORDS backwards and return position.
See Also:
`motion_base()`.
"""
new_cur = len(input_line) - cur
pos = get_pos(input_line[::-1], REGEX_MOTION_UPPERCASE_B, new_cur,
count=count)
if pos == -1:
return 0, False, False
pos = len(input_line) - (pos + new_cur + 1)
return pos, True, False
def motion_ge(input_line, cur, count):
"""Go to end of `count` words backwards and return position.
See Also:
`motion_base()`.
"""
# "ge is just "w" on inverted data (e.g. "olleH" instead of "Hello").
pos_inv = motion_w(input_line[::-1], len(input_line) - cur - 1, count)[0]
pos = len(input_line) - pos_inv - 1
return pos, True, False
def motion_gE(input_line, cur, count):
"""Go to end of `count` WORDS backwards and return position.
See Also:
`motion_base()`.
"""
new_cur = len(input_line) - cur - 1
pos = get_pos(input_line[::-1], REGEX_MOTION_G_UPPERCASE_E, new_cur,
True, count)
if pos == -1:
return 0, False, False
pos = len(input_line) - (pos + new_cur + 1)
return pos, True, False
def motion_h(input_line, cur, count):
"""Go `count` characters to the left and return position.
See Also:
`motion_base()`.
"""
return max(0, cur - max(count, 1)), False, False
def motion_l(input_line, cur, count):
"""Go `count` characters to the right and return position.
See Also:
`motion_base()`.
"""
return cur + max(count, 1), False, False
def motion_carret(input_line, cur, count):
"""Go to first non-blank character of line and return position.
See Also:
`motion_base()`.
"""
pos = get_pos(input_line, REGEX_MOTION_CARRET, 0)
return pos, False, False
def motion_dollar(input_line, cur, count):
"""Go to end of line and return position.
See Also:
`motion_base()`.
"""
pos = len(input_line)
return pos, False, False
def motion_f(input_line, cur, count):
"""Go to `count`'th occurence of character and return position.
See Also:
`motion_base()`.
"""
return start_catching_keys(1, "cb_motion_f", input_line, cur, count)
def cb_motion_f(update_last=True):
"""Callback for `motion_f()`.
Args:
update_last (bool, optional): should `last_search_motion` be updated?
Set to False when calling from `key_semicolon()` or `key_comma()`
so that the last search motion isn't overwritten.
Defaults to True.
See Also:
`start_catching_keys()`.
"""
global last_search_motion
pattern = catching_keys_data['keys']
pos = get_pos(catching_keys_data['input_line'], re.escape(pattern),
catching_keys_data['cur'], True,
catching_keys_data['count'])
catching_keys_data['new_cur'] = max(0, pos) + catching_keys_data['cur']
if update_last:
last_search_motion = {'motion': "f", 'data': pattern}
cb_key_combo_default(None, None, "")
def motion_F(input_line, cur, count):
"""Go to `count`'th occurence of char to the right and return position.
See Also:
`motion_base()`.
"""
return start_catching_keys(1, "cb_motion_F", input_line, cur, count)
def cb_motion_F(update_last=True):
"""Callback for `motion_F()`.
Args:
update_last (bool, optional): should `last_search_motion` be updated?
Set to False when calling from `key_semicolon()` or `key_comma()`
so that the last search motion isn't overwritten.
Defaults to True.
See Also:
`start_catching_keys()`.
"""
global last_search_motion
pattern = catching_keys_data['keys']
cur = len(catching_keys_data['input_line']) - catching_keys_data['cur']
pos = get_pos(catching_keys_data['input_line'][::-1],
re.escape(pattern),
cur,
False,
catching_keys_data['count'])
catching_keys_data['new_cur'] = catching_keys_data['cur'] - max(0, pos + 1)
if update_last:
last_search_motion = {'motion': "F", 'data': pattern}
cb_key_combo_default(None, None, "")
def motion_t(input_line, cur, count):
"""Go to `count`'th occurence of char and return position.
The position returned is the position of the character to the left of char.
See Also:
`motion_base()`.
"""
return start_catching_keys(1, "cb_motion_t", input_line, cur, count)
def cb_motion_t(update_last=True):
"""Callback for `motion_t()`.
Args:
update_last (bool, optional): should `last_search_motion` be updated?
Set to False when calling from `key_semicolon()` or `key_comma()`
so that the last search motion isn't overwritten.
Defaults to True.
See Also:
`start_catching_keys()`.
"""
global last_search_motion
pattern = catching_keys_data['keys']
pos = get_pos(catching_keys_data['input_line'], re.escape(pattern),
catching_keys_data['cur'] + 1,
True, catching_keys_data['count'])
pos += 1
if pos > 0:
catching_keys_data['new_cur'] = pos + catching_keys_data['cur'] - 1
else:
catching_keys_data['new_cur'] = catching_keys_data['cur']
if update_last:
last_search_motion = {'motion': "t", 'data': pattern}
cb_key_combo_default(None, None, "")
def motion_T(input_line, cur, count):
"""Go to `count`'th occurence of char to the left and return position.
The position returned is the position of the character to the right of
char.
See Also:
`motion_base()`.
"""
return start_catching_keys(1, "cb_motion_T", input_line, cur, count)
def cb_motion_T(update_last=True):
"""Callback for `motion_T()`.
Args:
update_last (bool, optional): should `last_search_motion` be updated?
Set to False when calling from `key_semicolon()` or `key_comma()`
so that the last search motion isn't overwritten.
Defaults to True.
See Also:
`start_catching_keys()`.
"""
global last_search_motion
pattern = catching_keys_data['keys']
pos = get_pos(catching_keys_data['input_line'][::-1], re.escape(pattern),
(len(catching_keys_data['input_line']) -
(catching_keys_data['cur'] + 1)) + 1,
True, catching_keys_data['count'])
pos += 1
if pos > 0:
catching_keys_data['new_cur'] = catching_keys_data['cur'] - pos + 1
else:
catching_keys_data['new_cur'] = catching_keys_data['cur']
if update_last:
last_search_motion = {'motion': "T", 'data': pattern}
cb_key_combo_default(None, None, "")
# Keys:
# -----
def key_cc(buf, input_line, cur, count):
"""Delete line and start Insert mode.
See Also:
`key_base()`.
"""
weechat.command("", "/input delete_line")
set_mode("INSERT")
def key_C(buf, input_line, cur, count):
"""Delete from cursor to end of line and start Insert mode.
See Also:
`key_base()`.
"""
weechat.command("", "/input delete_end_of_line")
set_mode("INSERT")
def key_yy(buf, input_line, cur, count):
"""Yank line.
See Also:
`key_base()`.
"""
proc = subprocess.Popen(["xclip", "-selection", "c"],
stdin=subprocess.PIPE)
proc.communicate(input=input_line)
def key_i(buf, input_line, cur, count):
"""Start Insert mode.
See Also:
`key_base()`.
"""
set_mode("INSERT")
def key_a(buf, input_line, cur, count):
"""Move cursor one character to the right and start Insert mode.
See Also:
`key_base()`.
"""
set_cur(buf, input_line, cur + 1, False)
set_mode("INSERT")
def key_A(buf, input_line, cur, count):
"""Move cursor to end of line and start Insert mode.
See Also:
`key_base()`.
"""
set_cur(buf, input_line, len(input_line), False)
set_mode("INSERT")
def key_I(buf, input_line, cur, count):
"""Move cursor to first non-blank character and start Insert mode.
See Also:
`key_base()`.
"""
pos, _, _ = motion_carret(input_line, cur, 0)
set_cur(buf, input_line, pos)
set_mode("INSERT")
def key_G(buf, input_line, cur, count):
"""Scroll to specified line or bottom of buffer.
See Also:
`key_base()`.
"""
if count > 0:
# This is necessary to prevent weird scroll jumps.
weechat.command("", "/window scroll_top")
weechat.command("", "/window scroll %s" % (count - 1))
else:
weechat.command("", "/window scroll_bottom")
def key_r(buf, input_line, cur, count):
"""Replace `count` characters under the cursor.
See Also:
`key_base()`.
"""
start_catching_keys(1, "cb_key_r", input_line, cur, count, buf)
def cb_key_r():
"""Callback for `key_r()`.
See Also:
`start_catching_keys()`.
"""
global catching_keys_data
input_line = list(catching_keys_data['input_line'])
count = max(catching_keys_data['count'], 1)
cur = catching_keys_data['cur']
if cur + count <= len(input_line):
for _ in range(count):
input_line[cur] = catching_keys_data['keys']
cur += 1
input_line = "".join(input_line)
weechat.buffer_set(catching_keys_data['buf'], "input", input_line)
set_cur(catching_keys_data['buf'], input_line, cur - 1)
catching_keys_data = {'amount': 0}
def key_R(buf, input_line, cur, count):
"""Start Replace mode.
See Also:
`key_base()`.
"""
set_mode("REPLACE")
def key_tilda(buf, input_line, cur, count):
"""Switch the case of `count` characters under the cursor.
See Also:
`key_base()`.
"""
input_line = list(input_line)
count = max(1, count)
while count and cur < len(input_line):
input_line[cur] = input_line[cur].swapcase()
count -= 1
cur += 1
input_line = "".join(input_line)
weechat.buffer_set(buf, "input", input_line)
set_cur(buf, input_line, cur)
def key_alt_j(buf, input_line, cur, count):
"""Go to WeeChat buffer.
Called to preserve WeeChat's alt-j buffer switching.
This is only called when alt-j<num> is pressed after pressing Esc, because
\x01[\x01[j is received in key_combo_default, which becomes \x01[j after
removing the detected Esc key.
If Esc isn't the last pressed key, \x01[j<num> is directly received in
key_combo_default.
"""
start_catching_keys(2, "cb_key_alt_j", input_line, cur, count)
def cb_key_alt_j():
"""Callback for `key_alt_j()`.
See Also:
`start_catching_keys()`.
"""
global catching_keys_data
weechat.command("", "/buffer " + catching_keys_data['keys'])
catching_keys_data = {'amount': 0}
def key_semicolon(buf, input_line, cur, count, swap=False):
"""Repeat last f, t, F, T `count` times.
Args:
swap (bool, optional): if True, the last motion will be repeated in the
opposite direction (e.g. "f" instead of "F"). Defaults to False.
See Also:
`key_base()`.
"""
global catching_keys_data, vi_buffer
catching_keys_data = ({'amount': 0,
'input_line': input_line,
'cur': cur,
'keys': last_search_motion['data'],
'count': count,
'new_cur': 0,
'buf': buf})
# Swap the motion's case if called from key_comma.
if swap:
motion = last_search_motion['motion'].swapcase()
else:
motion = last_search_motion['motion']
func = "cb_motion_%s" % motion
vi_buffer = motion
globals()[func](False)
def key_comma(buf, input_line, cur, count):
"""Repeat last f, t, F, T in opposite direction `count` times.
See Also:
`key_base()`.
"""
key_semicolon(buf, input_line, cur, count, True)
# Vi key bindings.
# ================
# String values will be executed as normal WeeChat commands.
# For functions, see `key_base()` for reference.
VI_KEYS = {'j': "/window scroll_down",
'k': "/window scroll_up",
'G': key_G,
'gg': "/window scroll_top",
'x': "/input delete_next_char",
'X': "/input delete_previous_char",
'dd': "/input delete_line",
'D': "/input delete_end_of_line",
'cc': key_cc,
'C': key_C,
'i': key_i,
'a': key_a,
'A': key_A,
'I': key_I,
'yy': key_yy,
'p': "/input clipboard_paste",
'/': "/input search_text",
'gt': "/buffer +1",
'K': "/buffer +1",
'gT': "/buffer -1",
'J': "/buffer -1",
'r': key_r,
'R': key_R,
'~': key_tilda,
'\x01[[A': "/input history_previous",
'\x01[[B': "/input history_next",
'\x01[[C': "/input move_next_char",
'\x01[[D': "/input move_previous_char",
'\x01[[H': "/input move_beginning_of_line",
'\x01[[F': "/input move_end_of_line",
'\x01[[5~': "/window page_up",
'\x01[[6~': "/window page_down",
'\x01[[3~': "/input delete_next_char",
'\x01[[2~': key_i,
'\x01M': "/input return",
'\x01?': "/input move_previous_char",
' ': "/input move_next_char",
'\x01[j': key_alt_j,
'\x01[1': "/buffer *1",
'\x01[2': "/buffer *2",
'\x01[3': "/buffer *3",
'\x01[4': "/buffer *4",
'\x01[5': "/buffer *5",
'\x01[6': "/buffer *6",
'\x01[7': "/buffer *7",
'\x01[8': "/buffer *8",
'\x01[9': "/buffer *9",
'\x01[0': "/buffer *10",
'\x01^': "/input jump_last_buffer_displayed",
'\x01D': "/window page_down",
'\x01U': "/window page_up",
'\x01Wh': "/window left",
'\x01Wj': "/window down",
'\x01Wk': "/window up",
'\x01Wl': "/window right",
'\x01W=': "/window balance",
'\x01Wx': "/window swap",
'\x01Ws': "/window splith",
'\x01Wv': "/window splitv",
'\x01Wq': "/window merge",
';': key_semicolon,
',': key_comma}
# Add alt-j<number> bindings.
for i in range(10, 99):
VI_KEYS['\x01[j%s' % i] = "/buffer %s" % i
# Key handling.
# =============
def cb_key_pressed(data, signal, signal_data):
"""Detect potential Esc presses.
Alt and Esc are detected as the same key in most terminals. The difference
is that the Alt signal is sent just before the other pressed key's signal.
We therefore use a timeout (50ms) to detect whether Alt or Esc was pressed.
"""
global last_signal_time
last_signal_time = time.time()
if signal_data == "\x01[":
# In 50ms, check if any other keys were pressed. If not, it's Esc!
weechat.hook_timer(50, 0, 1, "cb_check_esc",
"{:f}".format(last_signal_time))
return weechat.WEECHAT_RC_OK
def cb_check_esc(data, remaining_calls):
"""Check if the Esc key was pressed and change the mode accordingly."""
global esc_pressed, vi_buffer, cmd_text, catching_keys_data
if last_signal_time == float(data):
esc_pressed += 1
set_mode("NORMAL")
# Cancel any current partial commands.
vi_buffer = ""
cmd_text = ""
weechat.command("", "/bar hide vi_cmd")
catching_keys_data = {'amount': 0}
weechat.bar_item_update("vi_buffer")
return weechat.WEECHAT_RC_OK
def cb_key_combo_default(data, signal, signal_data):
"""Eat and handle key events when in Normal mode, if needed.
The key_combo_default signal is sent when a key combo is pressed. For
example, alt-k will send the "\x01[k" signal.
Esc is handled a bit differently to avoid delays, see `cb_key_pressed()`.
"""
global esc_pressed, vi_buffer, cmd_text
# If Esc was pressed, strip the Esc part from the pressed keys.
# Example: user presses Esc followed by i. This is detected as "\x01[i",
# but we only want to handle "i".
keys = signal_data
if esc_pressed or esc_pressed == -2:
if keys.startswith("\x01[" * esc_pressed):
# Multiples of 3 seem to "cancel" themselves,
# e.g. Esc-Esc-Esc-Alt-j-11 is detected as "\x01[\x01[\x01"
# followed by "\x01[j11" (two different signals).
if signal_data == "\x01[" * 3:
esc_pressed = -1 # `cb_check_esc()` will increment it to 0.
else:
esc_pressed = 0
# This can happen if a valid combination is started but interrupted
# with Esc, such as Ctrl-W→Esc→w which would send two signals:
# "\x01W\x01[" then "\x01W\x01[w".
# In that case, we still need to handle the next signal ("\x01W\x01[w")
# so we use the special value "-2".
else:
esc_pressed = -2
keys = keys.split("\x01[")[-1] # Remove the "Esc" part(s).
# Ctrl-Space.
elif keys == "\x01@":
set_mode("NORMAL")
return weechat.WEECHAT_RC_OK_EAT
# Nothing to do here.
if mode == "INSERT":
return weechat.WEECHAT_RC_OK
# We're in Replace mode — allow "normal" key presses (e.g. "a") and
# overwrite the next character with them, but let the other key presses
# pass normally (e.g. backspace, arrow keys, etc).
if mode == "REPLACE":
if len(keys) == 1:
weechat.command("", "/input delete_next_char")
elif keys == "\x01?":
weechat.command("", "/input move_previous_char")
return weechat.WEECHAT_RC_OK_EAT
return weechat.WEECHAT_RC_OK
# We're catching keys! Only "normal" key presses interest us (e.g. "a"),
# not complex ones (e.g. backspace).
if len(keys) == 1 and catching_keys_data['amount']:
catching_keys_data['keys'] += keys
catching_keys_data['amount'] -= 1
# Done catching keys, execute the callback.
if catching_keys_data['amount'] == 0:
globals()[catching_keys_data['callback']]()
vi_buffer = ""
weechat.bar_item_update("vi_buffer")
return weechat.WEECHAT_RC_OK_EAT
# We're in command-line mode.
if cmd_text:
# Backspace key.
if keys == "\x01?":
# Remove the last character from our command line.
cmd_text = list(cmd_text)
del cmd_text[-1]
cmd_text = "".join(cmd_text)
# Return key.
elif keys == "\x01M":
weechat.hook_timer(1, 0, 1, "cb_exec_cmd", cmd_text)
cmd_text = ""
# Input.
elif len(keys) == 1:
cmd_text += keys
# Update (and maybe hide) the bar item.
weechat.bar_item_update("cmd_text")
if not cmd_text:
weechat.command("", "/bar hide vi_cmd")
return weechat.WEECHAT_RC_OK_EAT
# Enter command mode.
elif keys == ":":
cmd_text += ":"
weechat.command("", "/bar show vi_cmd")
weechat.bar_item_update("cmd_text")
return weechat.WEECHAT_RC_OK_EAT
# Add key to the buffer.
vi_buffer += keys
weechat.bar_item_update("vi_buffer")
if not vi_buffer:
return weechat.WEECHAT_RC_OK
# Check if the keys have a (partial or full) match. If so, also get the
# keys without the count. (These are the actual keys we should handle.)
# After that, `vi_buffer` is only used for display purposes — only
# `vi_keys` is checked for all the handling.
# If no matches are found, the keys buffer is cleared.
matched, vi_keys, count = get_keys_and_count(vi_buffer)
if not matched:
vi_buffer = ""
return weechat.WEECHAT_RC_OK_EAT
buf = weechat.current_buffer()
input_line = weechat.buffer_get_string(buf, "input")
cur = weechat.buffer_get_integer(buf, "input_pos")
# It's a key. If the corresponding value is a string, we assume it's a
# WeeChat command. Otherwise, it's a method we'll call.
if vi_keys in VI_KEYS:
if isinstance(VI_KEYS[vi_keys], str):
for _ in range(max(count, 1)):
# This is to avoid crashing WeeChat on script reloads/unloads,
# because no hooks must still be running when a script is
# reloaded or unloaded.
if VI_KEYS[vi_keys] == "/input return":
return weechat.WEECHAT_RC_OK
weechat.command("", VI_KEYS[vi_keys])
current_cur = weechat.buffer_get_integer(buf, "input_pos")
set_cur(buf, input_line, current_cur)
else:
VI_KEYS[vi_keys](buf, input_line, cur, count)
# It's a motion (e.g. "w") — call `motion_X()` where X is the motion, then
# set the cursor's position to what that function returned.
elif vi_keys in VI_MOTIONS:
if vi_keys in SPECIAL_CHARS:
func = "motion_%s" % SPECIAL_CHARS[vi_keys]
else:
func = "motion_%s" % vi_keys
end, _, _ = globals()[func](input_line, cur, count)
set_cur(buf, input_line, end)
# It's an operator + motion (e.g. "dw") — call `motion_X()` (where X is
# the motion), then we call `operator_Y()` (where Y is the operator)
# with the position `motion_X()` returned. `operator_Y()` should then
# handle changing the input line.
elif (len(vi_keys) > 1 and
vi_keys[0] in VI_OPERATORS and
vi_keys[1:] in VI_MOTIONS):
if vi_keys[1:] in SPECIAL_CHARS:
func = "motion_%s" % SPECIAL_CHARS[vi_keys[1:]]
else:
func = "motion_%s" % vi_keys[1:]
pos, overwrite, catching = globals()[func](input_line, cur, count)
# If it's a catching motion, we don't want to call the operator just
# yet -- this code will run again when the motion is complete, at which
# point we will.
if not catching:
oper = "operator_%s" % vi_keys[0]
globals()[oper](buf, input_line, cur, pos, overwrite)
# The combo isn't completed yet (e.g. just "d").
else:
return weechat.WEECHAT_RC_OK_EAT
# We've already handled the key combo, so clear the keys buffer.
if not catching_keys_data['amount']:
vi_buffer = ""
weechat.bar_item_update("vi_buffer")
return weechat.WEECHAT_RC_OK_EAT
# Callbacks.
# ==========
# Bar items.
# ----------
def cb_vi_buffer(data, item, window):
"""Return the content of the vi buffer (pressed keys on hold)."""
return vi_buffer
def cb_cmd_text(data, item, window):
"""Return the text of the command line."""
return cmd_text
def cb_mode_indicator(data, item, window):
"""Return the current mode (INSERT/NORMAL/REPLACE)."""
return mode[0]
def cb_line_numbers(data, item, window):
"""Fill the line numbers bar item."""
bar_height = weechat.window_get_integer(window, "win_chat_height")
content = ""
for i in range(1, bar_height + 1):
content += "%s \n" % i
return content
# Callbacks for the line numbers bar.
# ...................................
def cb_update_line_numbers(data, signal, signal_data):
"""Call `cb_timer_update_line_numbers()` when switching buffers.
A timer is required because the bar item is refreshed before the new buffer
is actually displayed, so ``win_chat_height`` would refer to the old
buffer. Using a timer refreshes the item after the new buffer is displayed.
"""
weechat.hook_timer(10, 0, 1, "cb_timer_update_line_numbers", "")
return weechat.WEECHAT_RC_OK
def cb_timer_update_line_numbers(data, remaining_calls):
"""Update the line numbers bar item."""
weechat.bar_item_update("line_numbers")
return weechat.WEECHAT_RC_OK
# Config.
# -------
def cb_config(data, option, value):
"""Script option changed, update our copy."""
option_name = option.split(".")[-1]
if option_name in vimode_settings:
vimode_settings[option_name] = value
return weechat.WEECHAT_RC_OK
# Command-line execution.
# -----------------------
def cb_exec_cmd(data, remaining_calls):
"""Translate and execute our custom commands to WeeChat command."""
# Process the entered command.
data = list(data)
del data[0]
data = "".join(data)
# s/foo/bar command.
if data.startswith("s/"):
cmd = data
parsed_cmd = next(csv.reader(StringIO(cmd), delimiter="/",
escapechar="\\"))
pattern = re.escape(parsed_cmd[1])
repl = parsed_cmd[2]
repl = re.sub(r"([^\\])&", r"\1" + pattern, repl)
flag = None
if len(parsed_cmd) == 4:
flag = parsed_cmd[3]
count = 1
if flag == "g":
count = 0
buf = weechat.current_buffer()
input_line = weechat.buffer_get_string(buf, "input")
input_line = re.sub(pattern, repl, input_line, count)
weechat.buffer_set(buf, "input", input_line)
# Shell command.
elif data.startswith("!"):
weechat.command("", "/exec -buffer shell %s" % data[1:])
# Commands like `:22`. This should start cursor mode (``/cursor``) and take
# us to the relevant line.
# TODO: look into possible replacement key bindings for: ← ↑ → ↓ Q m q.
elif data.isdigit():
line_number = int(data)
hdata_window = weechat.hdata_get("window")
window = weechat.current_window()
x = weechat.hdata_integer(hdata_window, window, "win_chat_x")
y = (weechat.hdata_integer(hdata_window, window, "win_chat_y") +
(line_number - 1))
weechat.command("", "/cursor go {},{}".format(x, y))
# Check against defined commands.
else:
data = data.split(" ", 1)
cmd = data[0]
args = ""
if len(data) == 2:
args = data[1]
if cmd in VI_COMMANDS:
weechat.command("", "%s %s" % (VI_COMMANDS[cmd], args))
# No vi commands defined, run the command as a WeeChat command.
else:
weechat.command("", "/{} {}".format(cmd, args))
return weechat.WEECHAT_RC_OK
# Script commands.
# ----------------
def cb_vimode_cmd(data, buf, args):
"""Handle script commands (``/vimode <command>``)."""
# ``/vimode`` or ``/vimode help``
if not args or args == "help":
weechat.prnt("", "[vimode.py] %s" % README_URL)
# ``/vimode bind_keys`` or ``/vimode bind_keys --list``
elif args.startswith("bind_keys"):
infolist = weechat.infolist_get("key", "", "default")
weechat.infolist_reset_item_cursor(infolist)
commands = ["/key unbind ctrl-W",
"/key bind ctrl-W /input delete_previous_word",
"/key bind ctrl-^ /input jump_last_buffer_displayed",
"/key bind ctrl-Wh /window left",
"/key bind ctrl-Wj /window down",
"/key bind ctrl-Wk /window up",
"/key bind ctrl-Wl /window right",
"/key bind ctrl-W= /window balance",
"/key bind ctrl-Wx /window swap",
"/key bind ctrl-Ws /window splith",
"/key bind ctrl-Wv /window splitv",
"/key bind ctrl-Wq /window merge"]
while weechat.infolist_next(infolist):
key = weechat.infolist_string(infolist, "key")
if re.match(REGEX_PROBLEMATIC_KEYBINDINGS, key):
commands.append("/key unbind %s" % key)
if args == "bind_keys":
weechat.prnt("", "Running commands:")
for command in commands:
weechat.command("", command)
weechat.prnt("", "Done.")
elif args == "bind_keys --list":
weechat.prnt("", "Listing commands we'll run:")
for command in commands:
weechat.prnt("", " %s" % command)
weechat.prnt("", "Done.")
return weechat.WEECHAT_RC_OK
# Helpers.
# ========
# Motions/keys helpers.
# ---------------------
def get_pos(data, regex, cur, ignore_cur=False, count=0):
"""Return the position of `regex` match in `data`, starting at `cur`.
Args:
data (str): the data to search in.
regex (pattern): regex pattern to search for.
cur (int): where to start the search.
ignore_cur (bool, optional): should the first match be ignored if it's
also the character at `cur`?
Defaults to False.
count (int, optional): the index of the match to return. Defaults to 0.
Returns:
int: position of the match. -1 if no matches are found.
"""
# List of the *positions* of the found patterns.
matches = [m.start() for m in re.finditer(regex, data[cur:])]
pos = -1
if count:
if len(matches) > count - 1:
if ignore_cur and matches[0] == 0:
if len(matches) > count:
pos = matches[count]
else:
pos = matches[count - 1]
elif matches:
if ignore_cur and matches[0] == 0:
if len(matches) > 1:
pos = matches[1]
else:
pos = matches[0]
return pos
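# Illustrative sketch only (not called by the script): how get_pos() reports the
# offset of a match relative to `cur`. The helper name below is made up.
def _demo_get_pos():
    data = "hello world foo"
    # First "w" is at index 6, so searching from cur=0 yields offset 6.
    assert get_pos(data, r"w", 0) == 6
    # With ignore_cur, a match sitting exactly at `cur` is skipped; here it is
    # the only match, so -1 is returned.
    assert get_pos(data, r"h", 0, ignore_cur=True) == -1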
def set_cur(buf, input_line, pos, cap=True):
"""Set the cursor's position.
Args:
buf (str): pointer to the current WeeChat buffer.
input_line (str): the content of the input line.
pos (int): the position to set the cursor to.
cap (bool, optional): if True, the `pos` will shortened to the length
of `input_line` if it's too long. Defaults to True.
"""
if cap:
pos = min(pos, len(input_line) - 1)
weechat.buffer_set(buf, "input_pos", str(pos))
def start_catching_keys(amount, callback, input_line, cur, count, buf=None):
"""Start catching keys. Used for special commands (e.g. "f", "r").
amount (int): amount of keys to catch.
callback (str): name of method to call once all keys are caught.
input_line (str): input line's content.
cur (int): cursor's position.
count (int): count, e.g. "2" for "2fs".
buf (str, optional): pointer to the current WeeChat buffer.
Defaults to None.
`catching_keys_data` is a dict with the above arguments, as well as:
keys (str): pressed keys will be added under this key.
new_cur (int): the new cursor's position, set in the callback.
When catching keys is active, normal pressed keys (e.g. "a" but not arrows)
will get added to `catching_keys_data` under the key "keys", and will not
be handled any further.
Once all keys are caught, the method defined in the "callback" key is
called, and can use the data in `catching_keys_data` to perform its action.
"""
global catching_keys_data
if "new_cur" in catching_keys_data:
new_cur = catching_keys_data['new_cur']
catching_keys_data = {'amount': 0}
return new_cur, True, False
catching_keys_data = ({'amount': amount,
'callback': callback,
'input_line': input_line,
'cur': cur,
'keys': "",
'count': count,
'new_cur': 0,
'buf': buf})
return cur, False, True
def get_keys_and_count(combo):
"""Check if `combo` is a valid combo and extract keys/counts if so.
Args:
combo (str): pressed keys combo.
Returns:
matched (bool): True if the combo has a (partial or full) match, False
otherwise.
combo (str): `combo` with the count removed. These are the actual keys
we should handle.
count (int): count for `combo`.
"""
# Look for a potential match (e.g. "d" might become "dw" or "dd" so we
# accept it, but "d9" is invalid).
matched = False
# Digits are allowed at the beginning (counts or "0").
count = 0
if combo.isdigit():
matched = True
elif combo and combo[0].isdigit():
count = ""
for char in combo:
if char.isdigit():
count += char
else:
break
combo = combo.replace(count, "", 1)
count = int(count)
# Check against defined keys.
if not matched:
for key in VI_KEYS:
if key.startswith(combo):
matched = True
break
# Check against defined motions.
if not matched:
for motion in VI_MOTIONS:
if motion.startswith(combo):
matched = True
break
# Check against defined operators + motions.
if not matched:
for operator in VI_OPERATORS:
if combo.startswith(operator):
# Check for counts before the motion (but after the operator).
vi_keys_no_op = combo[len(operator):]
# There's no motion yet.
if vi_keys_no_op.isdigit():
matched = True
break
# Get the motion count, then multiply the operator count by
# it, similar to vim's behavior.
elif vi_keys_no_op and vi_keys_no_op[0].isdigit():
motion_count = ""
for char in vi_keys_no_op:
if char.isdigit():
motion_count += char
else:
break
# Remove counts from `vi_keys_no_op`.
combo = combo.replace(motion_count, "", 1)
motion_count = int(motion_count)
count = max(count, 1) * motion_count
# Check against defined motions.
for motion in VI_MOTIONS:
if motion.startswith(combo[1:]):
matched = True
break
return matched, combo, count
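# Hedged sketch of the count handling above, assuming "d" is in VI_OPERATORS and
# "w" in VI_MOTIONS (as in the key tables defined earlier in this script). The
# function below is illustrative only and never called.
def _demo_get_keys_and_count():
    matched, keys, count = get_keys_and_count("2d3w")
    # The operator count ("2") and the motion count ("3") are multiplied,
    # vim-style, so the caller would apply the "dw" operation 6 times.
    return matched, keys, count  # expected: (True, "dw", 6)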
# Other helpers.
# --------------
def set_mode(arg):
"""Set the current mode and update the bar mode indicator."""
global mode
mode = arg
# If we're going to Normal mode, the cursor must move one character to the
# left.
if mode == "NORMAL":
buf = weechat.current_buffer()
input_line = weechat.buffer_get_string(buf, "input")
cur = weechat.buffer_get_integer(buf, "input_pos")
set_cur(buf, input_line, cur - 1, False)
weechat.bar_item_update("mode_indicator")
def print_warning(text):
"""Print warning, in red, to the current buffer."""
weechat.prnt("", ("%s[vimode.py] %s" % (weechat.color("red"), text)))
def check_warnings():
"""Warn the user about problematic key bindings and tmux/screen."""
user_warned = False
# Warn the user about problematic key bindings that may conflict with
# vimode.
# The solution is to remove these key bindings, but that's up to the user.
infolist = weechat.infolist_get("key", "", "default")
problematic_keybindings = []
while weechat.infolist_next(infolist):
key = weechat.infolist_string(infolist, "key")
command = weechat.infolist_string(infolist, "command")
if re.match(REGEX_PROBLEMATIC_KEYBINDINGS, key):
problematic_keybindings.append("%s -> %s" % (key, command))
if problematic_keybindings:
user_warned = True
print_warning("Problematic keybindings detected:")
for keybinding in problematic_keybindings:
print_warning(" %s" % keybinding)
print_warning("These keybindings may conflict with vimode.")
print_warning("You can remove problematic key bindings and add"
" recommended ones by using /vimode bind_keys, or only"
" list them with /vimode bind_keys --list")
print_warning("For help, see: %s" % FAQ_KEYBINDINGS)
del problematic_keybindings
# Warn tmux/screen users about possible Esc detection delays.
if "STY" in os.environ or "TMUX" in os.environ:
if user_warned:
weechat.prnt("", "")
user_warned = True
print_warning("tmux/screen users, see: %s" % FAQ_ESC)
if (user_warned and not
weechat.config_string_to_boolean(vimode_settings['no_warn'])):
if user_warned:
weechat.prnt("", "")
print_warning("To force disable warnings, you can set"
" plugins.var.python.vimode.no_warn to 'on'")
# Main script.
# ============
if __name__ == "__main__":
weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,
SCRIPT_LICENSE, SCRIPT_DESC, "", "")
# Warn the user if he's using an unsupported WeeChat version.
VERSION = weechat.info_get("version_number", "")
if int(VERSION) < 0x01000000:
print_warning("Please upgrade to WeeChat ≥ 1.0.0. Previous versions"
" are not supported.")
# Set up script options.
for option, value in vimode_settings.items():
if weechat.config_is_set_plugin(option):
vimode_settings[option] = weechat.config_get_plugin(option)
else:
weechat.config_set_plugin(option, value[0])
vimode_settings[option] = value[0]
weechat.config_set_desc_plugin(option,
"%s (default: \"%s\")" % (value[1],
value[0]))
# Warn the user about possible problems if necessary.
if not weechat.config_string_to_boolean(vimode_settings['no_warn']):
check_warnings()
# Create bar items and setup hooks.
weechat.bar_item_new("mode_indicator", "cb_mode_indicator", "")
weechat.bar_item_new("cmd_text", "cb_cmd_text", "")
weechat.bar_item_new("vi_buffer", "cb_vi_buffer", "")
weechat.bar_item_new("line_numbers", "cb_line_numbers", "")
weechat.bar_new("vi_cmd", "off", "0", "root", "", "bottom", "vertical",
"vertical", "0", "0", "default", "default", "default", "0",
"cmd_text")
weechat.bar_new("vi_line_numbers", "on", "0", "window", "", "left",
"vertical", "vertical", "0", "0", "default", "default",
"default", "0", "line_numbers")
weechat.hook_config("plugins.var.python.%s.*" % SCRIPT_NAME, "cb_config",
"")
weechat.hook_signal("key_pressed", "cb_key_pressed", "")
weechat.hook_signal("key_combo_default", "cb_key_combo_default", "")
weechat.hook_signal("buffer_switch", "cb_update_line_numbers", "")
weechat.hook_command("vimode", SCRIPT_DESC, "[help | bind_keys [--list]]",
" help: show help\n"
"bind_keys: unbind problematic keys, and bind"
" recommended keys to use in WeeChat\n"
" --list: only list changes",
"help || bind_keys |--list",
"cb_vimode_cmd", "")
|
the-stack_0_4890 | from functools import lru_cache
from findimports import ModuleGraph
from pathlib import Path
from onegov.core import LEVELS
def test_hierarchy():
""" Originally, onegov.* modules were separated into separate repositories
and deployed individually to PyPI.
This meant that each module would list the dependencies it needed,
including other onegov.* modules. As a side-effect, this ensured that
a module like onegov.core would not import from onegov.org, creating
an undesired dependency.
With the move to a single repository and a container build, we lost this
side-effect. It is now possible for onegov.core to import from onegov.org
and that is not something we want, because things like the core should
not import from modules higher up the chain.
This test ensures that this restriction is still honored.
Each module is put into a level. Modules may import from the same level
or the levels below, but not from the levels above.
The current list of levels is also used for the upgrade step order. It can
be found in `onegov.core.__init__.py`.
    This is not exactly equivalent to what we had before, but it is a good
    basic check to ensure that we do not add unwanted dependencies.
"""
modules = level_by_module(LEVELS)
# all modules must be defined
for module in existing_modules():
assert module in modules, f"module not defined in hierarchy: {module}"
# graph all imports
graph = ModuleGraph()
graph.parsePathname(str(sources()))
# ensure hierarchy
for id, module in graph.modules.items():
name = module_name(module.filename)
if name is None:
continue
allowed = allowed_imports(LEVELS, name)
for imported in module.imported_names:
import_name = '.'.join(imported.name.split('.')[:2])
if not import_name.startswith('onegov'):
continue
assert import_name in allowed, \
f"Invalid import {name} → {import_name} in {imported.filename}"
def allowed_imports(levels, module):
""" Given a module name, returns an imprtable set of onegov modules. """
allowed = set()
for modules in levels:
allowed.update(modules)
if module in modules:
return allowed
assert False, f"unknown module: {module}"
def sources():
""" Returns the path to 'src'. """
return Path(__file__).parent.parent / 'src'
@lru_cache(maxsize=128)
def module_name(path):
""" Given a path, returns the onegov module, or None, if not a onegov
module (and therefore not relevant to this analysis).
"""
namespace = sources() / 'onegov'
if namespace in Path(path).parents:
name = str(path).replace(str(namespace), '')\
.strip('/')\
.split('/', 1)[0]
return f'onegov.{name}'
def level_by_module(levels):
""" Returns a dictionary with modules -> level. """
result = {}
for level, modules in enumerate(levels):
for module in modules:
assert module not in result, f"duplicate module: {module}"
result[module] = level
return result
def existing_modules():
""" Yields the module names found in the src/onegov folder. """
for child in (sources() / 'onegov').iterdir():
if child.is_dir():
yield f'onegov.{child.name}'
|
the-stack_0_4891 | # -*- coding: utf-8 -*-
try:
from models.interface import AbstractModel
except:
from interface import AbstractModel
import torch
import torch.nn.functional as F
import torch.nn as nn
import torchvision
import torchvision.datasets as datasets
import matplotlib.pyplot as plt
import numpy as np
import pickle
from torch import Tensor
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from scipy.fft import rfft, rfftfreq, fft, fftfreq
import scipy
import time
import copy
import json
from pathlib import Path
class EEGDCNNModel(AbstractModel):
DATA_PATH = "./"
OUTPUT_PATH = "./"
def __init__(self, sample_rate=1, data_frequency=128):
model = nn.Sequential(
nn.Conv2d(4, 32, [3, 1]),
nn.ReLU(),
nn.Dropout(),
nn.Conv2d(32, 64, [3, 1]),
nn.ReLU(),
nn.Dropout(),
nn.MaxPool2d([3, 3]),
nn.Flatten(),
nn.Linear(5760, 512),
nn.ReLU(),
nn.Linear(512, 256),
nn.ReLU(),
nn.Linear(256, 4)
)
self.model = model
base_path = Path(__file__).parent
self.model.load_state_dict(torch.load((base_path / 'model_multi.pth').resolve(), 'cpu'))
self.model.eval()
self.sample_rate = sample_rate
self.data_frequency = data_frequency
print("Initialized EEG DCNN Model with sample rate {} data freq {}".format(self.sample_rate, self.data_frequency))
# data passed in is one trial with only the 32 channels with last 3 sec trimmed
# period has to be a factor of the total clip length
def run(self, data_path):
print("Running EEG DCNN Model")
self.run_eeg(self.DATA_PATH + data_path, self.data_frequency, self.sample_rate)
def run_eeg(self, data_path, data_frequency, sample_rate):
self.data = np.array(pickle.load(open(data_path, "rb"), encoding='latin1'))
# data is 32 channel, 7680 (60 * 128)
channels_total = self.data.shape[0]
time_total = self.data.shape[1]
windows = int((time_total / data_frequency) * sample_rate)
final_data = []
        # sliding window length (in seconds); must match the window length used when training
train_sliding_window = 4
# loops through all the windows
for i in range(windows - train_sliding_window):
time_window = self.data[:, int((data_frequency * i) / sample_rate): int((data_frequency * (i + train_sliding_window)) / sample_rate)]
transformed_channel = []
# loops through all the channels
for channel_num in range(channels_total):
channel_data = time_window[channel_num]
# convert to frequency domain
fft_channel = np.abs(rfft(channel_data))
fftfreq_channel = rfftfreq(channel_data.size, 1/ data_frequency)
# fft_channel_normalized = np.fft.fftshift(fft_channel / channel_data.size)
# power_spectrum = np.square(fft_channel_normalized)
# power = np.sum(power_spectrum)
# identify frequency ranges
one_freq = np.where(fftfreq_channel == 1)[0][0]
eight_freq = np.where(fftfreq_channel == 8)[0][0]
fourteen_freq = np.where(fftfreq_channel == 14)[0][0]
thirty_freq = np.where(fftfreq_channel == 30)[0][0]
fourtyfive_freq = np.where(fftfreq_channel == 45)[0][0]
# make bins for frequency ranges
theta_bin = fft_channel[one_freq:eight_freq]
alpha_bin = fft_channel[eight_freq:fourteen_freq]
beta_bin = fft_channel[fourteen_freq:thirty_freq]
gamma_bin = fft_channel[thirty_freq:fourtyfive_freq]
all_bins = [theta_bin, alpha_bin, beta_bin, gamma_bin]
transformed_channel.append(all_bins)
binned_pcc_matrix = np.ones((4, channels_total, channels_total)) # 4, 32, 32
for bin_num in range(4):
pcc_matrix = binned_pcc_matrix[bin_num] # 32, 32
index_mover = 0
# creates correlation matrices for each bin
for channel_num_i in range(0, channels_total):
for channel_num_j in range(index_mover, channels_total):
data1 = transformed_channel[channel_num_i][bin_num]
data2 = transformed_channel[channel_num_j][bin_num]
pcc_num = scipy.stats.pearsonr(data1, data2)[0]
pcc_matrix[channel_num_i][channel_num_j] = pcc_num
pcc_matrix[channel_num_j][channel_num_i] = pcc_num
index_mover += 1
binned_pcc_matrix[bin_num] = pcc_matrix
final_data.append(binned_pcc_matrix)
        # pad the remaining windows with the last computed output
for i in range(min(windows, train_sliding_window)):
final_data.append(binned_pcc_matrix)
self.data = torch.tensor(final_data).float()
# run model
output = self.model(self.data)
_, preds = torch.max(output, 1)
# output data as json
json_data = dict()
for i in range(len(preds)):
json_data[i / sample_rate] = int(preds[i])
json_dict = dict()
json_dict["metadata"] = {"dataPath": data_path, "eegLabelFrequency": str(sample_rate), "eegModelName":"defaulteeg"}
json_dict["data"] = json_data
with open(self.OUTPUT_PATH + 'defaulteeg.json', "w+") as outfile:
json.dump(json_dict, outfile)
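# Standalone sketch of the band-binning step used in run_eeg above, on a
# synthetic 10 Hz tone; numpy/scipy.fft are already imported at module level.
# Illustrative only and not called by the tests below.
def _demo_band_bins(data_frequency=128):
    t = np.arange(0, 4, 1.0 / data_frequency)
    channel = np.sin(2 * np.pi * 10 * t)        # 10 Hz tone falls in the alpha band
    spectrum = np.abs(rfft(channel))
    freqs = rfftfreq(channel.size, 1.0 / data_frequency)
    alpha = spectrum[(freqs >= 8) & (freqs < 14)]
    beta = spectrum[(freqs >= 14) & (freqs < 30)]
    return alpha.max() > beta.max()             # expected: True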
def test_output_format_eeg():
model = EEGDCNNModel(sample_rate=2)
model.OUTPUT_PATH = './output/'
print("Testing output format")
model.run('uploads/dev/s01_trial01.dat')
output = json.load(open('output/defaulteeg.json', 'r'))
# print(type(output), output)
assert set(output.keys()) == set(['metadata', 'data']), "Error: wrong keys in output json: " + str(output.keys())
assert "59.0" in output['data'].keys() and '58.5' in output['data'].keys(), "Error with timestamps: " + str(output['data'].keys())
print("Passed output test")
def test_parameters_eeg():
print("Testing model parameters")
model = EEGDCNNModel(sample_rate=4)
model.OUTPUT_PATH = './output/'
model.run('uploads/dev/s01_trial01.dat')
output = json.load(open('output/defaulteeg.json', 'r'))
assert str(output['metadata']['eegLabelFrequency']) == '4', "Error setting eegLabelFrequency: " + str(output['metadata'])
print("Passed parameter test")
if __name__ == "__main__":
# test_run = EEGDCNNModel(sample_rate=1, data_frequency=128)
# test_run.run('s01_trial01.dat')
test_output_format_eeg()
test_parameters_eeg() |
the-stack_0_4892 | # Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from Deadline.Cloud import HardwareType
class AzureVmSpec:
def __init__(self, vcpus, mem_mb):
self.vcpus = vcpus
self.mem_mb = mem_mb
AZURE_VM_SIZES = {
# Compute Optimised
'Standard_F2': AzureVmSpec(2, 4 * 1024),
'Standard_F4': AzureVmSpec(4, 8 * 1024),
'Standard_F8': AzureVmSpec(8, 16 * 1024),
'Standard_F16': AzureVmSpec(16, 32 * 1024),
# General purpose
'Standard_D2_v3': AzureVmSpec(2, 8 * 1024),
'Standard_D4_v3': AzureVmSpec(4, 16 * 1024),
'Standard_D8_v3': AzureVmSpec(8, 32 * 1024),
'Standard_D16_v3': AzureVmSpec(16, 64 * 1024),
'Standard_D32_v3': AzureVmSpec(32, 128 * 1024),
'Standard_D64_v3': AzureVmSpec(64, 256 * 1024),
# GPU v1
'Standard_NC6': AzureVmSpec(6, 56 * 1024),
'Standard_NC12': AzureVmSpec(12, 112 * 1024),
'Standard_NC24': AzureVmSpec(24, 224 * 1024),
}
def vm_sizes_to_hardware_types(vm_sizes):
"""
Maps Azure VM sizes to Deadline HardwareType list
:param vm_sizes: list
:return: list of Deadline.Cloud.HardwareType
:rtype: list of Deadline.Cloud.HardwareType
"""
hw_types = []
if vm_sizes:
for vm_size in vm_sizes:
hwt = HardwareType()
hwt.ID = vm_size
hwt.Name = vm_size
hwt.RamMB = 0
hwt.VCPUs = 0
if vm_size in AZURE_VM_SIZES:
vm_spec = AZURE_VM_SIZES[vm_size]
hwt.RamMB = vm_spec.mem_mb
hwt.VCPUs = vm_spec.vcpus
hw_types.append(hwt)
else:
for vm_size, vm_spec in AZURE_VM_SIZES.iteritems():
hwt = HardwareType()
hwt.ID = vm_size
hwt.Name = vm_size
hwt.RamMB = vm_spec.mem_mb
hwt.VCPUs = vm_spec.vcpus
hw_types.append(hwt)
return hw_types
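# Hedged usage sketch (requires the Deadline.Cloud runtime like the rest of this
# plugin): known sizes get their RAM/vCPU filled in, while an unknown size (the
# made-up 'Standard_X99' below) falls back to zeros.
def _demo_vm_sizes_to_hardware_types():
    hw = vm_sizes_to_hardware_types(['Standard_F2', 'Standard_X99'])
    return [(h.ID, h.VCPUs, h.RamMB) for h in hw]
    # expected: [('Standard_F2', 2, 4096), ('Standard_X99', 0, 0)]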
|
the-stack_0_4893 | import argparse
from sniffles.feature import FeatureParser
from sniffles.rule_formats import (PetabiPacketClassifierFormat, RegexFormat,
RuleFormat, SnortRuleFormat)
def main():
parser = argparse.ArgumentParser(description='Random Rule Generator')
parser.add_argument('-c', '--count', type=int, default=1,
help='the number of rules to generate (default: 1)')
parser.add_argument('-f', '--feature_file',
help='the file containing the feature set description')
parser.add_argument('-o', '--output_file', default='rules.txt',
help='the output file to which rules are written '
'(default: rules.txt)')
parser.add_argument('-r', '--rule_format',
choices=['petabipktclass', 'regex', 'snort'],
default='regex',
help='rule format')
args = parser.parse_args()
try:
myfp = FeatureParser(args.feature_file)
myfeatures = myfp.getFeatures()
myrules = generateRules(myfeatures, args.count)
printRules(myrules, args.output_file, args.rule_format)
except Exception as err:
print("RandRuleGen-main: " + str(err))
def generateRules(feature_list, count=1):
return ['; '.join(map(str, feature_list)) + '; '] * count
def printRules(rule_list=None, outfile=None, rule_format=None):
if rule_list and outfile:
fd = open(outfile, 'w', encoding='utf-8')
for rule in rule_list:
rwf = getRuleWithFormat(rule, rule_format)
fd.write(str(rwf))
fd.write("\n")
fd.close()
def getRuleWithFormat(rule=None, fmt=None):
rulefmt = None
if rule:
if fmt is not None:
if fmt == "snort":
rulefmt = SnortRuleFormat(
rule, getRuleWithFormat.rule_counter)
getRuleWithFormat.rule_counter += 1
if fmt == "petabipktclass":
rulefmt = PetabiPacketClassifierFormat(rule)
if fmt == "regex":
rulefmt = RegexFormat(rule)
if rulefmt is None:
rulefmt = RuleFormat(rule)
return rulefmt
getRuleWithFormat.rule_counter = 1
if __name__ == "__main__":
main()
|
the-stack_0_4894 | import requests
from PIL import Image
from datainfo import file_list
for item in file_list:
item_file = '../items/'+item
items = open(item_file, 'r').read().split()
for name in items:
print('downloading', name)
url = 'https://gameinfo.albiononline.com/api/gameinfo/items/'
response = requests.get(url+name, stream=True)
if response.status_code == 200:
img = Image.open(response.raw)
img = img.resize((50, 50))
img.save('img_lowquality/'+name+'.png')
|
the-stack_0_4895 | # Copyright 2017 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for _user_pattern module.
"""
from __future__ import absolute_import, print_function
import re
import copy
import pytest
from zhmcclient import Client, HTTPError, NotFound, UserPattern
from zhmcclient_mock import FakedSession
from tests.common.utils import assert_resources
class TestUserPattern(object):
"""All tests for the UserPattern and UserPatternManager classes."""
def setup_method(self):
"""
Setup that is called by pytest before each test method.
Set up a faked session, and add a faked Console without any
child resources.
"""
# pylint: disable=attribute-defined-outside-init
self.session = FakedSession('fake-host', 'fake-hmc', '2.13.1', '1.8')
self.client = Client(self.session)
self.faked_console = self.session.hmc.consoles.add({
'object-id': None,
# object-uri will be automatically set
'parent': None,
'class': 'console',
'name': 'fake-console1',
'description': 'Console #1',
})
self.console = self.client.consoles.find(name=self.faked_console.name)
def add_user_pattern(self, name, pattern, type_, user_template_uri):
"""
Add a faked user pattern object to the faked Console and return it.
"""
faked_user_pattern = self.faked_console.user_patterns.add({
'element-id': 'oid-{}'.format(name),
# element-uri will be automatically set
'parent': '/api/console',
'class': 'user-pattern',
'name': name,
'description': 'User Pattern {}'.format(name),
'pattern': pattern,
'type': type_,
'retention-time': 0,
'user-template-uri': user_template_uri,
})
return faked_user_pattern
def add_user(self, name, type_):
"""
Add a faked user object to the faked Console and return it.
"""
faked_user = self.faked_console.users.add({
'object-id': 'oid-{}'.format(name),
# object-uri will be automatically set
'parent': '/api/console',
'class': 'user',
'name': name,
'description': 'User {}'.format(name),
'type': type_,
'authentication-type': 'local',
})
return faked_user
def test_upm_repr(self):
"""Test UserPatternManager.__repr__()."""
user_pattern_mgr = self.console.user_patterns
# Execute the code to be tested
repr_str = repr(user_pattern_mgr)
repr_str = repr_str.replace('\n', '\\n')
# We check just the begin of the string:
assert re.match(r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'.
format(classname=user_pattern_mgr.__class__.__name__,
id=id(user_pattern_mgr)),
repr_str)
def test_upm_initial_attrs(self):
"""Test initial attributes of UserPatternManager."""
user_pattern_mgr = self.console.user_patterns
# Verify all public properties of the manager object
assert user_pattern_mgr.resource_class == UserPattern
assert user_pattern_mgr.class_name == 'user-pattern'
assert user_pattern_mgr.session is self.session
assert user_pattern_mgr.parent is self.console
assert user_pattern_mgr.console is self.console
@pytest.mark.parametrize(
"full_properties_kwargs, prop_names", [
(dict(full_properties=False),
['element-uri']),
(dict(full_properties=True),
['element-uri', 'name']),
(dict(), # test default for full_properties (True)
['element-uri', 'name']),
]
)
@pytest.mark.parametrize(
"filter_args, exp_names", [
(None,
['a', 'b']),
({},
['a', 'b']),
({'name': 'a'},
['a']),
]
)
def test_upm_list(
self, filter_args, exp_names, full_properties_kwargs, prop_names):
"""Test UserPatternManager.list()."""
faked_user1 = self.add_user(name='a', type_='standard')
faked_user2 = self.add_user(name='b', type_='standard')
faked_user_pattern1 = self.add_user_pattern(
name='a', pattern='a_*', type_='glob-like',
user_template_uri=faked_user1.uri)
faked_user_pattern2 = self.add_user_pattern(
name='b', pattern='b_.*', type_='regular-expression',
user_template_uri=faked_user2.uri)
faked_user_patterns = [faked_user_pattern1, faked_user_pattern2]
exp_faked_user_patterns = [u for u in faked_user_patterns
if u.name in exp_names]
user_pattern_mgr = self.console.user_patterns
# Execute the code to be tested
user_patterns = user_pattern_mgr.list(filter_args=filter_args,
**full_properties_kwargs)
assert_resources(user_patterns, exp_faked_user_patterns, prop_names)
@pytest.mark.parametrize(
"input_props, exp_prop_names, exp_exc", [
({}, # props missing
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'description': 'fake description X'}, # props missing
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'description': 'fake description X',
'name': 'a',
'pattern': 'a*'}, # several missing
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'description': 'fake description X',
'name': 'a',
'pattern': 'a*'}, # several missing
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'description': 'fake description X',
'name': 'a',
'pattern': 'a*',
'type': 'glob-like'}, # props missing
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'description': 'fake description X',
'name': 'a',
'pattern': 'a*',
'type': 'glob-like',
'retention-time': 0}, # props missing
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'description': 'fake description X',
'name': 'a',
'pattern': 'a*',
'type': 'glob-like',
'retention-time': 28,
'user-template-uri': '/api/users/oid-tpl'},
['element-uri', 'name', 'description', 'pattern', 'type',
'retention-time', 'user-template-uri'],
None),
]
)
def test_upm_create(self, input_props, exp_prop_names, exp_exc):
"""Test UserPatternManager.create()."""
faked_user_template = self.add_user(name='tpl', type_='template')
assert faked_user_template.uri == '/api/users/oid-tpl'
user_pattern_mgr = self.console.user_patterns
if exp_exc is not None:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
user_pattern_mgr.create(properties=input_props)
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
else:
# Execute the code to be tested.
user_pattern = user_pattern_mgr.create(properties=input_props)
# Check the resource for consistency within itself
assert isinstance(user_pattern, UserPattern)
user_pattern_name = user_pattern.name
exp_user_pattern_name = user_pattern.properties['name']
assert user_pattern_name == exp_user_pattern_name
user_pattern_uri = user_pattern.uri
exp_user_pattern_uri = user_pattern.properties['element-uri']
assert user_pattern_uri == exp_user_pattern_uri
# Check the properties against the expected names and values
for prop_name in exp_prop_names:
assert prop_name in user_pattern.properties
if prop_name in input_props:
value = user_pattern.properties[prop_name]
exp_value = input_props[prop_name]
assert value == exp_value
def test_up_repr(self):
"""Test UserPattern.__repr__()."""
faked_user1 = self.add_user(name='a', type_='standard')
faked_user_pattern1 = self.add_user_pattern(
name='a', pattern='a_*', type_='glob-like',
user_template_uri=faked_user1.uri)
user_pattern1 = self.console.user_patterns.find(
name=faked_user_pattern1.name)
# Execute the code to be tested
repr_str = repr(user_pattern1)
repr_str = repr_str.replace('\n', '\\n')
# We check just the begin of the string:
assert re.match(r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'.
format(classname=user_pattern1.__class__.__name__,
id=id(user_pattern1)),
repr_str)
@pytest.mark.parametrize(
"input_props, exp_exc", [
({'name': 'a',
'description': 'fake description X',
'pattern': 'a*',
'type': 'glob-like',
'retention-time': 28,
'user-template-uri': '/api/users/oid-tpl'},
None),
]
)
def test_up_delete(self, input_props, exp_exc):
"""Test UserPattern.delete()."""
faked_user_pattern = self.add_user_pattern(
name=input_props['name'],
pattern=input_props['pattern'],
type_=input_props['type'],
user_template_uri=input_props['user-template-uri'])
user_pattern_mgr = self.console.user_patterns
user_pattern = user_pattern_mgr.find(name=faked_user_pattern.name)
if exp_exc is not None:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
user_pattern.delete()
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
# Check that the user pattern still exists
user_pattern_mgr.find(name=faked_user_pattern.name)
else:
# Execute the code to be tested.
user_pattern.delete()
# Check that the user pattern no longer exists
with pytest.raises(NotFound) as exc_info:
user_pattern_mgr.find(name=faked_user_pattern.name)
def test_up_delete_create_same(self):
"""Test UserPattern.delete() followed by create() with same name."""
user_pattern_name = 'faked_a'
faked_user1 = self.add_user(name='a', type_='standard')
# Add the user pattern to be tested
self.add_user_pattern(
name=user_pattern_name, pattern='a_*', type_='glob-like',
user_template_uri=faked_user1.uri)
# Input properties for a user pattern with the same name
sn_user_pattern_props = {
'name': user_pattern_name,
'description': 'User Pattern with same name',
'pattern': 'a*',
'type': 'glob-like',
'retention-time': 28,
'user-template-uri': '/api/users/oid-tpl',
}
user_pattern_mgr = self.console.user_patterns
user_pattern = user_pattern_mgr.find(name=user_pattern_name)
# Execute the deletion code to be tested
user_pattern.delete()
# Check that the user pattern no longer exists
with pytest.raises(NotFound):
user_pattern_mgr.find(name=user_pattern_name)
# Execute the creation code to be tested.
user_pattern_mgr.create(sn_user_pattern_props)
# Check that the user pattern exists again under that name
sn_user_pattern = user_pattern_mgr.find(name=user_pattern_name)
description = sn_user_pattern.get_property('description')
assert description == sn_user_pattern_props['description']
@pytest.mark.parametrize(
"input_props", [
{},
{'description': 'New user pattern description'},
]
)
def test_up_update_properties(self, input_props):
"""Test UserPattern.update_properties()."""
user_pattern_name = 'faked_a'
faked_user1 = self.add_user(name='a', type_='standard')
# Add the user pattern to be tested
self.add_user_pattern(
name=user_pattern_name, pattern='a_*', type_='glob-like',
user_template_uri=faked_user1.uri)
user_pattern_mgr = self.console.user_patterns
user_pattern = user_pattern_mgr.find(name=user_pattern_name)
user_pattern.pull_full_properties()
saved_properties = copy.deepcopy(user_pattern.properties)
# Execute the code to be tested
user_pattern.update_properties(properties=input_props)
# Verify that the resource object already reflects the property
# updates.
for prop_name in saved_properties:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_properties[prop_name]
assert prop_name in user_pattern.properties
prop_value = user_pattern.properties[prop_name]
assert prop_value == exp_prop_value, \
"Unexpected value for property {!r}".format(prop_name)
# Refresh the resource object and verify that the resource object
# still reflects the property updates.
user_pattern.pull_full_properties()
for prop_name in saved_properties:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_properties[prop_name]
assert prop_name in user_pattern.properties
prop_value = user_pattern.properties[prop_name]
assert prop_value == exp_prop_value
|
the-stack_0_4897 | import copy
def compose(a, b, keep_null=False):
"""
Compose two operations into one.
``keep_null`` [default=false] is a boolean that controls whether None/Null
attributes are retrained.
"""
if a is None:
a = {}
if b is None:
b = {}
# deep copy b, but get rid of None values if keep_null is falsey
attributes = dict((k, copy.deepcopy(v)) for k, v in b.items() if keep_null or v is not None)
for k, v in a.items():
if k not in b:
attributes[k] = copy.deepcopy(v)
return attributes or None
def diff(a, b):
"""
Return the difference between operations a and b.
"""
if a is None:
a = {}
if b is None:
b = {}
keys = set(a.keys()).union(set(b.keys()))
attributes = {}
for k in keys:
av, bv = a.get(k, None), b.get(k, None)
if av != bv:
attributes[k] = bv
return attributes or None
def transform(a, b, priority=True):
"""
Return the transformation from operation a to b.
If ``priority`` is falsey [default=True] then just return b.
"""
if a is None:
a = {}
if b is None:
b = {}
if not priority:
return b or None
attributes = {}
for k, v in b.items():
if k not in a:
attributes[k] = v
return attributes or None
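# Small usage sketch (illustrative only): how the three helpers above behave on
# plain attribute dicts.
def _demo_attribute_ops():
    a = {'bold': True, 'color': 'red'}
    b = {'color': 'blue'}
    assert compose(a, b) == {'bold': True, 'color': 'blue'}
    assert diff(a, b) == {'bold': None, 'color': 'blue'}
    # With priority, keys already present in `a` win, so nothing survives here.
    assert transform(a, b, priority=True) is None
    assert transform(a, b, priority=False) == {'color': 'blue'}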
def length_of(op):
typ = type_of(op)
if typ == 'delete':
return op['delete']
elif typ == 'retain':
return op['retain']
elif isinstance(op.get('insert'), str):
return len(op['insert'])
else:
return 1
def type_of(op):
if not op:
return None
if isinstance(op.get('delete'), int):
return 'delete'
if isinstance(op.get('retain'), int):
return 'retain'
return 'insert'
class Iterator(object):
"""
An iterator that enables itself to break off operations
to exactly the length needed via the ``next()`` method.
"""
def __init__(self, ops=[]):
self.ops = ops
self.reset()
def reset(self):
self.index = 0
self.offset = 0
def has_next(self):
return self.peek_length() is not None
def next(self, length=None):
offset = self.offset
op = self.peek()
op_type = type_of(op)
if op is None:
return { 'retain': None }
op_length = length_of(op)
if (length is None or length >= op_length - offset):
length = op_length - offset
self.index += 1
self.offset = 0
else:
self.offset += length
if op_type == 'delete':
return { 'delete': length }
result_op = {}
if op.get('attributes'):
result_op['attributes'] = op['attributes']
if op_type == 'retain':
result_op['retain'] = length
elif isinstance(op.get('insert'), str):
result_op['insert'] = op['insert'][offset:offset+length]
else:
assert offset == 0
assert length == 1
if 'insert' in op:
result_op['insert'] = op['insert']
return result_op
__next__ = next
def __length__(self):
return len(self.ops)
def __iter__(self):
return self
def peek(self):
try:
return self.ops[self.index]
except IndexError:
return None
def peek_length(self):
next_op = self.peek()
if next_op is None:
return None
return length_of(next_op) - self.offset
def peek_type(self):
op = self.peek()
if op is None:
return 'retain'
return type_of(op)
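# Rough usage sketch (not part of the public API): slicing a long insert op into
# fixed-size chunks with Iterator.next().
def _demo_iterator_chunks():
    it = Iterator([{'insert': 'abcdef'}, {'retain': 3}])
    assert it.next(4) == {'insert': 'abcd'}
    assert it.next(4) == {'insert': 'ef'}
    assert it.next() == {'retain': 3}
    assert it.has_next() is False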
length = length_of
type = type_of
iterator = lambda x: Iterator(x) |
the-stack_0_4899 | n, l, t = input().split()
n, l, t = int(n), int(l), int(t)
p = [int(i) for i in input().split()]
sp = sorted(p)
map_set = list()
for i in p:
map_set.append(sp.index(i))
ori = [1] * n
for ti in range(t):
for i in range(n-1):
if sp[i] == sp[i+1]:
ori[i] ^= (-1^1)
ori[i+1] ^= (-1^1)
if sp[0] == 0 and ori[0] == -1:
ori[0] = 1
if sp[n-1] == l and ori[n-1] == 1:
ori[n-1] = -1
for i in range(n):
sp[i] += ori[i]
for i in map_set:
print(sp[i], end=" ")
print() |
the-stack_0_4901 | """
This module contains the panel API.
"""
import logging
from pyqode.core.api.mode import Mode
from pyqode.qt import QtWidgets, QtGui
def _logger():
""" Returns module's logger """
return logging.getLogger(__name__)
class Panel(QtWidgets.QWidget, Mode):
"""
Base class for editor panels.
A panel is a mode and a QWidget.
.. note:: Use enabled to disable panel actions and setVisible to change the
visibility of the panel.
"""
class Position(object):
"""
Enumerates the possible panel positions
"""
#: Top margin
TOP = 0
#: Left margin
LEFT = 1
#: Right margin
RIGHT = 2
#: Bottom margin
BOTTOM = 3
@classmethod
def iterable(cls):
""" Returns possible positions as an iterable (list) """
return [cls.TOP, cls.LEFT, cls.RIGHT, cls.BOTTOM]
@property
def scrollable(self):
"""
A scrollable panel will follow the editor's scroll-bars. Left and right
panels follow the vertical scrollbar. Top and bottom panels follow the
horizontal scrollbar.
:type: bool
"""
return self._scrollable
@scrollable.setter
def scrollable(self, value):
self._scrollable = value
def __init__(self, dynamic=False):
Mode.__init__(self)
QtWidgets.QWidget.__init__(self)
#: Specifies whether the panel is dynamic. A dynamic panel is a panel
#: that will be shown/hidden depending on the context.
#: Dynamic panel should not appear in any GUI menu (e.g. no display
#: in the panels menu of the notepad example).
self.dynamic = dynamic
#: Panel order into the zone it is installed to. This value is
#: automatically set when installing the panel but it can be changed
#: later (negative values can also be used).
self.order_in_zone = -1
self._scrollable = False
self._background_brush = None
self._foreground_pen = None
#: Position in the editor (top, left, right, bottom)
self.position = -1
def on_install(self, editor):
"""
Extends :meth:`pyqode.core.api.Mode.on_install` method to set the
editor instance as the parent widget.
.. warning:: Don't forget to call **super** if you override this
method!
:param editor: editor instance
:type editor: pyqode.core.api.CodeEdit
"""
Mode.on_install(self, editor)
self.setParent(editor)
self.setPalette(QtWidgets.QApplication.instance().palette())
self.setFont(QtWidgets.QApplication.instance().font())
self.editor.panels.refresh()
self._background_brush = QtGui.QBrush(QtGui.QColor(
self.palette().window().color()))
self._foreground_pen = QtGui.QPen(QtGui.QColor(
self.palette().windowText().color()))
def paintEvent(self, event):
# Fills the panel background using QPalette
if self.isVisible():
# fill background
self._background_brush = QtGui.QBrush(QtGui.QColor(
self.palette().window().color()))
self._foreground_pen = QtGui.QPen(QtGui.QColor(
self.palette().windowText().color()))
painter = QtGui.QPainter(self)
painter.fillRect(event.rect(), self._background_brush)
def setVisible(self, visible):
"""
Shows/Hides the panel
        Automatically calls CodeEdit.refresh_panels.
:param visible: Visible state
"""
_logger().log(5, '%s visibility changed', self.name)
super(Panel, self).setVisible(visible)
if self.editor:
self.editor.panels.refresh()
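# Hypothetical minimal subclass (not part of pyqode), shown only to illustrate
# the API above; a concrete panel is typically installed with something like
# ``editor.panels.append(SpacerPanel(), Panel.Position.BOTTOM)``.
class SpacerPanel(Panel):
    """A fixed-height panel that only paints the default background."""
    def __init__(self):
        super(SpacerPanel, self).__init__(dynamic=False)
        self.setFixedHeight(4)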
|
the-stack_0_4906 | from face_detection import Model_face_detection
from facial_landmarks_detection import Model_landmarks
from head_pose_estimation import Model_pose
from gaze_estimation import Model_gaze
from argparse import ArgumentParser
from mouse_controller import MouseController
from input_feeder import InputFeeder
import cv2
import os
import sys
import logging as log
moveto=['up','down', 'left', 'right']
def build_argparser():
parser= ArgumentParser()
parser.add_argument("--face", required=False,help= "Face detecion model path ",default='/home/adrian-estelio/Documents/vision/intel/face-detection-retail-0005/FP32-INT8/face-detection-retail-0005')
parser.add_argument("--landmarks", required=False,help= "landmarks detection model path ", default='/home/adrian-estelio/Documents/vision/intel/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009')
parser.add_argument("--head", required=False,help= "head pose estimation model path ",default='/home/adrian-estelio/Documents/vision/intel/head-pose-estimation-adas-0001/FP32/head-pose-estimation-adas-0001')
parser.add_argument("--gaze", required=False,help= "Gaze estimation model path ",default='/home/adrian-estelio/Documents/vision/intel/gaze-estimation-adas-0002/FP32/gaze-estimation-adas-0002')
parser.add_argument("--input", required=False,help="Input: image or video path or webcam (CAM) ", default='CAM')#/home/adrian-estelio/Documents/vision/Mouse_controller/resources/image.jpg')
parser.add_argument("--visual_o",required=False,help="Flag to display face: True or False", default="True")
parser.add_argument("--device",required=False,help="Device to run the inference", default="CPU")
return parser
def move(coor):
mouse= MouseController('high','fast')
if coor[0]<-0.33 and coor[1]>-0.05 and coor[1]<0.05:
log.info("Moving to %s",moveto[3])
mouse.move(1,0)
elif coor[0]>0.33 and coor[1]<0:
log.info("Moving to %s",moveto[2])
mouse.move(-1,0)
elif coor[1]>0.11 and coor[0]>-0.17:
log.info("Moving to %s",moveto[0])
mouse.move(0,1)
elif coor[0]>-0.05 and coor[1]<-0.13:
log.info("Moving to %s",moveto[1])
mouse.move(0,-1)
def infer_on_stream(args):
face_model = Model_face_detection(args.face,device=args.device)
face_model.load_model()
landmarks_model = Model_landmarks(args.landmarks,device=args.device)
landmarks_model.load_model()
head_model = Model_pose(args.head,device=args.device)
head_model.load_model()
gaze_model = Model_gaze(args.gaze,device=args.device)
gaze_model.load_model()
if args.input == 'CAM':
feeder= InputFeeder('CAM')
elif args.input.endswith('.jpg') or args.input.endswith('.bmp'):
feeder= InputFeeder('image',args.input)
else:
feeder= InputFeeder('video',args.input)
if not os.path.isfile(args.input):
log.error("Specified input file doesn't exist")
sys.exit(1)
feeder.load_data()
width = feeder.width
height = feeder.height
fps = feeder.fps
out = cv2.VideoWriter('output/out.mp4', cv2.VideoWriter_fourcc(*'avc1'), fps, (width,height),True)
feeder.open()
if not feeder.opened:
log.error("Unable to open source")
while feeder.opened:
image = feeder.next_batch()
if not feeder.opened:
break
key_pressed = cv2.waitKey(1)
frame, face = face_model.predict(image)
if len(face)>0:
_,r,l = landmarks_model.predict(face)
angles= head_model.predict(face)
vector = gaze_model.predict(r,l,angles)
move(vector)
out.write(frame)
if args.visual_o == 'True':
cv2.imshow('frame',frame)
if feeder.input_type == 'image':
cv2.imwrite('output/r.jpg',r)
cv2.imwrite('output/l.jpg',l)
cv2.imwrite('output/frame.jpg',frame)
break
if key_pressed == 27:
break
out.release()
feeder.close()
def main():
log.basicConfig(level=log.INFO)
log.info("Aplication started")
args =build_argparser().parse_args()
infer_on_stream(args)
if __name__ == '__main__':
main() |
the-stack_0_4907 | from lib import rpclib
import json
import time
import re
import sys
import pickle
import platform
import os
import subprocess
import signal
from slickrpc import Proxy
from binascii import hexlify
from binascii import unhexlify
from functools import partial
from shutil import copy
operating_system = platform.system()
if operating_system != 'Win64' and operating_system != 'Windows':
import readline
def colorize(string, color):
colors = {
'blue': '\033[94m',
'magenta': '\033[95m',
'green': '\033[92m',
        'red': '\033[91m',
        'pink': '\033[95m'  # several prompts below pass "pink"; reuse magenta's code
}
if color not in colors:
return string
else:
return colors[color] + string + '\033[0m'
def rpc_connection_tui():
    # TODO: possibly save multiple entries from successful sessions and ask the user to choose one
while True:
restore_choice = input("Do you want to use connection details from previous session? [y/n]: ")
if restore_choice == "y":
try:
with open("connection.json", "r") as file:
connection_json = json.load(file)
rpc_user = connection_json["rpc_user"]
rpc_password = connection_json["rpc_password"]
rpc_port = connection_json["rpc_port"]
rpc_connection = rpclib.rpc_connect(rpc_user, rpc_password, int(rpc_port))
except FileNotFoundError:
print(colorize("You do not have cached connection details. Please select n for connection setup", "red"))
break
elif restore_choice == "n":
rpc_user = input("Input your rpc user: ")
rpc_password = input("Input your rpc password: ")
rpc_port = input("Input your rpc port: ")
connection_details = {"rpc_user": rpc_user,
"rpc_password": rpc_password,
"rpc_port": rpc_port}
connection_json = json.dumps(connection_details)
with open("connection.json", "w+") as file:
file.write(connection_json)
rpc_connection = rpclib.rpc_connect(rpc_user, rpc_password, int(rpc_port))
break
else:
print(colorize("Please input y or n", "red"))
return rpc_connection
def def_credentials(chain):
rpcport ='';
operating_system = platform.system()
if operating_system == 'Darwin':
ac_dir = os.environ['HOME'] + '/Library/Application Support/Komodo'
elif operating_system == 'Linux':
ac_dir = os.environ['HOME'] + '/.komodo'
elif operating_system == 'Win64' or operating_system == 'Windows':
ac_dir = '%s/komodo/' % os.environ['APPDATA']
if chain == 'KMD':
coin_config_file = str(ac_dir + '/komodo.conf')
else:
coin_config_file = str(ac_dir + '/' + chain + '/' + chain + '.conf')
with open(coin_config_file, 'r') as f:
for line in f:
l = line.rstrip()
if re.search('rpcuser', l):
rpcuser = l.replace('rpcuser=', '')
elif re.search('rpcpassword', l):
rpcpassword = l.replace('rpcpassword=', '')
elif re.search('rpcport', l):
rpcport = l.replace('rpcport=', '')
if len(rpcport) == 0:
if chain == 'KMD':
rpcport = 7771
else:
print("rpcport not in conf file, exiting")
print("check "+coin_config_file)
exit(1)
    return(Proxy("http://%s:%s@127.0.0.1:%d" % (rpcuser, rpcpassword, int(rpcport))))
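# Small illustrative helper (not wired into the TUI menus): fetch the current
# block height of a chain through the Proxy returned by def_credentials().
# Assumes the coin's conf file exists, as described above.
def print_block_height(chain):
    proxy = def_credentials(chain)
    print("{} height: {}".format(chain, proxy.getblockcount()))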
def getinfo_tui(rpc_connection):
info_raw = rpclib.getinfo(rpc_connection)
if isinstance(info_raw, dict):
for key in info_raw:
print("{}: {}".format(key, info_raw[key]))
input("Press [Enter] to continue...")
else:
print("Error!\n")
print(info_raw)
input("\nPress [Enter] to continue...")
def token_create_tui(rpc_connection):
while True:
try:
name = input("Set your token name: ")
supply = input("Set your token supply: ")
description = input("Set your token description: ")
except KeyboardInterrupt:
break
else:
token_hex = rpclib.token_create(rpc_connection, name, supply, description)
if token_hex['result'] == "error":
print(colorize("\nSomething went wrong!\n", "pink"))
print(token_hex)
print("\n")
input("Press [Enter] to continue...")
break
else:
try:
token_txid = rpclib.sendrawtransaction(rpc_connection,
token_hex['hex'])
                except KeyError:
                    print(token_hex)
                    print("Error")
                    input("Press [Enter] to continue...")
                    break
                else:
print(colorize("Token creation transaction broadcasted: " + token_txid, "green"))
file = open("tokens_list", "a")
file.writelines(token_txid + "\n")
file.close()
print(colorize("Entry added to tokens_list file!\n", "green"))
input("Press [Enter] to continue...")
break
def oracle_create_tui(rpc_connection):
print(colorize("\nAvailiable data types:\n", "blue"))
oracles_data_types = ["Ihh -> height, blockhash, merkleroot\ns -> <256 char string\nS -> <65536 char string\nd -> <256 binary data\nD -> <65536 binary data",
"c -> 1 byte signed little endian number, C unsigned\nt -> 2 byte signed little endian number, T unsigned",
"i -> 4 byte signed little endian number, I unsigned\nl -> 8 byte signed little endian number, L unsigned",
"h -> 32 byte hash\n"]
for oracles_type in oracles_data_types:
print(str(oracles_type))
while True:
try:
name = input("Set your oracle name: ")
description = input("Set your oracle description: ")
oracle_data_type = input("Set your oracle type (e.g. Ihh): ")
except KeyboardInterrupt:
break
else:
oracle_hex = rpclib.oracles_create(rpc_connection, name, description, oracle_data_type)
if oracle_hex['result'] == "error":
print(colorize("\nSomething went wrong!\n", "pink"))
print(oracle_hex)
print("\n")
input("Press [Enter] to continue...")
break
else:
try:
oracle_txid = rpclib.sendrawtransaction(rpc_connection, oracle_hex['hex'])
                except KeyError:
                    print(oracle_hex)
                    print("Error")
                    input("Press [Enter] to continue...")
                    break
                else:
print(colorize("Oracle creation transaction broadcasted: " + oracle_txid, "green"))
file = open("oracles_list", "a")
file.writelines(oracle_txid + "\n")
file.close()
print(colorize("Entry added to oracles_list file!\n", "green"))
input("Press [Enter] to continue...")
break
def oracle_fund_tui(rpc_connection):
try:
print(colorize("Oracles created from this instance by TUI: \n", "blue"))
with open("oracles_list", "r") as file:
for oracle in file:
print(oracle)
print(colorize('_' * 65, "blue"))
print("\n")
except FileNotFoundError:
print("Seems like a no oracles created from this instance yet\n")
pass
while True:
try:
oracle_id = input("Input txid of oracle you want to register to: ")
except KeyboardInterrupt:
break
oracle_fund_hex = rpclib.oracles_fund(rpc_connection, oracle_id)
if oracle_fund_hex['result'] == "error":
print(colorize("\nSomething went wrong!\n", "pink"))
print(oracle_fund_hex)
print("\n")
input("Press [Enter] to continue...")
break
else:
try:
oracle_fund_txid = rpclib.sendrawtransaction(rpc_connection, oracle_fund_hex['hex'])
except KeyError:
print(oracle_fund_hex)
print("Error")
input("Press [Enter] to continue...")
break
else:
print(colorize("Oracle fund transaction broadcasted: " + oracle_fund_txid, "green"))
input("Press [Enter] to continue...")
break
def oracle_register_tui(rpc_connection):
    # TODO: idea based on blackjoker's new RPC call:
    # grab the full list and print only oracles whose owner matches the node pubkey
try:
print(colorize("Oracles created from this instance by TUI: \n", "blue"))
with open("oracles_list", "r") as file:
for oracle in file:
print(oracle)
print(colorize('_' * 65, "blue"))
print("\n")
except FileNotFoundError:
print("Seems like a no oracles created from this instance yet\n")
pass
while True:
try:
oracle_id = input("Input txid of oracle you want to register to: ")
data_fee = input("Set publisher datafee (in satoshis): ")
except KeyboardInterrupt:
break
oracle_register_hex = rpclib.oracles_register(rpc_connection, oracle_id, data_fee)
if oracle_register_hex['result'] == "error":
print(colorize("\nSomething went wrong!\n", "pink"))
print(oracle_register_hex)
print("\n")
input("Press [Enter] to continue...")
break
else:
try:
oracle_register_txid = rpclib.sendrawtransaction(rpc_connection, oracle_register_hex['hex'])
except KeyError:
print(oracle_register_hex)
print("Error")
input("Press [Enter] to continue...")
break
else:
print(colorize("Oracle registration transaction broadcasted: " + oracle_register_txid, "green"))
input("Press [Enter] to continue...")
break
def oracle_subscription_utxogen(rpc_connection):
    # TODO: idea based on blackjoker's new RPC call:
    # grab the full list and print only entries whose owner matches the node pubkey
try:
print(colorize("Oracles created from this instance by TUI: \n", "blue"))
with open("oracles_list", "r") as file:
for oracle in file:
print(oracle)
print(colorize('_' * 65, "blue"))
print("\n")
except FileNotFoundError:
print("Seems like a no oracles created from this instance yet\n")
pass
while True:
try:
oracle_id = input("Input oracle ID you want to subscribe to: ")
            # print out so the publisher id can be copy-pasted quickly
oracle_info = rpclib.oracles_info(rpc_connection, oracle_id)
publishers = 0
print(colorize("\nPublishers registered for a selected oracle: \n", "blue"))
try:
for entry in oracle_info["registered"]:
publisher = entry["publisher"]
print(publisher + "\n")
publishers = publishers + 1
print("Total publishers:{}".format(publishers))
except (KeyError, ConnectionResetError):
print(colorize("Please re-check your input. Oracle txid seems not valid.", "red"))
pass
print(colorize('_' * 65, "blue"))
print("\n")
if publishers == 0:
print(colorize("This oracle have no publishers to subscribe.\n"
"Please register as an oracle publisher first and/or wait since registration transaciton mined!", "red"))
input("Press [Enter] to continue...")
break
publisher_id = input("Input oracle publisher id you want to subscribe to: ")
data_fee = input("Input subscription fee (in COINS!): ")
utxo_num = int(input("Input how many transactions you want to broadcast: "))
except KeyboardInterrupt:
break
while utxo_num > 0:
while True:
oracle_subscription_hex = rpclib.oracles_subscribe(rpc_connection, oracle_id, publisher_id, data_fee)
oracle_subscription_txid = rpclib.sendrawtransaction(rpc_connection, oracle_subscription_hex['hex'])
mempool = rpclib.get_rawmempool(rpc_connection)
if oracle_subscription_txid in mempool:
break
else:
pass
print(colorize("Oracle subscription transaction broadcasted: " + oracle_subscription_txid, "green"))
utxo_num = utxo_num - 1
input("Press [Enter] to continue...")
break
def gateways_bind_tui(rpc_connection):
# main loop with keyboard interrupt handling
while True:
try:
while True:
try:
print(colorize("Tokens created from this instance by TUI: \n", "blue"))
with open("tokens_list", "r") as file:
for oracle in file:
print(oracle)
print(colorize('_' * 65, "blue"))
print("\n")
except FileNotFoundError:
print("Seems like a no oracles created from this instance yet\n")
pass
token_id = input("Input id of token you want to use in gw bind: ")
try:
token_name = rpclib.token_info(rpc_connection, token_id)["name"]
except KeyError:
print(colorize("Not valid tokenid. Please try again.", "red"))
input("Press [Enter] to continue...")
token_info = rpclib.token_info(rpc_connection, token_id)
print(colorize("\n{} token total supply: {}\n".format(token_id, token_info["supply"]), "blue"))
token_supply = input("Input supply for token binding: ")
try:
print(colorize("\nOracles created from this instance by TUI: \n", "blue"))
with open("oracles_list", "r") as file:
for oracle in file:
print(oracle)
print(colorize('_' * 65, "blue"))
print("\n")
except FileNotFoundError:
print("Seems like a no oracles created from this instance yet\n")
pass
oracle_id = input("Input id of oracle you want to use in gw bind: ")
try:
oracle_name = rpclib.oracles_info(rpc_connection, oracle_id)["name"]
except KeyError:
print(colorize("Not valid oracleid. Please try again.", "red"))
input("Press [Enter] to continue...")
while True:
coin_name = input("Input external coin ticker (binded oracle and token need to have same name!): ")
if token_name == oracle_name and token_name == coin_name:
break
else:
print(colorize("Token name, oracle name and external coin ticker should match!", "red"))
while True:
M = input("Input minimal amount of pubkeys needed for transaction confirmation (1 for non-multisig gw): ")
N = input("Input maximal amount of pubkeys needed for transaction confirmation (1 for non-multisig gw): ")
if (int(N) >= int(M)):
break
else:
print("Maximal amount of pubkeys should be more or equal than minimal. Please try again.")
pubkeys = []
for i in range(int(N)):
pubkeys.append(input("Input pubkey {}: ".format(i+1)))
pubtype = input("Input pubtype of external coin: ")
p2shtype = input("Input p2shtype of external coin: ")
wiftype = input("Input wiftype of external coin: ")
args = [rpc_connection, token_id, oracle_id, coin_name, token_supply, M, N]
new_args = [str(pubtype), str(p2shtype), wiftype]
args = args + pubkeys + new_args
# broadcasting block
try:
gateways_bind_hex = rpclib.gateways_bind(*args)
except Exception as e:
print(e)
input("Press [Enter] to continue...")
break
try:
gateways_bind_txid = rpclib.sendrawtransaction(rpc_connection, gateways_bind_hex["hex"])
except Exception as e:
print(e)
print(gateways_bind_hex)
input("Press [Enter] to continue...")
break
else:
print(colorize("Gateway bind transaction broadcasted: " + gateways_bind_txid, "green"))
file = open("gateways_list", "a")
file.writelines(gateways_bind_txid + "\n")
file.close()
print(colorize("Entry added to gateways_list file!\n", "green"))
input("Press [Enter] to continue...")
break
break
except KeyboardInterrupt:
break
# temporary :trollface: custom connection function solution
# to have connection to KMD daemon and cache it in separate file
def rpc_kmd_connection_tui():
while True:
restore_choice = input("Do you want to use KMD daemon connection details from previous session? [y/n]: ")
if restore_choice == "y":
try:
with open("connection_kmd.json", "r") as file:
connection_json = json.load(file)
rpc_user = connection_json["rpc_user"]
rpc_password = connection_json["rpc_password"]
rpc_port = connection_json["rpc_port"]
rpc_connection_kmd = rpclib.rpc_connect(rpc_user, rpc_password, int(rpc_port))
try:
print(rpc_connection_kmd.getinfo())
print(colorize("Successfully connected!\n", "green"))
input("Press [Enter] to continue...")
break
except Exception as e:
print(e)
print(colorize("NOT CONNECTED!\n", "red"))
input("Press [Enter] to continue...")
break
except FileNotFoundError:
print(colorize("You do not have cached KMD daemon connection details."
" Please select n for connection setup", "red"))
input("Press [Enter] to continue...")
elif restore_choice == "n":
rpc_user = input("Input your rpc user: ")
rpc_password = input("Input your rpc password: ")
rpc_port = input("Input your rpc port: ")
connection_details = {"rpc_user": rpc_user,
"rpc_password": rpc_password,
"rpc_port": rpc_port}
connection_json = json.dumps(connection_details)
with open("connection_kmd.json", "w+") as file:
file.write(connection_json)
rpc_connection_kmd = rpclib.rpc_connect(rpc_user, rpc_password, int(rpc_port))
try:
print(rpc_connection_kmd.getinfo())
print(colorize("Successfully connected!\n", "green"))
input("Press [Enter] to continue...")
break
except Exception as e:
print(e)
print(colorize("NOT CONNECTED!\n", "red"))
input("Press [Enter] to continue...")
break
else:
print(colorize("Please input y or n", "red"))
return rpc_connection_kmd
def z_sendmany_twoaddresses(rpc_connection, sendaddress, recepient1, amount1, recepient2, amount2):
str_sending_block = "[{{\"address\":\"{}\",\"amount\":{}}},{{\"address\":\"{}\",\"amount\":{}}}]".format(recepient1, amount1, recepient2, amount2)
sending_block = json.loads(str_sending_block)
operation_id = rpc_connection.z_sendmany(sendaddress,sending_block)
return operation_id
def operationstatus_to_txid(rpc_connection, zstatus):
str_sending_block = "[\"{}\"]".format(zstatus)
sending_block = json.loads(str_sending_block)
operation_json = rpc_connection.z_getoperationstatus(sending_block)
operation_dump = json.dumps(operation_json)
operation_dict = json.loads(operation_dump)[0]
txid = operation_dict['result']['txid']
return txid
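# Illustrative usage of the two helpers above (addresses and the rpc_connection
# object are placeholders, not values from this script):
#   op_id = z_sendmany_twoaddresses(rpc_connection, "RSenderAddr...", "RTokenReceiver...", 0.0001, "RGatewayDeposit...", 1.0)
#   txid = operationstatus_to_txid(rpc_connection, op_id)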
def gateways_send_kmd(rpc_connection):
# TODO: have to handle CTRL+C on text input
print(colorize("Please be carefull when input wallet addresses and amounts since all transactions doing in real KMD!", "pink"))
print("Your addresses with balances: ")
list_address_groupings = rpc_connection.listaddressgroupings()
for address in list_address_groupings:
print(str(address) + "\n")
sendaddress = input("Input address from which you transfer KMD: ")
recepient1 = input("Input address which belongs to pubkey which will receive tokens: ")
amount1 = 0.0001
recepient2 = input("Input gateway deposit address: ")
file = open("deposits_list", "a")
# TODO: show deposit addresses for gateways created by the user here
amount2 = input("Input how many KMD you want to deposit on this gateway: ")
operation = z_sendmany_twoaddresses(rpc_connection, sendaddress, recepient1, amount1, recepient2, amount2)
print("Operation proceed! " + str(operation) + " Let's wait 2 seconds to get txid")
# trying to avoid pending status of operation
time.sleep(2)
txid = operationstatus_to_txid(rpc_connection, operation)
file.writelines(txid + "\n")
file.close()
print(colorize("KMD Transaction ID: " + str(txid) + " Entry added to deposits_list file", "green"))
input("Press [Enter] to continue...")
def gateways_deposit_tui(rpc_connection_assetchain, rpc_connection_komodo):
while True:
bind_txid = input("Input your gateway bind txid: ")
coin_name = input("Input your external coin ticker (e.g. KMD): ")
coin_txid = input("Input your deposit txid: ")
dest_pub = input("Input pubkey which claim deposit: ")
amount = input("Input amount of your deposit: ")
height = rpc_connection_komodo.getrawtransaction(coin_txid, 1)["height"]
deposit_hex = rpc_connection_komodo.getrawtransaction(coin_txid, 1)["hex"]
claim_vout = "0"
proof_sending_block = "[\"{}\"]".format(coin_txid)
proof = rpc_connection_komodo.gettxoutproof(json.loads(proof_sending_block))
deposit_hex = rpclib.gateways_deposit(rpc_connection_assetchain, bind_txid, str(height), coin_name, \
coin_txid, claim_vout, deposit_hex, proof, dest_pub, amount)
print(deposit_hex)
deposit_txid = rpclib.sendrawtransaction(rpc_connection_assetchain, deposit_hex["hex"])
print("Done! Gateways deposit txid is: " + deposit_txid + " Please not forget to claim your deposit!")
input("Press [Enter] to continue...")
break
def gateways_claim_tui(rpc_connection):
while True:
bind_txid = input("Input your gateway bind txid: ")
coin_name = input("Input your external coin ticker (e.g. KMD): ")
deposit_txid = input("Input your gatewaysdeposit txid: ")
dest_pub = input("Input pubkey which claim deposit: ")
amount = input("Input amount of your deposit: ")
claim_hex = rpclib.gateways_claim(rpc_connection, bind_txid, coin_name, deposit_txid, dest_pub, amount)
try:
claim_txid = rpclib.sendrawtransaction(rpc_connection, claim_hex["hex"])
except Exception as e:
print(e)
print(claim_hex)
input("Press [Enter] to continue...")
break
else:
print("Succesfully claimed! Claim transaction id: " + claim_txid)
input("Press [Enter] to continue...")
break
def gateways_withdrawal_tui(rpc_connection):
while True:
bind_txid = input("Input your gateway bind txid: ")
coin_name = input("Input your external coin ticker (e.g. KMD): ")
withdraw_pub = input("Input pubkey to which you want to withdraw: ")
amount = input("Input amount of withdrawal: ")
withdraw_hex = rpclib.gateways_withdraw(rpc_connection, bind_txid, coin_name, withdraw_pub, amount)
withdraw_txid = rpclib.sendrawtransaction(rpc_connection, withdraw_hex["hex"])
print(withdraw_txid)
input("Press [Enter] to continue...")
break
def print_mempool(rpc_connection):
while True:
mempool = rpclib.get_rawmempool(rpc_connection)
tx_counter = 0
print(colorize("Transactions in mempool: \n", "magenta"))
for transaction in mempool:
print(transaction + "\n")
tx_counter = tx_counter + 1
print("Total: " + str(tx_counter) + " transactions\n")
print("R + Enter to refresh list. E + Enter to exit menu." + "\n")
is_refresh = input("Choose your destiny: ")
if is_refresh == "R":
print("\n")
pass
elif is_refresh == "E":
print("\n")
break
else:
print("\nPlease choose R or E\n")
def print_tokens_list(rpc_connection):
# TODO: have to print it with tokeninfo to have sense
pass
def print_tokens_balances(rpc_connection):
# TODO: checking tokenbalance for each token from tokenlist and reflect non zero ones
pass
def hexdump(filename, chunk_size=1<<15):
data = ""
#add_spaces = partial(re.compile(b'(..)').sub, br'\1 ')
#write = getattr(sys.stdout, 'buffer', sys.stdout).write
with open(filename, 'rb') as file:
for chunk in iter(partial(file.read, chunk_size), b''):
data += str(hexlify(chunk).decode())
return data
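# Note: hexdump() returns the whole file as a single hex string. The oracle
# upload helpers below prepend a hex length prefix before publishing via
# oraclesdata, e.g. (illustrative) a 3-byte payload gets the one-byte prefix
# format(3, '#04x')[2:] == "03".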
def convert_file_oracle_d(rpc_connection):
while True:
path = input("Input path to file you want to upload to oracle: ")
try:
hex_data = (hexdump(path, 1))[2:]
except Exception as e:
print(e)
print("Seems something goes wrong (I guess you've specified wrong path)!")
input("Press [Enter] to continue...")
break
else:
length = round(len(hex_data) / 2)
if length > 256:
print("Length: " + str(length) + " bytes")
print("File is too big for this app")
input("Press [Enter] to continue...")
break
else:
hex_length = format(length, '#04x')[2:]
data_for_oracle = str(hex_length) + hex_data
print("File hex representation: \n")
print(data_for_oracle + "\n")
print("Length: " + str(length) + " bytes")
print("File converted!")
new_oracle_hex = rpclib.oracles_create(rpc_connection, "tonyconvert", path, "d")
new_oracle_txid = rpclib.sendrawtransaction(rpc_connection, new_oracle_hex["hex"])
time.sleep(0.5)
oracle_register_hex = rpclib.oracles_register(rpc_connection, new_oracle_txid, "10000")
oracle_register_txid = rpclib.sendrawtransaction(rpc_connection, oracle_register_hex["hex"])
time.sleep(0.5)
oracle_subscribe_hex = rpclib.oracles_subscribe(rpc_connection, new_oracle_txid, rpclib.getinfo(rpc_connection)["pubkey"], "0.001")
oracle_subscribe_txid = rpclib.sendrawtransaction(rpc_connection, oracle_subscribe_hex["hex"])
time.sleep(0.5)
while True:
mempool = rpclib.get_rawmempool(rpc_connection)
if oracle_subscribe_txid in mempool:
print("Waiting for oracle subscribtion tx to be mined" + "\n")
time.sleep(6)
pass
else:
break
oracles_data_hex = rpclib.oracles_data(rpc_connection, new_oracle_txid, data_for_oracle)
try:
oracle_data_txid = rpclib.sendrawtransaction(rpc_connection, oracles_data_hex["hex"])
except Exception as e:
print(oracles_data_hex)
print(e)
print("Oracle created: " + str(new_oracle_txid))
print("Data published: " + str(oracle_data_txid))
input("Press [Enter] to continue...")
break
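# The 'D'-format uploader below uses a two-byte little-endian length prefix
# instead. Illustrative example: an 8000-byte chunk -> big-endian "1f40" ->
# byte-swapped to "401f" before being prepended to the chunk hex.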
def convert_file_oracle_D(rpc_connection):
while True:
path = input("Input path to file you want to upload to oracle: ")
try:
hex_data = (hexdump(path, 1))
except Exception as e:
print(e)
print("Seems something goes wrong (I guess you've specified wrong path)!")
input("Press [Enter] to continue...")
break
else:
length = round(len(hex_data) / 2)
# if length > 800000:
# print("Too big file size to upload for this version of program. Maximum size is 800KB.")
# input("Press [Enter] to continue...")
# break
if length > 8000:
# if file is more than 8000 bytes - slicing it to <= 8000 bytes chunks (16000 symbols = 8000 bytes)
data = [hex_data[i:i + 16000] for i in range(0, len(hex_data), 16000)]
chunks_amount = len(data)
# TODO: have to create oracle but subscribe this time chunks amount times to send whole file in same block
# TODO: 2 - on some point file will not fit block - have to find this point
# TODO: 3 way how I want to implement it first will keep whole file in RAM - have to implement some way to stream chunks to oracle before whole file readed
# TODO: have to "optimise" registration fee
# Maybe just check size first by something like a du ?
print("Length: " + str(length) + " bytes.\n Chunks amount: " + str(chunks_amount))
new_oracle_hex = rpclib.oracles_create(rpc_connection, "tonyconvert_" + str(chunks_amount), path, "D")
new_oracle_txid = rpclib.sendrawtransaction(rpc_connection, new_oracle_hex["hex"])
time.sleep(0.5)
oracle_register_hex = rpclib.oracles_register(rpc_connection, new_oracle_txid, "10000")
oracle_register_txid = rpclib.sendrawtransaction(rpc_connection, oracle_register_hex["hex"])
# subscribe chunks_amount + 1 times, but let's limit our broadcasting to 100 tx per block (800KB/block)
if chunks_amount > 100:
utxo_num = 101
else:
utxo_num = chunks_amount
while utxo_num > 0:
while True:
oracle_subscription_hex = rpclib.oracles_subscribe(rpc_connection, new_oracle_txid, rpclib.getinfo(rpc_connection)["pubkey"], "0.001")
oracle_subscription_txid = rpclib.sendrawtransaction(rpc_connection,
oracle_subscription_hex['hex'])
mempool = rpclib.get_rawmempool(rpc_connection)
if oracle_subscription_txid in mempool:
break
else:
pass
print(colorize("Oracle subscription transaction broadcasted: " + oracle_subscription_txid, "green"))
utxo_num = utxo_num - 1
# waiting for the last broadcasted subscription transaction to be mined to be sure the funds are on the oracle balance
while True:
mempool = rpclib.get_rawmempool(rpc_connection)
if oracle_subscription_txid in mempool:
print("Waiting for oracle subscribtion tx to be mined" + "\n")
time.sleep(6)
pass
else:
break
print("Oracle preparation is finished. Oracle txid: " + new_oracle_txid)
# can publish data now
counter = 0
for chunk in data:
hex_length_bigendian = format(round(len(chunk) / 2), '#06x')[2:]
# swap to get little endian length
a = hex_length_bigendian[2:]
b = hex_length_bigendian[:2]
hex_length = a + b
data_for_oracle = str(hex_length) + chunk
counter = counter + 1
# print("Chunk number: " + str(counter) + "\n")
# print(data_for_oracle)
try:
oracles_data_hex = rpclib.oracles_data(rpc_connection, new_oracle_txid, data_for_oracle)
except Exception as e:
print(data_for_oracle)
print(e)
input("Press [Enter] to continue...")
break
# when broadcasting, ensure the previous tx reached the mempool before blasting the next one
while True:
mempool = rpclib.get_rawmempool(rpc_connection)
oracle_data_txid = rpclib.sendrawtransaction(rpc_connection, oracles_data_hex["hex"])
#time.sleep(0.1)
if oracle_data_txid in mempool:
break
else:
pass
# blasting not more than 100 at once (so maximum capacity per block can be changed here)
# but keep in mind that registration UTXOs amount needs to be changed too !
if counter % 100 == 0 and chunks_amount > 100:
while True:
mempool = rpclib.get_rawmempool(rpc_connection)
if oracle_data_txid in mempool:
print("Waiting for previous data chunks to be mined before send new ones" + "\n")
print("Sent " + str(counter) + " chunks from " + str(chunks_amount))
time.sleep(6)
pass
else:
break
print("Last baton: " + oracle_data_txid)
input("Press [Enter] to continue...")
break
# if the file fits into a single oraclesdata tx, just broadcast it straight without any slicing
else:
hex_length_bigendian = format(length, '#06x')[2:]
# swap to get little endian length
a = hex_length_bigendian[2:]
b = hex_length_bigendian[:2]
hex_length = a + b
data_for_oracle = str(hex_length) + hex_data
print("File hex representation: \n")
print(data_for_oracle + "\n")
print("Length: " + str(length) + " bytes")
print("File converted!")
new_oracle_hex = rpclib.oracles_create(rpc_connection, "tonyconvert_" + "1", path, "D")
new_oracle_txid = rpclib.sendrawtransaction(rpc_connection, new_oracle_hex["hex"])
time.sleep(0.5)
oracle_register_hex = rpclib.oracles_register(rpc_connection, new_oracle_txid, "10000")
oracle_register_txid = rpclib.sendrawtransaction(rpc_connection, oracle_register_hex["hex"])
time.sleep(0.5)
oracle_subscribe_hex = rpclib.oracles_subscribe(rpc_connection, new_oracle_txid, rpclib.getinfo(rpc_connection)["pubkey"], "0.001")
oracle_subscribe_txid = rpclib.sendrawtransaction(rpc_connection, oracle_subscribe_hex["hex"])
time.sleep(0.5)
while True:
mempool = rpclib.get_rawmempool(rpc_connection)
if oracle_subscribe_txid in mempool:
print("Waiting for oracle subscribtion tx to be mined" + "\n")
time.sleep(6)
pass
else:
break
oracles_data_hex = rpclib.oracles_data(rpc_connection, new_oracle_txid, data_for_oracle)
try:
oracle_data_txid = rpclib.sendrawtransaction(rpc_connection, oracles_data_hex["hex"])
except Exception as e:
print(oracles_data_hex)
print(e)
input("Press [Enter] to continue...")
break
else:
print("Oracle created: " + str(new_oracle_txid))
print("Data published: " + str(oracle_data_txid))
input("Press [Enter] to continue...")
break
def get_files_list(rpc_connection):
start_time = time.time()
oracles_list = rpclib.oracles_list(rpc_connection)
files_list = []
for oracle_txid in oracles_list:
oraclesinfo_result = rpclib.oracles_info(rpc_connection, oracle_txid)
description = oraclesinfo_result['description']
name = oraclesinfo_result['name']
if name[0:12] == 'tonyconvert_':
new_file = '[' + name + ': ' + description + ']: ' + oracle_txid
files_list.append(new_file)
print("--- %s seconds ---" % (time.time() - start_time))
return files_list
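# Illustrative usage (assumes an initialised rpc_connection):
#   for entry in get_files_list(rpc_connection):
#       print(entry)   # e.g. "[tonyconvert_3: /path/to/file.bin]: <oracle_txid>"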
def display_files_list(rpc_connection):
print("Scanning oracles. Please wait...")
list_to_display = get_files_list(rpc_connection)
while True:
for file in list_to_display:
print(file + "\n")
input("Press [Enter] to continue...")
break
def files_downloader(rpc_connection):
while True:
display_files_list(rpc_connection)
print("\n")
oracle_id = input("Input oracle ID you want to download file from: ")
output_path = input("Input output path for downloaded file (name included) e.g. /home/test.txt: ")
oracle_info = rpclib.oracles_info(rpc_connection, oracle_id)
name = oracle_info['name']
latest_baton_txid = oracle_info['registered'][0]['batontxid']
if name[0:12] == 'tonyconvert_':
# downloading process here
chunks_amount = int(name[12:])
data = rpclib.oracles_samples(rpc_connection, oracle_id, latest_baton_txid, str(chunks_amount))["samples"]
for chunk in reversed(data):
with open(output_path, 'ab+') as file:
file.write(unhexlify(chunk[0]))
print("I hope that file saved to " + output_path + "\n")
input("Press [Enter] to continue...")
break
else:
print("I cant recognize file inside this oracle. I'm very sorry, boss.")
input("Press [Enter] to continue...")
break
def marmara_receive_tui(rpc_connection):
while True:
issuer_pubkey = input("Input pubkey of the person you want to receive MARMARA from: ")
issuance_sum = input("Input amount of MARMARA you want to receive: ")
blocks_valid = input("Input number of blocks until the cheque matures: ")
try:
marmara_receive_txinfo = rpc_connection.marmarareceive(issuer_pubkey, issuance_sum, "MARMARA", blocks_valid)
marmara_receive_txid = rpc_connection.sendrawtransaction(marmara_receive_txinfo["hex"])
print("Marmara receive txid broadcasted: " + marmara_receive_txid + "\n")
print(json.dumps(marmara_receive_txinfo, indent=4, sort_keys=True) + "\n")
with open("receive_txids.txt", 'a+') as file:
file.write(marmara_receive_txid + "\n")
file.write(json.dumps(marmara_receive_txinfo, indent=4, sort_keys=True) + "\n")
print("Transaction id is saved to receive_txids.txt file.")
input("Press [Enter] to continue...")
break
except Exception as e:
print(marmara_receive_txinfo)
print(e)
print("Something went wrong. Please check your input")
def marmara_issue_tui(rpc_connection):
while True:
receiver_pubkey = input("Input pubkey of the person you want to issue MARMARA to: ")
issuance_sum = input("Input amount of MARMARA you want to issue: ")
maturing_block = input("Input the block height at which the issuance matures: ")
approval_txid = input("Input receiving request transaction id: ")
try:
marmara_issue_txinfo = rpc_connection.marmaraissue(receiver_pubkey, issuance_sum, "MARMARA", maturing_block, approval_txid)
marmara_issue_txid = rpc_connection.sendrawtransaction(marmara_issue_txinfo["hex"])
print("Marmara issuance txid broadcasted: " + marmara_issue_txid + "\n")
print(json.dumps(marmara_issue_txinfo, indent=4, sort_keys=True) + "\n")
with open("issue_txids.txt", "a+") as file:
file.write(marmara_issue_txid + "\n")
file.write(json.dumps(marmara_issue_txinfo, indent=4, sort_keys=True) + "\n")
print("Transaction id is saved to issue_txids.txt file.")
input("Press [Enter] to continue...")
break
except Exception as e:
print(marmara_issue_txinfo)
print(e)
print("Something went wrong. Please check your input")
def marmara_creditloop_tui(rpc_connection):
while True:
loop_txid = input("Input transaction ID of credit loop you want to get info about: ")
try:
marmara_creditloop_info = rpc_connection.marmaracreditloop(loop_txid)
print(json.dumps(marmara_creditloop_info, indent=4, sort_keys=True) + "\n")
input("Press [Enter] to continue...")
break
except Exception as e:
print(marmara_creditloop_info)
print(e)
print("Something went wrong. Please check your input")
def marmara_settlement_tui(rpc_connection):
while True:
loop_txid = input("Input transaction ID of credit loop to make settlement: ")
try:
marmara_settlement_info = rpc_connection.marmarasettlement(loop_txid)
marmara_settlement_txid = rpc_connection.sendrawtransaction(marmara_settlement_info["hex"])
print("Loop " + loop_txid + " succesfully settled!\nSettlement txid: " + marmara_settlement_txid)
with open("settlement_txids.txt", "a+") as file:
file.write(marmara_settlement_txid + "\n")
file.write(json.dumps(marmara_settlement_info, indent=4, sort_keys=True) + "\n")
print("Transaction id is saved to settlement_txids.txt file.")
input("Press [Enter] to continue...")
break
except Exception as e:
print(marmara_settlement_info)
print(e)
print("Something went wrong. Please check your input")
input("Press [Enter] to continue...")
break
def marmara_lock_tui(rpc_connection):
while True:
amount = input("Input amount of coins you want to lock for settlement and staking: ")
unlock_height = input("Input height on which coins should be unlocked: ")
try:
marmara_lock_info = rpc_connection.marmaralock(amount, unlock_height)
marmara_lock_txid = rpc_connection.sendrawtransaction(marmara_lock_info["hex"])
with open("lock_txids.txt", "a+") as file:
file.write(marmara_lock_txid + "\n")
file.write(json.dumps(marmara_lock_info, indent=4, sort_keys=True) + "\n")
print("Transaction id is saved to lock_txids.txt file.")
input("Press [Enter] to continue...")
break
except Exception as e:
print(e)
print("Something went wrong. Please check your input")
input("Press [Enter] to continue...")
break
def marmara_info_tui(rpc_connection):
while True:
firstheight = input("Input first height (default 0): ")
if not firstheight:
firstheight = "0"
lastheight = input("Input last height (default current (0) ): ")
if not lastheight:
lastheight = "0"
minamount = input("Input min amount (default 0): ")
if not minamount:
minamount = "0"
maxamount = input("Input max amount (default 0): ")
if not maxamount:
maxamount = "0"
issuerpk = input("Optional. Input issuer public key: ")
try:
if issuerpk:
marmara_info = rpc_connection.marmarainfo(firstheight, lastheight, minamount, maxamount, "MARMARA", issuerpk)
else:
marmara_info = rpc_connection.marmarainfo(firstheight, lastheight, minamount, maxamount)
print(json.dumps(marmara_info, indent=4, sort_keys=True) + "\n")
input("Press [Enter] to continue...")
break
except Exception as e:
print(marmara_info)
print(e)
print("Something went wrong. Please check your input")
input("Press [Enter] to continue...")
break
def rogue_game_info(rpc_connection, game_txid):
game_info_arg = '"' + "[%22" + game_txid + "%22]" + '"'
game_info = rpc_connection.cclib("gameinfo", "17", game_info_arg)
return game_info
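# Note on the cclib argument format used by the rogue helpers below: the JSON
# array is passed as a quoted string with %22 standing in for the inner double
# quotes, e.g. (illustrative) '"[%22<game_txid>%22]"' for a single-txid call.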
def rogue_game_register(rpc_connection, game_txid, player_txid = False):
if player_txid:
registration_info_arg = '"' + "[%22" + game_txid + "%22,%22" + player_txid + "%22]" + '"'
else:
registration_info_arg = '"' + "[%22" + game_txid + "%22]" + '"'
registration_info = rpc_connection.cclib("register", "17", registration_info_arg)
return registration_info
def rogue_pending(rpc_connection):
rogue_pending_list = rpc_connection.cclib("pending", "17")
return rogue_pending_list
def rogue_bailout(rpc_connection, game_txid):
bailout_info_arg = '"' + "[%22" + game_txid + "%22]" + '"'
bailout_info = rpc_connection.cclib("bailout", "17", bailout_info_arg)
return bailout_info
def rogue_highlander(rpc_connection, game_txid):
highlander_info_arg = '"' + "[%22" + game_txid + "%22]" + '"'
highlander_info = rpc_connection.cclib("highlander", "17", highlander_info_arg)
return highlander_info
def rogue_players_list(rpc_connection):
rogue_players_list = rpc_connection.cclib("players", "17")
return rogue_players_list
def rogue_player_info(rpc_connection, playertxid):
player_info_arg = '"' + "[%22" + playertxid + "%22]" + '"'
player_info = rpc_connection.cclib("playerinfo", "17", player_info_arg)
return player_info
def rogue_extract(rpc_connection, game_txid, pubkey):
extract_info_arg = '"' + "[%22" + game_txid + "%22,%22" + pubkey + "%22]" + '"'
extract_info = rpc_connection.cclib("extract", "17", extract_info_arg)
return extract_info
def rogue_keystrokes(rpc_connection, game_txid, keystroke):
rogue_keystrokes_arg = '"' + "[%22" + game_txid + "%22,%22" + keystroke + "%22]" + '"'
keystroke_info = rpc_connection.cclib("keystrokes", "17", rogue_keystrokes_arg)
return keystroke_info
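# Illustrative flow combining the thin wrappers above (txids/keystrokes are
# placeholders, not real values):
#   game_txid = rpc_connection.cclib("newgame", "17", "[1]")["txid"]
#   rogue_game_register(rpc_connection, game_txid)
#   rogue_keystrokes(rpc_connection, game_txid, "<keystrokes_hex>")
#   rogue_bailout(rpc_connection, game_txid)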
def print_multiplayer_games_list(rpc_connection):
while True:
pending_list = rogue_pending(rpc_connection)
multiplayer_pending_list = []
for game in pending_list["pending"]:
if rogue_game_info(rpc_connection, game)["maxplayers"] > 1:
multiplayer_pending_list.append(game)
print("Multiplayer games availiable to join: \n")
for active_multiplayer_game in multiplayer_pending_list:
game_info = rogue_game_info(rpc_connection, active_multiplayer_game)
print(colorize("\n================================\n", "green"))
print("Game txid: " + game_info["gametxid"])
print("Game buyin: " + str(game_info["buyin"]))
print("Game height: " + str(game_info["gameheight"]))
print("Start height: " + str(game_info["start"]))
print("Alive players: " + str(game_info["alive"]))
print("Registered players: " + str(game_info["numplayers"]))
print("Max players: " + str(game_info["maxplayers"]))
print(colorize("\n***\n", "blue"))
print("Players in game:")
for player in game_info["players"]:
print("Slot: " + str(player["slot"]))
if "baton" in player.keys():
print("Baton: " + str(player["baton"]))
if "tokenid" in player.keys():
print("Tokenid: " + str(player["tokenid"]))
print("Is mine?: " + str(player["ismine"]))
print(colorize("\nR + Enter - refresh list.\nE + Enter - to the game choice.\nCTRL + C - back to main menu", "blue"))
is_refresh = input("Choose your destiny: ")
if is_refresh == "R":
print("\n")
pass
elif is_refresh == "E":
print("\n")
break
else:
print("\nPlease choose R or E\n")
def rogue_newgame_singleplayer(rpc_connection, is_game_a_rogue=True):
try:
new_game_txid = rpc_connection.cclib("newgame", "17", "[1]")["txid"]
print("New singleplayer training game succesfully created. txid: " + new_game_txid)
while True:
mempool = rpc_connection.getrawmempool()
if new_game_txid in mempool:
print(colorize("Waiting for game transaction to be mined", "blue"))
time.sleep(5)
else:
print(colorize("Game transaction is mined", "green"))
break
players_list = rogue_players_list(rpc_connection)
if len(players_list["playerdata"]) > 0:
print_players_list(rpc_connection)
while True:
is_choice_needed = input("Do you want to choose a player for this game? [y/n] ")
if is_choice_needed == "y":
player_txid = input("Please input player txid: ")
newgame_regisration_txid = rogue_game_register(rpc_connection, new_game_txid, player_txid)["txid"]
break
elif is_choice_needed == "n":
set_warriors_name(rpc_connection)
newgame_regisration_txid = rogue_game_register(rpc_connection, new_game_txid)["txid"]
break
else:
print("Please choose y or n !")
else:
print("No players available to select")
input("Press [Enter] to continue...")
newgame_regisration_txid = rogue_game_register(rpc_connection, new_game_txid)["txid"]
while True:
mempool = rpc_connection.getrawmempool()
if newgame_regisration_txid in mempool:
print(colorize("Waiting for registration transaction to be mined", "blue"))
time.sleep(5)
else:
print(colorize("Registration transaction is mined", "green"))
break
game_info = rogue_game_info(rpc_connection, new_game_txid)
start_time = time.time()
while True:
if is_game_a_rogue:
subprocess.call(["cc/rogue/rogue", str(game_info["seed"]), str(game_info["gametxid"])])
else:
subprocess.call(["cc/games/tetris", str(game_info["seed"]), str(game_info["gametxid"])])
time_elapsed = time.time() - start_time
if time_elapsed > 1:
break
else:
print("Game less than 1 second. Trying to start again")
time.sleep(1)
game_end_height = int(rpc_connection.getinfo()["blocks"])
while True:
current_height = int(rpc_connection.getinfo()["blocks"])
height_difference = current_height - game_end_height
if height_difference == 0:
print(current_height)
print(game_end_height)
print(colorize("Waiting for next block before bailout", "blue"))
time.sleep(5)
else:
break
#print("\nKeystrokes of this game:\n")
#time.sleep(0.5)
while True:
keystrokes_rpc_responses = find_game_keystrokes_in_log(new_game_txid)[1::2]
if len(keystrokes_rpc_responses) < 1:
print("No keystrokes broadcasted yet. Let's wait 5 seconds")
time.sleep(5)
else:
break
#print(keystrokes_rpc_responses)
for keystroke in keystrokes_rpc_responses:
json_keystroke = json.loads(keystroke)["result"]
if "status" in json_keystroke.keys() and json_keystroke["status"] == "error":
while True:
print("Trying to re-brodcast keystroke")
keystroke_rebroadcast = rogue_keystrokes(rpc_connection, json_keystroke["gametxid"], json_keystroke["keystrokes"])
if "txid" in keystroke_rebroadcast.keys():
print("Keystroke broadcasted! txid: " + keystroke_rebroadcast["txid"])
break
else:
print("Let's try again in 5 seconds")
time.sleep(5)
# waiting for last keystroke confirmation here
last_keystroke_json = json.loads(keystrokes_rpc_responses[-1])
while True:
while True:
try:
rpc_connection.sendrawtransaction(last_keystroke_json["result"]["hex"])
except Exception as e:
pass
try:
confirmations_amount = rpc_connection.getrawtransaction(last_keystroke_json["result"]["txid"], 1)["confirmations"]
break
except Exception as e:
print(e)
print("Let's wait a little bit more")
time.sleep(5)
pass
if confirmations_amount < 2:
print("Last keystroke not confirmed yet! Let's wait a little")
time.sleep(10)
else:
print("Last keystroke confirmed!")
break
while True:
print("\nExtraction info:\n")
extraction_info = rogue_extract(rpc_connection, new_game_txid, rpc_connection.getinfo()["pubkey"])
if extraction_info["status"] == "error":
print(colorize("Your warrior died or no any information about game was saved on blockchain", "red"))
print("If warrior was alive - try to wait a little (choose n to wait for a next block). If he is dead - you can bailout now (choose y).")
else:
print("Current game state:")
print("Game txid: " + extraction_info["gametxid"])
print("Information about game saved on chain: " + extraction_info["extracted"])
print("\n")
is_bailout_needed = input("Do you want to make bailout now [y] or wait for one more block [n]? [y/n]: ")
if is_bailout_needed == "y":
bailout_info = rogue_bailout(rpc_connection, new_game_txid)
while True:
try:
confirmations_amount = rpc_connection.getrawtransaction(bailout_info["txid"], 1)["confirmations"]
break
except Exception as e:
print(e)
print("Bailout not on blockchain yet. Let's wait a little bit more")
time.sleep(20)
pass
break
elif is_bailout_needed == "n":
game_end_height = int(rpc_connection.getinfo()["blocks"])
while True:
current_height = int(rpc_connection.getinfo()["blocks"])
height_difference = current_height - game_end_height
if height_difference == 0:
print(current_height)
print(game_end_height)
print(colorize("Waiting for next block before bailout", "blue"))
time.sleep(5)
else:
break
else:
print("Please choose y or n !")
print(bailout_info)
print("\nGame is finished!\n")
bailout_txid = bailout_info["txid"]
input("Press [Enter] to continue...")
except Exception as e:
print("Something went wrong.")
print(e)
input("Press [Enter] to continue...")
def play_multiplayer_game(rpc_connection):
# printing list of user active multiplayer games
active_games_list = rpc_connection.cclib("games", "17")["games"]
active_multiplayer_games_list = []
for game in active_games_list:
gameinfo = rogue_game_info(rpc_connection, game)
if gameinfo["maxplayers"] > 1:
active_multiplayer_games_list.append(gameinfo)
games_counter = 0
for active_multiplayer_game in active_multiplayer_games_list:
games_counter = games_counter + 1
is_ready_to_start = False
try:
active_multiplayer_game["seed"]
is_ready_to_start = True
except Exception as e:
pass
print(colorize("\n================================\n", "green"))
print("Game txid: " + active_multiplayer_game["gametxid"])
print("Game buyin: " + str(active_multiplayer_game["buyin"]))
if is_ready_to_start:
print(colorize("Ready for start!", "green"))
else:
print(colorize("Not ready for start yet, wait until start height!", "red"))
print("Game height: " + str(active_multiplayer_game["gameheight"]))
print("Start height: " + str(active_multiplayer_game["start"]))
print("Alive players: " + str(active_multiplayer_game["alive"]))
print("Registered players: " + str(active_multiplayer_game["numplayers"]))
print("Max players: " + str(active_multiplayer_game["maxplayers"]))
print(colorize("\n***\n", "blue"))
print("Players in game:")
for player in active_multiplayer_game["players"]:
print("Slot: " + str(player["slot"]))
print("Baton: " + str(player["baton"]))
print("Tokenid: " + str(player["tokenid"]))
print("Is mine?: " + str(player["ismine"]))
# asking user if he want to start any of them
while True:
start_game = input("\nDo you want to start any of your pending multiplayer games?[y/n]: ")
if start_game == "y":
new_game_txid = input("Input txid of game which you want to start: ")
game_info = rogue_game_info(rpc_connection, new_game_txid)
try:
start_time = time.time()
while True:
subprocess.call(["cc/rogue/rogue", str(game_info["seed"]), str(game_info["gametxid"])])
time_elapsed = time.time() - start_time
if time_elapsed > 1:
break
else:
print("Game less than 1 second. Trying to start again")
time.sleep(1)
except Exception as e:
print("Maybe game isn't ready for start yet or your input was not correct, sorry.")
input("Press [Enter] to continue...")
break
game_end_height = int(rpc_connection.getinfo()["blocks"])
while True:
current_height = int(rpc_connection.getinfo()["blocks"])
height_difference = current_height - game_end_height
if height_difference == 0:
print(current_height)
print(game_end_height)
print(colorize("Waiting for next block before bailout or highlander", "blue"))
time.sleep(5)
else:
break
while True:
keystrokes_rpc_responses = find_game_keystrokes_in_log(new_game_txid)[1::2]
if len(keystrokes_rpc_responses) < 1:
print("No keystrokes broadcasted yet. Let's wait 5 seconds")
time.sleep(5)
else:
break
for keystroke in keystrokes_rpc_responses:
json_keystroke = json.loads(keystroke)["result"]
if "status" in json_keystroke.keys() and json_keystroke["status"] == "error":
while True:
print("Trying to re-brodcast keystroke")
keystroke_rebroadcast = rogue_keystrokes(rpc_connection, json_keystroke["gametxid"],
json_keystroke["keystrokes"])
if "txid" in keystroke_rebroadcast.keys():
print("Keystroke broadcasted! txid: " + keystroke_rebroadcast["txid"])
break
else:
print("Let's try again in 5 seconds")
time.sleep(5)
last_keystroke_json = json.loads(keystrokes_rpc_responses[-1])
while True:
while True:
try:
confirmations_amount = rpc_connection.getrawtransaction(last_keystroke_json["result"]["txid"], 1)["confirmations"]
break
except Exception as e:
print(e)
print("Let's wait a little bit more")
rpc_connection.sendrawtransaction(last_keystroke_json["result"]["hex"])
time.sleep(5)
pass
if confirmations_amount < 2:
print("Last keystroke not confirmed yet! Let's wait a little")
time.sleep(10)
else:
print("Last keystroke confirmed!")
break
while True:
print("\nExtraction info:\n")
extraction_info = rogue_extract(rpc_connection, new_game_txid, rpc_connection.getinfo()["pubkey"])
if extraction_info["status"] == "error":
print(colorize("Your warrior died or no any information about game was saved on blockchain", "red"))
print("If warrior was alive - try to wait a little (choose n to wait for a next block). If he is dead - you can bailout now (choose y).")
else:
print("Current game state:")
print("Game txid: " + extraction_info["gametxid"])
print("Information about game saved on chain: " + extraction_info["extracted"])
print("\n")
is_bailout_needed = input(
"Do you want to make bailout now [y] or wait for one more block [n]? [y/n]: ")
if is_bailout_needed == "y":
if game_info["alive"] > 1:
bailout_info = rogue_bailout(rpc_connection, new_game_txid)
try:
bailout_txid = bailout_info["txid"]
print(bailout_info)
print("\nGame is finished!\n")
input("Press [Enter] to continue...")
break
except Exception:
highlander_info = rogue_highlander(rpc_connection, new_game_txid)
highlander_info = highlander_info["txid"]
print(highlander_info)
print("\nGame is finished!\n")
input("Press [Enter] to continue...")
break
else:
highlander_info = rogue_highlander(rpc_connection, new_game_txid)
if 'error' in highlander_info.keys() and highlander_info["error"] == 'numplayers != maxplayers':
bailout_info = rogue_bailout(rpc_connection, new_game_txid)
print(bailout_info)
print("\nGame is finished!\n")
input("Press [Enter] to continue...")
break
else:
print(highlander_info)
print("\nGame is finished!\n")
input("Press [Enter] to continue...")
break
elif is_bailout_needed == "n":
game_end_height = int(rpc_connection.getinfo()["blocks"])
while True:
current_height = int(rpc_connection.getinfo()["blocks"])
height_difference = current_height - game_end_height
if height_difference == 0:
print(current_height)
print(game_end_height)
print(colorize("Waiting for next block before bailout", "blue"))
time.sleep(5)
else:
break
break
break
if start_game == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
def rogue_newgame_multiplayer(rpc_connection):
while True:
max_players = input("Input game max. players (>1): ")
if int(max_players) > 1:
break
else:
print("Please re-check your input")
input("Press [Enter] to continue...")
while True:
buyin = input("Input game buyin (>0.001): ")
if float(buyin) > 0.001:
break
else:
print("Please re-check your input")
input("Press [Enter] to continue...")
try:
new_game_txid = rpc_connection.cclib("newgame", "17", '"[' + max_players + "," + buyin + ']"')["txid"]
print(colorize("New multiplayer game succesfully created. txid: " + new_game_txid, "green"))
input("Press [Enter] to continue...")
except Exception as e:
print("Something went wrong.")
print(e)
input("Press [Enter] to continue...")
def rogue_join_multiplayer_game(rpc_connection):
while True:
try:
print_multiplayer_games_list(rpc_connection)
# TODO: optional player data txid (print players you have and ask if you want to choose one)
game_txid = input("Input txid of game you want to join: ")
try:
while True:
print_players_list(rpc_connection)
is_choice_needed = input("Do you want to choose a player for this game? [y/n] ")
if is_choice_needed == "y":
player_txid = input("Please input player txid: ")
newgame_regisration_txid = rogue_game_register(rpc_connection, game_txid, player_txid)["txid"]
break
elif is_choice_needed == "n":
set_warriors_name(rpc_connection)
newgame_regisration_txid = rogue_game_register(rpc_connection, game_txid)["txid"]
break
else:
print("Please choose y or n !")
except Exception as e:
print("Something went wrong. Maybe you're trying to register on game twice or don't have enough funds to pay buyin.")
print(e)
input("Press [Enter] to continue...")
break
print(colorize("Succesfully registered.", "green"))
while True:
mempool = rpc_connection.getrawmempool()
if newgame_regisration_txid in mempool:
print(colorize("Waiting for registration transaction to be mined", "blue"))
time.sleep(5)
else:
print(colorize("Registration transaction is mined", "green"))
break
print(newgame_regisration_txid)
input("Press [Enter] to continue...")
break
except KeyboardInterrupt:
break
def print_players_list(rpc_connection):
players_list = rogue_players_list(rpc_connection)
print(colorize("\nYou own " + str(players_list["numplayerdata"]) + " warriors\n", "blue"))
warrior_counter = 0
for player in players_list["playerdata"]:
warrior_counter = warrior_counter + 1
player_data = rogue_player_info(rpc_connection, player)["player"]
print(colorize("\n================================\n","green"))
print("Warrior " + str(warrior_counter))
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + str(player_data["experience"]))
print("Dungeon Level: " + str(player_data["dungeonlevel"]))
print("Chain: " + player_data["chain"])
print(colorize("\nInventory:\n","blue"))
for item in player_data["pack"]:
print(item)
print("\nTotal packsize: " + str(player_data["packsize"]) + "\n")
input("Press [Enter] to continue...")
def sell_warrior(rpc_connection):
print(colorize("Your brave warriors: \n", "blue"))
print_players_list(rpc_connection)
print("\n")
while True:
need_sell = input("Do you want to place order to sell any? [y/n]: ")
if need_sell == "y":
playertxid = input("Input playertxid of warrior you want to sell: ")
price = input("Input price (in ROGUE coins) you want to sell warrior for: ")
try:
tokenid = rogue_player_info(rpc_connection, playertxid)["player"]["tokenid"]
except Exception as e:
print(e)
print("Something went wrong. Be careful with input next time.")
input("Press [Enter] to continue...")
break
token_ask_raw = rpc_connection.tokenask("1", tokenid, price)
try:
token_ask_txid = rpc_connection.sendrawtransaction(token_ask_raw["hex"])
except Exception as e:
print(e)
print(token_ask_raw)
print("Something went wrong. Be careful with input next time.")
input("Press [Enter] to continue...")
break
print(colorize("Ask succesfully placed. Ask txid is: " + token_ask_txid, "green"))
input("Press [Enter] to continue...")
break
if need_sell == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
#TODO: have to combine into single scanner with different cases
def is_warrior_alive(rpc_connection, warrior_txid):
warrior_alive = False
raw_transaction = rpc_connection.getrawtransaction(warrior_txid, 1)
for vout in raw_transaction["vout"]:
if vout["value"] == 0.00000001 and rpc_connection.gettxout(raw_transaction["txid"], vout["n"]):
warrior_alive = True
return warrior_alive
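# The check above treats a still-unspent 0.00000001 output of the player
# transaction as the "alive" marker: once that marker vout is spent the warrior
# is considered dead, which is how the scanners below filter out dead players.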
def warriors_scanner(rpc_connection):
start_time = time.time()
token_list = rpc_connection.tokenlist()
my_warriors_list = rogue_players_list(rpc_connection)
warriors_list = {}
for token in token_list:
player_info = rogue_player_info(rpc_connection, token)
if "status" in player_info and player_info["status"] == "error":
pass
elif player_info["player"]["playertxid"] in my_warriors_list["playerdata"]:
pass
elif not is_warrior_alive(rpc_connection, player_info["player"]["playertxid"]):
pass
else:
warriors_list[token] = player_info["player"]
print("--- %s seconds ---" % (time.time() - start_time))
return warriors_list
def warriors_scanner_for_rating(rpc_connection):
print("It can take some time")
token_list = rpc_connection.tokenlist()
my_warriors_list = rogue_players_list(rpc_connection)
actual_playerids = []
warriors_list = {}
for token in token_list:
player_info = rogue_player_info(rpc_connection, token)
if "status" in player_info and player_info["status"] == "error":
pass
else:
while True:
if "batontxid" in player_info["player"].keys():
player_info = rogue_player_info(rpc_connection, player_info["player"]["batontxid"])
else:
actual_playerids.append(player_info["player"]["playertxid"])
break
for player_id in actual_playerids:
player_info = rogue_player_info(rpc_connection, player_id)
if not is_warrior_alive(rpc_connection, player_info["player"]["playertxid"]):
pass
else:
warriors_list[player_id] = player_info["player"]
return warriors_list
def warriors_scanner_for_dex(rpc_connection):
start_time = time.time()
token_list = rpc_connection.tokenlist()
my_warriors_list = rogue_players_list(rpc_connection)
warriors_list = {}
for token in token_list:
player_info = rogue_player_info(rpc_connection, token)
if "status" in player_info and player_info["status"] == "error":
pass
elif player_info["player"]["tokenid"] in my_warriors_list["playerdata"]:
pass
else:
warriors_list[token] = player_info["player"]
print("--- %s seconds ---" % (time.time() - start_time))
return warriors_list
def print_warrior_list(rpc_connection):
players_list = warriors_scanner(rpc_connection)
print(colorize("All warriors on ROGUE chain: \n", "blue"))
warrior_counter = 0
for player in players_list:
warrior_counter = warrior_counter + 1
player_data = rogue_player_info(rpc_connection, player)["player"]
print(colorize("\n================================\n","green"))
print("Warrior " + str(warrior_counter))
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + str(player_data["experience"]))
print("Dungeon Level: " + str(player_data["dungeonlevel"]))
print("Chain: " + player_data["chain"])
print(colorize("\nInventory:\n","blue"))
for item in player_data["pack"]:
print(item)
print("\nTotal packsize: " + str(player_data["packsize"]) + "\n")
input("Press [Enter] to continue...")
def place_bid_on_warriror(rpc_connection):
warriors_list = print_warrior_list(rpc_connection)
# TODO: have to drop my warriors or at least print my warriors ids
while True:
need_buy = input("Do you want to place order to buy some warrior? [y/n]: ")
if need_buy == "y":
playertxid = input("Input playertxid of warrior you want to place bid for: ")
price = input("Input price (in ROGUE coins) you want to buy warrior for: ")
tokenid = rogue_player_info(rpc_connection, playertxid)["player"]["tokenid"]
token_bid_raw = rpc_connection.tokenbid("1", tokenid, price)
try:
token_bid_txid = rpc_connection.sendrawtransaction(token_bid_raw["hex"])
except Exception as e:
print(e)
print(token_bid_raw)
print("Something went wrong. Be careful with input next time.")
input("Press [Enter] to continue...")
break
print(colorize("Bid succesfully placed. Bid txid is: " + token_bid_txid, "green"))
input("Press [Enter] to continue...")
break
if need_buy == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
def check_incoming_bids(rpc_connection):
# TODO: have to scan for warriors which are in asks as well
players_list = rogue_players_list(rpc_connection)
incoming_orders = []
for player in players_list["playerdata"]:
token_id = rogue_player_info(rpc_connection, player)["player"]["tokenid"]
orders = rpc_connection.tokenorders(token_id)
if len(orders) > 0:
for order in orders:
if order["funcid"] == "b":
incoming_orders.append(order)
return incoming_orders
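# In tokenorders output funcid "b" marks bid orders and "s" marks ask orders;
# the bid/ask helpers below rely on that convention when splitting orders.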
def print_icoming_bids(rpc_connection):
incoming_bids = check_incoming_bids(rpc_connection)
for bid in incoming_bids:
print("Recieved bid for warrior " + bid["tokenid"])
player_data = rogue_player_info(rpc_connection, bid["tokenid"])["player"]
print(colorize("\n================================\n", "green"))
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + str(player_data["experience"]))
print("Dungeon Level: " + str(player_data["dungeonlevel"]))
print("Chain: " + player_data["chain"])
print(colorize("\nInventory:\n", "blue"))
for item in player_data["pack"]:
print(item)
print("\nTotal packsize: " + str(player_data["packsize"]) + "\n")
print(colorize("\n================================\n", "blue"))
print("Order info: \n")
print("Bid txid: " + bid["txid"])
print("Price: " + str(bid["price"]) + "\n")
if len(incoming_bids) == 0:
print(colorize("There is no any incoming orders!", "blue"))
input("Press [Enter] to continue...")
else:
while True:
want_to_sell = input("Do you want to fill any incoming bid? [y/n]: ")
if want_to_sell == "y":
bid_txid = input("Input bid txid you want to fill: ")
for bid in incoming_bids:
if bid_txid == bid["txid"]:
tokenid = bid["tokenid"]
fill_sum = bid["totalrequired"]
fillbid_hex = rpc_connection.tokenfillbid(tokenid, bid_txid, str(fill_sum))
try:
fillbid_txid = rpc_connection.sendrawtransaction(fillbid_hex["hex"])
except Exception as e:
print(e)
print(fillbid_hex)
print("Something went wrong. Be careful with input next time.")
input("Press [Enter] to continue...")
break
print(colorize("Warrior succesfully sold. Txid is: " + fillbid_txid, "green"))
input("Press [Enter] to continue...")
break
if want_to_sell == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
def find_warriors_asks(rpc_connection):
warriors_list = warriors_scanner_for_dex(rpc_connection)
warriors_asks = []
for player in warriors_list:
orders = rpc_connection.tokenorders(player)
if len(orders) > 0:
for order in orders:
if order["funcid"] == "s":
warriors_asks.append(order)
for ask in warriors_asks:
print(colorize("\n================================\n", "green"))
print("Warrior selling on marketplace: " + ask["tokenid"])
player_data = rogue_player_info(rpc_connection, ask["tokenid"])["player"]
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + str(player_data["experience"]))
print("Dungeon Level: " + str(player_data["dungeonlevel"]))
print("Chain: " + player_data["chain"])
print(colorize("\nInventory:\n", "blue"))
for item in player_data["pack"]:
print(item)
print("\nTotal packsize: " + str(player_data["packsize"]) + "\n")
print(colorize("Order info: \n", "red"))
print("Ask txid: " + ask["txid"])
print("Price: " + str(ask["price"]) + "\n")
while True:
want_to_buy = input("Do you want to buy any warrior? [y/n]: ")
if want_to_buy == "y":
ask_txid = input("Input asktxid which you want to fill: ")
for ask in warriors_asks:
if ask_txid == ask["txid"]:
tokenid = ask["tokenid"]
try:
fillask_raw = rpc_connection.tokenfillask(tokenid, ask_txid, "1")
except Exception as e:
print("Something went wrong. Be careful with input next time.")
input("Press [Enter] to continue...")
break
try:
fillask_txid = rpc_connection.sendrawtransaction(fillask_raw["hex"])
except Exception as e:
print(e)
print(fillask_raw)
print("Something went wrong. Be careful with input next time.")
input("Press [Enter] to continue...")
break
print(colorize("Warrior succesfully bought. Txid is: " + fillask_txid, "green"))
input("Press [Enter] to continue...")
break
if want_to_buy == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
def warriors_orders_check(rpc_connection):
my_orders_list = rpc_connection.mytokenorders("17")
warriors_orders = {}
for order in my_orders_list:
player_info = rogue_player_info(rpc_connection, order["tokenid"])
if "status" in player_info and player_info["status"] == "error":
pass
else:
warriors_orders[order["tokenid"]] = order
bids_list = []
asks_list = []
for order in warriors_orders:
if warriors_orders[order]["funcid"] == "s":
asks_list.append(warriors_orders[order])
else:
bids_list.append(order)
print(colorize("\nYour asks:\n", "blue"))
print(colorize("\n********************************\n", "red"))
for ask in asks_list:
print("txid: " + ask["txid"])
print("Price: " + ask["price"])
print("Warrior tokenid: " + ask["tokenid"])
print(colorize("\n================================\n", "green"))
print("Warrior selling on marketplace: " + ask["tokenid"])
player_data = rogue_player_info(rpc_connection, ask["tokenid"])["player"]
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + str(player_data["experience"]))
print("Dungeon Level: " + str(player_data["dungeonlevel"]))
print("Chain: " + player_data["chain"])
print(colorize("\nInventory:\n", "blue"))
for item in player_data["pack"]:
print(item)
print("\nTotal packsize: " + str(player_data["packsize"]) + "\n")
print(colorize("\n================================\n", "green"))
print(colorize("\nYour bids:\n", "blue"))
print(colorize("\n********************************\n", "red"))
for bid in bids_list:
print("txid: " + bid["txid"])
print("Price: " + bid["price"])
print("Warrior tokenid: " + bid["tokenid"])
print(colorize("\n================================\n", "green"))
print("Warrior selling on marketplace: " + bid["tokenid"])
player_data = rogue_player_info(rpc_connection, bid["tokenid"])["player"]
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + str(player_data["experience"]))
print("Dungeon Level: " + str(player_data["dungeonlevel"]))
print("Chain: " + player_data["chain"])
print(colorize("\nInventory:\n", "blue"))
for item in player_data["pack"]:
print(item)
print("\nTotal packsize: " + str(player_data["packsize"]) + "\n")
print(colorize("\n================================\n", "green"))
while True:
need_order_change = input("Do you want to cancel any of your orders? [y/n]: ")
if need_order_change == "y":
while True:
ask_or_bid = input("Do you want cancel ask or bid? [a/b]: ")
if ask_or_bid == "a":
ask_txid = input("Input txid of ask you want to cancel: ")
warrior_tokenid = input("Input warrior token id for this ask: ")
try:
ask_cancellation_hex = rpc_connection.tokencancelask(warrior_tokenid, ask_txid)
ask_cancellation_txid = rpc_connection.sendrawtransaction(ask_cancellation_hex["hex"])
except Exception as e:
print(colorize("Please re-check your input!", "red"))
else:
print(colorize("Ask successfully cancelled. Cancellation txid: " + ask_cancellation_txid, "green"))
break
if ask_or_bid == "b":
bid_txid = input("Input txid of bid you want to cancel: ")
warrior_tokenid = input("Input warrior token id for this bid: ")
try:
bid_cancellation_hex = rpc_connection.tokencancelbid(warrior_tokenid, bid_txid)
bid_cancellation_txid = rpc_connection.sendrawtransaction(bid_cancellation_hex["hex"])
except Exception as e:
print(colorize("Please re-check your input!", "red"))
else:
print(colorize("Bid successfully cancelled. Cancellation txid: " + bid_cancellation_txid, "green"))
break
else:
print(colorize("Choose a or b!", "red"))
input("Press [Enter] to continue...")
break
if need_order_change == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
def set_warriors_name(rpc_connection):
warriors_name = input("What warrior name do you want for legends and tales about your brave adventures?: ")
warrior_name_arg = '"' + "[%22" + warriors_name + "%22]" + '"'
set_name_status = rpc_connection.cclib("setname", "17", warrior_name_arg)
print(colorize("Warrior name succesfully set", "green"))
print("Result: " + set_name_status["result"])
print("Name: " + set_name_status["pname"])
input("Press [Enter] to continue...")
def top_warriors_rating(rpc_connection):
start_time = time.time()
warriors_list = warriors_scanner_for_rating(rpc_connection)
warriors_exp = {}
for warrior in warriors_list:
warriors_exp[warrior] = warriors_list[warrior]["experience"]
warriors_exp_sorted = {}
temp = [(k, warriors_exp[k]) for k in sorted(warriors_exp, key=warriors_exp.get, reverse=True)]
for k,v in temp:
warriors_exp_sorted[k] = v
counter = 0
for experienced_warrior in warriors_exp_sorted:
if counter < 20:
counter = counter + 1
print("\n" + str(counter) + " place.")
print(colorize("\n================================\n", "blue"))
player_data = rogue_player_info(rpc_connection, experienced_warrior)["player"]
print("Name: " + player_data["pname"] + "\n")
print("Player txid: " + player_data["playertxid"])
print("Token txid: " + player_data["tokenid"])
print("Hitpoints: " + str(player_data["hitpoints"]))
print("Strength: " + str(player_data["strength"]))
print("Level: " + str(player_data["level"]))
print("Experience: " + str(player_data["experience"]))
print("Dungeon Level: " + str(player_data["dungeonlevel"]))
print("Chain: " + player_data["chain"])
print("--- %s seconds ---" % (time.time() - start_time))
input("Press [Enter] to continue...")
def exit():
sys.exit()
def warrior_trasnfer(rpc_connection):
print(colorize("Your brave warriors: \n", "blue"))
print_players_list(rpc_connection)
print("\n")
while True:
need_transfer = input("Do you want to transfer any warrior? [y/n]: ")
if need_transfer == "y":
warrior_tokenid = input("Input warrior tokenid: ")
recipient_pubkey = input("Input recipient pubkey: ")
try:
token_transfer_hex = rpc_connection.tokentransfer(warrior_tokenid, recipient_pubkey, "1")
token_transfer_txid = rpc_connection.sendrawtransaction(token_transfer_hex["hex"])
except Exception as e:
print(e)
print("Something went wrong. Please be careful with your input next time!")
input("Press [Enter] to continue...")
break
print(colorize("Warrior succesfully transferred! Transfer txid: " + token_transfer_txid, "green"))
input("Press [Enter] to continue...")
break
if need_transfer == "n":
print("As you wish!")
input("Press [Enter] to continue...")
break
else:
print(colorize("Choose y or n!", "red"))
def check_if_config_is_here(rpc_connection, assetchain_name):
config_name = assetchain_name + ".conf"
if os.path.exists(config_name):
print(colorize("Config is already in daemon folder", "green"))
else:
if operating_system == 'Darwin':
path_to_config = os.environ['HOME'] + '/Library/Application Support/Komodo/' + assetchain_name + '/' + config_name
elif operating_system == 'Linux':
path_to_config = os.environ['HOME'] + '/.komodo/' + assetchain_name + '/' + config_name
elif operating_system == 'Win64' or operating_system == 'Windows':
path_to_config = '%s/komodo/%s/%s' % (os.environ['APPDATA'], assetchain_name, config_name)
try:
copy(path_to_config, os.getcwd())
except Exception as e:
print(e)
print("Can't copy config to current daemon directory automatically by some reason.")
print("Please copy it manually. It's locating here: " + path_to_config)
def find_game_keystrokes_in_log(gametxid):
operating_system = platform.system()
if operating_system == 'Win64' or operating_system == 'Windows':
p1 = subprocess.Popen(["type", "keystrokes.log"], stdout=subprocess.PIPE, shell=True)
p2 = subprocess.Popen(["findstr", gametxid], stdin=p1.stdout, stdout=subprocess.PIPE, shell=True)
else:
p1 = subprocess.Popen(["cat", "keystrokes.log"], stdout=subprocess.PIPE)
p2 = subprocess.Popen(["grep", gametxid], stdin=p1.stdout, stdout=subprocess.PIPE)
p1.stdout.close()
output = p2.communicate()[0]
keystrokes_log_for_game = bytes.decode(output).split("\n")
return keystrokes_log_for_game
def check_if_tx_in_mempool(rpc_connection, txid):
while True:
mempool = rpc_connection.getrawmempool()
if txid in mempool:
print(colorize("Waiting for " + txid + " transaction to be mined", "blue"))
time.sleep(5)
else:
print(colorize("Transaction is mined", "green"))
break
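# Hedged usage sketch (not part of the original script): it shows how the helper
# above is typically chained after broadcasting a transaction. `rpc_connection`
# is assumed to be the authenticated Komodo RPC proxy used throughout this tool,
# and `signed_tx_hex` is a placeholder.
#
#   txid = rpc_connection.sendrawtransaction(signed_tx_hex)
#   check_if_tx_in_mempool(rpc_connection, txid)   # blocks until the tx is mined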
|
the-stack_0_4910 | """Define a RainMachine controller class."""
# pylint: disable=too-few-public-methods,too-many-instance-attributes
from datetime import datetime, timedelta
from typing import Awaitable, Callable, Optional
from regenmaschine.api import API
from regenmaschine.diagnostics import Diagnostics
from regenmaschine.parser import Parser
from regenmaschine.program import Program
from regenmaschine.provision import Provision
from regenmaschine.restriction import Restriction
from regenmaschine.stats import Stats
from regenmaschine.watering import Watering
from regenmaschine.zone import Zone
URL_BASE_LOCAL: str = "https://{0}:{1}/api/4"
URL_BASE_REMOTE: str = "https://api.rainmachine.com/{0}/api/4"
class Controller: # pylint: disable=too-many-instance-attributes
"""Define the controller."""
def __init__(self, request: Callable[..., Awaitable[dict]]) -> None:
"""Initialize."""
self._access_token: Optional[str] = None
self._access_token_expiration: Optional[datetime] = None
self._client_request: Callable[..., Awaitable[dict]] = request
self._host: Optional[str] = None
self._ssl: bool = True
self.api_version: Optional[str] = None
self.hardware_version: Optional[int] = None
self.mac: Optional[str] = None
self.name: Optional[str] = None
self.software_version: Optional[str] = None
# API endpoints:
self.api: API = API(self._request)
self.diagnostics: Diagnostics = Diagnostics(self._request)
self.parsers: Parser = Parser(self._request)
self.programs: Program = Program(self._request)
self.provisioning: Provision = Provision(self._request)
self.restrictions: Restriction = Restriction(self._request)
self.stats: Stats = Stats(self._request)
self.watering: Watering = Watering(self._request)
self.zones: Zone = Zone(self._request)
async def _request(
self,
method: str,
endpoint: str,
*,
headers: Optional[dict] = None,
params: Optional[dict] = None,
json: Optional[dict] = None,
ssl: bool = True,
) -> dict:
"""Wrap the generic request method to add access token, etc."""
return await self._client_request(
method,
f"{self._host}/{endpoint}",
access_token=self._access_token,
access_token_expiration=self._access_token_expiration,
headers=headers,
params=params,
json=json,
ssl=ssl,
)
class LocalController(Controller):
"""Define a controller accessed over the LAN."""
def __init__( # pylint: disable=too-many-arguments
self, request: Callable[..., Awaitable[dict]], host: str, port: int, ssl: bool
) -> None:
"""Initialize."""
super().__init__(request)
self._host: str = URL_BASE_LOCAL.format(host, port)
self._ssl: bool = ssl
async def login(self, password):
"""Authenticate against the device (locally)."""
auth_resp: dict = await self._client_request(
"post", f"{self._host}/auth/login", json={"pwd": password, "remember": 1}
)
self._access_token: str = auth_resp["access_token"]
self._access_token_expiration: datetime = datetime.now() + timedelta(
seconds=int(auth_resp["expires_in"]) - 10
)
class RemoteController(Controller):
"""Define a controller accessed over RainMachine's cloud."""
async def login(
self, stage_1_access_token: str, sprinkler_id: str, password: str
) -> None:
"""Authenticate against the device (remotely)."""
auth_resp: dict = await self._client_request(
"post",
"https://my.rainmachine.com/devices/login-sprinkler",
access_token=stage_1_access_token,
json={"sprinklerId": sprinkler_id, "pwd": password},
)
self._access_token: str = auth_resp["access_token"]
self._host: str = URL_BASE_REMOTE.format(sprinkler_id)
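# --- Hedged usage sketch (not part of this module) ---------------------------
# LocalController only needs a `request` callable returning an awaitable dict.
# The aiohttp-based stub below is an assumption for illustration, as are the
# host, port and password; the real client request helper lives elsewhere in
# the regenmaschine package.
#
#   import asyncio
#   import aiohttp
#
#   async def demo_request(method, url, **kwargs):
#       async with aiohttp.ClientSession() as session:
#           async with session.request(
#               method, url,
#               headers=kwargs.get("headers"),
#               params=kwargs.get("params"),
#               json=kwargs.get("json"),
#               ssl=(None if kwargs.get("ssl", True) else False),
#           ) as resp:
#               return await resp.json()
#
#   async def demo():
#       controller = LocalController(demo_request, "192.168.1.42", 8080, ssl=True)
#       await controller.login("hunter2")  # placeholder password
#
#   asyncio.run(demo())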
|
the-stack_0_4911 | import subprocess
import threading
import platform
import socket
import os
from electrum import constants
from electrum.plugin import BasePlugin, hook
from electrum.i18n import _
from electrum.util import UserFacingException
from electrum.logging import get_logger
from electrum.network import Network
_logger = get_logger('plugins.bwt')
plugin_dir = os.path.dirname(__file__)
bwt_bin = os.path.join(plugin_dir, 'bwt')
if platform.system() == 'Windows':
bwt_bin = '%s.exe' % bwt_bin
class BwtPlugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.proc = None
self.wallets = set()
self.enabled = config.get('bwt_enabled')
self.bitcoind_url = config.get('bwt_bitcoind_url', default_bitcoind_url())
self.bitcoind_dir = config.get('bwt_bitcoind_dir', default_bitcoind_dir())
self.bitcoind_wallet = config.get('bwt_bitcoind_wallet')
self.bitcoind_cred = config.get('bwt_bitcoind_cred')
self.rescan_since = config.get('bwt_rescan_since', 'all')
self.custom_opt = config.get('bwt_custom_opt')
self.socket_path = config.get('bwt_socket_path', default_socket_path())
self.verbose = config.get('bwt_verbose', 0)
if config.get('bwt_was_oneserver') is None:
config.set_key('bwt_was_oneserver', config.get('oneserver'))
self.start()
def start(self):
if not self.enabled or not self.wallets:
return
self.rpc_port = free_port()
args = [
'--network', get_network_name(),
'--bitcoind-url', self.bitcoind_url,
'--bitcoind-dir', self.bitcoind_dir,
'--electrum-rpc-addr', '127.0.0.1:%d' % self.rpc_port,
]
if self.bitcoind_cred:
args.extend([ '--bitcoind-cred', self.bitcoind_cred ])
if self.bitcoind_wallet:
args.extend([ '--bitcoind-wallet', self.bitcoind_wallet ])
if self.socket_path:
args.extend([ '--unix-listener-path', self.socket_path ])
for wallet in self.wallets:
for xpub in wallet.get_master_public_keys():
args.extend([ '--xpub', '%s:%s' % (xpub, self.rescan_since) ])
for i in range(self.verbose):
args.append('-v')
if self.custom_opt:
# XXX this doesn't support arguments with spaces. thankfully bwt doesn't currently have any.
args.extend(self.custom_opt.split(' '))
self.stop()
_logger.info('Starting bwt daemon')
_logger.debug('bwt options: %s' % ' '.join(args))
if platform.system() == 'Windows':
# hide the console window. can be done with subprocess.CREATE_NO_WINDOW in python 3.7.
suinfo = subprocess.STARTUPINFO()
suinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else: suinfo = None
self.proc = subprocess.Popen([ bwt_bin ] + args, startupinfo=suinfo, \
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.DEVNULL)
self.thread = threading.Thread(target=proc_logger, args=(self.proc, self.handle_log), daemon=True)
self.thread.start()
def stop(self):
if self.proc:
_logger.info('Stopping bwt daemon')
self.proc.terminate()
self.proc = None
self.thread = None
def set_server(self):
network = Network.get_instance()
net_params = network.get_parameters()._replace(
host='127.0.0.1',
port=self.rpc_port,
protocol='t',
oneserver=True,
)
network.run_from_another_thread(network.set_parameters(net_params))
@hook
def load_wallet(self, wallet, main_window):
if wallet.get_master_public_keys():
num_wallets = len(self.wallets)
self.wallets |= {wallet}
if len(self.wallets) != num_wallets:
self.start()
else:
_logger.warning('%s wallets are unsupported, skipping' % wallet.wallet_type)
@hook
def close_wallet(self, wallet):
self.wallets -= {wallet}
if not self.wallets:
self.stop()
def close(self):
BasePlugin.close(self)
self.stop()
# restore the user's previous oneserver setting when the plugin is disabled
was_oneserver = self.config.get('bwt_was_oneserver')
if was_oneserver is not None:
self.config.set_key('oneserver', was_oneserver)
self.config.set_key('bwt_was_oneserver', None)
def handle_log(self, level, pkg, msg):
if msg.startswith('Electrum RPC server running'):
self.set_server()
def proc_logger(proc, log_handler):
for line in iter(proc.stdout.readline, b''):
line = line.decode('utf-8').strip()
_logger.debug(line)
if '::' in line and '>' in line:
level, _, line = line.partition(' ')
pkg, _, msg = line.partition('>')
log_handler(level, pkg.strip(), msg.strip())
elif line.lower().startswith('error: '):
log_handler('ERROR', 'bwt', line[7:])
else:
log_handler('INFO', 'bwt', line)
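# Illustrative only: a bwt log line such as
#   "INFO bwt::electrum > Electrum RPC server running on 127.0.0.1:50001"
# is split by proc_logger into level="INFO", pkg="bwt::electrum" and
# msg="Electrum RPC server running on 127.0.0.1:50001", which is the message
# prefix BwtPlugin.handle_log above keys on. The exact address is an
# assumption; only the "level pkg > msg" shape matters to the parser.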
def get_network_name():
if constants.net == constants.BitcoinMainnet:
return 'bitcoin'
elif constants.net == constants.BitcoinTestnet:
return 'testnet'
elif constants.net == constants.BitcoinRegtest:
return 'regtest'
raise UserFacingException(_('Unsupported network {}').format(constants.net))
def default_bitcoind_url():
return 'http://localhost:%d/' % \
{ 'bitcoin': 8332, 'testnet': 18332, 'regtest': 18443 }[get_network_name()]
def default_bitcoind_dir():
if platform.system() == 'Windows':
return os.path.expandvars('%APPDATA%\\Bitcoin')
else:
return os.path.expandvars('$HOME/.bitcoin')
def default_socket_path():
if platform.system() == 'Linux' and os.access(plugin_dir, os.W_OK | os.X_OK):
return os.path.join(plugin_dir, 'bwt-socket')
def free_port():
with socket.socket() as s:
s.bind(('',0))
return s.getsockname()[1]
|
the-stack_0_4912 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper class around TF estimator to perform training."""
import gin
import tensorflow.compat.v1 as tf
from polish.utils import tf_utils
@gin.configurable
class PpoTrainer(object):
"""Wrapper class for PPO TF estimator training.
This class mainly receives any compatible `input_fn` and `model_fn` functions
for a TF estimator and launches estimator training. `input_fn` is a function
that feeds dictionary of arrays into the model. `model_fn` is a function that
defines the network architecture and training operation.
"""
def __init__(self,
input_fn,
model_fn,
num_iterations=156160,
iterations_per_loop=320,
checkpoint_dir=gin.REQUIRED,
keep_checkpoint_max=20,
use_tpu=False):
"""Creates a PPO training class.
Args:
input_fn: The function to feed in input data during training.
model_fn: The model to train on.
num_iterations: The number of iterations to run the training for.
iterations_per_loop: Number of steps to run on TPU before outfeeding
metrics to the CPU. If the number of iterations in the loop would exceed
the number of train steps, the loop will exit before reaching
--iterations_per_loop. The larger this value is, the higher the
utilization on the TPU.
checkpoint_dir: The directory to save checkpoints to.
keep_checkpoint_max: The maximum number of checkpoints to keep.
use_tpu: If True, use TPU for model training.
"""
self._input_fn = input_fn
self._model_fn = model_fn
self._num_iterations = num_iterations
self._iterations_per_loop = iterations_per_loop
self._checkpoint_dir = checkpoint_dir
self._keep_checkpoint_max = keep_checkpoint_max
self._use_tpu = use_tpu
def get_estimator(self):
"""Obtain estimator for the working directory.
Returns:
an (TPU/non-TPU) estimator.
"""
if self._use_tpu:
return tf_utils.get_tpu_estimator(self._checkpoint_dir, self._model_fn)
run_config = tf.estimator.RunConfig(
save_summary_steps=self._iterations_per_loop,
save_checkpoints_steps=self._iterations_per_loop,
keep_checkpoint_max=self._keep_checkpoint_max)
return tf.estimator.Estimator(
self._model_fn, model_dir=self._checkpoint_dir, config=run_config)
def train(self):
"""A wrapper to launch training on the estimator."""
estimator = self.get_estimator()
hooks = [self._input_fn]
estimator.train(
input_fn=self._input_fn, hooks=hooks, max_steps=self._num_iterations)
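# Hedged configuration sketch (the .gin file name and the input_fn/model_fn
# callables are assumptions, not part of this module). Because the class is
# gin.configurable, its constructor defaults can be bound from a config file:
#
#   # ppo.gin
#   PpoTrainer.checkpoint_dir = '/tmp/ppo_checkpoints'
#   PpoTrainer.num_iterations = 10000
#   PpoTrainer.use_tpu = False
#
# and then driven from Python:
#
#   gin.parse_config_file('ppo.gin')
#   trainer = PpoTrainer(input_fn=my_input_fn, model_fn=my_model_fn)
#   trainer.train()
#
# Note that train() also passes input_fn in the hooks list, so the callable
# supplied here is expected to double as a tf.estimator.SessionRunHook.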
|
the-stack_0_4914 |
import streamlit as st
import urllib3
import numpy as np
from PIL import Image
import cv2
import requests
import socket
#================================
# Message Headers
#=================================
COMMAND_START=bytes("<command>",'utf-8')
COMMAND_END=bytes("</command>","utf-8")
IMAGE_START=bytes("<image>","utf-8")
IMAGE_END=bytes("</image>","utf-8")
#================================
# Web App Init Elements
#=================================
st.title("ONVIF CCTV Connect")
st.write("(C) Faizansoft International 2000-2021")
st.write("\r\n")
st.write("Note: This demo will only work with ONVIF compatible IP cameras that have the live-jpeg API.")
st.write("The reason for live jpeg being chosen over rtsp/rtmp is due to reliability on low resourced cameras.")
#=================================
# Set up Yolo V4
#=================================
class YoloV4Model:
def __init__(self,yolocfg,yoloweights,coconames):
self.CONFIDENCE_THRESHOLD = 0.2
self.NMS_THRESHOLD=0.4
#Set up neural network and configure Backend and Target
dnn_net=cv2.dnn.readNetFromDarknet(yolocfg, yoloweights)
dnn_net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
dnn_net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)
#Set up the DNN Model
dnn_model=cv2.dnn_DetectionModel(dnn_net)
dnn_model.setInputParams(size=(416, 416), scale=1/255, swapRB=True)
self._dnn_model=dnn_model
#Setup the coco.names list
COCO_NAMES_LIST=[]
with open("coco.names","r") as coco_names:
COCO_NAMES_LIST=coco_names.readlines()
self._COCO_NAMES_LIST=COCO_NAMES_LIST
def DetectObjects_retFrameDetList(self,frame):
_classes,_scores,_boxes=self._dnn_model.detect(frame,self.CONFIDENCE_THRESHOLD,self.NMS_THRESHOLD)
#Text List for detections
DET_LIST=[]
for (_class,_score,_box) in zip(_classes,_scores,_boxes):
_class=_class.tolist()
_score=_score.tolist()
_box=_box.tolist()
cv2.rectangle(frame,_box, (0,255,0), 2)
cv2.putText(frame, self._COCO_NAMES_LIST[_class[0]], (_box[0], _box[1] - 10), cv2.FONT_HERSHEY_SIMPLEX,5,(0,255,255), 2)
DET_LIST.extend("Detected {} @ {},{}.".format(self._COCO_NAMES_LIST[_class[0]],_box[0],_box[1]))
return (frame,DET_LIST)
_yoloV4=YoloV4Model("yolov4.cfg","yolov4.weights","coco.names")
#================================
# Getting Camera Variables
#=================================
# Here we try to get the variables for the user's camera.
# This includes ip_addr,uname and password
with st.form(key="ip_cctv_connect"):
st.write("Please enter the credentials for your ONVIF Capable IP Camera.")
ip_address=st.text_input("Enter your camera's IP Address:")
username=st.text_input("Enter your Camera's Username:")
password=st.text_input("Enter your camera's password:")
command=st.text_input("Enter the image processing command: ")
cmd_connect=st.form_submit_button(label="Connect!")
#=====================================
# Disconnect Button
#==========================================
cmd_disconnect=st.button("Disconnect!")
#===============================
# URLLIB 3 HTTP OBject
#===============================
http=urllib3.PoolManager()
#===============================
# Streamlit Placeholders
#===============================
#Create the Place Holders
img_ph_1=st.image([])
img_ph_2=st.image([])
def grab_frame_cctv():
#http://admin:[email protected]/tmpfs/auto.jpg
_url="http://{0}:{1}@{2}/tmpfs/auto.jpg".format(username,password,ip_address)
img=Image.open(requests.get(_url,stream=True).raw)
cvFrame=np.array(img)
return cvFrame
if cmd_connect:
while True:
frame=grab_frame_cctv()
img_ph_1.image(frame)
img_ph_2.image(_yoloV4.DetectObjects_retFrameDetList(frame)[0])
if cmd_disconnect:
break
|
the-stack_0_4916 | #!/usr/bin/python3
#
# Copyright (c) Siemens AG, 2020
# [email protected]
#
# SPDX-License-Identifier: MIT
#
#
# NOTE this was tested on Python 3.6.9
# NOTE subprocess seems to return empty stdout when ASan reports an error
import sys
import os
import os.path
import signal
import subprocess
import uuid
from pprint import pprint as pp
import util
import results
import config
def check():
"""Check security issues according to config and pass results to next tools."""
overall_report = dict()
# source code analysis
# ====================
# currently empty
# compile
# =======
ret_makefile = subprocess.run([config.compiler] + config.compiler_args, # command
stdout=subprocess.PIPE, # capture stdout
stderr=subprocess.PIPE, # capture stderr
universal_newlines=True) # use text mode for std* file objects
overall_report['makefile'] = ret_makefile
# runtime analysis
# ================
with open('compile.txt', 'r') as f:
if 'error' not in f.read().lower(): # if compilation succeeded
overall_report, test_case_report_list = runtime_analysis(config, overall_report)
# pass this info to next tools for subsequent processing
# ======================================================
pp(overall_report)
# results from runtime analysis
if 'runtime_analysis_done' in overall_report:
success_count = 0
for report in test_case_report_list:
if 'timeout' in report:
util.addFinding("Time limit exceeded!", 0, "", "TEST_080006")
elif report['return_code'] != 0:
if report['stderr_stream'] != '': # ASan/LeakSan/Stack protector probably reported something
pass # but these findings will be added by analyze.py
else:
util.addFinding("It seems your program might have crashed.", 0,"","TEST_100006")
# output_match == None means the user might have tried to print to outfile
elif report['stdout_stream'] != '' or report['output_match'] is None:
util.addFinding("A test case failed! Make sure you are not trying to print something.",
0,"","TEST_100006")
elif not all(report['output_match']): # not all test cases passed
util.addFinding("A test case failed!", 0, "", "TEST_100006")
else:
success_count += 1
with open('stderr.txt', 'a') as f:
f.write(report['stderr_stream'])
with open('stdout.txt', 'a') as f:
f.write(report['outfile'])
if success_count == len(test_case_report_list):
util.addFinding("Program behaves as expected!", 1, "CHALLENGE_PASS", "TEST_900006")
util.dumpFindings()
# next tools
subprocess.run(["./analyse.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
subprocess.run(["./ai.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def runtime_analysis(config, overall_report):
"""Run test suites on executable and return a list containing the result of each test suite.
Each list item is a dictionary describing the result of running that test suite.
"""
test_case_report_list = []
for test_suite in config.get_test_suite():
report = dict()
report['stdout_stream'] = ''
report['stderr_stream'] = ''
report['outfile'] = ''
input_for_stdin = config.get_test_suite_input_for_stdin(test_suite)
# using Popen instead of run because I need access to the pid
# See comment under "except subprocess.TimeoutExpired:"
infile = "xinfile_" + uuid.uuid4().hex[0:16] + ".txt"
outfile = "xoutfile_" + uuid.uuid4().hex[0:16] + ".txt"
p = subprocess.Popen(['./run_jail.sh',
config.output_filename,
str(len(test_suite)), infile, outfile], # command
stdout=subprocess.PIPE, # capture stdout
stderr=subprocess.PIPE, # capture stderr
stdin=subprocess.PIPE, # capture stdin
universal_newlines=True, # use text mode for std* file objects
start_new_session=True, # otherwise killing the process group will also kill the Python interpreter
)
try:
# send test suite input
with open(infile, "w") as f:
f.write(input_for_stdin)
(stdout_stream, stderr_stream) = p.communicate(timeout=config.timeout)
report['return_code'] = p.returncode
report['stderr_stream'] += stderr_stream
report['stdout_stream'] += stdout_stream
with open(outfile, "r") as f:
current_outfile = f.read()
report['outfile'] += current_outfile
# check if test cases passed
ret_output_match = config.check_for_output_match(current_outfile, test_suite)
report['test_suite'] = test_suite
report['output_match'] = ret_output_match
except subprocess.TimeoutExpired:
# kill the process group so that all child processes spawned by the process are also killed
# The child need to be killed because, in addition to wasting CPU cycles,
# it can hold stdout and then Python will wait indefinitely even if the timeout is expired
os.killpg(os.getpgid(p.pid), signal.SIGKILL)
report['timeout'] = True
finally:
test_case_report_list.append(report)
overall_report['runtime_analysis_done'] = True
return overall_report, test_case_report_list
if __name__ == '__main__':
try:
check() # run checker
except Exception as e:
print("EXCEPTION IN CHECKER: " + str(e))
util.dumpFindings()
|
the-stack_0_4918 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.conf import settings
from django.test import TestCase, override_settings
from unittest import skip
from zerver.lib.avatar import avatar_url
from zerver.lib.bugdown import url_filename
from zerver.lib.test_helpers import AuthedTestCase
from zerver.lib.test_runner import slow
from zerver.lib.upload import sanitize_name, S3UploadBackend, \
upload_message_image, delete_message_image, LocalUploadBackend
import zerver.lib.upload
from zerver.models import Attachment, Recipient, get_user_profile_by_email, \
get_old_unclaimed_attachments, Message, UserProfile
from zerver.lib.actions import do_delete_old_unclaimed_attachments
import ujson
from six.moves import urllib
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from six.moves import StringIO
import os
import shutil
import re
import datetime
import requests
import base64
from datetime import timedelta
from django.utils import timezone
from moto import mock_s3
TEST_AVATAR_DIR = os.path.join(os.path.dirname(__file__), 'images')
def destroy_uploads():
# type: () -> None
if os.path.exists(settings.LOCAL_UPLOADS_DIR):
shutil.rmtree(settings.LOCAL_UPLOADS_DIR)
class FileUploadTest(AuthedTestCase):
def test_rest_endpoint(self):
# type: () -> None
"""
Tests the /api/v1/user_uploads api endpoint. Here a single file is uploaded
and downloaded using a username and api_key
"""
fp = StringIO("zulip!")
fp.name = "zulip.txt"
# Upload file via API
auth_headers = self.api_auth('[email protected]')
result = self.client.post('/api/v1/user_uploads', {'file': fp}, **auth_headers)
json = ujson.loads(result.content)
self.assertIn("uri", json)
uri = json["uri"]
base = '/user_uploads/'
self.assertEquals(base, uri[:len(base)])
# Download file via API
self.client.post('/accounts/logout/')
response = self.client.get(uri, **auth_headers)
data = b"".join(response.streaming_content)
self.assertEquals(b"zulip!", data)
# Files uploaded through the API should be accessible via the web client
self.login("[email protected]")
response = self.client.get(uri)
data = b"".join(response.streaming_content)
self.assertEquals(b"zulip!", data)
def test_multiple_upload_failure(self):
# type: () -> None
"""
Attempting to upload two files should fail.
"""
self.login("[email protected]")
fp = StringIO("bah!")
fp.name = "a.txt"
fp2 = StringIO("pshaw!")
fp2.name = "b.txt"
result = self.client.post("/json/upload_file", {'f1': fp, 'f2': fp2})
self.assert_json_error(result, "You may only upload one file at a time")
def test_no_file_upload_failure(self):
# type: () -> None
"""
Calling this endpoint with no files should fail.
"""
self.login("[email protected]")
result = self.client.post("/json/upload_file")
self.assert_json_error(result, "You must specify a file to upload")
# This test will go through the code path for uploading files onto LOCAL storage
# when zulip is in DEVELOPMENT mode.
def test_file_upload_authed(self):
# type: () -> None
"""
A call to /json/upload_file should return a uri and actually create an
entry in the database. This entry will be marked unclaimed until a message
refers to it.
"""
self.login("[email protected]")
fp = StringIO("zulip!")
fp.name = "zulip.txt"
result = self.client.post("/json/upload_file", {'file': fp})
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("uri", json)
uri = json["uri"]
base = '/user_uploads/'
self.assertEquals(base, uri[:len(base)])
# In the future, local file requests will follow the same style as S3
# requests; they will be first authenticated and redirected
response = self.client.get(uri)
data = b"".join(response.streaming_content)
self.assertEquals(b"zulip!", data)
# check if DB has attachment marked as unclaimed
entry = Attachment.objects.get(file_name='zulip.txt')
self.assertEquals(entry.is_claimed(), False)
self.subscribe_to_stream("[email protected]", "Denmark")
body = "First message ...[zulip.txt](http://localhost:9991" + uri + ")"
self.send_message("[email protected]", "Denmark", Recipient.STREAM, body, "test")
self.assertIn('title="zulip.txt"', self.get_last_message().rendered_content)
def test_delete_old_unclaimed_attachments(self):
# type: () -> None
# Upload some files and make them older than a week
self.login("[email protected]")
d1 = StringIO("zulip!")
d1.name = "dummy_1.txt"
result = self.client.post("/json/upload_file", {'file': d1})
json = ujson.loads(result.content)
uri = json["uri"]
d1_path_id = re.sub('/user_uploads/', '', uri)
d2 = StringIO("zulip!")
d2.name = "dummy_2.txt"
result = self.client.post("/json/upload_file", {'file': d2})
json = ujson.loads(result.content)
uri = json["uri"]
d2_path_id = re.sub('/user_uploads/', '', uri)
two_week_ago = timezone.now() - datetime.timedelta(weeks=2)
d1_attachment = Attachment.objects.get(path_id = d1_path_id)
d1_attachment.create_time = two_week_ago
d1_attachment.save()
d2_attachment = Attachment.objects.get(path_id = d2_path_id)
d2_attachment.create_time = two_week_ago
d2_attachment.save()
# Send a message referring only to dummy_1
self.subscribe_to_stream("[email protected]", "Denmark")
body = "Some files here ...[zulip.txt](http://localhost:9991/user_uploads/" + d1_path_id + ")"
self.send_message("[email protected]", "Denmark", Recipient.STREAM, body, "test")
# dummy_2 should not exist in database or the uploads folder
do_delete_old_unclaimed_attachments(2)
self.assertTrue(not Attachment.objects.filter(path_id = d2_path_id).exists())
self.assertTrue(not delete_message_image(d2_path_id))
def test_multiple_claim_attachments(self):
# type: () -> None
"""
This test tries to claim the same attachment twice. The messages field in
the Attachment model should have both the messages in its entry.
"""
self.login("[email protected]")
d1 = StringIO("zulip!")
d1.name = "dummy_1.txt"
result = self.client.post("/json/upload_file", {'file': d1})
json = ujson.loads(result.content)
uri = json["uri"]
d1_path_id = re.sub('/user_uploads/', '', uri)
self.subscribe_to_stream("[email protected]", "Denmark")
body = "First message ...[zulip.txt](http://localhost:9991/user_uploads/" + d1_path_id + ")"
self.send_message("[email protected]", "Denmark", Recipient.STREAM, body, "test")
body = "Second message ...[zulip.txt](http://localhost:9991/user_uploads/" + d1_path_id + ")"
self.send_message("[email protected]", "Denmark", Recipient.STREAM, body, "test")
self.assertEquals(Attachment.objects.get(path_id=d1_path_id).messages.count(), 2)
def test_check_attachment_reference_update(self):
f1 = StringIO("file1")
f1.name = "file1.txt"
f2 = StringIO("file2")
f2.name = "file2.txt"
f3 = StringIO("file3")
f3.name = "file3.txt"
self.login("[email protected]")
result = self.client.post("/json/upload_file", {'file': f1})
json = ujson.loads(result.content)
uri = json["uri"]
f1_path_id = re.sub('/user_uploads/', '', uri)
result = self.client.post("/json/upload_file", {'file': f2})
json = ujson.loads(result.content)
uri = json["uri"]
f2_path_id = re.sub('/user_uploads/', '', uri)
self.subscribe_to_stream("[email protected]", "test")
body = ("[f1.txt](http://localhost:9991/user_uploads/" + f1_path_id + ")"
"[f2.txt](http://localhost:9991/user_uploads/" + f2_path_id + ")")
msg_id = self.send_message("[email protected]", "test", Recipient.STREAM, body, "test")
result = self.client.post("/json/upload_file", {'file': f3})
json = ujson.loads(result.content)
uri = json["uri"]
f3_path_id = re.sub('/user_uploads/', '', uri)
new_body = ("[f3.txt](http://localhost:9991/user_uploads/" + f3_path_id + ")"
"[f2.txt](http://localhost:9991/user_uploads/" + f2_path_id + ")")
result = self.client.post("/json/update_message", {
'message_id': msg_id,
'content': new_body
})
self.assert_json_success(result)
message = Message.objects.get(id=msg_id)
f1_attachment = Attachment.objects.get(path_id=f1_path_id)
f2_attachment = Attachment.objects.get(path_id=f2_path_id)
f3_attachment = Attachment.objects.get(path_id=f3_path_id)
self.assertTrue(message not in f1_attachment.messages.all())
self.assertTrue(message in f2_attachment.messages.all())
self.assertTrue(message in f3_attachment.messages.all())
def tearDown(self):
# type: () -> None
destroy_uploads()
class AvatarTest(AuthedTestCase):
def test_multiple_upload_failure(self):
# type: () -> None
"""
Attempting to upload two files should fail.
"""
self.login("[email protected]")
fp1 = open(os.path.join(TEST_AVATAR_DIR, 'img.png'), 'rb')
fp2 = open(os.path.join(TEST_AVATAR_DIR, 'img.png'), 'rb')
result = self.client.post("/json/set_avatar", {'f1': fp1, 'f2': fp2})
self.assert_json_error(result, "You must upload exactly one avatar.")
def test_no_file_upload_failure(self):
# type: () -> None
"""
Calling this endpoint with no files should fail.
"""
self.login("[email protected]")
result = self.client.post("/json/set_avatar")
self.assert_json_error(result, "You must upload exactly one avatar.")
correct_files = [
('img.png', 'png_resized.png'),
('img.gif', 'gif_resized.png'),
('img.tif', 'tif_resized.png')
]
corrupt_files = ['text.txt', 'corrupt.png', 'corrupt.gif']
def test_get_gravatar_avatar(self):
# type: () -> None
self.login("[email protected]")
cordelia = get_user_profile_by_email('[email protected]')
cordelia.avatar_source = UserProfile.AVATAR_FROM_GRAVATAR
cordelia.save()
with self.settings(ENABLE_GRAVATAR=True):
response = self.client.get("/avatar/[email protected]?foo=bar")
redirect_url = response['Location']
self.assertEqual(redirect_url, avatar_url(cordelia) + '&foo=bar')
with self.settings(ENABLE_GRAVATAR=False):
response = self.client.get("/avatar/[email protected]?foo=bar")
redirect_url = response['Location']
self.assertTrue(redirect_url.endswith(avatar_url(cordelia) + '&foo=bar'))
def test_get_user_avatar(self):
# type: () -> None
self.login("[email protected]")
cordelia = get_user_profile_by_email('[email protected]')
cordelia.avatar_source = UserProfile.AVATAR_FROM_USER
cordelia.save()
response = self.client.get("/avatar/[email protected]?foo=bar")
redirect_url = response['Location']
self.assertTrue(redirect_url.endswith(avatar_url(cordelia) + '&foo=bar'))
def test_get_system_generated_avatar(self):
# type: () -> None
self.login("[email protected]")
cordelia = get_user_profile_by_email('[email protected]')
cordelia.avatar_source = UserProfile.AVATAR_FROM_SYSTEM
cordelia.save()
response = self.client.get("/avatar/[email protected]?foo=bar")
redirect_url = response['Location']
self.assertTrue(redirect_url.endswith(avatar_url(cordelia) + '&foo=bar'))
def test_non_valid_user_avatar(self):
# type: () -> None
# It's debatable whether we should generate avatars for non-users,
# but this test just validates the current code's behavior.
self.login("[email protected]")
response = self.client.get("/avatar/[email protected]?foo=bar")
redirect_url = response['Location']
actual_url = 'https://secure.gravatar.com/avatar/444258b521f152129eb0c162996e572d?d=identicon&foo=bar'
self.assertEqual(redirect_url, actual_url)
def test_valid_avatars(self):
# type: () -> None
"""
A call to /json/set_avatar with a valid file should return a url and actually create an avatar.
"""
for fname, rfname in self.correct_files:
# TODO: use self.subTest once we're exclusively on python 3 by uncommenting the line below.
# with self.subTest(fname=fname):
self.login("[email protected]")
fp = open(os.path.join(TEST_AVATAR_DIR, fname), 'rb')
result = self.client.post("/json/set_avatar", {'file': fp})
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("avatar_url", json)
url = json["avatar_url"]
base = '/user_avatars/'
self.assertEquals(base, url[:len(base)])
rfp = open(os.path.join(TEST_AVATAR_DIR, rfname), 'rb')
response = self.client.get(url)
data = b"".join(response.streaming_content)
self.assertEquals(rfp.read(), data)
def test_invalid_avatars(self):
# type: () -> None
"""
A call to /json/set_avatar with an invalid file should fail.
"""
for fname in self.corrupt_files:
# with self.subTest(fname=fname):
self.login("[email protected]")
fp = open(os.path.join(TEST_AVATAR_DIR, fname), 'rb')
result = self.client.post("/json/set_avatar", {'file': fp})
self.assert_json_error(result, "Could not decode avatar image; did you upload an image file?")
def tearDown(self):
# type: () -> None
destroy_uploads()
class LocalStorageTest(AuthedTestCase):
def test_file_upload_local(self):
# type: () -> None
sender_email = "[email protected]"
user_profile = get_user_profile_by_email(sender_email)
uri = upload_message_image(u'dummy.txt', u'text/plain', b'zulip!', user_profile)
base = '/user_uploads/'
self.assertEquals(base, uri[:len(base)])
path_id = re.sub('/user_uploads/', '', uri)
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, 'files', path_id)
self.assertTrue(os.path.isfile(file_path))
def test_delete_message_image_local(self):
# type: () -> None
self.login("[email protected]")
fp = StringIO("zulip!")
fp.name = "zulip.txt"
result = self.client.post("/json/upload_file", {'file': fp})
json = ujson.loads(result.content)
uri = json["uri"]
path_id = re.sub('/user_uploads/', '', uri)
self.assertTrue(delete_message_image(path_id))
def tearDown(self):
# type: () -> None
destroy_uploads()
def use_s3_backend(method):
@mock_s3
@override_settings(LOCAL_UPLOADS_DIR=None)
def new_method(*args, **kwargs):
zerver.lib.upload.upload_backend = S3UploadBackend()
try:
return method(*args, **kwargs)
finally:
zerver.lib.upload.upload_backend = LocalUploadBackend()
return new_method
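# In short: tests wrapped with @use_s3_backend run against a moto-mocked S3
# bucket, temporarily swap the module-level upload backend to S3UploadBackend,
# and always restore LocalUploadBackend afterwards so later tests are unaffected.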
class S3Test(AuthedTestCase):
@use_s3_backend
def test_file_upload_s3(self):
# type: () -> None
conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
bucket = conn.create_bucket(settings.S3_AUTH_UPLOADS_BUCKET)
sender_email = "[email protected]"
user_profile = get_user_profile_by_email(sender_email)
uri = upload_message_image(u'dummy.txt', u'text/plain', b'zulip!', user_profile)
base = '/user_uploads/'
self.assertEquals(base, uri[:len(base)])
path_id = re.sub('/user_uploads/', '', uri)
self.assertEquals(b"zulip!", bucket.get_key(path_id).get_contents_as_string())
self.subscribe_to_stream("[email protected]", "Denmark")
body = "First message ...[zulip.txt](http://localhost:9991" + uri + ")"
self.send_message("[email protected]", "Denmark", Recipient.STREAM, body, "test")
self.assertIn('title="dummy.txt"', self.get_last_message().rendered_content)
@use_s3_backend
def test_message_image_delete_s3(self):
# type: () -> None
conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
conn.create_bucket(settings.S3_AUTH_UPLOADS_BUCKET)
sender_email = "[email protected]"
user_profile = get_user_profile_by_email(sender_email)
uri = upload_message_image(u'dummy.txt', u'text/plain', b'zulip!', user_profile)
path_id = re.sub('/user_uploads/', '', uri)
self.assertTrue(delete_message_image(path_id))
@use_s3_backend
def test_file_upload_authed(self):
# type: () -> None
"""
A call to /json/upload_file should return a uri and actually create an object.
"""
conn = S3Connection(settings.S3_KEY, settings.S3_SECRET_KEY)
conn.create_bucket(settings.S3_AUTH_UPLOADS_BUCKET)
self.login("[email protected]")
fp = StringIO("zulip!")
fp.name = "zulip.txt"
result = self.client.post("/json/upload_file", {'file': fp})
self.assert_json_success(result)
json = ujson.loads(result.content)
self.assertIn("uri", json)
uri = json["uri"]
base = '/user_uploads/'
self.assertEquals(base, uri[:len(base)])
response = self.client.get(uri)
redirect_url = response['Location']
self.assertEquals(b"zulip!", urllib.request.urlopen(redirect_url).read().strip())
self.subscribe_to_stream("[email protected]", "Denmark")
body = "First message ...[zulip.txt](http://localhost:9991" + uri + ")"
self.send_message("[email protected]", "Denmark", Recipient.STREAM, body, "test")
self.assertIn('title="zulip.txt"', self.get_last_message().rendered_content)
class UploadTitleTests(TestCase):
def test_upload_titles(self):
# type: () -> None
self.assertEqual(url_filename("http://localhost:9991/user_uploads/1/LUeQZUG5jxkagzVzp1Ox_amr/dummy.txt"), "dummy.txt")
self.assertEqual(url_filename("http://localhost:9991/user_uploads/1/94/SzGYe0RFT-tEcOhQ6n-ZblFZ/zulip.txt"), "zulip.txt")
self.assertEqual(url_filename("https://zulip.com/user_uploads/4142/LUeQZUG5jxkagzVzp1Ox_amr/pasted_image.png"), "pasted_image.png")
self.assertEqual(url_filename("https://zulip.com/integrations"), "https://zulip.com/integrations")
self.assertEqual(url_filename("https://example.com"), "https://example.com")
class SanitizeNameTests(TestCase):
def test_file_name(self):
# type: () -> None
self.assertEquals(sanitize_name(u'test.txt'), u'test.txt')
self.assertEquals(sanitize_name(u'.hidden'), u'.hidden')
self.assertEquals(sanitize_name(u'.hidden.txt'), u'.hidden.txt')
self.assertEquals(sanitize_name(u'tarball.tar.gz'), u'tarball.tar.gz')
self.assertEquals(sanitize_name(u'.hidden_tarball.tar.gz'), u'.hidden_tarball.tar.gz')
self.assertEquals(sanitize_name(u'Testing{}*&*#().ta&&%$##&&r.gz'), u'Testing.tar.gz')
self.assertEquals(sanitize_name(u'*testingfile?*.txt'), u'testingfile.txt')
self.assertEquals(sanitize_name(u'snowman☃.txt'), u'snowman.txt')
self.assertEquals(sanitize_name(u'테스트.txt'), u'테스트.txt')
self.assertEquals(sanitize_name(u'~/."\`\?*"u0`000ssh/test.t**{}ar.gz'), u'.u0000sshtest.tar.gz')
|
the-stack_0_4920 | import numpy as np
import cv2
import math
import argparse
import time
def calculate_area(contours):
""" Calculate contour area
Parameters:
contours: List[numpy.ndarray]
Returns:
List[numpy.ndarray]: contours_area
"""
contours_area = []
# calculate area and filter into new array
for con in contours:
area = cv2.contourArea(con)
if 10000 < area < 60000:
contours_area.append(con)
return contours_area
def check_circularity(con):
""" Check circularity of contours and
calculate center coords and radius of resulting circle
Parameters:
con: numpy.ndarray
Returns:
float: circularity
int: cX
int: cY
int: r
"""
perimeter = cv2.arcLength(con, True)
area = cv2.contourArea(con)
if perimeter == 0:
return 0, 0, 0, 0
circularity = 4*math.pi*(area/(perimeter*perimeter))
M = cv2.moments(con)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
r = int(math.sqrt(area/(math.pi)))
return circularity, cX, cY, r
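# Sanity check on the formula above: for an ideal circle of radius r,
# area = pi*r**2 and perimeter = 2*pi*r, so 4*pi*area/perimeter**2 == 1.0.
# The 0.7-1.1 circularity band used in detect_contour below therefore accepts
# shapes close to a circle while tolerating contour noise.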
def detect_contour(gray, img):
""" Perform Gaussian Blur to smoothen image, binary threshold image to extract features
Detects contours on filtered image
Paramters:
gray: numpy.ndarray
img: numpy.ndarray
Returns:
img: numpy.ndarray
"""
filter_img = cv2.GaussianBlur(gray, (11, 11), 11)
#filter_img = cv2.bilateralFilter(img, 7, 50, 50)
_, thresh = cv2.threshold(filter_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
_, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours_area = calculate_area(contours)
contours_circles = []
# check if contour is of circular shape
for con in contours_area:
circularity, cX, cY, r = check_circularity(con)
if 0.7 < circularity < 1.1:
contours_circles.append(con)
cv2.circle(img, (cX, cY), 3, (0,255,0), 3)
else:
cv2.circle(img, (cX, cY), r, (0,255,0), 3)
cv2.circle(img, (cX, cY), 3, (0,255,0), 3)
cv2.drawContours(img, contours_circles, -1, (0, 255, 0), 3)
return img, filter_img, thresh
def extract_roi(img):
""" Extract region of interest in frame to perform image processing pipelines
Parameters:
img: numpy.ndarray
Returns:
numpy.ndarray: eye_ROI
"""
polygons = np.array([(300, 900), (300, 200), (1050, 200), (1050, 900)])
mask = np.zeros_like(img)
cv2.fillConvexPoly(mask, polygons, 255)
eye_ROI = cv2.bitwise_and(img, mask)
return eye_ROI
def fps_overlay(img, fps):
""" Overlay FPS onto output img
Parameters:
img: numpy.ndarray
fps: float
Returns:
numpy.ndarray: img
"""
text = "FPS: {:.2f}".format(fps)
return cv2.putText(img, text, (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
def image_reader(filename, image_layers):
""" Image reader and performs image processing pipeline on
image file
Paramters:
filename: str
"""
img = cv2.imread(filename)
img_cp = img.copy()
gray = cv2.cvtColor(img_cp, cv2.COLOR_BGR2GRAY)
gray = extract_roi(gray)
img_cp, filter_img, thresh = detect_contour(gray, img_cp)
if image_layers:
cv2.namedWindow("ROI")
cv2.namedWindow("Gaussian Blur")
cv2.namedWindow("Thresholding")
cv2.namedWindow("Output")
cv2.imshow("ROI", gray)
cv2.imshow("Gaussian Blur", filter_img)
cv2.imshow("Thresholding", thresh)
cv2.imshow("Output", img_cp)
else:
cv2.imshow("Output", img_cp)
key = cv2.waitKey(0)
if key == ord('q'):
cv2.destroyAllWindows()
def video_reader(filename, image_layers):
""" Video capture reader and performs image processing pipeline on
captured frames
Paramters:
filename: str
"""
cap = cv2.VideoCapture(filename)
while(True):
ret, img = cap.read()
tic = time.time()
if not ret:
break
img_cp = img.copy()
gray = cv2.cvtColor(img_cp, cv2.COLOR_BGR2GRAY)
gray = extract_roi(gray)
img_cp, filter_img, thresh = detect_contour(gray, img_cp)
toc = time.time()
fps = 1/(toc-tic)
img_cp = fps_overlay(img_cp, fps)
if image_layers:
cv2.namedWindow("ROI")
cv2.namedWindow("Gaussian Blur")
cv2.namedWindow("Thresholding")
cv2.namedWindow("Output")
cv2.imshow("ROI", gray)
cv2.imshow("Gaussian Blur", filter_img)
cv2.imshow("Thresholding", thresh)
cv2.imshow("Output", img_cp)
else:
cv2.imshow("Output", img_cp)
key = cv2.waitKey(1)
if key == ord('q'):
break
if key == ord('p'):
cv2.waitKey(-1)
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input_format',
type=int,
dest='input_format',
default=1,
help='Image(0) or Video(1)')
parser.add_argument('--input_file',
type=str,
dest='input_file',
default='/home/indra/Documents/Telemedc/pupil_detector/assets/sample.mkv',
help='Path to input file (image or video)')
parser.add_argument('--image_layers',
type=bool,
dest='image_layers',
default=False,
help='Open CV Windows to see intermediate processing')
args = parser.parse_args()
if args.input_format:
video_reader(args.input_file, args.image_layers)
else:
image_reader(args.input_file, args.image_layers) |
the-stack_0_4921 | import matplotlib.pyplot as plt
import pymc3 as pm
import numpy as np
# import pydevd
# pydevd.set_pm_excepthook()
np.seterr(invalid='raise')
data = np.random.normal(size=(2, 20))
model = pm.Model()
with model:
x = pm.Normal('x', mu=.5, tau=2. ** -2, shape=(2, 1))
z = pm.Beta('z', alpha=10, beta=5.5)
d = pm.Normal('data', mu=x, tau=.75 ** -2, observed=data)
step = pm.NUTS()
def run(n=1000):
if n == "short":
n = 50
with model:
trace = pm.sample(n, step)
plt.subplot(2, 2, 1)
plt.plot(trace[x][:, 0, 0])
plt.subplot(2, 2, 2)
plt.hist(trace[x][:, 0, 0])
plt.subplot(2, 2, 3)
plt.plot(trace[x][:, 1, 0])
plt.subplot(2, 2, 4)
plt.hist(trace[x][:, 1, 0])
plt.show()
if __name__ == '__main__':
run()
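# Running this file directly draws 1000 NUTS samples (or 50 when run("short")
# is called) and plots the trace and histogram for both components of x.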
|
the-stack_0_4922 | import numpy as np
import pandas as pd
import os
import sys
from scipy import sparse
import utils
PAPER_COUNT_FILE = sys.argv[1]
YEAR = int(sys.argv[3])
WINDOW_LENGTH = int(sys.argv[4])
OUTPUT_NODE_FILE = sys.argv[5]
OUTPUT_EDGE_FILE = sys.argv[6]
year = YEAR
if __name__ == "__main__":
# Connect to the database
graph = utils.get_db()
# Load the paper count
pcount = pd.read_csv(PAPER_COUNT_FILE, sep="\t")
# Retrieve journal-to-journal citation edges within the time window
ys = YEAR - WINDOW_LENGTH
yf = YEAR
query = """
MATCH (jtrg:Journal)<-[:published_from]-(trg:Paper)<-[:cites]-(src:Paper {Year:%d})-[:published_from]->(jsrc:Journal)
where trg.Year<%d and trg.Year >= %d
return toInteger(jsrc.JournalId) as source, toInteger(jtrg.JournalId) as target, ID(trg) as p_target, ID(src) as s_target
""" % (
yf,
yf,
ys,
)
edges = graph.run(query).to_data_frame()
#print(query, edges)
# Make a node table
ccount = edges.groupby(["target"])["s_target"].nunique()
nodes = pd.DataFrame({"ccount": ccount})
nodes = nodes.reset_index().rename(columns={"target": "id"})
# Slice the paper counts between ys and yf
s = (ys <= pcount.year) & (pcount.year < yf)
_pcount = pcount[s].copy()
_pcount = _pcount.groupby("id").agg("sum")["pcount"].reset_index()
# Merge the pcount to the node table
nodes = pd.merge(left=nodes, right=_pcount, left_on="id", right_on="id", how="left")
# Uniqify and count
edges = edges.groupby(["source", "target"]).size().reset_index(name="w")
# Add citations from retracted papers
if year == 2010 or year == 2011:
if year == 2010:
added_edges = [
["medical science monitor", "cell transplantation", 445],
["the scientific world journal", "cell transplantation", 96],
["medical science monitor", "medical science monitor", 44],
["the scientific world journal", "the scientific world journal", 26],
]
elif year == 2011:
added_edges = [
["medical science monitor", "cell transplantation", 87],
["medical science monitor", "medical science monitor", 32],
["the scientific world journal", "cell transplantation", 109],
["the scientific world journal", "the scientific world journal", 29],
["cell transplantation", "technology and innovation", 24],
]
journal_list = list(
set([x[0] for x in added_edges] + [x[1] for x in added_edges])
)
query = """
MATCH (n:Journal)
WHERE n.NormalizedName in [{journals}]
return toInteger(n.JournalId) as id, n.NormalizedName as name
""".format(
journals=",".join(["'%s'" % x for x in journal_list])
)
node_table = graph.run(query).to_data_frame()
name2id = {x["name"]: x["id"] for i, x in node_table.iterrows()}
edge_list = [
{"source": name2id[x[0]], "target": name2id[x[1]], "w": x[2]}
for x in added_edges
]
added_edges = pd.DataFrame(edge_list)
edges = pd.concat([edges, added_edges], ignore_index=True)
# Save to the result
nodes.to_csv(OUTPUT_NODE_FILE, sep="\t")
edges.to_csv(OUTPUT_EDGE_FILE, sep="\t")
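# Invocation sketch (script and file names are placeholders). Note that
# sys.argv[2] is never read above, so a dummy value still has to occupy that slot:
#   python journal_citation_net.py pcount.tsv UNUSED 2010 5 nodes.tsv edges.tsv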
|
the-stack_0_4924 |
from pathlib import Path
import requests
import re
from one import params
from one.webclient import http_download_file
import SimpleITK as sitk
def download_histology_data(subject, lab):
if lab == 'hoferlab':
lab_temp = 'mrsicflogellab'
elif lab == 'churchlandlab_ucla':
lab_temp = 'churchlandlab'
else:
lab_temp = lab
par = params.get()
try:
FLAT_IRON_HIST_REL_PATH = Path('histology', lab_temp, subject,
'downsampledStacks_25', 'sample2ARA')
baseurl = (par.HTTP_DATA_SERVER + '/' + '/'.join(FLAT_IRON_HIST_REL_PATH.parts))
r = requests.get(baseurl, auth=(par.HTTP_DATA_SERVER_LOGIN, par.HTTP_DATA_SERVER_PWD))
r.raise_for_status()
except Exception as err:
print(err)
try:
subject_rem = subject.replace("_", "")
FLAT_IRON_HIST_REL_PATH = Path('histology', lab_temp, subject_rem,
'downsampledStacks_25', 'sample2ARA')
baseurl = (par.HTTP_DATA_SERVER + '/' + '/'.join(FLAT_IRON_HIST_REL_PATH.parts))
r = requests.get(baseurl, auth=(par.HTTP_DATA_SERVER_LOGIN, par.HTTP_DATA_SERVER_PWD))
r.raise_for_status()
except Exception as err:
print(err)
path_to_nrrd = None
return path_to_nrrd
tif_files = []
for line in r.text.splitlines():
result = re.findall('href="(.*).tif"', line)
if result:
tif_files.append(result[0] + '.tif')
CACHE_DIR = params.get_cache_dir().joinpath(lab, 'Subjects', subject, 'histology')
CACHE_DIR.mkdir(exist_ok=True, parents=True)
path_to_files = []
for file in tif_files:
path_to_image = Path(CACHE_DIR, file)
if not path_to_image.exists():
url = (baseurl + '/' + file)
http_download_file(url, cache_dir=CACHE_DIR,
username=par.HTTP_DATA_SERVER_LOGIN,
password=par.HTTP_DATA_SERVER_PWD)
path_to_nrrd = tif2nrrd(path_to_image)
path_to_files.append(path_to_nrrd)
if len(path_to_files) > 3:
path_to_files = path_to_files[1:3]
return path_to_files
def tif2nrrd(path_to_image):
path_to_nrrd = Path(path_to_image.parent, path_to_image.parts[-1][:-3] + 'nrrd')
if not path_to_nrrd.exists():
reader = sitk.ImageFileReader()
reader.SetImageIO("TIFFImageIO")
reader.SetFileName(str(path_to_image))
img = reader.Execute()
new_img = sitk.PermuteAxes(img, [2, 1, 0])
new_img = sitk.Flip(new_img, [True, False, False])
new_img.SetSpacing([1, 1, 1])
writer = sitk.ImageFileWriter()
writer.SetImageIO("NrrdImageIO")
writer.SetFileName(str(path_to_nrrd))
writer.Execute(new_img)
return path_to_nrrd
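# Hedged usage sketch (the subject and lab values are placeholders, and a
# configured ONE cache with FlatIron HTTP credentials is assumed):
#
#   paths = download_histology_data('SWC_043', 'hoferlab')
#   if paths:
#       print([str(p) for p in paths])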
|
the-stack_0_4925 | """
sphinx.domains.python
~~~~~~~~~~~~~~~~~~~~~
The Python domain.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import builtins
import inspect
import re
import sys
import typing
import warnings
from inspect import Parameter
from typing import Any, Dict, Iterable, Iterator, List, NamedTuple, Optional, Tuple, Type, cast
from docutils import nodes
from docutils.nodes import Element, Node
from docutils.parsers.rst import directives
from docutils.parsers.rst.states import Inliner
from sphinx import addnodes
from sphinx.addnodes import desc_signature, pending_xref, pending_xref_condition
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.deprecation import RemovedInSphinx50Warning
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, Index, IndexEntry, ObjType
from sphinx.environment import BuildEnvironment
from sphinx.locale import _, __
from sphinx.pycode.ast import ast
from sphinx.pycode.ast import parse as ast_parse
from sphinx.roles import XRefRole
from sphinx.util import logging
from sphinx.util.docfields import Field, GroupedField, TypedField
from sphinx.util.docutils import SphinxDirective
from sphinx.util.inspect import signature_from_str
from sphinx.util.nodes import find_pending_xref_condition, make_id, make_refnode
from sphinx.util.typing import OptionSpec, TextlikeNode
logger = logging.getLogger(__name__)
# REs for Python signatures
py_sig_re = re.compile(
r'''^ ([\w.]*\.)? # class name(s)
(\w+) \s* # thing name
(?: \(\s*(.*)\s*\) # optional: arguments
(?:\s* -> \s* (.*))? # return annotation
)? $ # and nothing more
''', re.VERBOSE)
pairindextypes = {
'module': _('module'),
'keyword': _('keyword'),
'operator': _('operator'),
'object': _('object'),
'exception': _('exception'),
'statement': _('statement'),
'builtin': _('built-in function'),
}
class ObjectEntry(NamedTuple):
docname: str
node_id: str
objtype: str
aliased: bool
class ModuleEntry(NamedTuple):
docname: str
node_id: str
synopsis: str
platform: str
deprecated: bool
def type_to_xref(text: str, env: BuildEnvironment = None) -> addnodes.pending_xref:
"""Convert a type string to a cross reference node."""
if text == 'None':
reftype = 'obj'
else:
reftype = 'class'
if env:
kwargs = {'py:module': env.ref_context.get('py:module'),
'py:class': env.ref_context.get('py:class')}
else:
kwargs = {}
if env.config.python_use_unqualified_type_names:
# Note: It would be better to use qualname to describe the object to support
# nested classes. But python domain can't access the real python object because this
# module should work not-dynamically.
shortname = text.split('.')[-1]
contnodes: List[Node] = [pending_xref_condition('', shortname, condition='resolved'),
pending_xref_condition('', text, condition='*')]
else:
contnodes = [nodes.Text(text)]
return pending_xref('', *contnodes,
refdomain='py', reftype=reftype, reftarget=text, **kwargs)
def _parse_annotation(annotation: str, env: BuildEnvironment = None) -> List[Node]:
"""Parse type annotation."""
def unparse(node: ast.AST) -> List[Node]:
if isinstance(node, ast.Attribute):
return [nodes.Text("%s.%s" % (unparse(node.value)[0], node.attr))]
elif isinstance(node, ast.BinOp):
result: List[Node] = unparse(node.left)
result.extend(unparse(node.op))
result.extend(unparse(node.right))
return result
elif isinstance(node, ast.BitOr):
return [addnodes.desc_sig_space(),
addnodes.desc_sig_punctuation('', '|'),
addnodes.desc_sig_space()]
elif isinstance(node, ast.Constant): # type: ignore
if node.value is Ellipsis:
return [addnodes.desc_sig_punctuation('', "...")]
elif isinstance(node.value, bool):
return [addnodes.desc_sig_keyword('', repr(node.value))]
elif isinstance(node.value, int):
return [addnodes.desc_sig_literal_number('', repr(node.value))]
elif isinstance(node.value, str):
return [addnodes.desc_sig_literal_string('', repr(node.value))]
else:
# handles None, which is further handled by type_to_xref later
# and fallback for other types that should be converted
return [nodes.Text(repr(node.value))]
elif isinstance(node, ast.Expr):
return unparse(node.value)
elif isinstance(node, ast.Index):
return unparse(node.value)
elif isinstance(node, ast.List):
result = [addnodes.desc_sig_punctuation('', '[')]
if node.elts:
# check if there are elements in node.elts to only pop the
# last element of result if the for-loop was run at least
# once
for elem in node.elts:
result.extend(unparse(elem))
result.append(addnodes.desc_sig_punctuation('', ','))
result.append(addnodes.desc_sig_space())
result.pop()
result.pop()
result.append(addnodes.desc_sig_punctuation('', ']'))
return result
elif isinstance(node, ast.Module):
return sum((unparse(e) for e in node.body), [])
elif isinstance(node, ast.Name):
return [nodes.Text(node.id)]
elif isinstance(node, ast.Subscript):
result = unparse(node.value)
result.append(addnodes.desc_sig_punctuation('', '['))
result.extend(unparse(node.slice))
result.append(addnodes.desc_sig_punctuation('', ']'))
# Wrap the Text nodes inside brackets by literal node if the subscript is a Literal
if result[0] in ('Literal', 'typing.Literal'):
for i, subnode in enumerate(result[1:], start=1):
if isinstance(subnode, nodes.Text):
result[i] = nodes.literal('', '', subnode)
return result
elif isinstance(node, ast.Tuple):
if node.elts:
result = []
for elem in node.elts:
result.extend(unparse(elem))
result.append(addnodes.desc_sig_punctuation('', ','))
result.append(addnodes.desc_sig_space())
result.pop()
result.pop()
else:
result = [addnodes.desc_sig_punctuation('', '('),
addnodes.desc_sig_punctuation('', ')')]
return result
else:
if sys.version_info < (3, 8):
if isinstance(node, ast.Ellipsis):
return [addnodes.desc_sig_punctuation('', "...")]
elif isinstance(node, ast.NameConstant):
return [nodes.Text(node.value)]
raise SyntaxError # unsupported syntax
if env is None:
warnings.warn("The env parameter for _parse_annotation becomes required now.",
RemovedInSphinx50Warning, stacklevel=2)
try:
tree = ast_parse(annotation)
result = unparse(tree)
for i, node in enumerate(result):
if isinstance(node, nodes.literal):
result[i] = node[0]
elif isinstance(node, nodes.Text) and node.strip():
result[i] = type_to_xref(str(node), env)
return result
except SyntaxError:
return [type_to_xref(annotation, env)]
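# For example (roughly -- exact node attributes omitted), calling
# _parse_annotation("Dict[str, int]", env) yields the node sequence
#   pending_xref('Dict'), desc_sig_punctuation('['), pending_xref('str'),
#   desc_sig_punctuation(','), desc_sig_space(), pending_xref('int'),
#   desc_sig_punctuation(']')
# which callers splice into the signature nodes they build.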
def _parse_arglist(arglist: str, env: BuildEnvironment = None) -> addnodes.desc_parameterlist:
"""Parse a list of arguments using AST parser"""
params = addnodes.desc_parameterlist(arglist)
sig = signature_from_str('(%s)' % arglist)
last_kind = None
for param in sig.parameters.values():
if param.kind != param.POSITIONAL_ONLY and last_kind == param.POSITIONAL_ONLY:
# PEP-570: Separator for Positional Only Parameter: /
params += addnodes.desc_parameter('', '', addnodes.desc_sig_operator('', '/'))
if param.kind == param.KEYWORD_ONLY and last_kind in (param.POSITIONAL_OR_KEYWORD,
param.POSITIONAL_ONLY,
None):
# PEP-3102: Separator for Keyword Only Parameter: *
params += addnodes.desc_parameter('', '', addnodes.desc_sig_operator('', '*'))
node = addnodes.desc_parameter()
if param.kind == param.VAR_POSITIONAL:
node += addnodes.desc_sig_operator('', '*')
node += addnodes.desc_sig_name('', param.name)
elif param.kind == param.VAR_KEYWORD:
node += addnodes.desc_sig_operator('', '**')
node += addnodes.desc_sig_name('', param.name)
else:
node += addnodes.desc_sig_name('', param.name)
if param.annotation is not param.empty:
children = _parse_annotation(param.annotation, env)
node += addnodes.desc_sig_punctuation('', ':')
node += addnodes.desc_sig_space()
node += addnodes.desc_sig_name('', '', *children) # type: ignore
if param.default is not param.empty:
if param.annotation is not param.empty:
node += addnodes.desc_sig_space()
node += addnodes.desc_sig_operator('', '=')
node += addnodes.desc_sig_space()
else:
node += addnodes.desc_sig_operator('', '=')
node += nodes.inline('', param.default, classes=['default_value'],
support_smartquotes=False)
params += node
last_kind = param.kind
if last_kind == Parameter.POSITIONAL_ONLY:
# PEP-570: Separator for Positional Only Parameter: /
params += addnodes.desc_parameter('', '', addnodes.desc_sig_operator('', '/'))
return params
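# Illustrative note (not part of the original module): for a signature such as
# ``func(pos, /, x: int = 1, *args, key=None, **kw)`` this builds one ``desc_parameter``
# per argument and inserts the bare ``/`` (and, when needed, ``*``) separators required
# by PEP 570 / PEP 3102, e.g. roughly:
#   _parse_arglist("pos, /, x: int = 1, *args, key=None, **kw", env)
#   # -> desc_parameterlist('pos', '/', 'x: int = 1', '*args', 'key=None', '**kw')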
def _pseudo_parse_arglist(signode: desc_signature, arglist: str) -> None:
""""Parse" a list of arguments separated by commas.
Arguments can have "optional" annotations given by enclosing them in
brackets. Currently, this will split at any comma, even if it's inside a
string literal (e.g. default argument value).
"""
paramlist = addnodes.desc_parameterlist()
stack: List[Element] = [paramlist]
try:
for argument in arglist.split(','):
argument = argument.strip()
ends_open = ends_close = 0
while argument.startswith('['):
stack.append(addnodes.desc_optional())
stack[-2] += stack[-1]
argument = argument[1:].strip()
while argument.startswith(']'):
stack.pop()
argument = argument[1:].strip()
while argument.endswith(']') and not argument.endswith('[]'):
ends_close += 1
argument = argument[:-1].strip()
while argument.endswith('['):
ends_open += 1
argument = argument[:-1].strip()
if argument:
stack[-1] += addnodes.desc_parameter(
'', '', addnodes.desc_sig_name(argument, argument))
while ends_open:
stack.append(addnodes.desc_optional())
stack[-2] += stack[-1]
ends_open -= 1
while ends_close:
stack.pop()
ends_close -= 1
if len(stack) != 1:
raise IndexError
except IndexError:
# if there are too few or too many elements on the stack, just give up
# and treat the whole argument list as one argument, discarding the
# already partially populated paramlist node
paramlist = addnodes.desc_parameterlist()
paramlist += addnodes.desc_parameter(arglist, arglist)
signode += paramlist
else:
signode += paramlist
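# Illustrative note (not part of the original module): the fallback parser above turns
# bracket-enclosed optional arguments into nested ``desc_optional`` nodes, e.g. roughly:
#   _pseudo_parse_arglist(signode, "foo[, bar[, baz]]")
#   # -> desc_parameterlist('foo', desc_optional('bar', desc_optional('baz')))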
# This override allows our inline type specifiers to behave like :class: link
# when it comes to handling "." and "~" prefixes.
class PyXrefMixin:
def make_xref(self, rolename: str, domain: str, target: str,
innernode: Type[TextlikeNode] = nodes.emphasis,
contnode: Node = None, env: BuildEnvironment = None,
inliner: Inliner = None, location: Node = None) -> Node:
# we use inliner=None to make sure we get the old behaviour with a single
# pending_xref node
result = super().make_xref(rolename, domain, target, # type: ignore
innernode, contnode,
env, inliner=None, location=None)
result['refspecific'] = True
result['py:module'] = env.ref_context.get('py:module')
result['py:class'] = env.ref_context.get('py:class')
if target.startswith(('.', '~')):
prefix, result['reftarget'] = target[0], target[1:]
if prefix == '.':
text = target[1:]
elif prefix == '~':
text = target.split('.')[-1]
for node in result.traverse(nodes.Text):
node.parent[node.parent.index(node)] = nodes.Text(text)
break
elif isinstance(result, pending_xref) and env.config.python_use_unqualified_type_names:
children = result.children
result.clear()
shortname = target.split('.')[-1]
textnode = innernode('', shortname)
contnodes = [pending_xref_condition('', '', textnode, condition='resolved'),
pending_xref_condition('', '', *children, condition='*')]
result.extend(contnodes)
return result
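    # Illustrative note (not part of the original module): with this override, type targets in
    # fields behave like the :py:class: role prefixes do -- e.g. a target written as
    # ``~pkg.mod.Thing`` is displayed with the shortened text ``Thing``, and a leading ``.``
    # is stripped from both the displayed text and the reference target.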
def make_xrefs(self, rolename: str, domain: str, target: str,
innernode: Type[TextlikeNode] = nodes.emphasis,
contnode: Node = None, env: BuildEnvironment = None,
inliner: Inliner = None, location: Node = None) -> List[Node]:
delims = r'(\s*[\[\]\(\),](?:\s*or\s)?\s*|\s+or\s+|\s*\|\s*|\.\.\.)'
delims_re = re.compile(delims)
sub_targets = re.split(delims, target)
split_contnode = bool(contnode and contnode.astext() == target)
results = []
for sub_target in filter(None, sub_targets):
if split_contnode:
contnode = nodes.Text(sub_target)
if delims_re.match(sub_target):
results.append(contnode or innernode(sub_target, sub_target))
else:
results.append(self.make_xref(rolename, domain, sub_target,
innernode, contnode, env, inliner, location))
return results
class PyField(PyXrefMixin, Field):
def make_xref(self, rolename: str, domain: str, target: str,
innernode: Type[TextlikeNode] = nodes.emphasis,
contnode: Node = None, env: BuildEnvironment = None,
inliner: Inliner = None, location: Node = None) -> Node:
if rolename == 'class' and target == 'None':
# None is not a type, so use obj role instead.
rolename = 'obj'
return super().make_xref(rolename, domain, target, innernode, contnode,
env, inliner, location)
class PyGroupedField(PyXrefMixin, GroupedField):
pass
class PyTypedField(PyXrefMixin, TypedField):
def make_xref(self, rolename: str, domain: str, target: str,
innernode: Type[TextlikeNode] = nodes.emphasis,
contnode: Node = None, env: BuildEnvironment = None,
inliner: Inliner = None, location: Node = None) -> Node:
if rolename == 'class' and target == 'None':
# None is not a type, so use obj role instead.
rolename = 'obj'
return super().make_xref(rolename, domain, target, innernode, contnode,
env, inliner, location)
class PyObject(ObjectDescription[Tuple[str, str]]):
"""
Description of a general Python object.
:cvar allow_nesting: Class is an object that allows for nested namespaces
:vartype allow_nesting: bool
"""
option_spec: OptionSpec = {
'noindex': directives.flag,
'noindexentry': directives.flag,
'module': directives.unchanged,
'canonical': directives.unchanged,
'annotation': directives.unchanged,
}
doc_field_types = [
PyTypedField('parameter', label=_('Parameters'),
names=('param', 'parameter', 'arg', 'argument',
'keyword', 'kwarg', 'kwparam'),
typerolename='class', typenames=('paramtype', 'type'),
can_collapse=True),
PyTypedField('variable', label=_('Variables'),
names=('var', 'ivar', 'cvar'),
typerolename='class', typenames=('vartype',),
can_collapse=True),
PyGroupedField('exceptions', label=_('Raises'), rolename='exc',
names=('raises', 'raise', 'exception', 'except'),
can_collapse=True),
Field('returnvalue', label=_('Returns'), has_arg=False,
names=('returns', 'return')),
PyField('returntype', label=_('Return type'), has_arg=False,
names=('rtype',), bodyrolename='class'),
]
allow_nesting = False
def get_signature_prefix(self, sig: str) -> List[nodes.Node]:
"""May return a prefix to put before the object name in the
signature.
"""
return []
def needs_arglist(self) -> bool:
"""May return true if an empty argument list is to be generated even if
the document contains none.
"""
return False
def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]:
"""Transform a Python signature into RST nodes.
Return (fully qualified name of the thing, classname if any).
If inside a class, the current class name is handled intelligently:
* it is stripped from the displayed name if present
* it is added to the full name (return value) if not present
"""
m = py_sig_re.match(sig)
if m is None:
raise ValueError
prefix, name, arglist, retann = m.groups()
# determine module and class name (if applicable), as well as full name
modname = self.options.get('module', self.env.ref_context.get('py:module'))
classname = self.env.ref_context.get('py:class')
if classname:
add_module = False
if prefix and (prefix == classname or
prefix.startswith(classname + ".")):
fullname = prefix + name
# class name is given again in the signature
prefix = prefix[len(classname):].lstrip('.')
elif prefix:
# class name is given in the signature, but different
# (shouldn't happen)
fullname = classname + '.' + prefix + name
else:
# class name is not given in the signature
fullname = classname + '.' + name
else:
add_module = True
if prefix:
classname = prefix.rstrip('.')
fullname = prefix + name
else:
classname = ''
fullname = name
signode['module'] = modname
signode['class'] = classname
signode['fullname'] = fullname
sig_prefix = self.get_signature_prefix(sig)
if sig_prefix:
signode += addnodes.desc_annotation(str(sig_prefix), '', *sig_prefix)
if prefix:
signode += addnodes.desc_addname(prefix, prefix)
elif modname and add_module and self.env.config.add_module_names:
nodetext = modname + '.'
signode += addnodes.desc_addname(nodetext, nodetext)
signode += addnodes.desc_name(name, name)
if arglist:
try:
signode += _parse_arglist(arglist, self.env)
except SyntaxError:
                # fall back to the original arglist parser,
                # which supports representing optional arguments (e.g. "func(foo [, bar])")
_pseudo_parse_arglist(signode, arglist)
except NotImplementedError as exc:
logger.warning("could not parse arglist (%r): %s", arglist, exc,
location=signode)
_pseudo_parse_arglist(signode, arglist)
else:
if self.needs_arglist():
# for callables, add an empty parameter list
signode += addnodes.desc_parameterlist()
if retann:
children = _parse_annotation(retann, self.env)
signode += addnodes.desc_returns(retann, '', *children)
anno = self.options.get('annotation')
if anno:
signode += addnodes.desc_annotation(' ' + anno, '',
addnodes.desc_sig_space(),
nodes.Text(anno))
return fullname, prefix
def get_index_text(self, modname: str, name: Tuple[str, str]) -> str:
"""Return the text for the index entry of the object."""
raise NotImplementedError('must be implemented in subclasses')
def add_target_and_index(self, name_cls: Tuple[str, str], sig: str,
signode: desc_signature) -> None:
modname = self.options.get('module', self.env.ref_context.get('py:module'))
fullname = (modname + '.' if modname else '') + name_cls[0]
node_id = make_id(self.env, self.state.document, '', fullname)
signode['ids'].append(node_id)
# Assign old styled node_id(fullname) not to break old hyperlinks (if possible)
        # Note: Will be removed in Sphinx-5.0 (RemovedInSphinx50Warning)
if node_id != fullname and fullname not in self.state.document.ids:
signode['ids'].append(fullname)
self.state.document.note_explicit_target(signode)
domain = cast(PythonDomain, self.env.get_domain('py'))
domain.note_object(fullname, self.objtype, node_id, location=signode)
canonical_name = self.options.get('canonical')
if canonical_name:
domain.note_object(canonical_name, self.objtype, node_id, aliased=True,
location=signode)
if 'noindexentry' not in self.options:
indextext = self.get_index_text(modname, name_cls)
if indextext:
self.indexnode['entries'].append(('single', indextext, node_id, '', None))
def before_content(self) -> None:
"""Handle object nesting before content
:py:class:`PyObject` represents Python language constructs. For
constructs that are nestable, such as a Python classes, this method will
build up a stack of the nesting hierarchy so that it can be later
de-nested correctly, in :py:meth:`after_content`.
For constructs that aren't nestable, the stack is bypassed, and instead
only the most recent object is tracked. This object prefix name will be
removed with :py:meth:`after_content`.
"""
prefix = None
if self.names:
# fullname and name_prefix come from the `handle_signature` method.
# fullname represents the full object name that is constructed using
# object nesting and explicit prefixes. `name_prefix` is the
# explicit prefix given in a signature
(fullname, name_prefix) = self.names[-1]
if self.allow_nesting:
prefix = fullname
elif name_prefix:
prefix = name_prefix.strip('.')
if prefix:
self.env.ref_context['py:class'] = prefix
if self.allow_nesting:
classes = self.env.ref_context.setdefault('py:classes', [])
classes.append(prefix)
if 'module' in self.options:
modules = self.env.ref_context.setdefault('py:modules', [])
modules.append(self.env.ref_context.get('py:module'))
self.env.ref_context['py:module'] = self.options['module']
def after_content(self) -> None:
"""Handle object de-nesting after content
If this class is a nestable object, removing the last nested class prefix
ends further nesting in the object.
If this class is not a nestable object, the list of classes should not
be altered as we didn't affect the nesting levels in
:py:meth:`before_content`.
"""
classes = self.env.ref_context.setdefault('py:classes', [])
if self.allow_nesting:
try:
classes.pop()
except IndexError:
pass
self.env.ref_context['py:class'] = (classes[-1] if len(classes) > 0
else None)
if 'module' in self.options:
modules = self.env.ref_context.setdefault('py:modules', [])
if modules:
self.env.ref_context['py:module'] = modules.pop()
else:
self.env.ref_context.pop('py:module')
class PyFunction(PyObject):
"""Description of a function."""
option_spec: OptionSpec = PyObject.option_spec.copy()
option_spec.update({
'async': directives.flag,
})
def get_signature_prefix(self, sig: str) -> List[nodes.Node]:
if 'async' in self.options:
return [addnodes.desc_sig_keyword('', 'async'),
addnodes.desc_sig_space()]
else:
return []
def needs_arglist(self) -> bool:
return True
def add_target_and_index(self, name_cls: Tuple[str, str], sig: str,
signode: desc_signature) -> None:
super().add_target_and_index(name_cls, sig, signode)
if 'noindexentry' not in self.options:
modname = self.options.get('module', self.env.ref_context.get('py:module'))
node_id = signode['ids'][0]
name, cls = name_cls
if modname:
text = _('%s() (in module %s)') % (name, modname)
self.indexnode['entries'].append(('single', text, node_id, '', None))
else:
text = '%s; %s()' % (pairindextypes['builtin'], name)
self.indexnode['entries'].append(('pair', text, node_id, '', None))
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
# add index in own add_target_and_index() instead.
return None
class PyDecoratorFunction(PyFunction):
"""Description of a decorator."""
def run(self) -> List[Node]:
# a decorator function is a function after all
self.name = 'py:function'
return super().run()
def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]:
ret = super().handle_signature(sig, signode)
signode.insert(0, addnodes.desc_addname('@', '@'))
return ret
def needs_arglist(self) -> bool:
return False
class PyVariable(PyObject):
"""Description of a variable."""
option_spec: OptionSpec = PyObject.option_spec.copy()
option_spec.update({
'type': directives.unchanged,
'value': directives.unchanged,
})
def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]:
fullname, prefix = super().handle_signature(sig, signode)
typ = self.options.get('type')
if typ:
annotations = _parse_annotation(typ, self.env)
signode += addnodes.desc_annotation(typ, '',
addnodes.desc_sig_punctuation('', ':'),
addnodes.desc_sig_space(), *annotations)
value = self.options.get('value')
if value:
signode += addnodes.desc_annotation(value, '',
addnodes.desc_sig_space(),
addnodes.desc_sig_punctuation('', '='),
addnodes.desc_sig_space(),
nodes.Text(value))
return fullname, prefix
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
name, cls = name_cls
if modname:
return _('%s (in module %s)') % (name, modname)
else:
return _('%s (built-in variable)') % name
class PyClasslike(PyObject):
"""
Description of a class-like object (classes, interfaces, exceptions).
"""
option_spec: OptionSpec = PyObject.option_spec.copy()
option_spec.update({
'final': directives.flag,
})
allow_nesting = True
def get_signature_prefix(self, sig: str) -> List[nodes.Node]:
if 'final' in self.options:
return [nodes.Text('final'), addnodes.desc_sig_space(),
nodes.Text(self.objtype), addnodes.desc_sig_space()]
else:
return [nodes.Text(self.objtype), addnodes.desc_sig_space()]
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
if self.objtype == 'class':
if not modname:
return _('%s (built-in class)') % name_cls[0]
return _('%s (class in %s)') % (name_cls[0], modname)
elif self.objtype == 'exception':
return name_cls[0]
else:
return ''
class PyMethod(PyObject):
"""Description of a method."""
option_spec: OptionSpec = PyObject.option_spec.copy()
option_spec.update({
'abstractmethod': directives.flag,
'async': directives.flag,
'classmethod': directives.flag,
'final': directives.flag,
'property': directives.flag,
'staticmethod': directives.flag,
})
def needs_arglist(self) -> bool:
if 'property' in self.options:
return False
else:
return True
def get_signature_prefix(self, sig: str) -> List[nodes.Node]:
prefix: List[nodes.Node] = []
if 'final' in self.options:
prefix.append(nodes.Text('final'))
prefix.append(addnodes.desc_sig_space())
if 'abstractmethod' in self.options:
prefix.append(nodes.Text('abstract'))
prefix.append(addnodes.desc_sig_space())
if 'async' in self.options:
prefix.append(nodes.Text('async'))
prefix.append(addnodes.desc_sig_space())
if 'classmethod' in self.options:
prefix.append(nodes.Text('classmethod'))
prefix.append(addnodes.desc_sig_space())
if 'property' in self.options:
prefix.append(nodes.Text('property'))
prefix.append(addnodes.desc_sig_space())
if 'staticmethod' in self.options:
prefix.append(nodes.Text('static'))
prefix.append(addnodes.desc_sig_space())
return prefix
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
name, cls = name_cls
try:
clsname, methname = name.rsplit('.', 1)
if modname and self.env.config.add_module_names:
clsname = '.'.join([modname, clsname])
except ValueError:
if modname:
return _('%s() (in module %s)') % (name, modname)
else:
return '%s()' % name
if 'classmethod' in self.options:
return _('%s() (%s class method)') % (methname, clsname)
elif 'property' in self.options:
return _('%s (%s property)') % (methname, clsname)
elif 'staticmethod' in self.options:
return _('%s() (%s static method)') % (methname, clsname)
else:
return _('%s() (%s method)') % (methname, clsname)
class PyClassMethod(PyMethod):
"""Description of a classmethod."""
option_spec: OptionSpec = PyObject.option_spec.copy()
def run(self) -> List[Node]:
self.name = 'py:method'
self.options['classmethod'] = True
return super().run()
class PyStaticMethod(PyMethod):
"""Description of a staticmethod."""
option_spec: OptionSpec = PyObject.option_spec.copy()
def run(self) -> List[Node]:
self.name = 'py:method'
self.options['staticmethod'] = True
return super().run()
class PyDecoratorMethod(PyMethod):
"""Description of a decoratormethod."""
def run(self) -> List[Node]:
self.name = 'py:method'
return super().run()
def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]:
ret = super().handle_signature(sig, signode)
signode.insert(0, addnodes.desc_addname('@', '@'))
return ret
def needs_arglist(self) -> bool:
return False
class PyAttribute(PyObject):
"""Description of an attribute."""
option_spec: OptionSpec = PyObject.option_spec.copy()
option_spec.update({
'type': directives.unchanged,
'value': directives.unchanged,
})
def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]:
fullname, prefix = super().handle_signature(sig, signode)
typ = self.options.get('type')
if typ:
annotations = _parse_annotation(typ, self.env)
signode += addnodes.desc_annotation(typ, '',
addnodes.desc_sig_punctuation('', ':'),
addnodes.desc_sig_space(),
*annotations)
value = self.options.get('value')
if value:
signode += addnodes.desc_annotation(value, '',
addnodes.desc_sig_space(),
addnodes.desc_sig_punctuation('', '='),
addnodes.desc_sig_space(),
nodes.Text(value))
return fullname, prefix
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
name, cls = name_cls
try:
clsname, attrname = name.rsplit('.', 1)
if modname and self.env.config.add_module_names:
clsname = '.'.join([modname, clsname])
except ValueError:
if modname:
return _('%s (in module %s)') % (name, modname)
else:
return name
return _('%s (%s attribute)') % (attrname, clsname)
class PyProperty(PyObject):
"""Description of an attribute."""
option_spec = PyObject.option_spec.copy()
option_spec.update({
'abstractmethod': directives.flag,
'classmethod': directives.flag,
'type': directives.unchanged,
})
def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]:
fullname, prefix = super().handle_signature(sig, signode)
typ = self.options.get('type')
if typ:
annotations = _parse_annotation(typ, self.env)
signode += addnodes.desc_annotation(typ, '',
addnodes.desc_sig_punctuation('', ':'),
addnodes.desc_sig_space(),
*annotations)
return fullname, prefix
def get_signature_prefix(self, sig: str) -> List[nodes.Node]:
prefix: List[nodes.Node] = []
if 'abstractmethod' in self.options:
prefix.append(nodes.Text('abstract'))
prefix.append(addnodes.desc_sig_space())
if 'classmethod' in self.options:
prefix.append(nodes.Text('class'))
prefix.append(addnodes.desc_sig_space())
prefix.append(nodes.Text('property'))
prefix.append(addnodes.desc_sig_space())
return prefix
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
name, cls = name_cls
try:
clsname, attrname = name.rsplit('.', 1)
if modname and self.env.config.add_module_names:
clsname = '.'.join([modname, clsname])
except ValueError:
if modname:
return _('%s (in module %s)') % (name, modname)
else:
return name
return _('%s (%s property)') % (attrname, clsname)
class PyDecoratorMixin:
"""
Mixin for decorator directives.
"""
def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]:
for cls in self.__class__.__mro__:
if cls.__name__ != 'DirectiveAdapter':
warnings.warn('PyDecoratorMixin is deprecated. '
'Please check the implementation of %s' % cls,
RemovedInSphinx50Warning, stacklevel=2)
break
else:
warnings.warn('PyDecoratorMixin is deprecated',
RemovedInSphinx50Warning, stacklevel=2)
ret = super().handle_signature(sig, signode) # type: ignore
signode.insert(0, addnodes.desc_addname('@', '@'))
return ret
def needs_arglist(self) -> bool:
return False
class PyModule(SphinxDirective):
"""
Directive to mark description of a new module.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec: OptionSpec = {
'platform': lambda x: x,
'synopsis': lambda x: x,
'noindex': directives.flag,
'deprecated': directives.flag,
}
def run(self) -> List[Node]:
domain = cast(PythonDomain, self.env.get_domain('py'))
modname = self.arguments[0].strip()
noindex = 'noindex' in self.options
self.env.ref_context['py:module'] = modname
ret: List[Node] = []
if not noindex:
# note module to the domain
node_id = make_id(self.env, self.state.document, 'module', modname)
target = nodes.target('', '', ids=[node_id], ismod=True)
self.set_source_info(target)
# Assign old styled node_id not to break old hyperlinks (if possible)
            # Note: Will be removed in Sphinx-5.0 (RemovedInSphinx50Warning)
old_node_id = self.make_old_id(modname)
if node_id != old_node_id and old_node_id not in self.state.document.ids:
target['ids'].append(old_node_id)
self.state.document.note_explicit_target(target)
domain.note_module(modname,
node_id,
self.options.get('synopsis', ''),
self.options.get('platform', ''),
'deprecated' in self.options)
domain.note_object(modname, 'module', node_id, location=target)
# the platform and synopsis aren't printed; in fact, they are only
# used in the modindex currently
ret.append(target)
indextext = '%s; %s' % (pairindextypes['module'], modname)
inode = addnodes.index(entries=[('pair', indextext, node_id, '', None)])
ret.append(inode)
return ret
def make_old_id(self, name: str) -> str:
"""Generate old styled node_id.
Old styled node_id is incompatible with docutils' node_id.
It can contain dots and hyphens.
.. note:: Old styled node_id was mainly used until Sphinx-3.0.
"""
return 'module-%s' % name
class PyCurrentModule(SphinxDirective):
"""
This directive is just to tell Sphinx that we're documenting
stuff in module foo, but links to module foo won't lead here.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec: OptionSpec = {}
def run(self) -> List[Node]:
modname = self.arguments[0].strip()
if modname == 'None':
self.env.ref_context.pop('py:module', None)
else:
self.env.ref_context['py:module'] = modname
return []
class PyXRefRole(XRefRole):
def process_link(self, env: BuildEnvironment, refnode: Element,
has_explicit_title: bool, title: str, target: str) -> Tuple[str, str]:
refnode['py:module'] = env.ref_context.get('py:module')
refnode['py:class'] = env.ref_context.get('py:class')
if not has_explicit_title:
title = title.lstrip('.') # only has a meaning for the target
target = target.lstrip('~') # only has a meaning for the title
# if the first character is a tilde, don't display the module/class
# parts of the contents
if title[0:1] == '~':
title = title[1:]
dot = title.rfind('.')
if dot != -1:
title = title[dot + 1:]
# if the first character is a dot, search more specific namespaces first
# else search builtins first
if target[0:1] == '.':
target = target[1:]
refnode['refspecific'] = True
return title, target
def filter_meta_fields(app: Sphinx, domain: str, objtype: str, content: Element) -> None:
"""Filter ``:meta:`` field from its docstring."""
if domain != 'py':
return
for node in content:
if isinstance(node, nodes.field_list):
fields = cast(List[nodes.field], node)
for field in fields:
field_name = cast(nodes.field_body, field[0]).astext().strip()
if field_name == 'meta' or field_name.startswith('meta '):
node.remove(field)
break
class PythonModuleIndex(Index):
"""
Index subclass to provide the Python module index.
"""
name = 'modindex'
localname = _('Python Module Index')
shortname = _('modules')
def generate(self, docnames: Iterable[str] = None
) -> Tuple[List[Tuple[str, List[IndexEntry]]], bool]:
content: Dict[str, List[IndexEntry]] = {}
# list of prefixes to ignore
ignores: List[str] = self.domain.env.config['modindex_common_prefix']
ignores = sorted(ignores, key=len, reverse=True)
# list of all modules, sorted by module name
modules = sorted(self.domain.data['modules'].items(),
key=lambda x: x[0].lower())
# sort out collapsible modules
prev_modname = ''
num_toplevels = 0
for modname, (docname, node_id, synopsis, platforms, deprecated) in modules:
if docnames and docname not in docnames:
continue
for ignore in ignores:
if modname.startswith(ignore):
modname = modname[len(ignore):]
stripped = ignore
break
else:
stripped = ''
# we stripped the whole module name?
if not modname:
modname, stripped = stripped, ''
entries = content.setdefault(modname[0].lower(), [])
package = modname.split('.')[0]
if package != modname:
# it's a submodule
if prev_modname == package:
# first submodule - make parent a group head
if entries:
last = entries[-1]
entries[-1] = IndexEntry(last[0], 1, last[2], last[3],
last[4], last[5], last[6])
elif not prev_modname.startswith(package):
# submodule without parent in list, add dummy entry
entries.append(IndexEntry(stripped + package, 1, '', '', '', '', ''))
subtype = 2
else:
num_toplevels += 1
subtype = 0
qualifier = _('Deprecated') if deprecated else ''
entries.append(IndexEntry(stripped + modname, subtype, docname,
node_id, platforms, qualifier, synopsis))
prev_modname = modname
# apply heuristics when to collapse modindex at page load:
# only collapse if number of toplevel modules is larger than
# number of submodules
collapse = len(modules) - num_toplevels < num_toplevels
# sort by first letter
sorted_content = sorted(content.items())
return sorted_content, collapse
class PythonDomain(Domain):
"""Python language domain."""
name = 'py'
label = 'Python'
object_types: Dict[str, ObjType] = {
'function': ObjType(_('function'), 'func', 'obj'),
'data': ObjType(_('data'), 'data', 'obj'),
'class': ObjType(_('class'), 'class', 'exc', 'obj'),
'exception': ObjType(_('exception'), 'exc', 'class', 'obj'),
'method': ObjType(_('method'), 'meth', 'obj'),
'classmethod': ObjType(_('class method'), 'meth', 'obj'),
'staticmethod': ObjType(_('static method'), 'meth', 'obj'),
'attribute': ObjType(_('attribute'), 'attr', 'obj'),
'property': ObjType(_('property'), 'attr', '_prop', 'obj'),
'module': ObjType(_('module'), 'mod', 'obj'),
}
directives = {
'function': PyFunction,
'data': PyVariable,
'class': PyClasslike,
'exception': PyClasslike,
'method': PyMethod,
'classmethod': PyClassMethod,
'staticmethod': PyStaticMethod,
'attribute': PyAttribute,
'property': PyProperty,
'module': PyModule,
'currentmodule': PyCurrentModule,
'decorator': PyDecoratorFunction,
'decoratormethod': PyDecoratorMethod,
}
roles = {
'data': PyXRefRole(),
'exc': PyXRefRole(),
'func': PyXRefRole(fix_parens=True),
'class': PyXRefRole(),
'const': PyXRefRole(),
'attr': PyXRefRole(),
'meth': PyXRefRole(fix_parens=True),
'mod': PyXRefRole(),
'obj': PyXRefRole(),
}
initial_data: Dict[str, Dict[str, Tuple[Any]]] = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # modname -> docname, synopsis, platform, deprecated
}
indices = [
PythonModuleIndex,
]
@property
def objects(self) -> Dict[str, ObjectEntry]:
return self.data.setdefault('objects', {}) # fullname -> ObjectEntry
def note_object(self, name: str, objtype: str, node_id: str,
aliased: bool = False, location: Any = None) -> None:
"""Note a python object for cross reference.
.. versionadded:: 2.1
"""
if name in self.objects:
other = self.objects[name]
if other.aliased and aliased is False:
# The original definition found. Override it!
pass
elif other.aliased is False and aliased:
# The original definition is already registered.
return
else:
# duplicated
logger.warning(__('duplicate object description of %s, '
'other instance in %s, use :noindex: for one of them'),
name, other.docname, location=location)
self.objects[name] = ObjectEntry(self.env.docname, node_id, objtype, aliased)
@property
def modules(self) -> Dict[str, ModuleEntry]:
return self.data.setdefault('modules', {}) # modname -> ModuleEntry
def note_module(self, name: str, node_id: str, synopsis: str,
platform: str, deprecated: bool) -> None:
"""Note a python module for cross reference.
.. versionadded:: 2.1
"""
self.modules[name] = ModuleEntry(self.env.docname, node_id,
synopsis, platform, deprecated)
def clear_doc(self, docname: str) -> None:
for fullname, obj in list(self.objects.items()):
if obj.docname == docname:
del self.objects[fullname]
for modname, mod in list(self.modules.items()):
if mod.docname == docname:
del self.modules[modname]
def merge_domaindata(self, docnames: List[str], otherdata: Dict) -> None:
# XXX check duplicates?
for fullname, obj in otherdata['objects'].items():
if obj.docname in docnames:
self.objects[fullname] = obj
for modname, mod in otherdata['modules'].items():
if mod.docname in docnames:
self.modules[modname] = mod
def find_obj(self, env: BuildEnvironment, modname: str, classname: str,
name: str, type: str, searchmode: int = 0
) -> List[Tuple[str, ObjectEntry]]:
"""Find a Python object for "name", perhaps using the given module
and/or classname. Returns a list of (name, object entry) tuples.
"""
# skip parens
if name[-2:] == '()':
name = name[:-2]
if not name:
return []
matches: List[Tuple[str, ObjectEntry]] = []
newname = None
if searchmode == 1:
if type is None:
objtypes = list(self.object_types)
else:
objtypes = self.objtypes_for_role(type)
if objtypes is not None:
if modname and classname:
fullname = modname + '.' + classname + '.' + name
if fullname in self.objects and self.objects[fullname].objtype in objtypes:
newname = fullname
if not newname:
if modname and modname + '.' + name in self.objects and \
self.objects[modname + '.' + name].objtype in objtypes:
newname = modname + '.' + name
elif name in self.objects and self.objects[name].objtype in objtypes:
newname = name
else:
# "fuzzy" searching mode
searchname = '.' + name
matches = [(oname, self.objects[oname]) for oname in self.objects
if oname.endswith(searchname) and
self.objects[oname].objtype in objtypes]
else:
# NOTE: searching for exact match, object type is not considered
if name in self.objects:
newname = name
elif type == 'mod':
# only exact matches allowed for modules
return []
elif classname and classname + '.' + name in self.objects:
newname = classname + '.' + name
elif modname and modname + '.' + name in self.objects:
newname = modname + '.' + name
elif modname and classname and \
modname + '.' + classname + '.' + name in self.objects:
newname = modname + '.' + classname + '.' + name
if newname is not None:
matches.append((newname, self.objects[newname]))
return matches
def resolve_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
type: str, target: str, node: pending_xref, contnode: Element
) -> Optional[Element]:
modname = node.get('py:module')
clsname = node.get('py:class')
searchmode = 1 if node.hasattr('refspecific') else 0
matches = self.find_obj(env, modname, clsname, target,
type, searchmode)
if not matches and type == 'attr':
# fallback to meth (for property; Sphinx-2.4.x)
# this ensures that `:attr:` role continues to refer to the old property entry
            # that was defined by the ``method`` directive in old reST files.
matches = self.find_obj(env, modname, clsname, target, 'meth', searchmode)
if not matches and type == 'meth':
# fallback to attr (for property)
# this ensures that `:meth:` in the old reST files can refer to the property
            # entry that was defined by the ``property`` directive.
#
# Note: _prop is a secret role only for internal look-up.
matches = self.find_obj(env, modname, clsname, target, '_prop', searchmode)
if not matches:
return None
elif len(matches) > 1:
canonicals = [m for m in matches if not m[1].aliased]
if len(canonicals) == 1:
matches = canonicals
else:
logger.warning(__('more than one target found for cross-reference %r: %s'),
target, ', '.join(match[0] for match in matches),
type='ref', subtype='python', location=node)
name, obj = matches[0]
if obj[2] == 'module':
return self._make_module_refnode(builder, fromdocname, name, contnode)
else:
# determine the content of the reference by conditions
content = find_pending_xref_condition(node, 'resolved')
if content:
children = content.children
else:
# if not found, use contnode
children = [contnode]
return make_refnode(builder, fromdocname, obj[0], obj[1], children, name)
def resolve_any_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
target: str, node: pending_xref, contnode: Element
) -> List[Tuple[str, Element]]:
modname = node.get('py:module')
clsname = node.get('py:class')
results: List[Tuple[str, Element]] = []
# always search in "refspecific" mode with the :any: role
matches = self.find_obj(env, modname, clsname, target, None, 1)
for name, obj in matches:
if obj[2] == 'module':
results.append(('py:mod',
self._make_module_refnode(builder, fromdocname,
name, contnode)))
else:
# determine the content of the reference by conditions
content = find_pending_xref_condition(node, 'resolved')
if content:
children = content.children
else:
# if not found, use contnode
children = [contnode]
results.append(('py:' + self.role_for_objtype(obj[2]),
make_refnode(builder, fromdocname, obj[0], obj[1],
children, name)))
return results
def _make_module_refnode(self, builder: Builder, fromdocname: str, name: str,
contnode: Node) -> Element:
# get additional info for modules
module = self.modules[name]
title = name
if module.synopsis:
title += ': ' + module.synopsis
if module.deprecated:
title += _(' (deprecated)')
if module.platform:
title += ' (' + module.platform + ')'
return make_refnode(builder, fromdocname, module.docname, module.node_id,
contnode, title)
def get_objects(self) -> Iterator[Tuple[str, str, str, str, str, int]]:
for modname, mod in self.modules.items():
yield (modname, modname, 'module', mod.docname, mod.node_id, 0)
for refname, obj in self.objects.items():
if obj.objtype != 'module': # modules are already handled
if obj.aliased:
# aliased names are not full-text searchable.
yield (refname, refname, obj.objtype, obj.docname, obj.node_id, -1)
else:
yield (refname, refname, obj.objtype, obj.docname, obj.node_id, 1)
def get_full_qualified_name(self, node: Element) -> Optional[str]:
modname = node.get('py:module')
clsname = node.get('py:class')
target = node.get('reftarget')
if target is None:
return None
else:
return '.'.join(filter(None, [modname, clsname, target]))
def builtin_resolver(app: Sphinx, env: BuildEnvironment,
node: pending_xref, contnode: Element) -> Element:
"""Do not emit nitpicky warnings for built-in types."""
def istyping(s: str) -> bool:
if s.startswith('typing.'):
s = s.split('.', 1)[1]
return s in typing.__all__ # type: ignore
if node.get('refdomain') != 'py':
return None
elif node.get('reftype') in ('class', 'obj') and node.get('reftarget') == 'None':
return contnode
elif node.get('reftype') in ('class', 'exc'):
reftarget = node.get('reftarget')
if inspect.isclass(getattr(builtins, reftarget, None)):
# built-in class
return contnode
elif istyping(reftarget):
# typing class
return contnode
return None
def setup(app: Sphinx) -> Dict[str, Any]:
app.setup_extension('sphinx.directives')
app.add_domain(PythonDomain)
app.add_config_value('python_use_unqualified_type_names', False, 'env')
app.connect('object-description-transform', filter_meta_fields)
app.connect('missing-reference', builtin_resolver, priority=900)
return {
'version': 'builtin',
'env_version': 3,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
|
the-stack_0_4926 | """
This file implements a helper class and functions for handling the structure of the given dataset.
"""
import json
import csv
from scipy.stats import zscore
from torch import Tensor
from torch.nn import ConstantPad2d
from torch.utils.data import Dataset, DataLoader
from collections import Counter
from feature_calculators import FeatureMeta
from features_processor import FeaturesProcessor, log_norm
from graph_features import GraphFeatures
from loggers import PrintLogger
from multi_graph import MultiGraph
from dataset.dataset_external_data import ExternalData
import os
import pandas as pd
import networkx as nx
import pickle
import numpy as np
from vertices.betweenness_centrality import BetweennessCentralityCalculator
from vertices.bfs_moments import BfsMomentsCalculator
from sklearn.preprocessing import MinMaxScaler
# some useful shorthand constants
PKL_DIR = "pkl"
NORM_REDUCED = "NORM_REDUCED"
NORM_REDUCED_SYMMETRIC = "NORM_REDUCED_SYMMETRIC"
IDENTITY = "IDENTITY"
RAW_FORM = "RAW_FORM"
DEG = "DEG"
IN_DEG = "IN_DEG"
OUT_DEG = "OUT_DEG"
CENTRALITY = ("betweenness_centrality", FeatureMeta(BetweennessCentralityCalculator, {"betweenness"}))
BFS = ("bfs_moments", FeatureMeta(BfsMomentsCalculator, {"bfs"}))
class GraphsDataset(Dataset):
def __init__(self, params, external_data: ExternalData = None):
# load the params file (json) in the "graphs_data" section.
self._params = params if type(params) is dict else json.load(open(params, "rt"))
self._dataset_name = self._params["dataset_name"]
self._params = self._params["graphs_data"]
self._logger = PrintLogger("logger")
# path to base directory
self._base_dir = __file__.replace("/", os.sep)
self._base_dir = os.path.join(self._base_dir.rsplit(os.sep, 1)[0], "..")
self._external_data = external_data
# init ftr_meta dictionary and other ftr attributes
self._init_ftrs()
self._src_file_path = os.path.join(self._params["file_path"])
self._multi_graph, self._labels, self._label_to_idx, self._idx_to_label = self._build_multi_graph()
self._data, self._idx_to_name = self._build_data()
@property
def all_labels(self):
return self._idx_to_label
@property
def label_count(self):
return Counter([v[3] for name, v in self._data.items()])
def label(self, idx):
return self._data[self._idx_to_name[idx]][3]
@property
def len_features(self):
return self._data[self._idx_to_name[0]][1].shape[1]
# Initialization of the requested features
def _init_ftrs(self):
self._deg, self._in_deg, self._out_deg, self._is_ftr, self._ftr_meta = False, False, False, False, {}
self._is_external_data = False if self._external_data is None else True
        # each entry in params["features"] names one of the constants above: either a plain
        # string (DEG / IN_DEG / OUT_DEG) or a (name, FeatureMeta) pair such as CENTRALITY or BFS
for ftr in self._params["features"]:
ftr = globals()[ftr]
if ftr == DEG:
self._deg = True
elif ftr == IN_DEG:
self._in_deg = True
elif ftr == OUT_DEG:
self._out_deg = True
else:
self._ftr_meta[ftr[0]] = ftr[1]
# add directories for pickles
if len(self._ftr_meta) > 0:
self._ftr_path = os.path.join(self._base_dir, PKL_DIR, "ftr", self._dataset_name)
if not os.path.exists(self._ftr_path):
os.mkdir(self._ftr_path)
            # if there are other features besides degrees, such as betweenness
self._is_ftr = True
"""
build multi graph according to csv
each community is a single graph, no consideration to time
"""
def _build_multi_graph(self):
        # percentage is the fraction of the edge list we take: percentage=1 means the whole graph is used,
        # percentage=0.6 means only the first 60% of the edges are used, and so on.
path_pkl = os.path.join(self._base_dir, PKL_DIR, self._dataset_name + "_split_" +
str(self._params["percentage"]) + "_mg.pkl")
        # if the pickle already exists, the multi-graph has already been built, so load it and return it
if os.path.exists(path_pkl):
return pickle.load(open(path_pkl, "rb"))
multi_graph_dict = {}
labels = {}
label_to_idx = {}
# open basic data csv (with all edges of all times)
data_df = pd.read_csv(self._src_file_path)
stop = data_df.shape[0] * self._params["percentage"]
for index, edge in data_df.iterrows():
if index > stop:
break
# write edge to dictionary
graph_id = str(edge[self._params["graph_col"]])
src = str(edge[self._params["src_col"]])
dst = str(edge[self._params["dst_col"]])
multi_graph_dict[graph_id] = multi_graph_dict.get(graph_id, []) + [(src, dst)]
label = edge[self._params["label_col"]]
label_to_idx[label] = len(label_to_idx) if label not in label_to_idx else label_to_idx[label]
labels[graph_id] = label_to_idx[label]
mg = MultiGraph(self._dataset_name, graphs_source=multi_graph_dict,
directed=self._params["directed"], logger=self._logger)
idx_to_label = [l for l in sorted(label_to_idx, key=lambda x: label_to_idx[x])]
mg.suspend_logger()
# make directories
os.makedirs(os.path.join(self._base_dir, PKL_DIR), exist_ok=True)
pickle.dump((mg, labels, label_to_idx, idx_to_label), open(path_pkl, "wb"))
mg.wake_logger()
return mg, labels, label_to_idx, idx_to_label
"""
returns a vector x for gnx
basic version returns degree for each node
"""
def _gnx_vec(self, gnx_id, gnx: nx.Graph, node_order):
# final vector that will have matrices of features
final_vec = []
# calculate degree for each node
if self._deg:
degrees = gnx.degree(gnx.nodes)
final_vec.append(np.matrix([np.log(degrees[d] + 1e-3) for d in node_order]).T)
# calculate in degree for each node
if self._in_deg:
degrees = gnx.in_degree(gnx.nodes)
final_vec.append(np.matrix([np.log(degrees[d] + 1e-3) for d in node_order]).T)
# calculate out degree for each node
if self._out_deg:
degrees = gnx.out_degree(gnx.nodes)
final_vec.append(np.matrix([np.log(degrees[d] + 1e-3) for d in node_order]).T)
# if external data is given, add its feature too
if self._is_external_data and self._external_data.is_continuous:
final_vec.append(np.matrix([self._external_data.continuous_feature(gnx_id, d) for d in node_order]))
        # if there are more features besides degrees and external ones, such as betweenness
if self._is_ftr:
name = str(gnx_id)
# create a path if it does not exist yet
gnx_dir_path = os.path.join(self._ftr_path, name)
if not os.path.exists(gnx_dir_path):
os.mkdir(gnx_dir_path)
            # GraphFeatures (from the "graph_features" package) computes the requested features
raw_ftr = GraphFeatures(gnx, self._ftr_meta, dir_path=gnx_dir_path, is_max_connected=False,
logger=PrintLogger("logger"))
raw_ftr.build(should_dump=True) # build features
final_vec.append(FeaturesProcessor(raw_ftr).as_matrix(norm_func=log_norm))
        # stack the list of feature matrices into a single matrix containing every feature
return np.hstack(final_vec)
# calculate degree matrix
def _degree_matrix(self, gnx, nodelist):
degrees = gnx.degree(gnx.nodes)
return np.diag([degrees[d] for d in nodelist])
    # standardize the data with z-score, min-max, etc.
def _standardize_data(self, data):
all_data_continuous_vec = [] # stack all vectors for all graphs
key_to_idx_map = [] # keep ordered list (g_id, num_nodes) according to stack order
# stack
for g_id, (A, gnx_vec, embed_vec, label) in data.items():
all_data_continuous_vec.append(gnx_vec)
key_to_idx_map.append((g_id, gnx_vec.shape[0])) # g_id, number of nodes ... ordered
all_data_continuous_vec = np.vstack(all_data_continuous_vec)
# z-score data
if self._params["standardization"] == "zscore":
standardized_data = zscore(all_data_continuous_vec, axis=0)
        # scale data (not implemented yet; raise instead of silently hitting a NameError below)
        elif self._params["standardization"] == "scale":
            raise NotImplementedError("'scale' standardization is not implemented")
# min-max data
elif self._params["standardization"] == "min_max":
scalar = MinMaxScaler()
standardized_data = scalar.fit_transform(all_data_continuous_vec)
# rebuild data to original form -> split stacked matrix according to <list: (g_id, num_nodes)>
new_data_dict = {}
start_idx = 0
for g_id, num_nodes in key_to_idx_map:
new_data_dict[g_id] = (data[g_id][0], standardized_data[start_idx: start_idx+num_nodes],
data[g_id][2], data[g_id][3])
start_idx += num_nodes
return new_data_dict
# For the GCN the adjacency matrix needs to be normalized
def _norm_adjacency(self, A, gnx, node_order):
if self._params["adjacency_norm"] == NORM_REDUCED:
# D^-0.5 A D^-0.5
D = self._degree_matrix(gnx, nodelist=node_order)
D_sqrt = np.matrix(np.sqrt(D))
adjacency = D_sqrt * np.matrix(A) * D_sqrt
elif self._params["adjacency_norm"] == NORM_REDUCED_SYMMETRIC:
# D^-0.5 [A + A.T + I] D^-0.5
D = self._degree_matrix(gnx, nodelist=node_order)
D_sqrt = np.matrix(np.sqrt(D))
adjacency = D_sqrt * np.matrix(A + A.T + np.identity(A.shape[0])) * D_sqrt
elif self._params["adjacency_norm"] == IDENTITY:
# identity matrix instead of adjacency matrix
adjacency = np.identity(A.shape[0])
elif self._params["adjacency_norm"] == RAW_FORM:
# don't do any normalization
adjacency = A
else:
print("Error in adjacency_norm: " + self._params["adjacency_norm"] + "is not a valid option")
exit(1)
return adjacency
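    # Illustrative sketch (not part of the original class): the symmetric normalization commonly
    # used for GCNs is D^-1/2 (A + I) D^-1/2 (Kipf & Welling); assuming a dense NumPy adjacency
    # array ``A``, a minimal version looks roughly like:
    #   A_hat = A + np.identity(A.shape[0])                 # add self-loops
    #   d_inv_sqrt = np.power(A_hat.sum(axis=1), -0.5)      # D^-1/2 diagonal entries
    #   d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.0              # guard isolated nodes
    #   A_norm = np.diag(d_inv_sqrt) @ A_hat @ np.diag(d_inv_sqrt)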
"""
builds a data dictionary
{ ... graph_name: ( A = Adjacency_matrix, x = graph_vec, label ) ... }
    We use all the above functions to finally build the whole data model.
"""
def _build_data(self):
ext_data_id = "None" if not self._is_external_data else "_embed_ftr_" + "_".join(self._external_data.embed_headers)\
+ "_continuous_ftr_" + "_".join(self._external_data.continuous_headers) \
+ "standardization_" + self._params["standardization"]
pkl_path = os.path.join(self._base_dir, PKL_DIR, self._dataset_name + ext_data_id + "_data.pkl")
if os.path.exists(pkl_path):
return pickle.load(open(pkl_path, "rb"))
data = {}
idx_to_name = []
for gnx_id, gnx in zip(self._multi_graph.graph_names(), self._multi_graph.graphs()):
# if gnx.number_of_nodes() < 5:
# continue
node_order = list(gnx.nodes)
idx_to_name.append(gnx_id)
adjacency = self._norm_adjacency(nx.adjacency_matrix(gnx, nodelist=node_order).todense(), gnx, node_order)
gnx_vec = self._gnx_vec(gnx_id, gnx, node_order)
embed_vec = [self._external_data.embed_feature(gnx_id, d) for d in node_order] \
if self._is_external_data and self._external_data.is_embed else None
data[gnx_id] = (adjacency, gnx_vec, embed_vec, self._labels[gnx_id])
data = self._standardize_data(data)
pickle.dump((data, idx_to_name), open(pkl_path, "wb"))
return data, idx_to_name
def collate_fn(self, batch):
lengths_sequences = []
        # find the maximum number of nodes over the graphs in the batch (used for padding)
for A, x, e, l in batch:
lengths_sequences.append(A.shape[0])
        # the max length is needed in order to pad the whole batch to a single dimension
seq_max_len = np.max(lengths_sequences)
# new batch variables
adjacency_batch = []
x_batch = []
embeddings_batch = []
labels_batch = []
for A, x, e, l in batch:
            # pad the adjacency and feature matrices up to the batch maximum
adjacency_pad = ConstantPad2d((0, seq_max_len - A.shape[0], 0, seq_max_len - A.shape[0]), 0)
adjacency_batch.append(adjacency_pad(A).tolist())
vec_pad = ConstantPad2d((0, 0, 0, seq_max_len - A.shape[0]), 0)
x_batch.append(vec_pad(x).tolist())
embeddings_batch.append(vec_pad(e).tolist() if self._is_external_data and self._external_data.is_embed else e)
labels_batch.append(l)
return Tensor(adjacency_batch), Tensor(x_batch), Tensor(embeddings_batch).long(), Tensor(labels_batch).long()
def __getitem__(self, index):
gnx_id = self._idx_to_name[index]
A, x, embed, label = self._data[gnx_id]
embed = 0 if embed is None else Tensor(embed).long()
return Tensor(A), Tensor(x), embed, label
def __len__(self):
return len(self._idx_to_name)
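if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the original project).
    # "data/edges.csv" and the column names below are placeholders for a real edge-list CSV.
    example_params = {
        "dataset_name": "example_dataset",
        "graphs_data": {
            "file_path": "data/edges.csv",
            "graph_col": "g_id", "src_col": "src", "dst_col": "dst", "label_col": "label",
            "directed": False,
            "percentage": 1,
            "features": ["DEG"],                     # names of the feature constants defined above
            "standardization": "zscore",             # or "min_max"
            "adjacency_norm": "NORM_REDUCED_SYMMETRIC",
        },
    }
    dataset = GraphsDataset(example_params, external_data=None)
    loader = DataLoader(dataset, batch_size=32, shuffle=True, collate_fn=dataset.collate_fn)
    print("graphs:", len(dataset), "feature dimension:", dataset.len_features)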
|
the-stack_0_4927 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import functools
import hashlib
import time
import warnings
from datetime import timedelta
from typing import Any, Callable, Iterable, Optional, Union
from airflow import settings
from airflow.configuration import conf
from airflow.exceptions import (
AirflowException,
AirflowRescheduleException,
AirflowSensorTimeout,
AirflowSkipException,
)
from airflow.models.baseoperator import BaseOperator
from airflow.models.sensorinstance import SensorInstance
from airflow.models.skipmixin import SkipMixin
from airflow.models.taskreschedule import TaskReschedule
from airflow.ti_deps.deps.ready_to_reschedule import ReadyToRescheduleDep
from airflow.utils import timezone
from airflow.utils.context import Context
# We need to keep the import here because GCSToLocalFilesystemOperator released in
# Google Provider before 3.0.0 imported apply_defaults from here.
# See https://github.com/apache/airflow/issues/16035
from airflow.utils.decorators import apply_defaults # noqa: F401
from airflow.utils.docs import get_docs_url
# As documented in https://dev.mysql.com/doc/refman/5.7/en/datetime.html.
_MYSQL_TIMESTAMP_MAX = datetime.datetime(2038, 1, 19, 3, 14, 7, tzinfo=timezone.utc)
@functools.lru_cache(maxsize=None)
def _is_metadatabase_mysql() -> bool:
if settings.engine is None:
raise AirflowException("Must initialize ORM first")
return settings.engine.url.get_backend_name() == "mysql"
class PokeReturnValue:
"""
Sensors can optionally return an instance of the PokeReturnValue class in the poke method.
If an XCom value is supplied when the sensor is done, then the XCom value will be
pushed through the operator return value.
:param is_done: Set to true to indicate the sensor can stop poking.
:param xcom_value: An optional XCOM value to be returned by the operator.
"""
def __init__(self, is_done: bool, xcom_value: Optional[Any] = None) -> None:
self.xcom_value = xcom_value
self.is_done = is_done
def __bool__(self) -> bool:
return self.is_done
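# Illustrative usage sketch (comment only, not part of the original module): a sensor's
# ``poke`` may return a ``PokeReturnValue`` so that, once the sensor succeeds, the wrapped
# value is returned from ``execute`` and pushed to XCom, e.g. roughly:
#   class WaitForRecord(BaseSensorOperator):
#       def poke(self, context):
#           record = fetch_record()      # hypothetical helper, not a real Airflow API
#           return PokeReturnValue(is_done=record is not None, xcom_value=record)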
class BaseSensorOperator(BaseOperator, SkipMixin):
"""
Sensor operators are derived from this class and inherit these attributes.
Sensor operators keep executing at a time interval and succeed when
    a criterion is met, and fail if and when they time out.
:param soft_fail: Set to true to mark the task as SKIPPED on failure
:param poke_interval: Time in seconds that the job should wait in
between each tries
:param timeout: Time, in seconds before the task times out and fails.
:param mode: How the sensor operates.
Options are: ``{ poke | reschedule }``, default is ``poke``.
When set to ``poke`` the sensor is taking up a worker slot for its
whole execution time and sleeps between pokes. Use this mode if the
expected runtime of the sensor is short or if a short poke interval
is required. Note that the sensor will hold onto a worker slot and
a pool slot for the duration of the sensor's runtime in this mode.
When set to ``reschedule`` the sensor task frees the worker slot when
the criteria is not yet met and it's rescheduled at a later time. Use
this mode if the time before the criteria is met is expected to be
quite long. The poke interval should be more than one minute to
prevent too much load on the scheduler.
:param exponential_backoff: allow progressive longer waits between
pokes by using exponential backoff algorithm
"""
ui_color = '#e6f1f2' # type: str
valid_modes = ['poke', 'reschedule'] # type: Iterable[str]
# As the poke context in smart sensor defines the poking job signature only,
# The execution_fields defines other execution details
# for this tasks such as the customer defined timeout, the email and the alert
# setup. Smart sensor serialize these attributes into a different DB column so
# that smart sensor service is able to handle corresponding execution details
# without breaking the sensor poking logic with dedup.
execution_fields = (
'poke_interval',
'retries',
'execution_timeout',
'timeout',
'email',
'email_on_retry',
'email_on_failure',
)
# Adds one additional dependency for all sensor operators that checks if a
# sensor task instance can be rescheduled.
deps = BaseOperator.deps | {ReadyToRescheduleDep()}
def __init__(
self,
*,
poke_interval: float = 60,
timeout: float = conf.getfloat('sensors', 'default_timeout'),
soft_fail: bool = False,
mode: str = 'poke',
exponential_backoff: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.poke_interval = poke_interval
self.soft_fail = soft_fail
self.timeout = timeout
self.mode = mode
self.exponential_backoff = exponential_backoff
self._validate_input_values()
self.sensor_service_enabled = conf.getboolean('smart_sensor', 'use_smart_sensor')
self.sensors_support_sensor_service = set(
map(lambda l: l.strip(), conf.get('smart_sensor', 'sensors_enabled').split(','))
)
def _validate_input_values(self) -> None:
if not isinstance(self.poke_interval, (int, float)) or self.poke_interval < 0:
raise AirflowException("The poke_interval must be a non-negative number")
if not isinstance(self.timeout, (int, float)) or self.timeout < 0:
raise AirflowException("The timeout must be a non-negative number")
if self.mode not in self.valid_modes:
raise AirflowException(
f"The mode must be one of {self.valid_modes},'{self.dag.dag_id if self.has_dag() else ''} "
f".{self.task_id}'; received '{self.mode}'."
)
# Sanity check for poke_interval isn't immediately over MySQL's TIMESTAMP limit.
# This check is only rudimentary to catch trivial user errors, e.g. mistakenly
# set the value to milliseconds instead of seconds. There's another check when
# we actually try to reschedule to ensure database sanity.
if self.reschedule and _is_metadatabase_mysql():
if timezone.utcnow() + datetime.timedelta(seconds=self.poke_interval) > _MYSQL_TIMESTAMP_MAX:
raise AirflowException(
f"Cannot set poke_interval to {self.poke_interval} seconds in reschedule "
f"mode since it will take reschedule time over MySQL's TIMESTAMP limit."
)
def poke(self, context: Context) -> Union[bool, PokeReturnValue]:
"""
Function that the sensors defined while deriving this class should
override.
"""
raise AirflowException('Override me.')
def is_smart_sensor_compatible(self):
check_list = [
not self.sensor_service_enabled,
self.on_success_callback,
self.on_retry_callback,
self.on_failure_callback,
]
if any(check_list):
return False
operator = self.__class__.__name__
return operator in self.sensors_support_sensor_service
def register_in_sensor_service(self, ti, context):
"""
Register ti in smart sensor service
:param ti: Task instance object.
:param context: TaskInstance template context from the ti.
:return: boolean
"""
docs_url = get_docs_url('concepts/smart-sensors.html#migrating-to-deferrable-operators')
warnings.warn(
'Your sensor is using Smart Sensors, which are deprecated.'
f' Please use Deferrable Operators instead. See {docs_url} for more info.',
DeprecationWarning,
)
poke_context = self.get_poke_context(context)
execution_context = self.get_execution_context(context)
return SensorInstance.register(ti, poke_context, execution_context)
def get_poke_context(self, context):
"""
Return a dictionary with all attributes in poke_context_fields. The
poke_context with operator class can be used to identify a unique
sensor job.
:param context: TaskInstance template context.
:return: A dictionary with key in poke_context_fields.
"""
if not context:
self.log.info("Function get_poke_context doesn't have a context input.")
poke_context_fields = getattr(self.__class__, "poke_context_fields", None)
result = {key: getattr(self, key, None) for key in poke_context_fields}
return result
def get_execution_context(self, context):
"""
Return a dictionary with all attributes in execution_fields. The
execution_context include execution requirement for each sensor task
such as timeout setup, email_alert setup.
:param context: TaskInstance template context.
:return: A dictionary with key in execution_fields.
"""
if not context:
self.log.info("Function get_execution_context doesn't have a context input.")
execution_fields = self.__class__.execution_fields
result = {key: getattr(self, key, None) for key in execution_fields}
if result['execution_timeout'] and isinstance(result['execution_timeout'], datetime.timedelta):
result['execution_timeout'] = result['execution_timeout'].total_seconds()
return result
def execute(self, context: Context) -> Any:
started_at: Union[datetime.datetime, float]
if self.reschedule:
# If reschedule, use the start date of the first try (first try can be either the very
# first execution of the task, or the first execution after the task was cleared.)
first_try_number = context['ti'].max_tries - self.retries + 1
task_reschedules = TaskReschedule.find_for_task_instance(
context['ti'], try_number=first_try_number
)
if not task_reschedules:
start_date = timezone.utcnow()
else:
start_date = task_reschedules[0].start_date
started_at = start_date
def run_duration() -> float:
# If we are in reschedule mode, then we have to compute diff
# based on the time in a DB, so can't use time.monotonic
return (timezone.utcnow() - start_date).total_seconds()
else:
started_at = start_monotonic = time.monotonic()
def run_duration() -> float:
return time.monotonic() - start_monotonic
try_number = 1
log_dag_id = self.dag.dag_id if self.has_dag() else ""
xcom_value = None
while True:
poke_return = self.poke(context)
if poke_return:
if isinstance(poke_return, PokeReturnValue):
xcom_value = poke_return.xcom_value
break
if run_duration() > self.timeout:
# If sensor is in soft fail mode but times out raise AirflowSkipException.
if self.soft_fail:
raise AirflowSkipException(f"Snap. Time is OUT. DAG id: {log_dag_id}")
else:
raise AirflowSensorTimeout(f"Snap. Time is OUT. DAG id: {log_dag_id}")
if self.reschedule:
next_poke_interval = self._get_next_poke_interval(started_at, run_duration, try_number)
reschedule_date = timezone.utcnow() + timedelta(seconds=next_poke_interval)
if _is_metadatabase_mysql() and reschedule_date > _MYSQL_TIMESTAMP_MAX:
raise AirflowSensorTimeout(
f"Cannot reschedule DAG {log_dag_id} to {reschedule_date.isoformat()} "
f"since it is over MySQL's TIMESTAMP storage limit."
)
raise AirflowRescheduleException(reschedule_date)
else:
time.sleep(self._get_next_poke_interval(started_at, run_duration, try_number))
try_number += 1
self.log.info("Success criteria met. Exiting.")
return xcom_value
def _get_next_poke_interval(
self,
started_at: Union[datetime.datetime, float],
run_duration: Callable[[], float],
try_number: int,
) -> float:
"""Using the similar logic which is used for exponential backoff retry delay for operators."""
if not self.exponential_backoff:
return self.poke_interval
min_backoff = int(self.poke_interval * (2 ** (try_number - 2)))
run_hash = int(
hashlib.sha1(f"{self.dag_id}#{self.task_id}#{started_at}#{try_number}".encode()).hexdigest(),
16,
)
modded_hash = min_backoff + run_hash % min_backoff
delay_backoff_in_seconds = min(modded_hash, timedelta.max.total_seconds() - 1)
new_interval = min(self.timeout - int(run_duration()), delay_backoff_in_seconds)
self.log.info("new %s interval is %s", self.mode, new_interval)
return new_interval
def prepare_for_execution(self) -> BaseOperator:
task = super().prepare_for_execution()
# Sensors in `poke` mode can block execution of DAGs when running
# with single process executor, thus we change the mode to`reschedule`
# to allow parallel task being scheduled and executed
if conf.get('core', 'executor') == "DebugExecutor":
self.log.warning("DebugExecutor changes sensor mode to 'reschedule'.")
task.mode = 'reschedule'
return task
@property
def reschedule(self):
"""Define mode rescheduled sensors."""
return self.mode == 'reschedule'
def poke_mode_only(cls):
"""
Class Decorator for child classes of BaseSensorOperator to indicate
that instances of this class are only safe to use poke mode.
Will decorate all methods in the class to assert they did not change
the mode from 'poke'.
:param cls: BaseSensor class to enforce methods only use 'poke' mode.
"""
def decorate(cls_type):
def mode_getter(_):
return 'poke'
def mode_setter(_, value):
if value != 'poke':
                raise ValueError(f"Cannot set mode to '{value}'. Only 'poke' is acceptable.")
if not issubclass(cls_type, BaseSensorOperator):
raise ValueError(
f"poke_mode_only decorator should only be "
f"applied to subclasses of BaseSensorOperator,"
f" got:{cls_type}."
)
cls_type.mode = property(mode_getter, mode_setter)
return cls_type
return decorate(cls)
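# --- Illustrative sketch, not part of the original module ---
# A minimal sensor built on the pieces above: poke() returns True once an
# (assumed) marker file exists, and poke_mode_only pins the mode to 'poke'.
# The class name and file path are invented for illustration only.
import os
@poke_mode_only
class _ExampleFileSensor(BaseSensorOperator):
    poke_context_fields = ('filepath',)
    def __init__(self, filepath: str = '/tmp/example_marker', **kwargs) -> None:
        super().__init__(**kwargs)
        self.filepath = filepath
    def poke(self, context: Context) -> bool:
        # Re-poked every poke_interval seconds until the marker appears.
        return os.path.exists(self.filepath)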
|
the-stack_0_4929 | from os.path import join
from os.path import exists
import torch
import argparse
import os
import torch.nn.functional as F
import models
from evaluation.PerceptualSimilarity.models import PerceptualLoss
from evaluation.PerceptualSimilarity.util import util
import glob
import pickle
import numpy as np
def plot_vid(vids, boxes_gt=None, boxes_pred=None):
vids = vids.cpu().numpy()
vids = np.transpose(vids, [0, 2, 3, 1])
output_imgs = []
for i in range(0, vids.shape[0], 1):
img = np.clip((vids[i] * np.array([0.229, 0.224, 0.225]) + np.array([0.485, 0.456, 0.406])) * 255, 0,
255).astype('uint8').copy()
normalized_img = util.im2tensor(img) # RGB image from [-1,1]
# normalized_img = F.interpolate(normalized_img, size=64)
output_imgs.append(normalized_img)
return torch.cat(output_imgs)
def get_video_from_pkl(ff):
video_tensor = ff['image']
# Remove the first batch dim if exits
if len(video_tensor.size()) == 5:
video_tensor = video_tensor.squeeze()
video = plot_vid(video_tensor)
return video
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d0', '--dir0', type=str, default='./imgs/ex_dir0')
parser.add_argument('-d1', '--dir1', type=str, default='./imgs/ex_dir1')
parser.add_argument('-o', '--out', type=str, default='./imgs/example_dists.txt')
parser.add_argument('--use_gpu', action='store_true', help='turn on flag to use GPU')
opt = parser.parse_args()
## Initializing the model
model = PerceptualLoss(model='net-lin', net='alex', use_gpu=opt.use_gpu)
# crawl directories
files = glob.glob(opt.dir0 + '/*.pkl')
videos = [os.path.basename(fl) for fl in files]
res = []
for vid in videos:
if exists(join(opt.dir1, vid)):
# Load pickles
f0 = pickle.load(open(join(opt.dir0, vid), 'rb'))
f1 = pickle.load(open(join(opt.dir1, vid), 'rb'))
img0 = get_video_from_pkl(f0) # RGB images from [-1,1]
img1 = get_video_from_pkl(f1)
# Load images
# img0 = util.im2tensor(util.load_image(os.path.join(opt.dir0, folder, file)))
# img1 = util.im2tensor(util.load_image(os.path.join(opt.dir1, folder, file)))
if (opt.use_gpu):
img0 = img0.cuda()
img1 = img1.cuda()
# Compute distance
dist01 = model.forward(img0, img1)
# print('%s: %.3f' % (file, dist01))
res.append(dist01.mean())
# Save
np.save(opt.out, torch.stack(res).data.cpu().numpy())
mean = torch.mean(torch.stack(res))
std = torch.std(torch.stack(res))
print("Diversity: {}±{}".format(mean, std))
|
the-stack_0_4931 | """
Define the NonlinearRunOnce class.
This is a simple nonlinear solver that just runs the system once.
"""
from openmdao.recorders.recording_iteration_stack import Recording
from openmdao.solvers.solver import NonlinearSolver
from openmdao.utils.general_utils import warn_deprecation
from openmdao.utils.mpi import multi_proc_fail_check
class NonlinearRunOnce(NonlinearSolver):
"""
Simple solver that runs the containing system once.
This is done without iteration or norm calculation.
"""
SOLVER = 'NL: RUNONCE'
def solve(self):
"""
Run the solver.
"""
system = self._system
with Recording('NLRunOnce', 0, self) as rec:
# If this is a parallel group, transfer all at once then run each subsystem.
if len(system._subsystems_myproc) != len(system._subsystems_allprocs):
system._transfer('nonlinear', 'fwd')
with multi_proc_fail_check(system.comm):
for subsys in system._subsystems_myproc:
subsys._solve_nonlinear()
system._check_child_reconf()
# If this is not a parallel group, transfer for each subsystem just prior to running it.
else:
self._gs_iter()
rec.abs = 0.0
rec.rel = 0.0
def _declare_options(self):
"""
Declare options before kwargs are processed in the init method.
"""
# Remove unused options from base options here, so that users
# attempting to set them will get KeyErrors.
self.options.undeclare("atol")
self.options.undeclare("rtol")
# this solver does not iterate
self.options.undeclare("maxiter")
self.options.undeclare("err_on_maxiter") # Deprecated option.
self.options.undeclare("err_on_non_converge")
class NonLinearRunOnce(NonlinearRunOnce):
"""
Deprecated. See NonlinearRunOnce.
"""
def __init__(self, *args, **kwargs):
"""
Deprecated.
Parameters
----------
*args : list of object
Positional args.
**kwargs : dict
Named args.
"""
super(NonLinearRunOnce, self).__init__(*args, **kwargs)
warn_deprecation('NonLinearRunOnce is deprecated. Use NonlinearRunOnce instead.')
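# --- Illustrative sketch, not part of the original module ---
# Typical attachment point for this solver; kept as a comment to avoid importing
# openmdao.api from inside the package itself. Component and variable names are invented.
#
#     import openmdao.api as om
#     prob = om.Problem()
#     prob.model.add_subsystem('comp', om.ExecComp('y = 2.0 * x'))
#     prob.model.nonlinear_solver = NonlinearRunOnce()
#     prob.setup()
#     prob.run_model()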
|
the-stack_0_4932 | # -*- coding: utf-8 -*-
#
# txThings asyncio branch documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 4 09:40:16 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# maybe required for readthedocs
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'aiocoap_index',
'sphinxarg.ext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'aiocoap'
copyright = u'2014, Maciej Wasilak, Christian Amsüss'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4'
# The full version, including alpha/beta/rc tags.
release = '0.4b2.post0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logo.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logo-square.svg'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
# it's in most cases just autodoc text
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'aiocoap'
autodoc_member_order = 'bysource'
man_pages = [
('module/aiocoap.cli.client', 'aiocoap-client', 'query CoAP servers from the command line', '', 1),
('module/aiocoap.cli.proxy', 'aiocoap-proxy', 'forward and reverse proxy server for CoAP', '', 1),
('module/aiocoap.cli.rd', 'aiocoap-rd', 'Resource Directory server', '', 1),
('module/aiocoap.cli.fileserver', 'aiocoap-fileserver', 'File server for CoAP', '', 1),
]
|
the-stack_0_4934 | import tempfile
import time
import os
import os.path
from ovos_utils.log import LOG
def get_ipc_directory(domain=None, config=None):
"""Get the directory used for Inter Process Communication
Files in this folder can be accessed by different processes on the
machine. Useful for communication. This is often a small RAM disk.
Args:
domain (str): The IPC domain. Basically a subdirectory to prevent
overlapping signal filenames.
config (dict): mycroft.conf, to read ipc directory from
Returns:
str: a path to the IPC directory
"""
if config is None:
from ovos_utils.configuration import read_mycroft_config
config = read_mycroft_config()
path = config.get("ipc_path")
if not path:
# If not defined, use /tmp/mycroft/ipc
path = os.path.join(tempfile.gettempdir(), "mycroft", "ipc")
return ensure_directory_exists(path, domain)
def ensure_directory_exists(directory, domain=None):
""" Create a directory and give access rights to all
Args:
domain (str): The IPC domain. Basically a subdirectory to prevent
overlapping signal filenames.
Returns:
str: a path to the directory
"""
if domain:
directory = os.path.join(directory, domain)
# Expand and normalize the path
directory = os.path.normpath(directory)
directory = os.path.expanduser(directory)
if not os.path.isdir(directory):
try:
save = os.umask(0)
os.makedirs(directory, 0o777) # give everyone rights to r/w here
except OSError:
LOG.warning("Failed to create: " + directory)
pass
finally:
os.umask(save)
return directory
def create_file(filename):
""" Create the file filename and create any directories needed
Args:
filename: Path to the file to be created
"""
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
with open(filename, 'w') as f:
f.write('')
def create_signal(signal_name, config=None):
"""Create a named signal
Args:
signal_name (str): The signal's name. Must only contain characters
valid in filenames.
config (dict): mycroft.conf, to read ipc directory from
"""
try:
path = os.path.join(get_ipc_directory(config=config),
"signal", signal_name)
create_file(path)
return os.path.isfile(path)
except IOError:
return False
def check_for_signal(signal_name, sec_lifetime=0, config=None):
"""See if a named signal exists
Args:
signal_name (str): The signal's name. Must only contain characters
valid in filenames.
sec_lifetime (int, optional): How many seconds the signal should
remain valid. If 0 or not specified, it is a single-use signal.
If -1, it never expires.
config (dict): mycroft.conf, to read ipc directory from
Returns:
bool: True if the signal is defined, False otherwise
"""
path = os.path.join(get_ipc_directory(config=config),
"signal", signal_name)
if os.path.isfile(path):
if sec_lifetime == 0:
# consume this single-use signal
os.remove(path)
elif sec_lifetime == -1:
return True
elif int(os.path.getctime(path) + sec_lifetime) < int(time.time()):
# remove once expired
os.remove(path)
return False
return True
# No such signal exists
return False
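# --- Illustrative sketch, not part of the original module ---
# Round trip of the helpers above; "example.signal" is an arbitrary name, and with the
# default single-use behaviour the first successful check consumes the signal.
if __name__ == "__main__":
    if create_signal("example.signal"):
        print(check_for_signal("example.signal"))  # True, and consumes the signal
        print(check_for_signal("example.signal"))  # False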
|
the-stack_0_4935 | """
Author: Zeliha Ural Merpez
Date: March,13 2021
"""
import requests
import json
import pandas as pd
from bs4 import BeautifulSoup
import altair as alt
import numpy as np
from math import sin, cos, sqrt, atan2, radians
import matplotlib.pyplot as plt
def get_keys(path):
with open(path) as f:
return json.load(f)
def get_address_google(name_list, API_Key, city_list=0):
placesAPI_data = pd.DataFrame(
columns=["name", "formatted_address", "geometry", "permanently_closed"]
) # initialize dataframe
if isinstance(name_list, str):
name_list = [name_list]
for i in range(len(name_list)):
if city_list == 0:
city = ""
else:
city = city_list[i]
name = (
name_list[i].replace(" ", "%20").replace("&", "%26")
) # make sure there are no blank spaces for the URL and deal with &
b = "!@#$()"
for char in b:
name = name.replace(char, "")
address_search = name + ",%20" + city
url = (
"https://maps.googleapis.com/maps/api/place/findplacefromtext/json?input="
+ address_search
+ "&inputtype=textquery&fields=name,formatted_address,geometry,permanently_closed&key="
+ API_Key
)
response = requests.get(url).json()
placesAPI_data = pd.concat(
[placesAPI_data, pd.DataFrame(response["candidates"])],
ignore_index=True,
sort=False,
) # append retrieved information to a dataframe
google_data = placesAPI_data
lat_list = []
lng_list = []
for i in range(google_data.shape[0]):
lat_list.append(google_data["geometry"][i]["location"]["lat"])
lng_list.append(google_data["geometry"][i]["location"]["lng"])
google_data["lat"] = lat_list
google_data["lng"] = lng_list
return google_data
def format_coordinate(dataframe, drop=False):
df = dataframe
if drop:
df = df.dropna(subset=["Geom"])
df[["drop_this", "location"]] = df.Geom.str.split("[", expand=True)
df[["lng", "lat"]] = df.location.str.split(",", expand=True)
df["lat"] = df.lat.str.replace("]}", "")
df = df.drop(columns=["drop_this", "location"])
return df
def get_distance_by_coordinate(formatted_business, name, API_Key):
    google_data = get_address_google(name, API_Key)
    filter_name = formatted_business[formatted_business["BusinessName"] == name]
    if filter_name.shape[0] == 0:
        warn = (
            "No geometric information provided for "
            + name
            + " in Business Licences data."
        )
        return warn, 5000, name
    lat = float(filter_name[["lat"]].iloc[0])
    lng = float(filter_name[["lng"]].iloc[0])
    if google_data.shape[0] == 0:
        warn = "Could not find information about " + name + " on Google maps."
        return warn, 5000, name
    google_name = google_data["name"][0]
    google_lat = google_data["lat"][0]
    google_lng = google_data["lng"][0]
    warn = "Giving distance between geometric information obtained."
    # Haversine distance between the licence coordinates and the Google Maps result
    dlon = radians(lng) - radians(google_lng)
    dlat = radians(lat) - radians(google_lat)
    a = (sin(dlat / 2)) ** 2 + cos(radians(lat)) * cos(radians(google_lat)) * (
        sin(dlon / 2)
    ) ** 2
    c = 2 * atan2(sqrt(a), sqrt(1 - a))
    R = 6373.0
    distance = R * c
    return warn, distance, google_name
def get_comparison_dataframe(funerals, API_Key):
name_list = list(funerals["BusinessName"])
distance_list = []
warn_list = []
google_name_list = []
for i in range(len(name_list)):
warn, distance, google_name = get_distance_by_coordinate(
funerals, name=name_list[i], API_Key = API_Key
)
distance_list.append(distance)
warn_list.append(warn)
google_name_list.append(google_name)
distance_data = pd.DataFrame(
{
"Name": name_list,
"Google Name": google_name_list,
"Distance(km)": distance_list,
"Warning": warn_list,
}
)
return distance_data
def google_map_figure(output_data):
chart = alt.Chart(output_data, title = "Comparing Distance Between locations of Businesses (Licence vs GoogleMaps)").mark_circle(size = 50).encode(
x = alt.X('Distance(km)'),
y = alt.Y('Name',axis=alt.Axis(title=" ")),
color = alt.Color('Google Name'),
tooltip = 'Google Name'
)
return chart
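# --- Illustrative sketch, not part of the original script ---
# End-to-end flow of the helpers above, kept as a comment; the key file, CSV name
# and its BusinessName/Geom columns are assumptions for illustration only.
#
#     api_key = get_keys("credentials.json")["api_key"]
#     licences = pd.read_csv("business_licences.csv")
#     funerals = format_coordinate(licences, drop=True)
#     distance_data = get_comparison_dataframe(funerals, api_key)
#     google_map_figure(distance_data).save("distance_comparison.html")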
def fancy_table(data, col_width=3.0, row_height=0.625, row_colors=['#f1f1f2', 'w'],
header_columns=0, ax=None, **kwargs):
"""[Modified from ref: https://stackoverflow.com/questions/19726663/how-to-save-the-pandas-dataframe-series-data-as-a-figure]
[Prints given dataframe in a nice format, that is easy to save]
Parameters
----------
data : [data frame]
[data frame]
col_width : float, optional
[column width], by default 3.0
row_height : float, optional
[row height], by default 0.625
row_colors : list, optional
[row color], by default ['#f1f1f2', 'w']
header_columns : int, optional
[header columns], by default 0
ax : [type], optional
        [plotting table], by default None
Returns
-------
[object]
[figure]
"""
if ax is None:
size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])
fig, ax = plt.subplots(figsize=size)
ax.axis('off')
mpl_table = ax.table(cellText=data.values, bbox=[0, 0, 1, 1], colLabels=data.columns, **kwargs)
mpl_table.auto_set_font_size(False)
mpl_table.set_fontsize(14)
for k, cell in mpl_table._cells.items():
cell.set_edgecolor('w')
if k[0] == 0 or k[1] < header_columns:
cell.set_text_props(weight='bold', color='w')
cell.set_facecolor('firebrick')
else:
cell.set_facecolor(row_colors[k[0]%len(row_colors) ])
return ax.get_figure(), ax
def generate_dataset_overview(data_frame):
"""
Generates an overview of the dataset.
Also saves resulting table as file in given output folder.
Parameters:
-----------
data_frame : pandas.DataFrame
input path to be verified
output_folder : str
output folder path to save the chart
file_name : str
file name for generated chart image
Returns:
-----------
None
"""
data_overview = [
{"Dataset": "Number of features", "Value": len(data_frame.columns)},
{"Dataset": "Number of characters", "Value": len(data_frame)},
{"Dataset": "Number of Missing cells", "Value": (data_frame.isnull()).sum().sum()},
{"Dataset": "Percentage of Missing cells", "Value": round((data_frame.isnull()).sum().sum()/data_frame.size*100, 2)}
]
overview_frame = pd.DataFrame(data_overview)
return overview_frame
def generate_feature_overview(data_frame):
"""
Generates an overview of the features in dataset.
Also saves resulting table as file in given output folder.
Parameters:
-----------
data_frame : pandas.DataFrame
input path to be verified
output_folder : str
output folder path to save the chart
file_name : str
file name for generated chart image
Returns:
-----------
None
"""
distinct_class = dict()
nonnull_count = dict()
for col in data_frame.columns:
nonnull_count[col]=len(data_frame)-data_frame[col].isnull().sum()
distinct_class[col]=len(list(data_frame[col].unique()))
features_frame=pd.DataFrame([distinct_class, nonnull_count]).T.reset_index()
features_frame.columns=["Features","Distinct Class", "Non-Null Count"]
features_frame["Missing Percentage"]=round((len(data_frame) - features_frame["Non-Null Count"])/len(data_frame)*100,2)
return features_frame
|
the-stack_0_4937 | # Load selenium components
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait, Select
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import time
def course_desc():
# Establish chrome driver and go to report site URL
url = "https://enr-apps.as.cmu.edu/open/SOC/SOCServlet/search"
driver = webdriver.Chrome()
driver.maximize_window()
driver.get(url)
count = 0
driver.find_element_by_xpath("/html/body/div/div[2]/form/div[3]/div/div/button[1]").click()
tables = driver.find_elements_by_id("search-results-table")
input_path = './data/temp/course_description_1.txt'
f = open(input_path,"a+")
# Crawl course description through full x_path matching
for tab_num in range(2,len(tables)):
courses = tables[tab_num].find_elements_by_tag_name('tr')
for i in range(1,len(courses)):
path = "/html/body/div/div[2]/table["+str(tab_num+1)+"]/tbody/tr["+str(i)+"]/td[1]/a"
try:
handler = driver.find_element_by_xpath(path)
cID = handler.text
driver.execute_script("arguments[0].scrollIntoView();", handler)
handler.click()
# If the row is a subrow of a specific course, skip it
except:
continue
# Wait for the website to response
time.sleep(3)
description = driver.find_element_by_class_name("text-left").text
f.write(cID+":"+description)
f.write('\n')
driver.find_element_by_class_name("close").click()
f.close()
|
the-stack_0_4938 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals
import unittest
import os
from pymatgen.io.feff.outputs import LDos, Xmu
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
test_dir_reci = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files', 'feff_reci_dos')
class FeffLdosTest(unittest.TestCase):
filepath1 = os.path.join(test_dir, 'feff.inp')
filepath2 = os.path.join(test_dir, 'ldos')
l = LDos.from_file(filepath1, filepath2)
reci_feffinp = os.path.join(test_dir_reci, 'feff.inp')
reci_ldos = os.path.join(test_dir_reci, 'ldos')
reci_dos = LDos.from_file(reci_feffinp, reci_ldos)
def test_init(self):
efermi = FeffLdosTest.l.complete_dos.efermi
self.assertEqual(efermi, -11.430,
"Did not read correct Fermi energy from ldos file")
def test_complete_dos(self):
complete_dos = FeffLdosTest.l.complete_dos
self.assertEqual(complete_dos.as_dict()['spd_dos']["s"]['efermi'],
- 11.430,
"Failed to construct complete_dos dict properly")
def test_as_dict_and_from_dict(self):
l2 = FeffLdosTest.l.charge_transfer_to_string()
d = FeffLdosTest.l.as_dict()
l3 = LDos.from_dict(d).charge_transfer_to_string()
self.assertEqual(l2, l3, "Feffldos to and from dict does not match")
def test_reci_init(self):
efermi = FeffLdosTest.reci_dos.complete_dos.efermi
self.assertEqual(efermi, -9.672,
"Did not read correct Fermi energy from ldos file")
def test_reci_complete_dos(self):
complete_dos = FeffLdosTest.reci_dos.complete_dos
self.assertEqual(complete_dos.as_dict()['spd_dos']["s"]['efermi'],
-9.672,
"Failed to construct complete_dos dict properly")
def test_reci_charge(self):
charge_trans = FeffLdosTest.reci_dos.charge_transfer
self.assertEqual(charge_trans['0']['Na']['s'], 0.241)
self.assertEqual(charge_trans['1']['O']['tot'], -0.594)
class XmuTest(unittest.TestCase):
def test_init(self):
filepath1 = os.path.join(test_dir, 'xmu.dat')
filepath2 = os.path.join(test_dir, 'feff.inp')
x = Xmu.from_file(filepath1, filepath2)
self.assertEqual(x.absorbing_atom, 'O',
"failed to read xmu.dat file properly")
def test_as_dict_and_from_dict(self):
filepath1 = os.path.join(test_dir, 'xmu.dat')
filepath2 = os.path.join(test_dir, 'feff.inp')
x = Xmu.from_file(filepath1, filepath2)
data=x.data.tolist()
d=x.as_dict()
x2 = Xmu.from_dict(d)
data2= x2.data.tolist()
self.assertEqual(data, data2, "Xmu to and from dict does not match")
if __name__ == '__main__':
unittest.main()
|
the-stack_0_4939 | _base_ = [
'../_base_/models/mask_rcnn_r50_fpn.py',
'../_base_/datasets/lvis_instance.py',
'../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
roi_head=dict(
bbox_head=dict(num_classes=1230), mask_head=dict(num_classes=1230)))
test_cfg = dict(
rcnn=dict(
score_thr=0.0001,
# LVIS allows up to 300
max_per_img=300))
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='Resize',
img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
(1333, 768), (1333, 800)],
multiscale_mode='value',
keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(dataset=dict(pipeline=train_pipeline)))
|
the-stack_0_4940 | """Lazy version of the dataset for training TDC and CMC."""
from typing import Any, List, Tuple
import cv2
import librosa
import numpy as np
import torch
from skvideo.io import FFmpegReader
from torch.utils.data import Dataset
class LazyTDCCMCDataset(Dataset):
"""
Dataset for training TDC and CMC.
Dataset for sampling video frames and audio snippets with distance
labels to train the embedding networks.
Parameters
----------
filenames : list of str
List of filenames of video files.
    trims : list of tuple
List of tuples `(begin_idx, end_idx)` that specify what frame
of the video to start and end.
crops : list of tuple
List of tuples `(x_1, y_1, x_2, y_2)` that define the clip
window of each video.
frame_rate : int
Frame rate to sample video. Default to 15.
"""
def __init__(
self,
filenames: List[str],
trims: List[Tuple[int, int]],
crops: List[Tuple[int, int, int, int]],
frame_rate: float = 15,
):
# TDCCMCDataset is an unconvential dataset, where each data is
# dynamically sampled whenever needed instead of a static dataset.
# Therefore, in `__init__`, we do not define a static dataset. Instead,
# we simply preprocess the video and audio for faster `__getitem__`.
super().__init__()
self.filenames = filenames
self.trims = trims
self.crops = crops
self.audios: List[np.ndarray] = []
self.readers: List[Any] = []
for filename in filenames:
# Get video frames with scikit-video
reader = FFmpegReader(
filename + ".mp4",
inputdict={"-r": str(frame_rate)},
outputdict={"-r": str(frame_rate)},
)
self.readers.append(reader)
# STFT audio
# TODO Magic number sr=2000, n_fft=510
y, _ = librosa.load(filename + ".wav", sr=2000)
D = librosa.core.stft(y, n_fft=510)
D = np.abs(D)
# Save audio
self.audios.append(D)
def __len__(self) -> int:
# Return a high number since this dataset in dynamic. Don't use
# this explicitly!
return np.iinfo(np.int64).max
def __getitem__(
self, index: int
) -> Tuple[
torch.FloatTensor,
torch.FloatTensor,
torch.FloatTensor,
torch.LongTensor,
torch.LongTensor,
]:
"""
Return a sample from the dynamic dataset.
Each sample contains two video frames, one audio snippet, one
TDC label and one CMC label. In other words, the format is
(frame_v, frame_w, audio_a, tdc_label, cmc_label).
Parameters
----------
index : int
Returns
-------
frame_v : torch.FloatTensor
frame_w : torch.FloatTensor
        audio_a : torch.FloatTensor
tdc_label : torch.LongTensor
cmc_label : torch.LongTensor
"""
# Below is a paragraph from the original paper:
#
# To generate training data, we sample input pairs (v^i, w^i) (where
# v^i and w^i are sampled from the same domain) as follows. First, we
# sample a demonstration sequence from our three training videos. Next,
# we sample both an interval, d_k ∈ {[0], [1], [2], [3 - 4], [5 - 20],
# [21 - 200]}, and a distance, ∆t ∈ dk. Finally, we randomly select a
# pair of frames from the sequence with temporal distance ∆t. The model
# is trained with Adam using a learning rate of 10^-4 and batch size of
# 32 for 200,000 steps.
#
# From Section 5: Implementation Details
# 1) Sample video from videos
src_idx = np.random.choice(len(self.audios))
reader = self.readers[src_idx]
audio = self.audios[src_idx]
trim = self.trims[src_idx]
crop = self.crops[src_idx]
# 2) Sample tdc_label and cmc_label
tdc_label = self._sample_label()
cmc_label = self._sample_label()
# 3) Sample tdc_distance, cmc_distance
tdc_distance = self._sample_distance_from_label(tdc_label)
cmc_distance = self._sample_distance_from_label(cmc_label)
# 4) Sample framestack_v from video (check limits carefully)
framestack_v_idx = np.random.randint(0, reader.getShape()[0] - tdc_distance - 4)
framestack_v = self._sample_framestack(framestack_v_idx, reader, trim, crop)
# 5) Sample frame_w from video
framestack_w_idx = framestack_v_idx + tdc_distance
framestack_w = self._sample_framestack(framestack_w_idx, reader, trim, crop)
# 6) Sample audio_a from audio
audio_a_idx = framestack_v_idx + cmc_distance
audio_a = audio[:, audio_a_idx : audio_a_idx + 137]
audio_a = torch.FloatTensor(audio_a)
# 7) Crop Frames from 140x140 to 128x128
# TODO Is it correct to use same crop for both v and w?
y = np.random.randint(0, 140 - 128)
x = np.random.randint(0, 140 - 128)
framestack_v = framestack_v[:, :, y : y + 128, x : x + 128]
framestack_w = framestack_w[:, :, y : y + 128, x : x + 128]
# 8) Switch 4 x 3 x 128 x 128 to 1 x 12 x 128 x 128
framestack_v = torch.FloatTensor(framestack_v).view(-1, 128, 128)
framestack_w = torch.FloatTensor(framestack_w).view(-1, 128, 128)
# 9) Scale image values from 0~255 to 0~1
framestack_v /= 255.0
framestack_w /= 255.0
# 10) Return (frame_v, frame_w, audio_a, tdc_label, cmc_label)
return (
framestack_v,
framestack_w,
audio_a,
torch.LongTensor([tdc_label]),
torch.LongTensor([cmc_label]),
)
def _sample_label(self) -> int:
"""
Sample randomly from label.
Returns
-------
label : int
Label sampled from 0 ~ 5.
"""
return np.random.choice(6)
def _sample_distance_from_label(self, label: int) -> int:
"""
Sample randomly from distance from label.
Label 0: Distance 0
Label 1: Distance 1
Label 2: Distance 2
Label 3: Distance sampled from [3, 4]
Label 4: Distance sampled from [5, 20]
Label 5: Distance sampled from [21, 200]
Parameters
----------
label : int
Label sampled randomly.
Returns
-------
distance: int
Distance sampled according to the label.
"""
if label == 0: # [0]
distance = 0
elif label == 1: # [1]
distance = 1
elif label == 2: # [2]
distance = 2
elif label == 3: # [3 - 4]
distance = np.random.choice(np.arange(3, 4 + 1))
elif label == 4: # [5 - 20]
distance = np.random.choice(np.arange(5, 20 + 1))
else: # [21 - 200]
distance = np.random.choice(np.arange(21, 200 + 1))
return distance
def _sample_framestack(
self,
start_frame: int,
reader: Any,
trim: Tuple[int, int],
crop: Tuple[int, int, int, int],
) -> np.ndarray:
assert start_frame + trim[0] + 4 < reader.getShape()[0]
framestack = []
for frame_idx, frame in enumerate(reader.nextFrame()):
# Trim video (time)
if start_frame + trim[0] <= frame_idx < start_frame + trim[0] + 4:
# Crop frames (space)
frame = frame[crop[1] : crop[3], crop[0] : crop[2], :]
framestack.append(cv2.resize(frame, (140, 140)))
if frame_idx == start_frame + trim[0] + 4:
break
# Change to NumPy array with PyTorch dimension format
framestack = np.array(framestack, dtype=float)
framestack = np.transpose(framestack, axes=(0, 3, 1, 2))
return framestack
|
the-stack_0_4941 | import requests
import json
host = "s-platform.api.opendns.com"
api_key = "a0b1c2d3-e4f5-g6h7-i8j9-kalbmcndoepf"
print(f"\n==> Finding all of the domains in a custom enforcement list")
url = f"https://{host}/1.0/domains?customerKey={api_key}"
headers = {'Authorization':'Bearer ' + api_key}
response = requests.get(url, headers=headers)
response.raise_for_status()
print (response.json()) |
the-stack_0_4942 | """Tests for device finding functionality."""
import unittest
from unittest.mock import patch
from pysyncdroid.exceptions import DeviceException
from pysyncdroid.find_device import (
get_connection_details,
get_mtp_details,
lsusb,
)
mock_lsub_parts = [
"Bus 002 Device 001: ID 0123:0001 test_vendor test_model1",
"Bus 002 Device 002: ID 0456:0002 test_vendor test_model2",
"Bus 002 Device 003: ID 0789:0003 test_vendor test_model3",
"Bus 002 Device 001: ID 1d6b:0002 Linux Foundation 2.0 root hub",
"Bus 004 Device 001: ID 1d6b:0003 Linux Foundation 3.0 root hub",
]
MOCK_LSUB_RESULT = "\n".join(mock_lsub_parts)
class TestLsusb(unittest.TestCase):
def setUp(self):
self.patcher = patch("pysyncdroid.find_device.run_bash_cmd")
self.mock_run_bash_cmd = self.patcher.start()
def tearDown(self):
self.patcher.stop()
def test_lsusb(self):
lsusb()
self.mock_run_bash_cmd.assert_called_with(["lsusb"])
class TestFindDevice(unittest.TestCase):
def setUp(self):
self.patcher = patch("pysyncdroid.find_device.lsusb")
self.mock_lsusb = self.patcher.start()
self.mock_lsusb.return_value = MOCK_LSUB_RESULT
def tearDown(self):
self.patcher.stop()
def test_get_connection_details_device_exception(self):
"""
Test 'get_connection_details' raises a DeviceException with an
appropriate error message when trying to find a non-existent device.
"""
with self.assertRaises(DeviceException) as exc:
get_connection_details(
vendor="non-existent-vendor", model="non-existent-model"
)
exc_msg_parts = (
'Device "non-existent-vendor non-existent-model" not found.',
'No "non-existent-vendor" devices were found.',
)
self.assertEqual(str(exc.exception), "\n".join(exc_msg_parts))
def test_get_connection_details_device_exception_message(self):
"""
Test 'get_connection_details' raises a DeviceException and the provided
error message lists all vendor devices when trying to find a
non-existent model.
"""
with self.assertRaises(DeviceException) as exc:
get_connection_details(vendor="linux", model="non-existent-model")
exc_msg_parts = (
'Device "linux non-existent-model" not found.',
'Following "linux" devices were found:',
"Linux Foundation 2.0 root hub",
"Linux Foundation 3.0 root hub",
)
self.assertEqual(str(exc.exception), "\n".join(exc_msg_parts))
def test_get_connection_details_multiple_devices(self):
"""
Test 'get_connection_details' is able to find the given device in case
of multiple devices from the same vendor (i.e. it doesn't pick up the
first device for a certain vendor).
"""
connection_details = get_connection_details(
vendor="test_vendor", model="test_model3"
)
self.assertIsInstance(connection_details, tuple)
self.assertEqual(connection_details[0], "002")
self.assertEqual(connection_details[1], "003")
def test_get_mtp_details(self):
"""
Test 'get_mtp_details' returns a valid MTP url gvfs path.
"""
usb_bus, device = get_connection_details(vendor="linux", model="root")
mtp_details = get_mtp_details(usb_bus, device)
self.assertIsInstance(mtp_details, tuple)
for mtp_detail in mtp_details:
self.assertIn(device, mtp_detail)
self.assertIn(usb_bus, mtp_detail)
|
the-stack_0_4944 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handles database requests from other nova services."""
import collections
import contextlib
import copy
import eventlet
import functools
import sys
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import versionutils
import six
from nova.accelerator import cyborg
from nova import availability_zones
from nova.compute import instance_actions
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute.utils import wrap_instance_event
from nova.compute import vm_states
from nova.conductor.tasks import cross_cell_migrate
from nova.conductor.tasks import live_migrate
from nova.conductor.tasks import migrate
from nova import context as nova_context
from nova.db import base
from nova import exception
from nova.i18n import _
from nova.image import glance
from nova import manager
from nova.network import neutron
from nova import notifications
from nova import objects
from nova.objects import base as nova_object
from nova.objects import fields
from nova import profiler
from nova import rpc
from nova.scheduler.client import query
from nova.scheduler.client import report
from nova.scheduler import utils as scheduler_utils
from nova import servicegroup
from nova import utils
from nova.volume import cinder
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def targets_cell(fn):
"""Wrap a method and automatically target the instance's cell.
This decorates a method with signature func(self, context, instance, ...)
and automatically targets the context with the instance's cell
mapping. It does this by looking up the InstanceMapping.
"""
@functools.wraps(fn)
def wrapper(self, context, *args, **kwargs):
instance = kwargs.get('instance') or args[0]
try:
im = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
except exception.InstanceMappingNotFound:
LOG.error('InstanceMapping not found, unable to target cell',
instance=instance)
except db_exc.CantStartEngineError:
# Check to see if we can ignore API DB connection failures
# because we might already be in the cell conductor.
with excutils.save_and_reraise_exception() as err_ctxt:
if CONF.api_database.connection is None:
err_ctxt.reraise = False
else:
LOG.debug('Targeting cell %(cell)s for conductor method %(meth)s',
{'cell': im.cell_mapping.identity,
'meth': fn.__name__})
# NOTE(danms): Target our context to the cell for the rest of
# this request, so that none of the subsequent code needs to
# care about it.
nova_context.set_target_cell(context, im.cell_mapping)
return fn(self, context, *args, **kwargs)
return wrapper
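# Illustrative (hypothetical) use of the decorator above: any conductor method whose
# signature is (self, context, instance, ...) can be wrapped so that `context` is
# scoped to the instance's cell before the body runs, e.g.:
#
#     @targets_cell
#     def touch_instance(self, context, instance):
#         ...  # DB access here hits the instance's cell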
class ConductorManager(manager.Manager):
"""Mission: Conduct things.
The methods in the base API for nova-conductor are various proxy operations
performed on behalf of the nova-compute service running on compute nodes.
Compute nodes are not allowed to directly access the database, so this set
of methods allows them to get specific work done without locally accessing
the database.
The nova-conductor service also exposes an API in the 'compute_task'
namespace. See the ComputeTaskManager class for details.
"""
target = messaging.Target(version='3.0')
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
*args, **kwargs)
self.compute_task_mgr = ComputeTaskManager()
self.additional_endpoints.append(self.compute_task_mgr)
# NOTE(hanlind): This can be removed in version 4.0 of the RPC API
def provider_fw_rule_get_all(self, context):
# NOTE(hanlind): Simulate an empty db result for compat reasons.
return []
def _object_dispatch(self, target, method, args, kwargs):
"""Dispatch a call to an object method.
This ensures that object methods get called and any exception
that is raised gets wrapped in an ExpectedException for forwarding
back to the caller (without spamming the conductor logs).
"""
try:
# NOTE(danms): Keep the getattr inside the try block since
# a missing method is really a client problem
return getattr(target, method)(*args, **kwargs)
except Exception:
raise messaging.ExpectedException()
def object_class_action_versions(self, context, objname, objmethod,
object_versions, args, kwargs):
objclass = nova_object.NovaObject.obj_class_from_name(
objname, object_versions[objname])
args = tuple([context] + list(args))
result = self._object_dispatch(objclass, objmethod, args, kwargs)
# NOTE(danms): The RPC layer will convert to primitives for us,
# but in this case, we need to honor the version the client is
# asking for, so we do it before returning here.
# NOTE(hanlind): Do not convert older than requested objects,
# see bug #1596119.
if isinstance(result, nova_object.NovaObject):
target_version = object_versions[objname]
requested_version = versionutils.convert_version_to_tuple(
target_version)
actual_version = versionutils.convert_version_to_tuple(
result.VERSION)
do_backport = requested_version < actual_version
other_major_version = requested_version[0] != actual_version[0]
if do_backport or other_major_version:
result = result.obj_to_primitive(
target_version=target_version,
version_manifest=object_versions)
return result
def object_action(self, context, objinst, objmethod, args, kwargs):
"""Perform an action on an object."""
oldobj = objinst.obj_clone()
result = self._object_dispatch(objinst, objmethod, args, kwargs)
updates = dict()
# NOTE(danms): Diff the object with the one passed to us and
# generate a list of changes to forward back
for name, field in objinst.fields.items():
if not objinst.obj_attr_is_set(name):
# Avoid demand-loading anything
continue
if (not oldobj.obj_attr_is_set(name) or
getattr(oldobj, name) != getattr(objinst, name)):
updates[name] = field.to_primitive(objinst, name,
getattr(objinst, name))
# This is safe since a field named this would conflict with the
# method anyway
updates['obj_what_changed'] = objinst.obj_what_changed()
return updates, result
def object_backport_versions(self, context, objinst, object_versions):
target = object_versions[objinst.obj_name()]
LOG.debug('Backporting %(obj)s to %(ver)s with versions %(manifest)s',
{'obj': objinst.obj_name(),
'ver': target,
'manifest': ','.join(
['%s=%s' % (name, ver)
for name, ver in object_versions.items()])})
return objinst.obj_to_primitive(target_version=target,
version_manifest=object_versions)
def reset(self):
objects.Service.clear_min_version_cache()
@contextlib.contextmanager
def try_target_cell(context, cell):
"""If cell is not None call func with context.target_cell.
This is a method to help during the transition period. Currently
various mappings may not exist if a deployment has not migrated to
cellsv2. If there is no mapping call the func as normal, otherwise
call it in a target_cell context.
"""
if cell:
with nova_context.target_cell(context, cell) as cell_context:
yield cell_context
else:
yield context
@contextlib.contextmanager
def obj_target_cell(obj, cell):
"""Run with object's context set to a specific cell"""
with try_target_cell(obj._context, cell) as target:
with obj.obj_alternate_context(target):
yield target
@profiler.trace_cls("rpc")
class ComputeTaskManager(base.Base):
"""Namespace for compute methods.
This class presents an rpc API for nova-conductor under the 'compute_task'
namespace. The methods here are compute operations that are invoked
by the API service. These methods see the operation to completion, which
may involve coordinating activities on multiple compute nodes.
"""
target = messaging.Target(namespace='compute_task', version='1.23')
def __init__(self):
super(ComputeTaskManager, self).__init__()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.volume_api = cinder.API()
self.image_api = glance.API()
self.network_api = neutron.API()
self.servicegroup_api = servicegroup.API()
self.query_client = query.SchedulerQueryClient()
self.report_client = report.SchedulerReportClient()
self.notifier = rpc.get_notifier('compute', CONF.host)
# Help us to record host in EventReporter
self.host = CONF.host
def reset(self):
LOG.info('Reloading compute RPC API')
compute_rpcapi.LAST_VERSION = None
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
# TODO(tdurakov): remove `live` parameter here on compute task api RPC
# version bump to 2.x
# TODO(danms): remove the `reservations` parameter here on compute task api
# RPC version bump to 2.x
@messaging.expected_exceptions(
exception.NoValidHost,
exception.ComputeServiceUnavailable,
exception.ComputeHostNotFound,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceInvalidState,
exception.MigrationPreCheckError,
exception.UnsupportedPolicyException)
@targets_cell
@wrap_instance_event(prefix='conductor')
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit, reservations=None,
clean_shutdown=True, request_spec=None, host_list=None):
if instance and not isinstance(instance, nova_object.NovaObject):
# NOTE(danms): Until v2 of the RPC API, we need to tolerate
# old-world instance objects here
attrs = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance,
expected_attrs=attrs)
# NOTE: Remove this when we drop support for v1 of the RPC API
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
if live and not rebuild and not flavor:
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec)
elif not live and not rebuild and flavor:
instance_uuid = instance.uuid
with compute_utils.EventReporter(context, 'cold_migrate',
self.host, instance_uuid):
self._cold_migrate(context, instance, flavor,
scheduler_hint['filter_properties'],
clean_shutdown, request_spec,
host_list)
else:
raise NotImplementedError()
@staticmethod
def _get_request_spec_for_cold_migrate(context, instance, flavor,
filter_properties, request_spec):
# NOTE(sbauza): If a reschedule occurs when prep_resize(), then
# it only provides filter_properties legacy dict back to the
# conductor with no RequestSpec part of the payload for <Stein
# computes.
# TODO(mriedem): We can remove this compat code for no request spec
# coming to conductor in ComputeTaskAPI RPC API version 2.0
if not request_spec:
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
# Make sure we hydrate a new RequestSpec object with the new flavor
# and not the nested one from the instance
request_spec = objects.RequestSpec.from_components(
context, instance.uuid, image_meta,
flavor, instance.numa_topology, instance.pci_requests,
filter_properties, None, instance.availability_zone,
project_id=instance.project_id, user_id=instance.user_id)
elif not isinstance(request_spec, objects.RequestSpec):
# Prior to compute RPC API 5.1 conductor would pass a legacy dict
# version of the request spec to compute and Stein compute
# could be sending that back to conductor on reschedule, so if we
# got a dict convert it to an object.
# TODO(mriedem): We can drop this compat code when we only support
# compute RPC API >=6.0.
request_spec = objects.RequestSpec.from_primitives(
context, request_spec, filter_properties)
# We don't have to set the new flavor on the request spec because
# if we got here it was due to a reschedule from the compute and
# the request spec would already have the new flavor in it from the
# else block below.
else:
# NOTE(sbauza): Resizes means new flavor, so we need to update the
# original RequestSpec object for make sure the scheduler verifies
# the right one and not the original flavor
request_spec.flavor = flavor
return request_spec
def _cold_migrate(self, context, instance, flavor, filter_properties,
clean_shutdown, request_spec, host_list):
request_spec = self._get_request_spec_for_cold_migrate(
context, instance, flavor, filter_properties, request_spec)
task = self._build_cold_migrate_task(context, instance, flavor,
request_spec, clean_shutdown, host_list)
try:
task.execute()
except exception.NoValidHost as ex:
vm_state = instance.vm_state
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, request_spec)
# if the flavor IDs match, it's migrate; otherwise resize
if flavor.id == instance.instance_type_id:
msg = _("No valid host found for cold migrate")
else:
msg = _("No valid host found for resize")
raise exception.NoValidHost(reason=msg)
except exception.UnsupportedPolicyException as ex:
with excutils.save_and_reraise_exception():
vm_state = instance.vm_state
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, request_spec)
except Exception as ex:
with excutils.save_and_reraise_exception():
# Refresh the instance so we don't overwrite vm_state changes
# set after we executed the task.
try:
instance.refresh()
# Passing vm_state is kind of silly but it's expected in
# set_vm_state_and_notify.
updates = {'vm_state': instance.vm_state,
'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, request_spec)
except exception.InstanceNotFound:
# We can't send the notification because the instance is
# gone so just log it.
LOG.info('During %s the instance was deleted.',
'resize' if instance.instance_type_id != flavor.id
else 'cold migrate', instance=instance)
# NOTE(sbauza): Make sure we persist the new flavor in case we had
# a successful scheduler call if and only if nothing bad happened
if request_spec.obj_what_changed():
request_spec.save()
def _set_vm_state_and_notify(self, context, instance_uuid, method, updates,
ex, request_spec):
scheduler_utils.set_vm_state_and_notify(
context, instance_uuid, 'compute_task', method, updates,
ex, request_spec)
def _cleanup_allocated_networks(
self, context, instance, requested_networks):
try:
# If we were told not to allocate networks let's save ourselves
# the trouble of calling the network API.
if not (requested_networks and requested_networks.no_allocate):
self.network_api.deallocate_for_instance(
context, instance, requested_networks=requested_networks)
except Exception:
LOG.exception('Failed to deallocate networks', instance=instance)
return
instance.system_metadata['network_allocated'] = 'False'
try:
instance.save()
except exception.InstanceNotFound:
# NOTE: It's possible that we're cleaning up the networks
# because the instance was deleted. If that's the case then this
# exception will be raised by instance.save()
pass
@targets_cell
@wrap_instance_event(prefix='conductor')
def live_migrate_instance(self, context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec):
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec)
def _live_migrate(self, context, instance, scheduler_hint,
block_migration, disk_over_commit, request_spec):
destination = scheduler_hint.get("host")
def _set_vm_state(context, instance, ex, vm_state=None,
task_state=None):
request_spec = {'instance_properties': {
'uuid': instance.uuid, },
}
scheduler_utils.set_vm_state_and_notify(context,
instance.uuid,
'compute_task', 'migrate_server',
dict(vm_state=vm_state,
task_state=task_state,
expected_task_state=task_states.MIGRATING,),
ex, request_spec)
migration = objects.Migration(context=context.elevated())
migration.dest_compute = destination
migration.status = 'accepted'
migration.instance_uuid = instance.uuid
migration.source_compute = instance.host
migration.migration_type = fields.MigrationType.LIVE_MIGRATION
if instance.obj_attr_is_set('flavor'):
migration.old_instance_type_id = instance.flavor.id
migration.new_instance_type_id = instance.flavor.id
else:
migration.old_instance_type_id = instance.instance_type_id
migration.new_instance_type_id = instance.instance_type_id
migration.create()
task = self._build_live_migrate_task(context, instance, destination,
block_migration, disk_over_commit,
migration, request_spec)
try:
task.execute()
except (exception.NoValidHost,
exception.ComputeHostNotFound,
exception.ComputeServiceUnavailable,
exception.InvalidHypervisorType,
exception.InvalidCPUInfo,
exception.UnableToMigrateToSelf,
exception.DestinationHypervisorTooOld,
exception.InvalidLocalStorage,
exception.InvalidSharedStorage,
exception.HypervisorUnavailable,
exception.InstanceInvalidState,
exception.MigrationPreCheckError,
exception.MigrationSchedulerRPCError) as ex:
with excutils.save_and_reraise_exception():
_set_vm_state(context, instance, ex, instance.vm_state)
migration.status = 'error'
migration.save()
except Exception as ex:
LOG.error('Migration of instance %(instance_id)s to host'
' %(dest)s unexpectedly failed.',
{'instance_id': instance.uuid, 'dest': destination},
exc_info=True)
# Reset the task state to None to indicate completion of
# the operation as it is done in case of known exceptions.
_set_vm_state(context, instance, ex, vm_states.ERROR,
task_state=None)
migration.status = 'error'
migration.save()
raise exception.MigrationError(reason=six.text_type(ex))
def _build_live_migrate_task(self, context, instance, destination,
block_migration, disk_over_commit, migration,
request_spec=None):
return live_migrate.LiveMigrationTask(context, instance,
destination, block_migration,
disk_over_commit, migration,
self.compute_rpcapi,
self.servicegroup_api,
self.query_client,
self.report_client,
request_spec)
def _build_cold_migrate_task(self, context, instance, flavor, request_spec,
clean_shutdown, host_list):
return migrate.MigrationTask(context, instance, flavor,
request_spec, clean_shutdown,
self.compute_rpcapi,
self.query_client, self.report_client,
host_list, self.network_api)
def _destroy_build_request(self, context, instance):
# The BuildRequest needs to be stored until the instance is mapped to
# an instance table. At that point it will never be used again and
# should be deleted.
build_request = objects.BuildRequest.get_by_instance_uuid(
context, instance.uuid)
# TODO(alaski): Sync API updates of the build_request to the
# instance before it is destroyed. Right now only locked_by can
# be updated before this is destroyed.
build_request.destroy()
def _populate_instance_mapping(self, context, instance, host):
try:
inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
except exception.InstanceMappingNotFound:
# NOTE(alaski): If nova-api is up to date this exception should
# never be hit. But during an upgrade it's possible that an old
# nova-api didn't create an instance_mapping during this boot
# request.
LOG.debug('Instance was not mapped to a cell, likely due '
'to an older nova-api service running.',
instance=instance)
return None
else:
try:
host_mapping = objects.HostMapping.get_by_host(context,
host.service_host)
except exception.HostMappingNotFound:
# NOTE(alaski): For now this exception means that a
# deployment has not migrated to cellsv2 and we should
# remove the instance_mapping that has been created.
# Eventually this will indicate a failure to properly map a
# host to a cell and we may want to reschedule.
inst_mapping.destroy()
return None
else:
inst_mapping.cell_mapping = host_mapping.cell_mapping
inst_mapping.save()
return inst_mapping
def _validate_existing_attachment_ids(self, context, instance, bdms):
"""Ensure any attachment ids referenced by the bdms exist.
New attachments will only be created if the attachment ids referenced
by the bdms no longer exist. This can happen when an instance is
rescheduled after a failure to spawn as cleanup code on the previous
host will delete attachments before rescheduling.
"""
for bdm in bdms:
if bdm.is_volume and bdm.attachment_id:
try:
self.volume_api.attachment_get(context, bdm.attachment_id)
except exception.VolumeAttachmentNotFound:
attachment = self.volume_api.attachment_create(
context, bdm.volume_id, instance.uuid)
bdm.attachment_id = attachment['id']
bdm.save()
def _cleanup_when_reschedule_fails(
self, context, instance, exception, legacy_request_spec,
requested_networks):
"""Set the instance state and clean up.
        It is only used when build_instances fails while rescheduling the
        instance.
"""
updates = {'vm_state': vm_states.ERROR,
'task_state': None}
self._set_vm_state_and_notify(
context, instance.uuid, 'build_instances', updates, exception,
legacy_request_spec)
self._cleanup_allocated_networks(
context, instance, requested_networks)
compute_utils.delete_arqs_if_needed(context, instance)
# NOTE(danms): This is never cell-targeted because it is only used for
# n-cpu reschedules which go to the cell conductor and thus are always
# cell-specific.
def build_instances(self, context, instances, image, filter_properties,
admin_password, injected_files, requested_networks,
security_groups, block_device_mapping=None, legacy_bdm=True,
request_spec=None, host_lists=None):
# TODO(ndipanov): Remove block_device_mapping and legacy_bdm in version
# 2.0 of the RPC API.
# TODO(danms): Remove this in version 2.0 of the RPC API
if (requested_networks and
not isinstance(requested_networks,
objects.NetworkRequestList)):
requested_networks = objects.NetworkRequestList.from_tuples(
requested_networks)
# TODO(melwitt): Remove this in version 2.0 of the RPC API
flavor = filter_properties.get('instance_type')
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
filter_properties = dict(filter_properties, instance_type=flavor)
# Older computes will not send a request_spec during reschedules so we
# need to check and build our own if one is not provided.
if request_spec is None:
legacy_request_spec = scheduler_utils.build_request_spec(
image, instances)
else:
# TODO(mriedem): This is annoying but to populate the local
# request spec below using the filter_properties, we have to pass
# in a primitive version of the request spec. Yes it's inefficient
# and we can remove it once the populate_retry and
# populate_filter_properties utility methods are converted to
# work on a RequestSpec object rather than filter_properties.
# NOTE(gibi): we have to keep a reference to the original
# RequestSpec object passed to this function as we lose information
# during the below legacy conversion
legacy_request_spec = request_spec.to_legacy_request_spec_dict()
# 'host_lists' will be None during a reschedule from a pre-Queens
# compute. In all other cases, it will be a list of lists, though the
# lists may be empty if there are no more hosts left in a rescheduling
# situation.
is_reschedule = host_lists is not None
try:
# check retry policy. Rather ugly use of instances[0]...
# but if we've exceeded max retries... then we really only
# have a single instance.
# TODO(sbauza): Provide directly the RequestSpec object
# when populate_retry() accepts it
scheduler_utils.populate_retry(
filter_properties, instances[0].uuid)
instance_uuids = [instance.uuid for instance in instances]
spec_obj = objects.RequestSpec.from_primitives(
context, legacy_request_spec, filter_properties)
LOG.debug("Rescheduling: %s", is_reschedule)
if is_reschedule:
# Make sure that we have a host, as we may have exhausted all
# our alternates
if not host_lists[0]:
# We have an empty list of hosts, so this instance has
# failed to build.
msg = ("Exhausted all hosts available for retrying build "
"failures for instance %(instance_uuid)s." %
{"instance_uuid": instances[0].uuid})
raise exception.MaxRetriesExceeded(reason=msg)
else:
# This is not a reschedule, so we need to call the scheduler to
# get appropriate hosts for the request.
# NOTE(gibi): We only call the scheduler if we are rescheduling
# from a really old compute. In that case we do not support
# externally-defined resource requests, like port QoS. So no
# requested_resources are set on the RequestSpec here.
host_lists = self._schedule_instances(context, spec_obj,
instance_uuids, return_alternates=True)
except Exception as exc:
# NOTE(mriedem): If we're rescheduling from a failed build on a
# compute, "retry" will be set and num_attempts will be >1 because
# populate_retry above will increment it. If the server build was
# forced onto a host/node or [scheduler]/max_attempts=1, "retry"
# won't be in filter_properties and we won't get here because
# nova-compute will just abort the build since reschedules are
# disabled in those cases.
num_attempts = filter_properties.get(
'retry', {}).get('num_attempts', 1)
for instance in instances:
# If num_attempts > 1, we're in a reschedule and probably
# either hit NoValidHost or MaxRetriesExceeded. Either way,
# the build request should already be gone and we probably
# can't reach the API DB from the cell conductor.
if num_attempts <= 1:
try:
# If the BuildRequest stays around then instance
# show/lists will pull from it rather than the errored
# instance.
self._destroy_build_request(context, instance)
except exception.BuildRequestNotFound:
pass
self._cleanup_when_reschedule_fails(
context, instance, exc, legacy_request_spec,
requested_networks)
return
elevated = context.elevated()
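        # Pair each instance with its candidate host list; on a reschedule the
        # selected host (or an alternate) must have its resources claimed in
        # placement before the build is sent to it.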
for (instance, host_list) in six.moves.zip(instances, host_lists):
host = host_list.pop(0)
if is_reschedule:
# If this runs in the superconductor, the first instance will
# already have its resources claimed in placement. If this is a
# retry, though, this is running in the cell conductor, and we
# need to claim first to ensure that the alternate host still
# has its resources available. Note that there are schedulers
# that don't support Placement, so must assume that the host is
# still available.
host_available = False
while host and not host_available:
if host.allocation_request:
alloc_req = jsonutils.loads(host.allocation_request)
else:
alloc_req = None
if alloc_req:
try:
host_available = scheduler_utils.claim_resources(
elevated, self.report_client, spec_obj,
instance.uuid, alloc_req,
host.allocation_request_version)
if request_spec and host_available:
# NOTE(gibi): redo the request group - resource
# provider mapping as the above claim call
# moves the allocation of the instance to
# another host
scheduler_utils.fill_provider_mapping(
request_spec, host)
except Exception as exc:
self._cleanup_when_reschedule_fails(
context, instance, exc, legacy_request_spec,
requested_networks)
return
else:
# Some deployments use different schedulers that do not
# use Placement, so they will not have an
# allocation_request to claim with. For those cases,
# there is no concept of claiming, so just assume that
# the host is valid.
host_available = True
if not host_available:
# Insufficient resources remain on that host, so
# discard it and try the next.
host = host_list.pop(0) if host_list else None
if not host_available:
# No more available hosts for retrying the build.
msg = ("Exhausted all hosts available for retrying build "
"failures for instance %(instance_uuid)s." %
{"instance_uuid": instance.uuid})
exc = exception.MaxRetriesExceeded(reason=msg)
self._cleanup_when_reschedule_fails(
context, instance, exc, legacy_request_spec,
requested_networks)
return
# The availability_zone field was added in v1.1 of the Selection
# object so make sure to handle the case where it is missing.
if 'availability_zone' in host:
instance.availability_zone = host.availability_zone
else:
try:
instance.availability_zone = (
availability_zones.get_host_availability_zone(context,
host.service_host))
except Exception as exc:
# Put the instance into ERROR state, set task_state to
# None, inject a fault, etc.
self._cleanup_when_reschedule_fails(
context, instance, exc, legacy_request_spec,
requested_networks)
continue
try:
# NOTE(danms): This saves the az change above, refreshes our
# instance, and tells us if it has been deleted underneath us
instance.save()
except (exception.InstanceNotFound,
exception.InstanceInfoCacheNotFound):
LOG.debug('Instance deleted during build', instance=instance)
continue
local_filter_props = copy.deepcopy(filter_properties)
scheduler_utils.populate_filter_properties(local_filter_props,
host)
# Populate the request_spec with the local_filter_props information
# like retries and limits. Note that at this point the request_spec
# could have come from a compute via reschedule and it would
# already have some things set, like scheduler_hints.
local_reqspec = objects.RequestSpec.from_primitives(
context, legacy_request_spec, local_filter_props)
# NOTE(gibi): at this point the request spec already got converted
# to a legacy dict and then back to an object so we lost the non
# legacy part of the spec. Re-populate the requested_resources
# field based on the original request spec object passed to this
# function.
if request_spec:
local_reqspec.requested_resources = (
request_spec.requested_resources)
# The block_device_mapping passed from the api doesn't contain
# instance specific information
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
# This is populated in scheduler_utils.populate_retry
num_attempts = local_filter_props.get('retry',
{}).get('num_attempts', 1)
if num_attempts <= 1:
# If this is a reschedule the instance is already mapped to
# this cell and the BuildRequest is already deleted so ignore
# the logic below.
inst_mapping = self._populate_instance_mapping(context,
instance,
host)
try:
self._destroy_build_request(context, instance)
except exception.BuildRequestNotFound:
# This indicates an instance delete has been requested in
# the API. Stop the build, cleanup the instance_mapping and
# potentially the block_device_mappings
# TODO(alaski): Handle block_device_mapping cleanup
if inst_mapping:
inst_mapping.destroy()
return
else:
# NOTE(lyarwood): If this is a reschedule then recreate any
# attachments that were previously removed when cleaning up
# after failures to spawn etc.
self._validate_existing_attachment_ids(context, instance, bdms)
alts = [(alt.service_host, alt.nodename) for alt in host_list]
LOG.debug("Selected host: %s; Selected node: %s; Alternates: %s",
host.service_host, host.nodename, alts, instance=instance)
try:
accel_uuids = self._create_and_bind_arq_for_instance(
context, instance, host, local_reqspec)
except Exception as exc:
LOG.exception('Failed to reschedule. Reason: %s', exc)
self._cleanup_when_reschedule_fails(
context, instance, exc, legacy_request_spec,
requested_networks)
continue
self.compute_rpcapi.build_and_run_instance(context,
instance=instance, host=host.service_host, image=image,
request_spec=local_reqspec,
filter_properties=local_filter_props,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=bdms, node=host.nodename,
limits=host.limits, host_list=host_list,
accel_uuids=accel_uuids)
def _create_and_bind_arq_for_instance(self, context, instance, host,
request_spec):
try:
resource_provider_mapping = (
request_spec.get_request_group_mapping())
# Using nodename instead of hostname. See:
# http://lists.openstack.org/pipermail/openstack-discuss/2019-November/011044.html # noqa
return self._create_and_bind_arqs(
context, instance.uuid, instance.flavor.extra_specs,
host.nodename, resource_provider_mapping)
except exception.AcceleratorRequestBindingFailed as exc:
# If anything failed here we need to cleanup and bail out.
cyclient = cyborg.get_client(context)
cyclient.delete_arqs_by_uuid(exc.arqs)
raise
def _schedule_instances(self, context, request_spec,
instance_uuids=None, return_alternates=False):
scheduler_utils.setup_instance_group(context, request_spec)
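        # Ask the scheduler for destinations; with return_alternates=True each
        # result is a list of Selection objects (chosen host plus alternates).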
with timeutils.StopWatch() as timer:
host_lists = self.query_client.select_destinations(
context, request_spec, instance_uuids, return_objects=True,
return_alternates=return_alternates)
LOG.debug('Took %0.2f seconds to select destinations for %s '
'instance(s).', timer.elapsed(), len(instance_uuids))
return host_lists
@staticmethod
def _restrict_request_spec_to_cell(context, instance, request_spec):
"""Sets RequestSpec.requested_destination.cell for the move operation
Move operations, e.g. evacuate and unshelve, must be restricted to the
cell in which the instance already exists, so this method is used to
target the RequestSpec, which is sent to the scheduler via the
_schedule_instances method, to the instance's current cell.
:param context: nova auth RequestContext
"""
instance_mapping = \
objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
LOG.debug('Requesting cell %(cell)s during scheduling',
{'cell': instance_mapping.cell_mapping.identity},
instance=instance)
if ('requested_destination' in request_spec and
request_spec.requested_destination):
request_spec.requested_destination.cell = (
instance_mapping.cell_mapping)
else:
request_spec.requested_destination = (
objects.Destination(
cell=instance_mapping.cell_mapping))
# TODO(mriedem): Make request_spec required in ComputeTaskAPI RPC v2.0.
@targets_cell
def unshelve_instance(self, context, instance, request_spec=None):
sys_meta = instance.system_metadata
def safe_image_show(ctx, image_id):
if image_id:
return self.image_api.get(ctx, image_id, show_deleted=False)
else:
raise exception.ImageNotFound(image_id='')
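        # A SHELVED (not offloaded) instance still has a host, so it only needs
        # to be powered back on; SHELVED_OFFLOADED must be scheduled to a new
        # host below.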
if instance.vm_state == vm_states.SHELVED:
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=task_states.UNSHELVING)
self.compute_rpcapi.start_instance(context, instance)
elif instance.vm_state == vm_states.SHELVED_OFFLOADED:
image = None
image_id = sys_meta.get('shelved_image_id')
# No need to check for image if image_id is None as
# "shelved_image_id" key is not set for volume backed
# instance during the shelve process
if image_id:
with compute_utils.EventReporter(
context, 'get_image_info', self.host, instance.uuid):
try:
image = safe_image_show(context, image_id)
except exception.ImageNotFound as error:
instance.vm_state = vm_states.ERROR
instance.save()
reason = _('Unshelve attempted but the image %s '
'cannot be found.') % image_id
LOG.error(reason, instance=instance)
compute_utils.add_instance_fault_from_exc(
context, instance, error, sys.exc_info(),
fault_message=reason)
raise exception.UnshelveException(
instance_id=instance.uuid, reason=reason)
try:
with compute_utils.EventReporter(context, 'schedule_instances',
self.host, instance.uuid):
# NOTE(sbauza): Force_hosts/nodes needs to be reset
# if we want to make sure that the next destination
# is not forced to be the original host
request_spec.reset_forced_destinations()
# TODO(sbauza): Provide directly the RequestSpec object
# when populate_filter_properties accepts it
filter_properties = request_spec.\
to_legacy_filter_properties_dict()
port_res_req = (
self.network_api.get_requested_resource_for_instance(
context, instance.uuid))
# NOTE(gibi): When cyborg or other module wants to handle
# similar non-nova resources then here we have to collect
# all the external resource requests in a single list and
# add them to the RequestSpec.
request_spec.requested_resources = port_res_req
# NOTE(cfriesen): Ensure that we restrict the scheduler to
# the cell specified by the instance mapping.
self._restrict_request_spec_to_cell(
context, instance, request_spec)
request_spec.ensure_project_and_user_id(instance)
request_spec.ensure_network_metadata(instance)
compute_utils.heal_reqspec_is_bfv(
context, request_spec, instance)
host_lists = self._schedule_instances(context,
request_spec, [instance.uuid],
return_alternates=False)
host_list = host_lists[0]
selection = host_list[0]
scheduler_utils.populate_filter_properties(
filter_properties, selection)
(host, node) = (selection.service_host, selection.nodename)
instance.availability_zone = (
availability_zones.get_host_availability_zone(
context, host))
scheduler_utils.fill_provider_mapping(
request_spec, selection)
self.compute_rpcapi.unshelve_instance(
context, instance, host, request_spec, image=image,
filter_properties=filter_properties, node=node)
except (exception.NoValidHost,
exception.UnsupportedPolicyException):
instance.task_state = None
instance.save()
LOG.warning("No valid host found for unshelve instance",
instance=instance)
return
except Exception:
with excutils.save_and_reraise_exception():
instance.task_state = None
instance.save()
LOG.error("Unshelve attempted but an error "
"has occurred", instance=instance)
else:
LOG.error('Unshelve attempted but vm_state not SHELVED or '
'SHELVED_OFFLOADED', instance=instance)
instance.vm_state = vm_states.ERROR
instance.save()
return
def _allocate_for_evacuate_dest_host(self, context, instance, host,
request_spec=None):
# The user is forcing the destination host and bypassing the
# scheduler. We need to copy the source compute node
# allocations in Placement to the destination compute node.
# Normally select_destinations() in the scheduler would do this
# for us, but when forcing the target host we don't call the
# scheduler.
source_node = None # This is used for error handling below.
try:
source_node = objects.ComputeNode.get_by_host_and_nodename(
context, instance.host, instance.node)
dest_node = (
objects.ComputeNode.get_first_node_by_host_for_old_compat(
context, host, use_slave=True))
except exception.ComputeHostNotFound as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(
context, instance.uuid, 'rebuild_server',
{'vm_state': instance.vm_state,
'task_state': None}, ex, request_spec)
if source_node:
LOG.warning('Specified host %s for evacuate was not '
'found.', host, instance=instance)
else:
                    LOG.warning('Source host %s and node %s for evacuate were '
'not found.', instance.host, instance.node,
instance=instance)
try:
scheduler_utils.claim_resources_on_destination(
context, self.report_client, instance, source_node, dest_node)
except exception.NoValidHost as ex:
with excutils.save_and_reraise_exception():
self._set_vm_state_and_notify(
context, instance.uuid, 'rebuild_server',
{'vm_state': instance.vm_state,
'task_state': None}, ex, request_spec)
LOG.warning('Specified host %s for evacuate is '
'invalid.', host, instance=instance)
# TODO(mriedem): Make request_spec required in ComputeTaskAPI RPC v2.0.
@targets_cell
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata,
bdms, recreate, on_shared_storage,
preserve_ephemeral=False, host=None,
request_spec=None):
# recreate=True means the instance is being evacuated from a failed
# host to a new destination host. The 'recreate' variable name is
# confusing, so rename it to evacuate here at the top, which is simpler
# than renaming a parameter in an RPC versioned method.
evacuate = recreate
# NOTE(efried): It would be nice if this were two separate events, one
# for 'rebuild' and one for 'evacuate', but this is part of the API
# now, so it would be nontrivial to change.
with compute_utils.EventReporter(context, 'rebuild_server',
self.host, instance.uuid):
node = limits = None
try:
migration = objects.Migration.get_by_instance_and_status(
context, instance.uuid, 'accepted')
except exception.MigrationNotFoundByStatus:
LOG.debug("No migration record for the rebuild/evacuate "
"request.", instance=instance)
migration = None
# The host variable is passed in two cases:
# 1. rebuild - the instance.host is passed to rebuild on the
# same host and bypass the scheduler *unless* a new image
# was specified
# 2. evacuate with specified host and force=True - the specified
# host is passed and is meant to bypass the scheduler.
# NOTE(mriedem): This could be a lot more straight-forward if we
# had separate methods for rebuild and evacuate...
if host:
# We only create a new allocation on the specified host if
# we're doing an evacuate since that is a move operation.
if host != instance.host:
# If a destination host is forced for evacuate, create
# allocations against it in Placement.
try:
self._allocate_for_evacuate_dest_host(
context, instance, host, request_spec)
except exception.AllocationUpdateFailed as ex:
with excutils.save_and_reraise_exception():
if migration:
migration.status = 'error'
migration.save()
# NOTE(efried): It would be nice if this were two
# separate events, one for 'rebuild' and one for
# 'evacuate', but this is part of the API now, so
# it would be nontrivial to change.
self._set_vm_state_and_notify(
context,
instance.uuid,
'rebuild_server',
{'vm_state': vm_states.ERROR,
'task_state': None}, ex, request_spec)
LOG.warning('Rebuild failed: %s',
six.text_type(ex), instance=instance)
except exception.NoValidHost:
with excutils.save_and_reraise_exception():
if migration:
migration.status = 'error'
migration.save()
else:
# At this point, the user is either:
#
# 1. Doing a rebuild on the same host (not evacuate) and
# specified a new image.
# 2. Evacuating and specified a host but are not forcing it.
#
# In either case, the API passes host=None but sets up the
# RequestSpec.requested_destination field for the specified
# host.
if evacuate:
# NOTE(sbauza): Augment the RequestSpec object by excluding
# the source host for avoiding the scheduler to pick it
request_spec.ignore_hosts = [instance.host]
# NOTE(sbauza): Force_hosts/nodes needs to be reset
# if we want to make sure that the next destination
# is not forced to be the original host
request_spec.reset_forced_destinations()
port_res_req = (
self.network_api.get_requested_resource_for_instance(
context, instance.uuid))
# NOTE(gibi): When cyborg or other module wants to handle
# similar non-nova resources then here we have to collect
# all the external resource requests in a single list and
# add them to the RequestSpec.
request_spec.requested_resources = port_res_req
try:
# if this is a rebuild of instance on the same host with
# new image.
if not evacuate and orig_image_ref != image_ref:
self._validate_image_traits_for_rebuild(context,
instance,
image_ref)
self._restrict_request_spec_to_cell(
context, instance, request_spec)
request_spec.ensure_project_and_user_id(instance)
request_spec.ensure_network_metadata(instance)
compute_utils.heal_reqspec_is_bfv(
context, request_spec, instance)
host_lists = self._schedule_instances(context,
request_spec, [instance.uuid],
return_alternates=False)
host_list = host_lists[0]
selection = host_list[0]
host, node, limits = (selection.service_host,
selection.nodename, selection.limits)
                    if evacuate:
scheduler_utils.fill_provider_mapping(
request_spec, selection)
except (exception.NoValidHost,
exception.UnsupportedPolicyException,
exception.AllocationUpdateFailed,
# the next two can come from fill_provider_mapping and
# signals a software error.
NotImplementedError,
ValueError) as ex:
if migration:
migration.status = 'error'
migration.save()
# Rollback the image_ref if a new one was provided (this
# only happens in the rebuild case, not evacuate).
if orig_image_ref and orig_image_ref != image_ref:
instance.image_ref = orig_image_ref
instance.save()
with excutils.save_and_reraise_exception():
# NOTE(efried): It would be nice if this were two
# separate events, one for 'rebuild' and one for
# 'evacuate', but this is part of the API now, so it
# would be nontrivial to change.
self._set_vm_state_and_notify(context, instance.uuid,
'rebuild_server',
{'vm_state': vm_states.ERROR,
'task_state': None}, ex, request_spec)
LOG.warning('Rebuild failed: %s',
six.text_type(ex), instance=instance)
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "rebuild.scheduled")
compute_utils.notify_about_instance_rebuild(
context, instance, host,
action=fields.NotificationAction.REBUILD_SCHEDULED,
source=fields.NotificationSource.CONDUCTOR)
instance.availability_zone = (
availability_zones.get_host_availability_zone(
context, host))
self.compute_rpcapi.rebuild_instance(context,
instance=instance,
new_pass=new_pass,
injected_files=injected_files,
image_ref=image_ref,
orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata,
bdms=bdms,
recreate=evacuate,
on_shared_storage=on_shared_storage,
preserve_ephemeral=preserve_ephemeral,
migration=migration,
host=host, node=node, limits=limits,
request_spec=request_spec)
def _validate_image_traits_for_rebuild(self, context, instance, image_ref):
"""Validates that the traits specified in the image can be satisfied
by the providers of the current allocations for the instance during
rebuild of the instance. If the traits cannot be
satisfied, fails the action by raising a NoValidHost exception.
:raises: NoValidHost exception in case the traits on the providers
of the allocated resources for the instance do not match
the required traits on the image.
"""
image_meta = objects.ImageMeta.from_image_ref(
context, self.image_api, image_ref)
if ('properties' not in image_meta or
'traits_required' not in image_meta.properties or not
image_meta.properties.traits_required):
return
image_traits = set(image_meta.properties.traits_required)
# check any of the image traits are forbidden in flavor traits.
# if so raise an exception
extra_specs = instance.flavor.extra_specs
forbidden_flavor_traits = set()
for key, val in extra_specs.items():
if key.startswith('trait'):
# get the actual key.
prefix, parsed_key = key.split(':', 1)
if val == 'forbidden':
forbidden_flavor_traits.add(parsed_key)
forbidden_traits = image_traits & forbidden_flavor_traits
if forbidden_traits:
raise exception.NoValidHost(
reason=_("Image traits are part of forbidden "
"traits in flavor associated with the server. "
"Either specify a different image during rebuild "
"or create a new server with the specified image "
"and a compatible flavor."))
# If image traits are present, then validate against allocations.
allocations = self.report_client.get_allocations_for_consumer(
context, instance.uuid)
instance_rp_uuids = list(allocations)
# Get provider tree for the instance. We use the uuid of the host
# on which the instance is rebuilding to get the provider tree.
compute_node = objects.ComputeNode.get_by_host_and_nodename(
context, instance.host, instance.node)
# TODO(karimull): Call with a read-only version, when available.
instance_rp_tree = (
self.report_client.get_provider_tree_and_ensure_root(
context, compute_node.uuid))
traits_in_instance_rps = set()
for rp_uuid in instance_rp_uuids:
traits_in_instance_rps.update(
instance_rp_tree.data(rp_uuid).traits)
missing_traits = image_traits - traits_in_instance_rps
if missing_traits:
raise exception.NoValidHost(
reason=_("Image traits cannot be "
"satisfied by the current resource providers. "
"Either specify a different image during rebuild "
"or create a new server with the specified image."))
# TODO(avolkov): move method to bdm
@staticmethod
def _volume_size(instance_type, bdm):
size = bdm.get('volume_size')
# NOTE (ndipanov): inherit flavor size only for swap and ephemeral
if (size is None and bdm.get('source_type') == 'blank' and
bdm.get('destination_type') == 'local'):
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
def _create_block_device_mapping(self, cell, instance_type, instance_uuid,
block_device_mapping):
"""Create the BlockDeviceMapping objects in the db.
This method makes a copy of the list in order to avoid using the same
id field in case this is called for multiple instances.
"""
LOG.debug("block_device_mapping %s", list(block_device_mapping),
instance_uuid=instance_uuid)
instance_block_device_mapping = copy.deepcopy(block_device_mapping)
for bdm in instance_block_device_mapping:
bdm.volume_size = self._volume_size(instance_type, bdm)
bdm.instance_uuid = instance_uuid
with obj_target_cell(bdm, cell):
bdm.update_or_create()
return instance_block_device_mapping
def _create_tags(self, context, instance_uuid, tags):
"""Create the Tags objects in the db."""
if tags:
tag_list = [tag.tag for tag in tags]
instance_tags = objects.TagList.create(
context, instance_uuid, tag_list)
return instance_tags
else:
return tags
def _create_instance_action_for_cell0(self, context, instance, exc):
"""Create a failed "create" instance action for the instance in cell0.
:param context: nova auth RequestContext targeted at cell0
:param instance: Instance object being buried in cell0
:param exc: Exception that occurred which resulted in burial
"""
# First create the action record.
objects.InstanceAction.action_start(
context, instance.uuid, instance_actions.CREATE, want_result=False)
# Now create an event for that action record.
event_name = 'conductor_schedule_and_build_instances'
objects.InstanceActionEvent.event_start(
context, instance.uuid, event_name, want_result=False,
host=self.host)
# And finish the event with the exception. Note that we expect this
# method to be called from _bury_in_cell0 which is called from within
# an exception handler so sys.exc_info should return values but if not
# it's not the end of the world - this is best effort.
objects.InstanceActionEvent.event_finish_with_failure(
context, instance.uuid, event_name, exc_val=exc,
exc_tb=sys.exc_info()[2], want_result=False)
def _bury_in_cell0(self, context, request_spec, exc,
build_requests=None, instances=None,
block_device_mapping=None,
tags=None):
"""Ensure all provided build_requests and instances end up in cell0.
Cell0 is the fake cell we schedule dead instances to when we can't
schedule them somewhere real. Requests that don't yet have instances
will get a new instance, created in cell0. Instances that have not yet
been created will be created in cell0. All build requests are destroyed
after we're done. Failure to delete a build request will trigger the
instance deletion, just like the happy path in
schedule_and_build_instances() below.
"""
try:
cell0 = objects.CellMapping.get_by_uuid(
context, objects.CellMapping.CELL0_UUID)
except exception.CellMappingNotFound:
# Not yet setup for cellsv2. Instances will need to be written
# to the configured database. This will become a deployment
# error in Ocata.
LOG.error('No cell mapping found for cell0 while '
'trying to record scheduling failure. '
'Setup is incomplete.')
return
build_requests = build_requests or []
instances = instances or []
instances_by_uuid = {inst.uuid: inst for inst in instances}
for build_request in build_requests:
if build_request.instance_uuid not in instances_by_uuid:
# This is an instance object with no matching db entry.
instance = build_request.get_new_instance(context)
instances_by_uuid[instance.uuid] = instance
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
for instance in instances_by_uuid.values():
inst_mapping = None
try:
# We don't need the cell0-targeted context here because the
# instance mapping is in the API DB.
inst_mapping = \
objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
except exception.InstanceMappingNotFound:
# The API created the instance mapping record so it should
# definitely be here. Log an error but continue to create the
# instance in the cell0 database.
LOG.error('While burying instance in cell0, no instance '
'mapping was found.', instance=instance)
# Perform a final sanity check that the instance is not mapped
# to some other cell already because of maybe some crazy
# clustered message queue weirdness.
if inst_mapping and inst_mapping.cell_mapping is not None:
LOG.error('When attempting to bury instance in cell0, the '
'instance is already mapped to cell %s. Ignoring '
'bury in cell0 attempt.',
inst_mapping.cell_mapping.identity,
instance=instance)
continue
with obj_target_cell(instance, cell0) as cctxt:
instance.create()
if inst_mapping:
inst_mapping.cell_mapping = cell0
inst_mapping.save()
# Record an instance action with a failed event.
self._create_instance_action_for_cell0(
cctxt, instance, exc)
# NOTE(mnaser): In order to properly clean-up volumes after
# being buried in cell0, we need to store BDMs.
if block_device_mapping:
self._create_block_device_mapping(
cell0, instance.flavor, instance.uuid,
block_device_mapping)
self._create_tags(cctxt, instance.uuid, tags)
# Use the context targeted to cell0 here since the instance is
# now in cell0.
self._set_vm_state_and_notify(
cctxt, instance.uuid, 'build_instances', updates,
exc, request_spec)
for build_request in build_requests:
try:
build_request.destroy()
except exception.BuildRequestNotFound:
# Instance was deleted before we finished scheduling
inst = instances_by_uuid[build_request.instance_uuid]
with obj_target_cell(inst, cell0):
inst.destroy()
def schedule_and_build_instances(self, context, build_requests,
request_specs, image,
admin_password, injected_files,
requested_networks, block_device_mapping,
tags=None):
# Add all the UUIDs for the instances
instance_uuids = [spec.instance_uuid for spec in request_specs]
try:
host_lists = self._schedule_instances(context, request_specs[0],
instance_uuids, return_alternates=True)
except Exception as exc:
LOG.exception('Failed to schedule instances')
self._bury_in_cell0(context, request_specs[0], exc,
build_requests=build_requests,
block_device_mapping=block_device_mapping,
tags=tags)
return
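        # Caches to avoid repeated API database lookups when several instances
        # land on the same host or cell.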
host_mapping_cache = {}
cell_mapping_cache = {}
instances = []
host_az = {} # host=az cache to optimize multi-create
for (build_request, request_spec, host_list) in six.moves.zip(
build_requests, request_specs, host_lists):
instance = build_request.get_new_instance(context)
# host_list is a list of one or more Selection objects, the first
# of which has been selected and its resources claimed.
host = host_list[0]
# Convert host from the scheduler into a cell record
if host.service_host not in host_mapping_cache:
try:
host_mapping = objects.HostMapping.get_by_host(
context, host.service_host)
host_mapping_cache[host.service_host] = host_mapping
except exception.HostMappingNotFound as exc:
LOG.error('No host-to-cell mapping found for selected '
'host %(host)s. Setup is incomplete.',
{'host': host.service_host})
self._bury_in_cell0(
context, request_spec, exc,
build_requests=[build_request], instances=[instance],
block_device_mapping=block_device_mapping,
tags=tags)
# This is a placeholder in case the quota recheck fails.
instances.append(None)
continue
else:
host_mapping = host_mapping_cache[host.service_host]
cell = host_mapping.cell_mapping
# Before we create the instance, let's make one final check that
# the build request is still around and wasn't deleted by the user
# already.
try:
objects.BuildRequest.get_by_instance_uuid(
context, instance.uuid)
except exception.BuildRequestNotFound:
# the build request is gone so we're done for this instance
LOG.debug('While scheduling instance, the build request '
'was already deleted.', instance=instance)
# This is a placeholder in case the quota recheck fails.
instances.append(None)
# If the build request was deleted and the instance is not
                # going to be created, there is no point in leaving an orphan
# instance mapping so delete it.
try:
im = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
im.destroy()
except exception.InstanceMappingNotFound:
pass
self.report_client.delete_allocation_for_instance(
context, instance.uuid)
continue
else:
if host.service_host not in host_az:
host_az[host.service_host] = (
availability_zones.get_host_availability_zone(
context, host.service_host))
instance.availability_zone = host_az[host.service_host]
with obj_target_cell(instance, cell):
instance.create()
instances.append(instance)
cell_mapping_cache[instance.uuid] = cell
# NOTE(melwitt): We recheck the quota after creating the
# objects to prevent users from allocating more resources
# than their allowed quota in the event of a race. This is
# configurable because it can be expensive if strict quota
# limits are not required in a deployment.
if CONF.quota.recheck_quota:
try:
compute_utils.check_num_instances_quota(
context, instance.flavor, 0, 0,
orig_num_req=len(build_requests))
except exception.TooManyInstances as exc:
with excutils.save_and_reraise_exception():
self._cleanup_build_artifacts(context, exc, instances,
build_requests,
request_specs,
block_device_mapping, tags,
cell_mapping_cache)
zipped = six.moves.zip(build_requests, request_specs, host_lists,
instances)
for (build_request, request_spec, host_list, instance) in zipped:
if instance is None:
# Skip placeholders that were buried in cell0 or had their
# build requests deleted by the user before instance create.
continue
cell = cell_mapping_cache[instance.uuid]
# host_list is a list of one or more Selection objects, the first
# of which has been selected and its resources claimed.
host = host_list.pop(0)
alts = [(alt.service_host, alt.nodename) for alt in host_list]
LOG.debug("Selected host: %s; Selected node: %s; Alternates: %s",
host.service_host, host.nodename, alts, instance=instance)
filter_props = request_spec.to_legacy_filter_properties_dict()
scheduler_utils.populate_retry(filter_props, instance.uuid)
scheduler_utils.populate_filter_properties(filter_props,
host)
# Now that we have a selected host (which has claimed resource
# allocations in the scheduler) for this instance, we may need to
# map allocations to resource providers in the request spec.
try:
scheduler_utils.fill_provider_mapping(request_spec, host)
except Exception as exc:
# If anything failed here we need to cleanup and bail out.
with excutils.save_and_reraise_exception():
self._cleanup_build_artifacts(
context, exc, instances, build_requests, request_specs,
block_device_mapping, tags, cell_mapping_cache)
# TODO(melwitt): Maybe we should set_target_cell on the contexts
# once we map to a cell, and remove these separate with statements.
with obj_target_cell(instance, cell) as cctxt:
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING
# This can lazy-load attributes on instance.
notifications.send_update_with_states(cctxt, instance, None,
vm_states.BUILDING, None, None, service="conductor")
objects.InstanceAction.action_start(
cctxt, instance.uuid, instance_actions.CREATE,
want_result=False)
instance_bdms = self._create_block_device_mapping(
cell, instance.flavor, instance.uuid, block_device_mapping)
instance_tags = self._create_tags(cctxt, instance.uuid, tags)
# TODO(Kevin Zheng): clean this up once instance.create() handles
# tags; we do this so the instance.create notification in
# build_and_run_instance in nova-compute doesn't lazy-load tags
instance.tags = instance_tags if instance_tags \
else objects.TagList()
# Update mapping for instance.
self._map_instance_to_cell(context, instance, cell)
if not self._delete_build_request(
context, build_request, instance, cell, instance_bdms,
instance_tags):
# The build request was deleted before/during scheduling so
# the instance is gone and we don't have anything to build for
# this one.
continue
try:
accel_uuids = self._create_and_bind_arq_for_instance(
context, instance, host, request_spec)
except Exception as exc:
with excutils.save_and_reraise_exception():
self._cleanup_build_artifacts(
context, exc, instances, build_requests, request_specs,
block_device_mapping, tags, cell_mapping_cache)
# NOTE(danms): Compute RPC expects security group names or ids
# not objects, so convert this to a list of names until we can
# pass the objects.
legacy_secgroups = [s.identifier
for s in request_spec.security_groups]
with obj_target_cell(instance, cell) as cctxt:
self.compute_rpcapi.build_and_run_instance(
cctxt, instance=instance, image=image,
request_spec=request_spec,
filter_properties=filter_props,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=legacy_secgroups,
block_device_mapping=instance_bdms,
host=host.service_host, node=host.nodename,
limits=host.limits, host_list=host_list,
accel_uuids=accel_uuids)
def _create_and_bind_arqs(self, context, instance_uuid, extra_specs,
hostname, resource_provider_mapping):
"""Create ARQs, determine their RPs and initiate ARQ binding.
The binding is asynchronous; Cyborg will notify on completion.
The notification will be handled in the compute manager.
"""
dp_name = extra_specs.get('accel:device_profile')
if not dp_name:
return []
LOG.debug('Calling Cyborg to get ARQs. dp_name=%s instance=%s',
dp_name, instance_uuid)
cyclient = cyborg.get_client(context)
arqs = cyclient.create_arqs_and_match_resource_providers(
dp_name, resource_provider_mapping)
LOG.debug('Got ARQs with resource provider mapping %s', arqs)
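        # Build the binding payload keyed by ARQ UUID: each entry names the
        # host, the device resource provider and the instance to bind to.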
bindings = {arq['uuid']:
{"hostname": hostname,
"device_rp_uuid": arq['device_rp_uuid'],
"instance_uuid": instance_uuid
}
for arq in arqs}
# Initiate Cyborg binding asynchronously
cyclient.bind_arqs(bindings=bindings)
return [arq['uuid'] for arq in arqs]
@staticmethod
def _map_instance_to_cell(context, instance, cell):
"""Update the instance mapping to point at the given cell.
During initial scheduling once a host and cell is selected in which
to build the instance this method is used to update the instance
mapping to point at that cell.
:param context: nova auth RequestContext
:param instance: Instance object being built
:param cell: CellMapping representing the cell in which the instance
was created and is being built.
:returns: InstanceMapping object that was updated.
"""
inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
# Perform a final sanity check that the instance is not mapped
# to some other cell already because of maybe some crazy
# clustered message queue weirdness.
if inst_mapping.cell_mapping is not None:
LOG.error('During scheduling instance is already mapped to '
'another cell: %s. This should not happen and is an '
'indication of bigger problems. If you see this you '
'should report it to the nova team. Overwriting '
'the mapping to point at cell %s.',
inst_mapping.cell_mapping.identity, cell.identity,
instance=instance)
inst_mapping.cell_mapping = cell
inst_mapping.save()
return inst_mapping
def _cleanup_build_artifacts(self, context, exc, instances, build_requests,
request_specs, block_device_mappings, tags,
cell_mapping_cache):
for (instance, build_request, request_spec) in six.moves.zip(
instances, build_requests, request_specs):
# Skip placeholders that were buried in cell0 or had their
# build requests deleted by the user before instance create.
if instance is None:
continue
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
cell = cell_mapping_cache[instance.uuid]
with try_target_cell(context, cell) as cctxt:
self._set_vm_state_and_notify(cctxt, instance.uuid,
'build_instances', updates, exc,
request_spec)
# In order to properly clean-up volumes when deleting a server in
# ERROR status with no host, we need to store BDMs in the same
# cell.
if block_device_mappings:
self._create_block_device_mapping(
cell, instance.flavor, instance.uuid,
block_device_mappings)
# Like BDMs, the server tags provided by the user when creating the
# server should be persisted in the same cell so they can be shown
# from the API.
if tags:
with nova_context.target_cell(context, cell) as cctxt:
self._create_tags(cctxt, instance.uuid, tags)
# NOTE(mdbooth): To avoid an incomplete instance record being
# returned by the API, the instance mapping must be
# created after the instance record is complete in
# the cell, and before the build request is
# destroyed.
# TODO(mnaser): The cell mapping should already be populated by
# this point to avoid setting it below here.
inst_mapping = objects.InstanceMapping.get_by_instance_uuid(
context, instance.uuid)
inst_mapping.cell_mapping = cell
inst_mapping.save()
# Be paranoid about artifacts being deleted underneath us.
try:
build_request.destroy()
except exception.BuildRequestNotFound:
pass
try:
request_spec.destroy()
except exception.RequestSpecNotFound:
pass
def _delete_build_request(self, context, build_request, instance, cell,
instance_bdms, instance_tags):
"""Delete a build request after creating the instance in the cell.
This method handles cleaning up the instance in case the build request
is already deleted by the time we try to delete it.
:param context: the context of the request being handled
:type context: nova.context.RequestContext
:param build_request: the build request to delete
:type build_request: nova.objects.BuildRequest
:param instance: the instance created from the build_request
:type instance: nova.objects.Instance
:param cell: the cell in which the instance was created
:type cell: nova.objects.CellMapping
:param instance_bdms: list of block device mappings for the instance
:type instance_bdms: nova.objects.BlockDeviceMappingList
:param instance_tags: list of tags for the instance
:type instance_tags: nova.objects.TagList
:returns: True if the build request was successfully deleted, False if
the build request was already deleted and the instance is now gone.
"""
try:
build_request.destroy()
except exception.BuildRequestNotFound:
# This indicates an instance deletion request has been
# processed, and the build should halt here. Clean up the
# bdm, tags and instance record.
with obj_target_cell(instance, cell) as cctxt:
with compute_utils.notify_about_instance_delete(
self.notifier, cctxt, instance,
source=fields.NotificationSource.CONDUCTOR):
try:
instance.destroy()
except exception.InstanceNotFound:
pass
except exception.ObjectActionError:
# NOTE(melwitt): Instance became scheduled during
# the destroy, "host changed". Refresh and re-destroy.
try:
instance.refresh()
instance.destroy()
except exception.InstanceNotFound:
pass
for bdm in instance_bdms:
with obj_target_cell(bdm, cell):
try:
bdm.destroy()
except exception.ObjectActionError:
pass
if instance_tags:
with try_target_cell(context, cell) as target_ctxt:
try:
objects.TagList.destroy(target_ctxt, instance.uuid)
except exception.InstanceNotFound:
pass
return False
return True
def cache_images(self, context, aggregate, image_ids):
"""Cache a set of images on the set of hosts in an aggregate.
:param context: The RequestContext
:param aggregate: The Aggregate object from the request to constrain
the host list
        :param image_ids: The IDs of the images to cache
"""
# TODO(mriedem): Consider including the list of images in the
# notification payload.
compute_utils.notify_about_aggregate_action(
context, aggregate,
fields.NotificationAction.IMAGE_CACHE,
fields.NotificationPhase.START)
clock = timeutils.StopWatch()
threads = CONF.image_cache.precache_concurrency
fetch_pool = eventlet.GreenPool(size=threads)
hosts_by_cell = {}
cells_by_uuid = {}
# TODO(danms): Make this a much more efficient bulk query
for hostname in aggregate.hosts:
hmap = objects.HostMapping.get_by_host(context, hostname)
cells_by_uuid.setdefault(hmap.cell_mapping.uuid, hmap.cell_mapping)
hosts_by_cell.setdefault(hmap.cell_mapping.uuid, [])
hosts_by_cell[hmap.cell_mapping.uuid].append(hostname)
LOG.info('Preparing to request pre-caching of image(s) %(image_ids)s '
'on %(hosts)i hosts across %(cells)i cells.',
{'image_ids': ','.join(image_ids),
'hosts': len(aggregate.hosts),
'cells': len(hosts_by_cell)})
clock.start()
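        # Per-image counters, ordered as (cached, existing, error, unsupported).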
stats = collections.defaultdict(lambda: (0, 0, 0, 0))
failed_images = collections.defaultdict(int)
down_hosts = set()
host_stats = {
'completed': 0,
'total': len(aggregate.hosts),
}
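        # Aggregate one host's per-image results into the counters above and
        # emit a progress notification for the aggregate.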
def host_completed(context, host, result):
for image_id, status in result.items():
cached, existing, error, unsupported = stats[image_id]
if status == 'error':
failed_images[image_id] += 1
error += 1
elif status == 'cached':
cached += 1
elif status == 'existing':
existing += 1
elif status == 'unsupported':
unsupported += 1
stats[image_id] = (cached, existing, error, unsupported)
host_stats['completed'] += 1
compute_utils.notify_about_aggregate_cache(context, aggregate,
host, result,
host_stats['completed'],
host_stats['total'])
def wrap_cache_images(ctxt, host, image_ids):
result = self.compute_rpcapi.cache_images(
ctxt,
host=host,
image_ids=image_ids)
host_completed(context, host, result)
def skipped_host(context, host, image_ids):
result = {image: 'skipped' for image in image_ids}
host_completed(context, host, result)
for cell_uuid, hosts in hosts_by_cell.items():
cell = cells_by_uuid[cell_uuid]
with nova_context.target_cell(context, cell) as target_ctxt:
for host in hosts:
service = objects.Service.get_by_compute_host(target_ctxt,
host)
if not self.servicegroup_api.service_is_up(service):
down_hosts.add(host)
LOG.info(
'Skipping image pre-cache request to compute '
'%(host)r because it is not up',
{'host': host})
skipped_host(target_ctxt, host, image_ids)
continue
fetch_pool.spawn_n(wrap_cache_images, target_ctxt, host,
image_ids)
# Wait until all those things finish
fetch_pool.waitall()
overall_stats = {'cached': 0, 'existing': 0, 'error': 0,
'unsupported': 0}
for cached, existing, error, unsupported in stats.values():
overall_stats['cached'] += cached
overall_stats['existing'] += existing
overall_stats['error'] += error
overall_stats['unsupported'] += unsupported
clock.stop()
LOG.info('Image pre-cache operation for image(s) %(image_ids)s '
'completed in %(time).2f seconds; '
'%(cached)i cached, %(existing)i existing, %(error)i errors, '
'%(unsupported)i unsupported, %(skipped)i skipped (down) '
'hosts',
{'image_ids': ','.join(image_ids),
'time': clock.elapsed(),
'cached': overall_stats['cached'],
'existing': overall_stats['existing'],
'error': overall_stats['error'],
'unsupported': overall_stats['unsupported'],
'skipped': len(down_hosts),
})
# Log error'd images specifically at warning level
for image_id, fails in failed_images.items():
LOG.warning('Image pre-cache operation for image %(image)s '
'failed %(fails)i times',
{'image': image_id,
'fails': fails})
compute_utils.notify_about_aggregate_action(
context, aggregate,
fields.NotificationAction.IMAGE_CACHE,
fields.NotificationPhase.END)
@targets_cell
@wrap_instance_event(prefix='conductor')
def confirm_snapshot_based_resize(self, context, instance, migration):
"""Executes the ConfirmResizeTask
:param context: nova auth request context targeted at the target cell
:param instance: Instance object in "resized" status from the target
cell
:param migration: Migration object from the target cell for the resize
operation expected to have status "confirming"
"""
task = cross_cell_migrate.ConfirmResizeTask(
context, instance, migration, self.notifier, self.compute_rpcapi)
task.execute()
@targets_cell
# NOTE(mriedem): Upon successful completion of RevertResizeTask the
# instance is hard-deleted, along with its instance action record(s), from
# the target cell database so EventReporter hits InstanceActionNotFound on
# __exit__. Pass graceful_exit=True to avoid an ugly traceback.
@wrap_instance_event(prefix='conductor', graceful_exit=True)
def revert_snapshot_based_resize(self, context, instance, migration):
"""Executes the RevertResizeTask
:param context: nova auth request context targeted at the target cell
:param instance: Instance object in "resized" status from the target
cell
:param migration: Migration object from the target cell for the resize
operation expected to have status "reverting"
"""
task = cross_cell_migrate.RevertResizeTask(
context, instance, migration, self.notifier, self.compute_rpcapi)
task.execute()
|
the-stack_0_4945 | import os
import urllib.request
import subprocess
import time
import ssl
import requests
from test_workflow.test_cluster import TestCluster, ClusterCreationException
class LocalTestCluster(TestCluster):
'''
Represents an on-box test cluster. This class downloads a bundle (from a BundleManifest) and runs it as a background process.
'''
def __init__(self, work_dir, bundle_manifest, security_enabled):
self.manifest = bundle_manifest
self.work_dir = os.path.join(work_dir, 'local-test-cluster')
os.makedirs(self.work_dir, exist_ok = True)
self.security_enabled = security_enabled
self.process = None
def create(self):
self.download()
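        # Keep the server's stdout/stderr in files so failures can be inspected
        # after the run.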
self.stdout = open('stdout.txt', 'w')
self.stderr = open('stderr.txt', 'w')
dir = f'opensearch-{self.manifest.build.version}'
if not self.security_enabled:
self.disable_security(dir)
self.process = subprocess.Popen('./opensearch-tar-install.sh', cwd = dir, shell = True, stdout = self.stdout, stderr = self.stderr)
print(f'Started OpenSearch with PID {self.process.pid}')
self.wait_for_service()
def endpoint(self):
return 'localhost'
def port(self):
return 9200
def destroy(self):
if self.process is None:
print('Local test cluster is not started')
return
print(f'Sending SIGTERM to PID {self.process.pid}')
self.process.terminate()
try:
print('Waiting for process to terminate')
self.process.wait(10)
        except subprocess.TimeoutExpired:
print('Process did not terminate after 10 seconds. Sending SIGKILL')
self.process.kill()
try:
print('Waiting for process to terminate')
self.process.wait(10)
            except subprocess.TimeoutExpired:
print('Process failed to terminate even after SIGKILL')
raise
finally:
print(f'Process terminated with exit code {self.process.returncode}')
self.stdout.close()
self.stderr.close()
self.process = None
def url(self, path=''):
return f'{"https" if self.security_enabled else "http"}://{self.endpoint()}:{self.port()}{path}'
def download(self):
print(f'Creating local test cluster in {self.work_dir}')
os.chdir(self.work_dir)
print(f'Downloading bundle from {self.manifest.build.location}')
urllib.request.urlretrieve(self.manifest.build.location, 'bundle.tgz')
print(f'Downloaded bundle to {os.path.realpath("bundle.tgz")}')
print('Unpacking')
subprocess.check_call('tar -xzf bundle.tgz', shell = True)
print('Unpacked')
def disable_security(self, dir):
subprocess.check_call(f'echo "plugins.security.disabled: true" >> {os.path.join(dir, "config", "opensearch.yml")}', shell = True)
def wait_for_service(self):
print('Waiting for service to become available')
url = self.url('/_cluster/health')
for attempt in range(10):
try:
print(f'Pinging {url} attempt {attempt}')
response = requests.get(url, verify = False, auth = ('admin', 'admin'))
print(f'{response.status_code}: {response.text}')
if response.status_code == 200 and '"status":"green"' in response.text:
print('Cluster is green')
return
except requests.exceptions.ConnectionError:
print(f'Service not available yet')
time.sleep(10)
raise ClusterCreationException('Cluster is not green after 10 attempts')
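# A minimal usage sketch of the class above (not part of the original module);
# the work dir, manifest object and security flag are placeholders.
#
#   cluster = LocalTestCluster(work_dir="/tmp/tests", bundle_manifest=manifest, security_enabled=False)
#   try:
#       cluster.create()                         # downloads the bundle and waits for green health
#       print(cluster.url("/_cluster/health"))   # e.g. http://localhost:9200/_cluster/health
#   finally:
#       cluster.destroy()                        # terminates the background OpenSearch process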
|
the-stack_0_4947 | # Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import sys
from botocore.exceptions import ClientError
from c7n.exceptions import PolicyValidationError
from c7n.executor import MainThreadExecutor
from c7n.resources.ebs import (
CopyInstanceTags,
EncryptInstanceVolumes,
CopySnapshot,
Delete,
QueryParser
)
from .common import BaseTest, TestConfig as Config
class SnapshotQueryParse(BaseTest):
def test_query(self):
qfilters = [
{'Name': 'tag:Name', 'Values': ['Snapshot1']},
{'Name': 'status', 'Values': ['completed']}]
self.assertEqual(qfilters, QueryParser.parse(qfilters))
def test_invalid_query(self):
self.assertRaises(
PolicyValidationError, QueryParser.parse, {})
self.assertRaises(
PolicyValidationError, QueryParser.parse, [None])
self.assertRaises(
PolicyValidationError, QueryParser.parse, [{'X': 1}])
self.assertRaises(
PolicyValidationError, QueryParser.parse, [
{'Name': 'status', 'Values': 'completed'}])
self.assertRaises(
PolicyValidationError, QueryParser.parse, [
{'Name': 'status', 'Values': ['Completed']}])
self.assertRaises(
PolicyValidationError, QueryParser.parse, [
{'Name': 'snapshot-id', 'Values': [1]}])
class SnapshotAccessTest(BaseTest):
def test_snapshot_access(self):
# pre conditions, 2 snapshots one shared to a separate account, and one
# shared publicly. 2 non matching volumes, one not shared, one shared
# explicitly to its own account.
self.patch(CopySnapshot, "executor_factory", MainThreadExecutor)
factory = self.replay_flight_data("test_ebs_cross_account")
p = self.load_policy(
{
"name": "snap-copy",
"resource": "ebs-snapshot",
"filters": ["cross-account"],
},
config=Config.empty(),
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 2)
self.assertEqual(
{r["SnapshotId"]: r["c7n:CrossAccountViolations"] for r in resources},
{"snap-7f9496cf": ["619193117841"], "snap-af0eb71b": ["all"]},
)
class SnapshotDetachTest(BaseTest):
def test_volume_detach(self):
factory = self.replay_flight_data('test_ebs_detach')
p = self.load_policy(
{
'name': 'volume-detach',
'resource': 'ebs',
'filters': [{'VolumeId': 'vol-0850cf7c8e949c318'}],
'actions': [
{
'type': 'detach'
}
]
}, config=Config.empty(), session_factory=factory)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory(region="us-east-1").client('ec2')
volumelist = []
volumelist.append(resources[0]['VolumeId'])
response = client.describe_volumes(VolumeIds=volumelist)
for resp in response['Volumes']:
for attachment in resp['Attachments']:
self.assertTrue(attachment['State'] == "detached" or
attachment['State'] == "detaching")
class SnapshotCopyTest(BaseTest):
def test_snapshot_copy(self):
self.patch(CopySnapshot, "executor_factory", MainThreadExecutor)
self.change_environment(AWS_DEFAULT_REGION="us-west-2")
factory = self.replay_flight_data("test_ebs_snapshot_copy")
p = self.load_policy(
{
"name": "snap-copy",
"resource": "ebs-snapshot",
"filters": [{"tag:ASV": "RoadKill"}],
"actions": [
{
"type": "copy",
"target_region": "us-east-1",
"target_key": "82645407-2faa-4d93-be71-7d6a8d59a5fc",
}
],
},
Config.empty(region="us-west-2"),
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
client = factory(region="us-east-1").client("ec2")
tags = client.describe_tags(
Filters=[
{"Name": "resource-id", "Values": [resources[0]["c7n:CopiedSnapshot"]]}
]
)[
"Tags"
]
tags = {t["Key"]: t["Value"] for t in tags}
self.assertEqual(tags["ASV"], "RoadKill")
class SnapshotAmiSnapshotTest(BaseTest):
def test_snapshot_ami_snapshot_filter(self):
self.patch(CopySnapshot, "executor_factory", MainThreadExecutor)
# DEFAULT_REGION needs to be set to west for recording
factory = self.replay_flight_data("test_ebs_ami_snapshot_filter")
# first case should return only resources that are ami snapshots
p = self.load_policy(
{
"name": "ami-snap-filter",
"resource": "ebs-snapshot",
"filters": [{"type": "skip-ami-snapshots", "value": False}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
# second case should return resources that are NOT ami snapshots
policy = self.load_policy(
{
"name": "non-ami-snap-filter",
"resource": "ebs-snapshot",
"filters": [{"type": "skip-ami-snapshots", "value": True}],
},
session_factory=factory,
)
resources = policy.run()
self.assertEqual(len(resources), 2)
class SnapshotTrimTest(BaseTest):
def test_snapshot_trim(self):
factory = self.replay_flight_data("test_ebs_snapshot_delete")
p = self.load_policy(
{
"name": "snapshot-trim",
"resource": "ebs-snapshot",
"filters": [{"tag:InstanceId": "not-null"}],
"actions": ["delete"],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
class AttachedInstanceTest(BaseTest):
def test_ebs_instance_filter(self):
factory = self.replay_flight_data("test_ebs_instance_filter")
p = self.load_policy(
{
"name": "attached-instance-test",
"resource": "ebs",
"filters": [
{"type": "instance", "key": "tag:Name", "value": "CompiledLambda"}
],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
class ResizeTest(BaseTest):
def test_resize_action(self):
factory = self.replay_flight_data("test_ebs_modifyable_action")
client = factory().client("ec2")
# Change a volume from 32 gb gp2 and 100 iops (sized based) to
# 64gb and 500 iops.
vol_id = "vol-0073dcd216489ea1b"
p = self.load_policy(
{
"name": "resizable",
"resource": "ebs",
"filters": ["modifyable", {"VolumeId": vol_id}],
"actions": [
{
"type": "modify",
"volume-type": "io1",
"size-percent": 200,
"iops-percent": 500,
}
],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(resources[0]["Iops"], 100)
self.assertEqual(resources[0]["Size"], 32)
vol = client.describe_volumes(VolumeIds=[vol_id])["Volumes"][0]
self.assertEqual(vol["Iops"], 500)
self.assertEqual(vol["Size"], 64)
def test_resize_filter(self):
# precondition, 6 volumes, 4 not modifyable.
factory = self.replay_flight_data("test_ebs_modifyable_filter")
output = self.capture_logging("custodian.filters", level=logging.DEBUG)
p = self.load_policy(
{"name": "resizable", "resource": "ebs", "filters": ["modifyable"]},
session_factory=factory,
)
resources = p.run()
self.assertEqual(
{r["VolumeId"] for r in resources},
set(("vol-0073dcd216489ea1b", "vol-0e4cba7adc4764f79")),
)
# normalizing on str/unicode repr output between versions.. punt
if sys.version_info[0] > 2:
return
self.assertEqual(
output.getvalue().strip(),
(
"filtered 4 of 6 volumes due to [(u'instance-type', 2), "
"(u'vol-mutation', 1), (u'vol-type', 1)]"
),
)
class CopyInstanceTagsTest(BaseTest):
def test_copy_instance_tags(self):
# More a functional/coverage test then a unit test.
self.patch(CopyInstanceTags, "executor_factory", MainThreadExecutor)
factory = self.replay_flight_data("test_ebs_copy_instance_tags")
volume_id = "vol-2b047792"
results = factory().client("ec2").describe_tags(
Filters=[{"Name": "resource-id", "Values": [volume_id]}]
)[
"Tags"
]
tags = {t["Key"]: t["Value"] for t in results}
self.assertEqual(tags, {})
policy = self.load_policy(
{
"name": "test-copy-instance-tags",
"resource": "ebs",
"actions": [{"type": "copy-instance-tags", "tags": ["Name"]}],
},
config={"region": "us-west-2"},
session_factory=factory,
)
policy.run()
results = factory().client("ec2").describe_tags(
Filters=[{"Name": "resource-id", "Values": [volume_id]}]
)[
"Tags"
]
tags = {t["Key"]: t["Value"] for t in results}
self.assertEqual(tags["Name"], "CompileLambda")
class VolumeSnapshotTest(BaseTest):
def test_volume_snapshot(self):
factory = self.replay_flight_data("test_ebs_snapshot")
policy = self.load_policy(
{
"name": "test-ebs-snapshot",
"resource": "ebs",
"filters": [{"VolumeId": "vol-01adbb6a4f175941d"}],
"actions": ["snapshot"],
},
session_factory=factory,
)
policy.run()
snapshot_data = factory().client("ec2").describe_snapshots(
Filters=[{"Name": "volume-id", "Values": ["vol-01adbb6a4f175941d"]}]
)
self.assertEqual(len(snapshot_data["Snapshots"]), 1)
class VolumeDeleteTest(BaseTest):
def test_volume_delete_force(self):
self.patch(Delete, "executor_factory", MainThreadExecutor)
factory = self.replay_flight_data("test_ebs_force_delete")
policy = self.load_policy(
{
"name": "test-ebs",
"resource": "ebs",
"filters": [{"VolumeId": "vol-d0790258"}],
"actions": [{"type": "delete", "force": True}],
},
session_factory=factory,
)
resources = policy.run()
try:
factory().client("ec2").describe_volumes(
VolumeIds=[resources[0]["VolumeId"]]
)
except ClientError as e:
self.assertEqual(e.response["Error"]["Code"], "InvalidVolume.NotFound")
else:
self.fail("Volume still exists")
class EncryptExtantVolumesTest(BaseTest):
def test_encrypt_volumes(self):
self.patch(EncryptInstanceVolumes, "executor_factory", MainThreadExecutor)
session_factory = self.replay_flight_data("test_encrypt_volumes")
policy = self.load_policy(
{
"name": "ebs-remediate-attached",
"resource": "ebs",
"filters": [
{"Encrypted": False}, {"VolumeId": "vol-0f53c81b92b4ecfce"}
],
"actions": [
{
"type": "encrypt-instance-volumes",
"delay": 0.001,
"key": "alias/encryptebs",
}
],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
for r in resources:
volumes = session_factory().client("ec2").describe_volumes(
Filters=[
{
"Name": "attachment.instance-id",
"Values": [r["Attachments"][0]["InstanceId"]],
}
]
)
for v in volumes["Volumes"]:
self.assertTrue(v["Attachments"][0]["DeleteOnTermination"])
self.assertTrue(v["Encrypted"])
if "Tags" in v:
self.assertNotIn(
"maid-crypt-remediation", [i["Key"] for i in v["Tags"]]
)
self.assertNotIn(
"maid-origin-volume", [i["Key"] for i in v["Tags"]]
)
self.assertNotIn(
"maid-instance-device", [i["Key"] for i in v["Tags"]]
)
class TestKmsAlias(BaseTest):
def test_ebs_kms_alias(self):
session_factory = self.replay_flight_data("test_ebs_aws_managed_kms_keys")
p = self.load_policy(
{
"name": "ebs-aws-managed-kms-keys-filters",
"resource": "ebs",
"filters": [
{
"type": "kms-alias",
"key": "AliasName",
"value": "^(alias/aws/)",
"op": "regex",
}
],
},
config={"region": "us-west-2"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["VolumeId"], "vol-14a3cd9d")
class EbsFaultToleranceTest(BaseTest):
def test_ebs_fault_tolerant(self):
session = self.replay_flight_data("test_ebs_fault_tolerant")
policy = self.load_policy(
{
"name": "ebs-fault-tolerant",
"resource": "ebs",
"filters": ["fault-tolerant"],
},
session_factory=session,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["VolumeId"], "vol-c5eaa459")
def test_ebs_non_fault_tolerant(self):
session = self.replay_flight_data("test_ebs_non_fault_tolerant")
policy = self.load_policy(
{
"name": "ebs-non-fault-tolerant",
"resource": "ebs",
"filters": [{"type": "fault-tolerant", "tolerant": False}],
},
session_factory=session,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["VolumeId"], "vol-abdb8d37")
class PiopsMetricsFilterTest(BaseTest):
def test_ebs_metrics_percent_filter(self):
session = self.replay_flight_data("test_ebs_metrics_percent_filter")
policy = self.load_policy(
{
"name": "ebs-unused-piops",
"resource": "ebs",
"filters": [
{
"type": "metrics",
"name": "VolumeConsumedReadWriteOps",
"op": "lt",
"value": 50,
"statistics": "Maximum",
"days": 1,
"percent-attr": "Iops",
}
],
},
session_factory=session,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
class HealthEventsFilterTest(BaseTest):
def test_ebs_health_events_filter(self):
session_factory = self.replay_flight_data("test_ebs_health_events_filter")
policy = self.load_policy(
{
"name": "ebs-health-events-filter",
"resource": "ebs",
"filters": [{"type": "health-event", "types": ["AWS_EBS_VOLUME_LOST"]}],
},
session_factory=session_factory,
)
resources = policy.run()
self.assertEqual(len(resources), 1)
for r in resources:
self.assertTrue(
                ("c7n:HealthEvent" in r) and
                all("Description" in e for e in r["c7n:HealthEvent"])
)
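# For reference, the policy dicts exercised in these tests correspond to what a
# user would normally express in Custodian YAML. A minimal example mirroring
# SnapshotTrimTest (illustrative only, not part of the original test module):
#
#   policies:
#     - name: snapshot-trim
#       resource: ebs-snapshot
#       filters:
#         - "tag:InstanceId": not-null
#       actions:
#         - delete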
|
the-stack_0_4949 | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import pytest
from indico.modules.users import User
pytest_plugins = 'indico.modules.rb.testing.fixtures'
@pytest.mark.parametrize('bulk_possible', (True, False))
def test_managed_rooms(monkeypatch, bulk_possible, create_user, create_room, dummy_user):
from indico.modules.rb.operations.rooms import get_managed_room_ids
monkeypatch.setattr(User, 'can_get_all_multipass_groups', bulk_possible)
users = {
'x': {'first_name': 'Regular', 'last_name': 'User'},
'y': {'first_name': 'Room', 'last_name': 'Owner'},
'z': {'first_name': 'ManyRooms', 'last_name': 'Owner'}
}
rooms = {
'a': {'verbose_name': 'Red room', 'owner': 'z'},
'b': {'verbose_name': 'Blue room', 'owner': 'y'},
'c': {'verbose_name': 'Green room', 'owner': 'y'}
}
user_map = {key: create_user(id_, **data) for id_, (key, data) in enumerate(users.iteritems(), 1)}
room_map = {}
for id_, (key, data) in enumerate(rooms.iteritems(), 1):
data['id'] = id_
data['owner'] = user_map[data['owner']]
room_map[key] = create_room(**data)
room_map['a'].update_principal(user_map['y'], full_access=True)
for key, user in user_map.iteritems():
room_ids = [room.id for room in room_map.values() if (room.owner == user_map[key] or room.can_manage(user))]
assert get_managed_room_ids(user) == set(room_ids)
|
the-stack_0_4950 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import scipy as sp
import scanpy as sc
def pearson_residuals(counts, theta, clipping=True):
'''Computes analytical residuals for NB model with a fixed theta, clipping outlier residuals to sqrt(N)'''
counts_sum0 = np.sum(counts, axis=0, keepdims=True)
counts_sum1 = np.sum(counts, axis=1, keepdims=True)
counts_sum = np.sum(counts)
#get residuals
mu = counts_sum1 @ counts_sum0 / counts_sum
z = (counts - mu) / np.sqrt(mu + mu**2/theta)
#clip to sqrt(n)
if clipping:
n = counts.shape[0]
z[z > np.sqrt(n)] = np.sqrt(n)
z[z < -np.sqrt(n)] = -np.sqrt(n)
return z
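# Example of calling the transform above on a small dense count matrix
# (values illustrative; with clipping=True residuals are capped at +/- sqrt(counts.shape[0])):
#
#   counts = np.array([[0, 3, 1], [2, 0, 5]])
#   z = pearson_residuals(counts, theta=100)   # same shape as counts, here (2, 3)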
def read_dataset(adata, transpose=False, copy=False):
if isinstance(adata, sc.AnnData):
if copy:
adata = adata.copy()
elif isinstance(adata, str):
adata = sc.read(adata)
else:
raise NotImplementedError
norm_error = 'Make sure that the dataset (adata.X) contains unnormalized count data.'
assert 'n_count' not in adata.obs, norm_error
if adata.X.size < 50e6: # check if adata.X is integer only if array is small
if sp.sparse.issparse(adata.X):
assert (adata.X.astype(int) != adata.X).nnz == 0, norm_error
else:
assert np.all(adata.X.astype(int) == adata.X), norm_error
if transpose: adata = adata.transpose()
print('### Autoencoder: Successfully preprocessed {} genes and {} cells.'.format(adata.n_vars, adata.n_obs))
return adata
def normalize_training(adata, filter_min_counts=True, size_factors=True, normalize_input=True, logtrans_input=True):
if filter_min_counts:
sc.pp.filter_genes(adata, min_counts=1)
sc.pp.filter_cells(adata, min_counts=1)
if size_factors or normalize_input or logtrans_input:
adata.raw = adata.copy()
else:
adata.raw = adata
if size_factors:
sc.pp.normalize_per_cell(adata)
adata.obs['size_factors'] = adata.obs.n_counts / np.median(adata.obs.n_counts)
else:
adata.obs['size_factors'] = 1.0
if logtrans_input:
sc.pp.log1p(adata)
if normalize_input:
sc.pp.scale(adata)
return adata
def normalize_testing(adata, training_median_n_counts, training_mean, training_std, filter_min_counts=True, size_factors=True, normalize_input=True, logtrans_input=True):
if filter_min_counts:
sc.pp.filter_genes(adata, min_counts=1)
sc.pp.filter_cells(adata, min_counts=1)
if size_factors or normalize_input or logtrans_input:
adata.raw = adata.copy()
else:
adata.raw = adata
if size_factors:
sc.pp.normalize_per_cell(adata)
adata.obs['size_factors'] = adata.obs.n_counts / training_median_n_counts
else:
adata.obs['size_factors'] = 1.0
if logtrans_input:
sc.pp.log1p(adata)
if normalize_input:
adata.X = (adata.X - np.array(training_mean)) / np.array(training_std)
return adata
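# A hypothetical sketch of how the two normalizers above pair up (file names and
# the training statistics are placeholders; in practice the per-gene mean/std and
# median count would be recorded from the training data before scaling):
#
#   train = read_dataset("train_counts.h5ad", copy=True)
#   train = normalize_training(train)
#   test = read_dataset("test_counts.h5ad", copy=True)
#   test = normalize_testing(test,
#                            training_median_n_counts=train_median_counts,
#                            training_mean=train_gene_means,
#                            training_std=train_gene_stds)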
|
the-stack_0_4952 | import contextlib
import io
from elftools.elf.elffile import ELFFile
from elftools.dwarf.die import DIE
from elftools.dwarf.die import AttributeValue
from elftools.dwarf.descriptions import describe_DWARF_expr, set_global_machine_arch
from elftools.dwarf.locationlists import LocationEntry, LocationExpr, LocationParser
class DebugInfo:
def debug_info(self, show=None, **kwargs):
"""Print a summary of the debugging info for the compiled code.
This is the data that debuggers use to make debugging a program
comprehensible. It includes variable and function names, types, file
names, line numbers, etc.
Currently only `DWARF4 <https://dwarfstd.org/doc/DWARF4.pdf>`_ is
supported, which is the standard on Linux systems.
        In order for debugging information to be present, the code must be
compiled with :code:`-g`.
Args:
show: What to show -- a function name. Defaults to ``None`` which will display all the debugging info.
Returns:
:code:`str`: String rendering the DWARF data for the file or function. This can be very long.
"""
self.show = show
with self.DWARFInfo() as dwarfinfo:
if dwarfinfo is None:
return f"No debugging data in {self.lib}"
for CU in dwarfinfo.iter_CUs():
top_DIE = CU.get_top_DIE()
return DWARFRenderer(top_DIE, show).render()
return f"No Compilation units in {self.lib}."
def stack_frame(self, show, **kwargs):
"""Print the stack frame layout for a function.
This returns a description of where each variable and argument
resides on the stack or in registers.
For instance:
.. doctest::
>>> from cfiddle import *
>>> sample = code(r'''
... extern "C"
... int foo(int a) {
... register int sum = 0;
... for(int i = 0; i < 10; i++) {
... sum += i;
... }
... return sum;
... }
... ''')
>>> stack_frame = build(sample)[0].stack_frame("foo")
>>> print(stack_frame) # doctest: +SKIP
function foo
a: (DW_OP_fbreg: -44)
sum: (DW_OP_reg3 (rbx))
i: (DW_OP_fbreg: -28)
The format is potentially complicated (the DWARF format is Turing
        complete!), but most entries are easy to understand.
The example above shows that :code:`a` is store at -44 bytes relative
to the frame base register and :code:`sum` is a register.
This is a work in progress. Here's the `Dwarf4 spec
<https://dwarfstd.org/doc/DWARF4.pdf>`_ and the source code for
`pyelftools <https://github.com/eliben/pyelftools>`_, which is reasonably well documented.
Pull requests welcome :-).
Args:
show: Function to extract the frame layout from.
Returns:
:code:`str`: A description of the layout
"""
output = io.StringIO()
current_function = None
self._set_machine_architecture()
def emit(s):
if current_function == show:
output.write(s)
with self.DWARFInfo() as dwarfinfo:
loc_parser = self._build_location_parser(dwarfinfo)
for CU in dwarfinfo.iter_CUs():
for DIE in CU.iter_DIEs():
if DIE.tag == "DW_TAG_subprogram":
current_function = self._extract_name(DIE)
emit(self._render_function_name(DIE))
elif DIE.tag in ["DW_TAG_formal_parameter", "DW_TAG_variable"]:
if current_function == show:
emit(self._render_variable_location(DIE, CU, dwarfinfo, loc_parser))
return output.getvalue()
def _render_variable_location(self, DIE, CU, dwarfinfo, loc_parser):
if "DW_AT_name" in DIE.attributes:
name = DIE.attributes['DW_AT_name'].value.decode()
else:
name = "<unnamed>"
if "DW_AT_location" not in DIE.attributes:
return f"{name} has no location\n"
else:
loc = loc_parser.parse_from_attribute(DIE.attributes["DW_AT_location"], CU['version'])
if isinstance(loc, LocationExpr):
offset = describe_DWARF_expr(loc.loc_expr, dwarfinfo.structs, CU.cu_offset)
return f" {name}: {offset}\n"
else:
return f" {name}: <not a location>\n"
def _set_machine_architecture(self):
with self.ELFFile() as elffile: # This is required for the descriptions module to correctly decode
# register names contained in DWARF expressions.
set_global_machine_arch(elffile.get_machine_arch())
def _render_function_name(self, DIE):
n = self._extract_name(DIE)
if n is None:
return f"function <anon>\n"
else:
return f"function {n}\n"
def _extract_name(self, DIE):
if "DW_AT_name" in DIE.attributes:
return DIE.attributes['DW_AT_name'].value.decode()
else:
return None
def _build_location_parser(self, dwarfinfo):
location_lists = dwarfinfo.location_lists()
return LocationParser(location_lists)
@contextlib.contextmanager
def DWARFInfo(self):
"""Context manager for the raw :code:`DWARFInfo` object for the compiled code.
Returns:
:code:`DWARFInfo`: :code:`DWARFInfo` object created by `pyelftools <https://github.com/eliben/pyelftools>`_.
"""
try:
with self.ELFFile() as elffile:
if not elffile.has_dwarf_info():
yield None
else:
# we need to yield because the elftools hasn't finished parsing yet
yield elffile.get_dwarf_info()
finally:
pass
@contextlib.contextmanager
def ELFFile(self):
"""Context manager for the raw :code:`ELFFile` object for the compiled code.
Returns:
:code:`ELFFile`: :code:`ELFFile` object created by `pyelftools <https://github.com/eliben/pyelftools>`_.
"""
try:
with open(self.lib, 'rb') as f:
yield ELFFile(f)
finally:
pass
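# A brief usage sketch for the two context managers above, assuming an object
# like the one returned by build(...) in the stack_frame doctest (`compiled` is
# an illustrative name, not part of the original module):
#
#   with compiled.ELFFile() as elffile:
#       print(elffile.get_machine_arch())
#   with compiled.DWARFInfo() as dwarfinfo:
#       if dwarfinfo is not None:
#           print(sum(1 for _ in dwarfinfo.iter_CUs()), "compilation units")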
class DWARFRenderer:
def __init__(self, die, show):
self.root = die
self.show = show
if self.show is None:
self.printing = 1
else:
self.printing = 0
self.output = io.StringIO()
self.indent = 0
def render(self):
self._die_info_rec(self.root)
return self.output.getvalue()
def _die_info_rec(self, die):
printing_increment = 0
if die.tag == "DW_TAG_subprogram":
if self.show == self._get_die_name(die):
printing_increment = 1
self.printing += printing_increment
self._output_element(die)
self._push_indent()
for key, attribute in die.attributes.items():
self._output_element(attribute)
for child in die.iter_children():
self._die_info_rec(child)
self._pop_indent()
self.printing -= printing_increment
def _get_die_name(self, die):
if "DW_AT_name" in die.attributes:
return die.attributes["DW_AT_name"].value.decode()
else:
return "<unknown name>"
def _push_indent(self):
self.indent += 1
def _pop_indent(self):
self.indent -= 1
def _output_element(self, e):
if self.printing > 0:
indent = " " * self.indent
self.output.write(f"[{e.offset:4}] {indent}{self._render_element(e)}\n")
def _render_element(self, e):
if isinstance(e, AttributeValue) :
return f"{e.name} = {e.value}"
elif isinstance(e, DIE) :
return f"{e.tag}"
|
the-stack_0_4954 | from __future__ import absolute_import, division, print_function
# DIALS version numbers are constructed from
# 1. a common prefix
__dials_version_format = "DIALS %s"
# 2. the most recent annotated git tag (or failing that: a default string)
__dials_version_default = "2.dev"
# 3. a dash followed by the number of commits since that tag
# 4. a dash followed by a lowercase 'g' and the current commit id
def get_git_version(dials_path, treat_merges_as_single_commit=False):
import os
import subprocess
version = None
with open(os.devnull, "w") as devnull:
# Obtain name of the current branch. If this fails then the other commands will probably also fail
branch = subprocess.check_output(
["git", "describe", "--contains", "--all", "HEAD"],
cwd=dials_path,
stderr=devnull,
).rstrip()
releasebranch = "dials-2" in branch
# Always treat merges as single commit on release branches
if releasebranch:
treat_merges_as_single_commit = True
# Get descriptive version string, eg. v1.1.0-1-g56f9cd7
if treat_merges_as_single_commit:
try:
# Get a 'correct' depth, which should be the shortest path to the most recent tag
version = subprocess.check_output(
["git", "describe", "--long", "--first-parent"],
cwd=dials_path,
stderr=devnull,
).rstrip()
except Exception:
pass # This is not supported on older git versions < 1.8.4.
if version is None:
# Find the most recent tag
version = subprocess.check_output(
["git", "describe", "--long"], cwd=dials_path, stderr=devnull
).rstrip()
if treat_merges_as_single_commit:
tag = version[: version.rindex("-", 0, version.rindex("-"))]
commit = version[version.rindex("-") + 1 :] # 'gxxxxxxx'
# Now find the first-parent-path
depth = subprocess.check_output(
["git", "rev-list", "%s..HEAD" % tag, "--first-parent"],
cwd=dials_path,
stderr=devnull,
).rstrip()
if depth:
depth = depth.strip().count("\n") + 1
else:
depth = 0
version = "%s-%d-%s" % (tag, depth, commit)
# Turn descriptive version string into proper version number
if version[0] == "v":
version = version[1:].replace(".0-", "-")
version = version.replace("-", ".", 1)
# If we are on a release branch, then append a '-release'-tag
if releasebranch:
version = version + "-release"
return version
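# Worked example of the tag-to-version munging above (tag value illustrative):
#   "v1.1.0-42-g56f9cd7"  ->  "1.1.0-42-g56f9cd7"   (leading "v" stripped)
#                         ->  "1.1-42-g56f9cd7"     (".0-" collapsed to "-")
#                         ->  "1.1.42-g56f9cd7"     (first "-" becomes ".")
# and "-release" is appended when the current branch contains "dials-2".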
# When run from a development installation the version information is extracted
# from the git repository. Otherwise it is read from the file '.gitversion' in the
# DIALS module directory.
def dials_version():
"""Try to obtain the current git revision number
and store a copy in .gitversion"""
version = None
try:
import os
dials_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
version_file = os.path.join(dials_path, ".gitversion")
# 1. Try to access information in .git directory
# Regenerate .gitversion if possible
if not os.environ.get("DIALS_SKIP_GIT_VERSIONING") and os.path.exists(
os.path.join(dials_path, ".git")
):
try:
version = get_git_version(dials_path)
with open(version_file, "w") as gv:
gv.write(version)
except Exception:
if version == "":
version = None
# 2. If .git directory missing or 'git describe' failed, read .gitversion
if (version is None) and os.path.exists(version_file):
with open(version_file, "r") as gv:
version = gv.read().rstrip()
except Exception:
pass
if version is None:
version = __dials_version_format % __dials_version_default
else:
version = __dials_version_format % version
return version
|
the-stack_0_4955 | from base import api
from .helpers import TestsDatasets
from .helpers import LibraryPopulator
from .helpers import wait_on_state
class LibrariesApiTestCase( api.ApiTestCase, TestsDatasets ):
def setUp( self ):
super( LibrariesApiTestCase, self ).setUp()
self.library_populator = LibraryPopulator( self )
def test_create( self ):
data = dict( name="CreateTestLibrary" )
create_response = self._post( "libraries", data=data, admin=True )
self._assert_status_code_is( create_response, 200 )
library = create_response.json()
self._assert_has_keys( library, "name" )
assert library[ "name" ] == "CreateTestLibrary"
def test_create_private_library_permissions( self ):
library = self.library_populator.new_library( "PermissionTestLibrary" )
library_id = library[ "id" ]
role_id = self.library_populator.user_private_role_id()
self.library_populator.set_permissions( library_id, role_id )
create_response = self._create_folder( library )
self._assert_status_code_is( create_response, 200 )
def test_create_dataset( self ):
library = self.library_populator.new_private_library( "ForCreateDatasets" )
payload, files = self.library_populator.create_dataset_request( library, file_type="txt", contents="create_test" )
create_response = self._post( "libraries/%s/contents" % library[ "id" ], payload, files=files )
self._assert_status_code_is( create_response, 200 )
library_datasets = create_response.json()
assert len( library_datasets ) == 1
library_dataset = library_datasets[ 0 ]
def show():
return self._get( "libraries/%s/contents/%s" % ( library[ "id" ], library_dataset[ "id" ] ) )
wait_on_state( show, assert_ok=True )
library_dataset = show().json()
self._assert_has_keys( library_dataset, "peek", "data_type" )
assert library_dataset[ "peek" ].find("create_test") >= 0
assert library_dataset[ "file_ext" ] == "txt", library_dataset[ "file_ext" ]
def _create_folder( self, library ):
create_data = dict(
folder_id=library[ "root_folder_id" ],
create_type="folder",
name="New Folder",
)
return self._post( "libraries/%s/contents" % library[ "id" ], data=create_data )
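# For reference, the API interactions exercised above boil down to the calls
# below (paths as used via the test helpers; the "/api" prefix is assumed from
# the standard Galaxy API layout, and payload values are illustrative):
#
#   POST /api/libraries                       {"name": "CreateTestLibrary"}   (admin)
#   POST /api/libraries/<id>/contents         {"create_type": "folder",
#                                              "folder_id": <root_folder_id>,
#                                              "name": "New Folder"}
#   GET  /api/libraries/<id>/contents/<dataset_id>   # polled until the dataset state is ok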
|
the-stack_0_4960 | import pandas as pd
import numpy as np
from typing import Dict, Any, Union, Tuple, AnyStr
from sklearn import datasets, metrics, model_selection
from sklearn.model_selection import train_test_split, cross_val_score, cross_validate
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
import mlflow
import mlflow.sklearn
import hyperopt
from hyperopt.pyll.base import scope
from hyperopt import Trials, hp
from modeler import Modeler
import click
def regression_metrics(actual: pd.Series,
                       pred: pd.Series) -> Dict:
    """Return a collection of classification metrics (accuracy and F1) as a dict.
    Args:
        actual: series of actual/true labels
        pred: series of predicted labels
    Returns:
        Dict with the following keys:
            ACCURACY, F1
"""
return {
"ACCURACY": accuracy_score(actual,pred),
"F1": metrics.f1_score(actual,pred)}
def fit_and_log_cv(model,
x_train: Union[pd.DataFrame, np.array],
y_train: Union[pd.Series, np.array],
x_test: Union[pd.DataFrame, np.array],
y_test: Union[pd.Series, np.array],
params: Dict[str, Any],
nested: bool = False) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Fit a model and log it along with train/CV metrics.
Args:
x_train: feature matrix for training/CV data
y_train: label array for training/CV data
x_test: feature matrix for test data
y_test: label array for test data
nested: if true, mlflow run will be started as child
of existing parent
"""
with mlflow.start_run(nested=nested) as run:
# Fit CV models; extract predictions and metrics
print(type(params))
print(params)
model_cv = model(**params)
y_pred_cv = model_selection.cross_val_predict(model_cv, x_train, y_train)
metrics_cv = {
f"val_{metric}": value
for metric, value in regression_metrics(y_train, y_pred_cv).items()}
# Fit and log full training sample model; extract predictions and metrics
mlflow.sklearn.autolog()
model = model(**params)
model.fit(x_train, y_train)
y_pred_test = model.predict(x_test)
metrics_test = {
f"test_{metric}": value
for metric, value in regression_metrics(y_test, y_pred_test).items()}
metrics = {**metrics_test, **metrics_cv}
mlflow.log_metrics(metrics)
mlflow.sklearn.log_model(model, "model")
return metrics
def build_train_objective(model,
x_train: Union[pd.DataFrame, np.array],
y_train: Union[pd.Series, np.array],
x_test: Union[pd.DataFrame, np.array],
y_test: Union[pd.Series, np.array],
                          metric: str):
    """Build an optimization objective function that fits and evaluates a model.
Args:
x_train: feature matrix for training/CV data
y_train: label array for training/CV data
x_test: feature matrix for test data
y_test: label array for test data
metric: name of metric to be optimized
Returns:
Optimization function set up to take parameter dict from Hyperopt.
"""
def train_func(params):
"""Train a model and return loss metric."""
metrics = fit_and_log_cv(model,
x_train, y_train, x_test, y_test, params, nested=True)
return {'status': hyperopt.STATUS_OK, 'loss': -metrics[metric]}
return train_func
def log_best(run: mlflow.entities.Run,
metric: str) -> None:
"""Log the best parameters from optimization to the parent experiment.
Args:
run: current run to log metrics
metric: name of metric to select best and log
"""
client = mlflow.tracking.MlflowClient()
runs = client.search_runs(
[run.info.experiment_id],
"tags.mlflow.parentRunId = '{run_id}' ".format(run_id=run.info.run_id))
best_run = min(runs, key=lambda run: -run.data.metrics[metric])
mlflow.set_tag("best_run", best_run.info.run_id)
mlflow.log_metric(f"best_{metric}", best_run.data.metrics[metric])
##############################################################################
@click.command()
@click.option('--name', type=str, default='')
@click.option('--maxeval', type=int, default=10)
@click.option('--metric', type=str, default='val_F1')
def main(name,maxeval,metric):
"""Triggers experiment looping through ML algorithms
Args:
name: name of experiment
        maxeval: maximum number of hyperopt evaluations
        metric: name of the metric whose negative is minimized as the cost function
"""
mlflow.set_experiment(name)
MAX_EVALS = maxeval
METRIC = metric
space = [{
'max_depth': hp.choice('max_depth', range(1,20)),
'max_features': hp.choice('max_features', range(1,26)),
'n_estimators': hp.choice('n_estimators', range(100,500)),
'criterion': hp.choice('criterion', ["gini", "entropy"])},
{'var_smoothing':hp.uniform('var_smoothing', 0.000000001,0.000001)}]
X_train, X_test, y_train, y_test = Modeler().prepro()
for index, algo in enumerate([RandomForestClassifier,GaussianNB]):
with mlflow.start_run(run_name=str(algo)) as run:
trials = Trials()
train_objective = build_train_objective(algo,X_train, y_train, X_test, y_test, METRIC)
hyperopt.fmin(fn=train_objective,
space=space[index],
algo=hyperopt.tpe.suggest,
max_evals=MAX_EVALS,
trials=trials)
log_best(run, METRIC)
# search_run_id = run.info.run_id
# experiment_id = run.info.experiment_id
mlflow.end_run()
if __name__ == '__main__':
main() |
the-stack_0_4961 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SecurityRuleAssociations(Model):
"""All security rules associated with the network interface.
:param network_interface_association:
:type network_interface_association: :class:`NetworkInterfaceAssociation
<azure.mgmt.network.models.NetworkInterfaceAssociation>`
:param subnet_association:
:type subnet_association: :class:`SubnetAssociation
<azure.mgmt.network.models.SubnetAssociation>`
:param default_security_rules: Collection of default security rules of the
network security group.
:type default_security_rules: list of :class:`SecurityRule
<azure.mgmt.network.models.SecurityRule>`
:param effective_security_rules: Collection of effective security rules.
:type effective_security_rules: list of
:class:`EffectiveNetworkSecurityRule
<azure.mgmt.network.models.EffectiveNetworkSecurityRule>`
"""
_attribute_map = {
'network_interface_association': {'key': 'networkInterfaceAssociation', 'type': 'NetworkInterfaceAssociation'},
'subnet_association': {'key': 'subnetAssociation', 'type': 'SubnetAssociation'},
'default_security_rules': {'key': 'defaultSecurityRules', 'type': '[SecurityRule]'},
'effective_security_rules': {'key': 'effectiveSecurityRules', 'type': '[EffectiveNetworkSecurityRule]'},
}
def __init__(self, network_interface_association=None, subnet_association=None, default_security_rules=None, effective_security_rules=None):
self.network_interface_association = network_interface_association
self.subnet_association = subnet_association
self.default_security_rules = default_security_rules
self.effective_security_rules = effective_security_rules
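# A minimal construction sketch (not part of the generated client; the list
# contents below stand in for SecurityRule / EffectiveNetworkSecurityRule objects):
#
#   associations = SecurityRuleAssociations(
#       default_security_rules=[...],
#       effective_security_rules=[...])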
|
the-stack_0_4962 | import argparse
import math
from urllib.request import urlopen
import sys
import os
import json
import subprocess
import glob
from braceexpand import braceexpand
from types import SimpleNamespace
import os.path
from omegaconf import OmegaConf
import torch
from torch import nn, optim
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms import functional as TF
torch.backends.cudnn.benchmark = False # NR: True is a bit faster, but can lead to OOM. False is more deterministic.
#torch.use_deterministic_algorithms(True) # NR: grid_sampler_2d_backward_cuda does not have a deterministic implementation
from torch_optimizer import DiffGrad, AdamP, RAdam
from perlin_numpy import generate_fractal_noise_2d
from CLIP import clip
import kornia
import kornia.augmentation as K
import numpy as np
import imageio
import re
import random
from einops import rearrange
from PIL import ImageFile, Image, PngImagePlugin
ImageFile.LOAD_TRUNCATED_IMAGES = True
# or 'border'
global_padding_mode = 'reflection'
global_aspect_width = 1
global_spot_file = None
from util import map_number, palette_from_string, real_glob
from vqgan import VqganDrawer
class_table = {
"vqgan": VqganDrawer
}
try:
from clipdrawer import ClipDrawer
from pixeldrawer import PixelDrawer
from linedrawer import LineDrawer
# update class_table if these import OK
class_table.update({
"line_sketch": LineDrawer,
"pixel": PixelDrawer,
"clipdraw": ClipDrawer
})
except ImportError:
# diffvg is not strictly required
pass
try:
import matplotlib.colors
except ImportError:
# only needed for palette stuff
pass
# this is enabled when not in the master branch
# print("warning: running unreleased future version")
# https://stackoverflow.com/a/39662359
def isnotebook():
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'Shell':
return True # Seems to be what co-lab does
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
IS_NOTEBOOK = isnotebook()
if IS_NOTEBOOK:
from IPython import display
from tqdm.notebook import tqdm
from IPython.display import clear_output
else:
from tqdm import tqdm
# Functions and classes
def sinc(x):
return torch.where(x != 0, torch.sin(math.pi * x) / (math.pi * x), x.new_ones([]))
def lanczos(x, a):
cond = torch.logical_and(-a < x, x < a)
out = torch.where(cond, sinc(x) * sinc(x/a), x.new_zeros([]))
return out / out.sum()
def ramp(ratio, width):
n = math.ceil(width / ratio + 1)
out = torch.empty([n])
cur = 0
for i in range(out.shape[0]):
out[i] = cur
cur += ratio
return torch.cat([-out[1:].flip([0]), out])[1:-1]
# NR: Testing with different intital images
def old_random_noise_image(w,h):
random_image = Image.fromarray(np.random.randint(0,255,(w,h,3),dtype=np.dtype('uint8')))
return random_image
def NormalizeData(data):
return (data - np.min(data)) / (np.max(data) - np.min(data))
# https://stats.stackexchange.com/a/289477
def contrast_noise(n):
n = 0.9998 * n + 0.0001
n1 = (n / (1-n))
n2 = np.power(n1, -2)
n3 = 1 / (1 + n2)
return n3
def random_noise_image(w,h):
# scale up roughly as power of 2
if (w>1024 or h>1024):
side, octp = 2048, 7
elif (w>512 or h>512):
side, octp = 1024, 6
elif (w>256 or h>256):
side, octp = 512, 5
else:
side, octp = 256, 4
nr = NormalizeData(generate_fractal_noise_2d((side, side), (32, 32), octp))
ng = NormalizeData(generate_fractal_noise_2d((side, side), (32, 32), octp))
nb = NormalizeData(generate_fractal_noise_2d((side, side), (32, 32), octp))
stack = np.dstack((contrast_noise(nr),contrast_noise(ng),contrast_noise(nb)))
    # crop the oversized square noise field back to the requested w x h
    substack = stack[:h, :w, :]
    im = Image.fromarray((255.9 * substack).astype('uint8'))
return im
# testing
def gradient_2d(start, stop, width, height, is_horizontal):
if is_horizontal:
return np.tile(np.linspace(start, stop, width), (height, 1))
else:
return np.tile(np.linspace(start, stop, height), (width, 1)).T
def gradient_3d(width, height, start_list, stop_list, is_horizontal_list):
result = np.zeros((height, width, len(start_list)), dtype=float)
for i, (start, stop, is_horizontal) in enumerate(zip(start_list, stop_list, is_horizontal_list)):
result[:, :, i] = gradient_2d(start, stop, width, height, is_horizontal)
return result
def random_gradient_image(w,h):
array = gradient_3d(w, h, (0, 0, np.random.randint(0,255)), (np.random.randint(1,255), np.random.randint(2,255), np.random.randint(3,128)), (True, False, False))
random_image = Image.fromarray(np.uint8(array))
return random_image
class ReplaceGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, x_forward, x_backward):
ctx.shape = x_backward.shape
return x_forward
@staticmethod
def backward(ctx, grad_in):
return None, grad_in.sum_to_size(ctx.shape)
replace_grad = ReplaceGrad.apply
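# replace_grad(x_forward, x_backward) evaluates to x_forward in the forward
# pass, while all gradients are routed to x_backward; it is used below so that
# a prompt's gradient vanishes once its distance falls below the stop threshold.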
def spherical_dist_loss(x, y):
x = F.normalize(x, dim=-1)
y = F.normalize(y, dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
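# Note on the formula above: for unit vectors, ||x - y|| = 2*sin(theta/2) where
# theta is the angle between x and y, so arcsin(||x - y||/2) = theta/2 and the
# returned value is 2*(theta/2)^2 = theta^2/2, i.e. half the squared
# great-circle distance between the normalized embeddings.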
class Prompt(nn.Module):
def __init__(self, embed, weight=1., stop=float('-inf')):
super().__init__()
self.register_buffer('embed', embed)
self.register_buffer('weight', torch.as_tensor(weight))
self.register_buffer('stop', torch.as_tensor(stop))
def forward(self, input):
input_normed = F.normalize(input.unsqueeze(1), dim=2)
embed_normed = F.normalize(self.embed.unsqueeze(0), dim=2)
dists = input_normed.sub(embed_normed).norm(dim=2).div(2).arcsin().pow(2).mul(2)
dists = dists * self.weight.sign()
return self.weight.abs() * replace_grad(dists, torch.maximum(dists, self.stop)).mean()
def parse_prompt(prompt):
vals = prompt.rsplit(':', 2)
vals = vals + ['', '1', '-inf'][len(vals):]
# print(f"parsed vals is {vals}")
return vals[0], float(vals[1]), float(vals[2])
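# Examples of the parsing above (weight defaults to 1, stop to -inf; prompt text
# is illustrative and assumed to contain no colons):
#   parse_prompt("a watercolor city")        -> ("a watercolor city", 1.0, -inf)
#   parse_prompt("a watercolor city:2")      -> ("a watercolor city", 2.0, -inf)
#   parse_prompt("a watercolor city:2:-0.5") -> ("a watercolor city", 2.0, -0.5)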
from typing import cast, Dict, List, Optional, Tuple, Union
# override class to get padding_mode
class MyRandomPerspective(K.RandomPerspective):
def apply_transform(
self, input: torch.Tensor, params: Dict[str, torch.Tensor], transform: Optional[torch.Tensor] = None
) -> torch.Tensor:
_, _, height, width = input.shape
transform = cast(torch.Tensor, transform)
return kornia.geometry.warp_perspective(
input, transform, (height, width),
mode=self.resample.name.lower(), align_corners=self.align_corners, padding_mode=global_padding_mode
)
cached_spot_indexes = {}
def fetch_spot_indexes(sideX, sideY):
global global_spot_file
# make sure image is loaded if we need it
cache_key = (sideX, sideY)
if cache_key not in cached_spot_indexes:
if global_spot_file is not None:
mask_image = Image.open(global_spot_file)
elif global_aspect_width != 1:
mask_image = Image.open("inputs/spot_wide.png")
else:
mask_image = Image.open("inputs/spot_square.png")
# this is a one channel mask
mask_image = mask_image.convert('RGB')
mask_image = mask_image.resize((sideX, sideY), Image.LANCZOS)
mask_image_tensor = TF.to_tensor(mask_image)
# print("ONE CHANNEL ", mask_image_tensor.shape)
mask_indexes = mask_image_tensor.ge(0.5).to(device)
# print("GE ", mask_indexes.shape)
# sys.exit(0)
mask_indexes_off = mask_image_tensor.lt(0.5).to(device)
cached_spot_indexes[cache_key] = [mask_indexes, mask_indexes_off]
return cached_spot_indexes[cache_key]
# n = torch.ones((3,5,5))
# f = generate.fetch_spot_indexes(5, 5)
# f[0].shape = [60,3]
class MakeCutouts(nn.Module):
def __init__(self, cut_size, cutn, cut_pow=1.):
global global_aspect_width
super().__init__()
self.cut_size = cut_size
self.cutn = cutn
self.cutn_zoom = int(2*cutn/3)
self.cut_pow = cut_pow
self.transforms = None
augmentations = []
if global_aspect_width != 1:
augmentations.append(K.RandomCrop(size=(self.cut_size,self.cut_size), p=1.0, cropping_mode="resample", return_transform=True))
augmentations.append(MyRandomPerspective(distortion_scale=0.40, p=0.7, return_transform=True))
augmentations.append(K.RandomResizedCrop(size=(self.cut_size,self.cut_size), scale=(0.1,0.75), ratio=(0.85,1.2), cropping_mode='resample', p=0.7, return_transform=True))
augmentations.append(K.ColorJitter(hue=0.1, saturation=0.1, p=0.8, return_transform=True))
self.augs_zoom = nn.Sequential(*augmentations)
augmentations = []
if global_aspect_width == 1:
n_s = 0.95
n_t = (1-n_s)/2
augmentations.append(K.RandomAffine(degrees=0, translate=(n_t, n_t), scale=(n_s, n_s), p=1.0, return_transform=True))
elif global_aspect_width > 1:
n_s = 1/global_aspect_width
n_t = (1-n_s)/2
augmentations.append(K.RandomAffine(degrees=0, translate=(0, n_t), scale=(0.9*n_s, n_s), p=1.0, return_transform=True))
else:
n_s = global_aspect_width
n_t = (1-n_s)/2
augmentations.append(K.RandomAffine(degrees=0, translate=(n_t, 0), scale=(0.9*n_s, n_s), p=1.0, return_transform=True))
# augmentations.append(K.CenterCrop(size=(self.cut_size,self.cut_size), p=1.0, cropping_mode="resample", return_transform=True))
augmentations.append(K.CenterCrop(size=self.cut_size, cropping_mode='resample', p=1.0, return_transform=True))
augmentations.append(K.RandomPerspective(distortion_scale=0.20, p=0.7, return_transform=True))
augmentations.append(K.ColorJitter(hue=0.1, saturation=0.1, p=0.8, return_transform=True))
self.augs_wide = nn.Sequential(*augmentations)
self.noise_fac = 0.1
# Pooling
self.av_pool = nn.AdaptiveAvgPool2d((self.cut_size, self.cut_size))
self.max_pool = nn.AdaptiveMaxPool2d((self.cut_size, self.cut_size))
def forward(self, input, spot=None):
global global_aspect_width, cur_iteration
sideY, sideX = input.shape[2:4]
max_size = min(sideX, sideY)
min_size = min(sideX, sideY, self.cut_size)
cutouts = []
mask_indexes = None
if spot is not None:
spot_indexes = fetch_spot_indexes(self.cut_size, self.cut_size)
if spot == 0:
mask_indexes = spot_indexes[1]
else:
mask_indexes = spot_indexes[0]
# print("Mask indexes ", mask_indexes)
for _ in range(self.cutn):
# Pooling
cutout = (self.av_pool(input) + self.max_pool(input))/2
if mask_indexes is not None:
cutout[0][mask_indexes] = 0.0 # 0.5
if global_aspect_width != 1:
if global_aspect_width > 1:
cutout = kornia.geometry.transform.rescale(cutout, (1, global_aspect_width))
else:
cutout = kornia.geometry.transform.rescale(cutout, (1/global_aspect_width, 1))
# if cur_iteration % 50 == 0 and _ == 0:
# print(cutout.shape)
# TF.to_pil_image(cutout[0].cpu()).save(f"cutout_im_{cur_iteration:02d}_{spot}.png")
cutouts.append(cutout)
if self.transforms is not None:
# print("Cached transforms available")
batch1 = kornia.geometry.transform.warp_perspective(torch.cat(cutouts[:self.cutn_zoom], dim=0), self.transforms[:self.cutn_zoom],
(self.cut_size, self.cut_size), padding_mode=global_padding_mode)
batch2 = kornia.geometry.transform.warp_perspective(torch.cat(cutouts[self.cutn_zoom:], dim=0), self.transforms[self.cutn_zoom:],
(self.cut_size, self.cut_size), padding_mode='zeros')
batch = torch.cat([batch1, batch2])
# if cur_iteration < 2:
# for j in range(4):
# TF.to_pil_image(batch[j].cpu()).save(f"cached_im_{cur_iteration:02d}_{j:02d}_{spot}.png")
# j_wide = j + self.cutn_zoom
# TF.to_pil_image(batch[j_wide].cpu()).save(f"cached_im_{cur_iteration:02d}_{j_wide:02d}_{spot}.png")
else:
batch1, transforms1 = self.augs_zoom(torch.cat(cutouts[:self.cutn_zoom], dim=0))
batch2, transforms2 = self.augs_wide(torch.cat(cutouts[self.cutn_zoom:], dim=0))
# print(batch1.shape, batch2.shape)
batch = torch.cat([batch1, batch2])
# print(batch.shape)
self.transforms = torch.cat([transforms1, transforms2])
## batch, self.transforms = self.augs(torch.cat(cutouts, dim=0))
# if cur_iteration < 2:
# for j in range(4):
# TF.to_pil_image(batch[j].cpu()).save(f"live_im_{cur_iteration:02d}_{j:02d}_{spot}.png")
# j_wide = j + self.cutn_zoom
# TF.to_pil_image(batch[j_wide].cpu()).save(f"live_im_{cur_iteration:02d}_{j_wide:02d}_{spot}.png")
# print(batch.shape, self.transforms.shape)
if self.noise_fac:
facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)
batch = batch + facs * torch.randn_like(batch)
return batch
def resize_image(image, out_size):
ratio = image.size[0] / image.size[1]
area = min(image.size[0] * image.size[1], out_size[0] * out_size[1])
size = round((area * ratio)**0.5), round((area / ratio)**0.5)
return image.resize(size, Image.LANCZOS)
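# Worked example of the area-preserving resize above (values illustrative):
#   a 1000x500 source with out_size (400, 400) keeps ratio 2.0 and caps the area
#   at 160000 px, giving size = (round(sqrt(320000)), round(sqrt(80000))) = (566, 283).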
def rebuild_optimisers(args):
global best_loss, best_iter, best_z, num_loss_drop, max_loss_drops, iter_drop_delay
global drawer
drop_divisor = 10 ** num_loss_drop
new_opts = drawer.get_opts(drop_divisor)
if new_opts == None:
# legacy
dropped_learning_rate = args.learning_rate/drop_divisor;
# print(f"Optimizing with {args.optimiser} set to {dropped_learning_rate}")
# Set the optimiser
to_optimize = [ drawer.get_z() ]
if args.optimiser == "Adam":
opt = optim.Adam(to_optimize, lr=dropped_learning_rate) # LR=0.1
elif args.optimiser == "AdamW":
opt = optim.AdamW(to_optimize, lr=dropped_learning_rate) # LR=0.2
elif args.optimiser == "Adagrad":
opt = optim.Adagrad(to_optimize, lr=dropped_learning_rate) # LR=0.5+
elif args.optimiser == "Adamax":
opt = optim.Adamax(to_optimize, lr=dropped_learning_rate) # LR=0.5+?
elif args.optimiser == "DiffGrad":
opt = DiffGrad(to_optimize, lr=dropped_learning_rate) # LR=2+?
elif args.optimiser == "AdamP":
opt = AdamP(to_optimize, lr=dropped_learning_rate) # LR=2+?
elif args.optimiser == "RAdam":
opt = RAdam(to_optimize, lr=dropped_learning_rate) # LR=2+?
new_opts = [opt]
return new_opts
def do_init(args):
global opts, perceptors, normalize, cutoutsTable, cutoutSizeTable
global z_orig, z_targets, z_labels, init_image_tensor, target_image_tensor
global gside_X, gside_Y, overlay_image_rgba
global pmsTable, pmsImageTable, pImages, device, spotPmsTable, spotOffPmsTable
global drawer
# do seed first!
if args.seed is None:
seed = torch.seed()
else:
seed = args.seed
int_seed = int(seed)%(2**30)
print('Using seed:', seed)
torch.manual_seed(seed)
np.random.seed(int_seed)
random.seed(int_seed)
# Do it (init that is)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
drawer = class_table[args.drawer](args)
drawer.load_model(args, device)
num_resolutions = drawer.get_num_resolutions()
# print("-----------> NUMR ", num_resolutions)
jit = True if float(torch.__version__[:3]) < 1.8 else False
f = 2**(num_resolutions - 1)
toksX, toksY = args.size[0] // f, args.size[1] // f
sideX, sideY = toksX * f, toksY * f
# save sideX, sideY in globals (need if using overlay)
gside_X = sideX
gside_Y = sideY
for clip_model in args.clip_models:
perceptor = clip.load(clip_model, jit=jit)[0].eval().requires_grad_(False).to(device)
perceptors[clip_model] = perceptor
cut_size = perceptor.visual.input_resolution
cutoutSizeTable[clip_model] = cut_size
if not cut_size in cutoutsTable:
make_cutouts = MakeCutouts(cut_size, args.num_cuts, cut_pow=args.cut_pow)
cutoutsTable[cut_size] = make_cutouts
init_image_tensor = None
target_image_tensor = None
# Image initialisation
if args.init_image or args.init_noise:
# setup init image wih pil
# first - always start with noise or blank
if args.init_noise == 'pixels':
img = random_noise_image(args.size[0], args.size[1])
elif args.init_noise == 'gradient':
img = random_gradient_image(args.size[0], args.size[1])
elif args.init_noise == 'snow':
img = old_random_noise_image(args.size[0], args.size[1])
else:
img = Image.new(mode="RGB", size=(args.size[0], args.size[1]), color=(255, 255, 255))
starting_image = img.convert('RGB')
starting_image = starting_image.resize((sideX, sideY), Image.LANCZOS)
if args.init_image:
# now we might overlay an init image (init_image also can be recycled as overlay)
if 'http' in args.init_image:
init_image = Image.open(urlopen(args.init_image))
else:
init_image = Image.open(args.init_image)
# this version is needed potentially for the loss function
init_image_rgb = init_image.convert('RGB')
init_image_rgb = init_image_rgb.resize((sideX, sideY), Image.LANCZOS)
init_image_tensor = TF.to_tensor(init_image_rgb)
init_image_tensor = init_image_tensor.to(device).unsqueeze(0)
# this version gets overlaid on the background (noise)
init_image_rgba = init_image.convert('RGBA')
init_image_rgba = init_image_rgba.resize((sideX, sideY), Image.LANCZOS)
top_image = init_image_rgba.copy()
if args.init_image_alpha and args.init_image_alpha >= 0:
top_image.putalpha(args.init_image_alpha)
starting_image.paste(top_image, (0, 0), top_image)
starting_image.save("starting_image.png")
starting_tensor = TF.to_tensor(starting_image)
init_tensor = starting_tensor.to(device).unsqueeze(0) * 2 - 1
drawer.init_from_tensor(init_tensor)
else:
# untested
drawer.rand_init(toksX, toksY)
if args.overlay_every:
if args.overlay_image:
if 'http' in args.overlay_image:
overlay_image = Image.open(urlopen(args.overlay_image))
else:
overlay_image = Image.open(args.overlay_image)
overlay_image_rgba = overlay_image.convert('RGBA')
overlay_image_rgba = overlay_image_rgba.resize((sideX, sideY), Image.LANCZOS)
else:
overlay_image_rgba = init_image_rgba
if args.overlay_alpha:
overlay_image_rgba.putalpha(args.overlay_alpha)
overlay_image_rgba.save('overlay_image.png')
if args.target_images is not None:
z_targets = []
filelist = real_glob(args.target_images)
for target_image in filelist:
target_image = Image.open(target_image)
target_image_rgb = target_image.convert('RGB')
target_image_rgb = target_image_rgb.resize((sideX, sideY), Image.LANCZOS)
target_image_tensor_local = TF.to_tensor(target_image_rgb)
target_image_tensor = target_image_tensor_local.to(device).unsqueeze(0) * 2 - 1
z_target = drawer.get_z_from_tensor(target_image_tensor)
z_targets.append(z_target)
if args.image_labels is not None:
z_labels = []
filelist = real_glob(args.image_labels)
cur_labels = []
for image_label in filelist:
image_label = Image.open(image_label)
image_label_rgb = image_label.convert('RGB')
image_label_rgb = image_label_rgb.resize((sideX, sideY), Image.LANCZOS)
image_label_rgb_tensor = TF.to_tensor(image_label_rgb)
image_label_rgb_tensor = image_label_rgb_tensor.to(device).unsqueeze(0) * 2 - 1
z_label = drawer.get_z_from_tensor(image_label_rgb_tensor)
cur_labels.append(z_label)
image_embeddings = torch.stack(cur_labels)
print("Processing labels: ", image_embeddings.shape)
image_embeddings /= image_embeddings.norm(dim=-1, keepdim=True)
image_embeddings = image_embeddings.mean(dim=0)
image_embeddings /= image_embeddings.norm()
z_labels.append(image_embeddings.unsqueeze(0))
z_orig = drawer.get_z_copy()
pmsTable = {}
pmsImageTable = {}
spotPmsTable = {}
spotOffPmsTable = {}
for clip_model in args.clip_models:
pmsTable[clip_model] = []
pmsImageTable[clip_model] = []
spotPmsTable[clip_model] = []
spotOffPmsTable[clip_model] = []
normalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
std=[0.26862954, 0.26130258, 0.27577711])
# CLIP tokenize/encode
# NR: Weights / blending
for prompt in args.prompts:
for clip_model in args.clip_models:
pMs = pmsTable[clip_model]
perceptor = perceptors[clip_model]
txt, weight, stop = parse_prompt(prompt)
embed = perceptor.encode_text(clip.tokenize(txt).to(device)).float()
pMs.append(Prompt(embed, weight, stop).to(device))
for vect_prompt in args.vector_prompts:
f1, weight, stop = parse_prompt(vect_prompt)
# vect_promts are by nature tuned to 10% of a normal prompt
weight = 0.1 * weight
if 'http' in f1:
# note: this is currently untested...
infile = urlopen(f1)
elif 'json' in f1:
infile = f1
else:
infile = f"vectors/{f1}.json"
if not os.path.exists(infile):
infile = f"pixray/vectors/{f1}.json"
with open(infile) as f_in:
vect_table = json.load(f_in)
for clip_model in args.clip_models:
pMs = pmsTable[clip_model]
v = np.array(vect_table[clip_model])
embed = torch.FloatTensor(v).to(device).float()
pMs.append(Prompt(embed, weight, stop).to(device))
for prompt in args.spot_prompts:
for clip_model in args.clip_models:
pMs = spotPmsTable[clip_model]
perceptor = perceptors[clip_model]
txt, weight, stop = parse_prompt(prompt)
embed = perceptor.encode_text(clip.tokenize(txt).to(device)).float()
pMs.append(Prompt(embed, weight, stop).to(device))
for prompt in args.spot_prompts_off:
for clip_model in args.clip_models:
pMs = spotOffPmsTable[clip_model]
perceptor = perceptors[clip_model]
txt, weight, stop = parse_prompt(prompt)
embed = perceptor.encode_text(clip.tokenize(txt).to(device)).float()
pMs.append(Prompt(embed, weight, stop).to(device))
for label in args.labels:
for clip_model in args.clip_models:
pMs = pmsTable[clip_model]
perceptor = perceptors[clip_model]
txt, weight, stop = parse_prompt(label)
texts = [template.format(txt) for template in imagenet_templates] #format with class
print(f"Tokenizing all of {texts}")
texts = clip.tokenize(texts).to(device) #tokenize
class_embeddings = perceptor.encode_text(texts) #embed with text encoder
class_embeddings /= class_embeddings.norm(dim=-1, keepdim=True)
class_embedding = class_embeddings.mean(dim=0)
class_embedding /= class_embedding.norm()
pMs.append(Prompt(class_embedding.unsqueeze(0), weight, stop).to(device))
for clip_model in args.clip_models:
pImages = pmsImageTable[clip_model]
for path in args.image_prompts:
img = Image.open(path)
pil_image = img.convert('RGB')
img = resize_image(pil_image, (sideX, sideY))
pImages.append(TF.to_tensor(img).unsqueeze(0).to(device))
for seed, weight in zip(args.noise_prompt_seeds, args.noise_prompt_weights):
gen = torch.Generator().manual_seed(seed)
embed = torch.empty([1, perceptor.visual.output_dim]).normal_(generator=gen)
pMs.append(Prompt(embed, weight).to(device))
opts = rebuild_optimisers(args)
# Output for the user
print('Using device:', device)
print('Optimising using:', args.optimiser)
if args.prompts:
print('Using text prompts:', args.prompts)
if args.spot_prompts:
print('Using spot prompts:', args.spot_prompts)
if args.spot_prompts_off:
print('Using spot off prompts:', args.spot_prompts_off)
if args.image_prompts:
print('Using #image prompts:', len(args.image_prompts))
if args.init_image:
print('Using initial image:', args.init_image)
if args.noise_prompt_weights:
print('Noise prompt weights:', args.noise_prompt_weights)
# dreaded globals (for now)
z_orig = None
z_targets = None
z_labels = None
opts = None
drawer = None
perceptors = {}
normalize = None
cutoutsTable = {}
cutoutSizeTable = {}
init_image_tensor = None
target_image_tensor = None
pmsTable = None
spotPmsTable = None
spotOffPmsTable = None
pmsImageTable = None
gside_X=None
gside_Y=None
overlay_image_rgba=None
device=None
cur_iteration=None
cur_anim_index=None
anim_output_files=[]
anim_cur_zs=[]
anim_next_zs=[]
best_loss = None
best_iter = None
best_z = None
num_loss_drop = 0
max_loss_drops = 2
iter_drop_delay = 20
def make_gif(args, iter):
gif_output = os.path.join(args.animation_dir, "anim.gif")
if os.path.exists(gif_output):
os.remove(gif_output)
cmd = ['ffmpeg', '-framerate', '10', '-pattern_type', 'glob',
'-i', f"{args.animation_dir}/*.png", '-loop', '0', gif_output]
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as cpe:
output = cpe.output
print("Ignoring non-zero exit: ", output)
return gif_output
# !ffmpeg \
# -framerate 10 -pattern_type glob \
# -i '{animation_output}/*_*.png' \
# -loop 0 {animation_output}/final.gif
@torch.no_grad()
def checkdrop(args, iter, losses):
global best_loss, best_iter, best_z, num_loss_drop, max_loss_drops, iter_drop_delay
global drawer
drop_loss_time = False
loss_sum = sum(losses)
is_new_best = False
num_cycles_not_best = 0
if (loss_sum < best_loss):
is_new_best = True
best_loss = loss_sum
best_iter = iter
best_z = drawer.get_z_copy()
else:
num_cycles_not_best = iter - best_iter
if num_cycles_not_best >= iter_drop_delay:
drop_loss_time = True
return drop_loss_time
@torch.no_grad()
def checkin(args, iter, losses):
global drawer
global best_loss, best_iter, best_z, num_loss_drop, max_loss_drops, iter_drop_delay
num_cycles_not_best = iter - best_iter
if losses is not None:
losses_str = ', '.join(f'{loss.item():2.3g}' for loss in losses)
writestr = f'iter: {iter}, loss: {sum(losses).item():1.3g}, losses: {losses_str}'
else:
writestr = f'iter: {iter}, finished'
if args.animation_dir is not None:
writestr = f'anim: {cur_anim_index}/{len(anim_output_files)} {writestr}'
else:
writestr = f'{writestr} (-{num_cycles_not_best}=>{best_loss:2.4g})'
info = PngImagePlugin.PngInfo()
info.add_text('comment', f'{args.prompts}')
timg = drawer.synth(cur_iteration)
img = TF.to_pil_image(timg[0].cpu())
# img = drawer.to_image()
if cur_anim_index is None:
outfile = args.output
else:
outfile = anim_output_files[cur_anim_index]
img.save(outfile, pnginfo=info)
if cur_anim_index == len(anim_output_files) - 1:
# save gif
gif_output = make_gif(args, iter)
if IS_NOTEBOOK and iter % args.display_every == 0:
clear_output()
display.display(display.Image(open(gif_output,'rb').read()))
if IS_NOTEBOOK and iter % args.display_every == 0:
if cur_anim_index is None or iter == 0:
if args.display_clear:
clear_output()
display.display(display.Image(outfile))
tqdm.write(writestr)
def ascend_txt(args):
global cur_iteration, cur_anim_index, perceptors, normalize, cutoutsTable, cutoutSizeTable
global z_orig, z_targets, z_labels, init_image_tensor, target_image_tensor, drawer
global pmsTable, pmsImageTable, spotPmsTable, spotOffPmsTable, global_padding_mode
    out = drawer.synth(cur_iteration)
result = []
if (cur_iteration%2 == 0):
global_padding_mode = 'reflection'
else:
global_padding_mode = 'border'
cur_cutouts = {}
cur_spot_cutouts = {}
cur_spot_off_cutouts = {}
for cutoutSize in cutoutsTable:
make_cutouts = cutoutsTable[cutoutSize]
cur_cutouts[cutoutSize] = make_cutouts(out)
if args.spot_prompts:
for cutoutSize in cutoutsTable:
cur_spot_cutouts[cutoutSize] = make_cutouts(out, spot=1)
if args.spot_prompts_off:
for cutoutSize in cutoutsTable:
cur_spot_off_cutouts[cutoutSize] = make_cutouts(out, spot=0)
for clip_model in args.clip_models:
perceptor = perceptors[clip_model]
cutoutSize = cutoutSizeTable[clip_model]
transient_pMs = []
if args.spot_prompts:
iii_s = perceptor.encode_image(normalize( cur_spot_cutouts[cutoutSize] )).float()
spotPms = spotPmsTable[clip_model]
for prompt in spotPms:
result.append(prompt(iii_s))
if args.spot_prompts_off:
iii_so = perceptor.encode_image(normalize( cur_spot_off_cutouts[cutoutSize] )).float()
spotOffPms = spotOffPmsTable[clip_model]
for prompt in spotOffPms:
result.append(prompt(iii_so))
pMs = pmsTable[clip_model]
iii = perceptor.encode_image(normalize( cur_cutouts[cutoutSize] )).float()
for prompt in pMs:
result.append(prompt(iii))
# If there are image prompts we make cutouts for those each time
# so that they line up with the current cutouts from augmentation
make_cutouts = cutoutsTable[cutoutSize]
# if animating select one pImage, otherwise use them all
if cur_anim_index is None:
pImages = pmsImageTable[clip_model]
else:
pImages = [ pmsImageTable[clip_model][cur_anim_index] ]
for timg in pImages:
# note: this caches and reuses the transforms - a bit of a hack but it works
if args.image_prompt_shuffle:
# print("Disabling cached transforms")
make_cutouts.transforms = None
# print("Building throwaway image prompts")
# new way builds throwaway Prompts
batch = make_cutouts(timg)
embed = perceptor.encode_image(normalize(batch)).float()
if args.image_prompt_weight is not None:
transient_pMs.append(Prompt(embed, args.image_prompt_weight).to(device))
else:
transient_pMs.append(Prompt(embed).to(device))
for prompt in transient_pMs:
result.append(prompt(iii))
if args.enforce_palette_annealing and args.target_palette:
target_palette = torch.FloatTensor(args.target_palette).requires_grad_(False).to(device)
_pixels = cur_cutouts[cutoutSize].permute(0,2,3,1).reshape(-1,3)
palette_dists = torch.cdist(target_palette, _pixels, p=2)
best_guesses = palette_dists.argmin(axis=0)
diffs = _pixels - target_palette[best_guesses]
palette_loss = torch.mean( torch.norm( diffs, 2, dim=1 ) )*cur_cutouts[cutoutSize].shape[0]
result.append( palette_loss*cur_iteration/args.enforce_palette_annealing )
if args.smoothness > 0 and args.smoothness_type:
_pixels = cur_cutouts[cutoutSize].permute(0,2,3,1).reshape(-1,cur_cutouts[cutoutSize].shape[2],3)
gyr, gxr = torch.gradient(_pixels[:,:,0])
gyg, gxg = torch.gradient(_pixels[:,:,1])
gyb, gxb = torch.gradient(_pixels[:,:,2])
sharpness = torch.sqrt(gyr**2 + gxr**2+ gyg**2 + gxg**2 + gyb**2 + gxb**2)
if args.smoothness_type=='clipped':
sharpness = torch.clamp( sharpness, max=0.5 )
elif args.smoothness_type=='log':
sharpness = torch.log( torch.ones_like(sharpness)+sharpness )
sharpness = torch.mean( sharpness )
result.append( sharpness*args.smoothness )
if args.saturation:
# based on the old "percepted colourfulness" heuristic from Hasler and Süsstrunk’s 2003 paper
# https://www.researchgate.net/publication/243135534_Measuring_Colourfulness_in_Natural_Images
_pixels = cur_cutouts[cutoutSize].permute(0,2,3,1).reshape(-1,3)
rg = _pixels[:,0]-_pixels[:,1]
yb = 0.5*(_pixels[:,0]+_pixels[:,1])-_pixels[:,2]
rg_std, rg_mean = torch.std_mean(rg)
yb_std, yb_mean = torch.std_mean(yb)
std_rggb = torch.sqrt(rg_std**2 + yb_std**2)
mean_rggb = torch.sqrt(rg_mean**2 + yb_mean**2)
colorfullness = std_rggb+.3*mean_rggb
result.append( -colorfullness*args.saturation/5.0 )
for cutoutSize in cutoutsTable:
# clear the transform "cache"
make_cutouts = cutoutsTable[cutoutSize]
make_cutouts.transforms = None
# main init_weight uses spherical loss
if args.target_images is not None and args.target_image_weight > 0:
if cur_anim_index is None:
cur_z_targets = z_targets
else:
cur_z_targets = [ z_targets[cur_anim_index] ]
for z_target in cur_z_targets:
f_z = drawer.get_z()
if f_z is not None:
f = f_z.reshape(1,-1)
f2 = z_target.reshape(1,-1)
cur_loss = spherical_dist_loss(f, f2) * args.target_image_weight
result.append(cur_loss)
if args.target_weight_pix:
if target_image_tensor is None:
print("OOPS TIT is 0")
else:
cur_loss = F.l1_loss(out, target_image_tensor) * args.target_weight_pix
result.append(cur_loss)
if args.image_labels is not None:
for z_label in z_labels:
f = drawer.get_z().reshape(1,-1)
f2 = z_label.reshape(1,-1)
cur_loss = spherical_dist_loss(f, f2) * args.image_label_weight
result.append(cur_loss)
# main init_weight uses spherical loss
if args.init_weight:
f = drawer.get_z().reshape(1,-1)
f2 = z_orig.reshape(1,-1)
cur_loss = spherical_dist_loss(f, f2) * args.init_weight
result.append(cur_loss)
# these three init_weight variants offer mse_loss, mse_loss in pixel space, and cos loss
if args.init_weight_dist:
        cur_loss = F.mse_loss(drawer.get_z(), z_orig) * args.init_weight_dist / 2
result.append(cur_loss)
if args.init_weight_pix:
if init_image_tensor is None:
print("OOPS IIT is 0")
else:
cur_loss = F.l1_loss(out, init_image_tensor) * args.init_weight_pix / 2
result.append(cur_loss)
if args.init_weight_cos:
f = drawer.get_z().reshape(1,-1)
f2 = z_orig.reshape(1,-1)
y = torch.ones_like(f[0])
cur_loss = F.cosine_embedding_loss(f, f2, y) * args.init_weight_cos
result.append(cur_loss)
if args.make_video:
img = np.array(out.mul(255).clamp(0, 255)[0].cpu().detach().numpy().astype(np.uint8))[:,:,:]
img = np.transpose(img, (1, 2, 0))
imageio.imwrite(f'./steps/frame_{cur_iteration:04d}.png', np.array(img))
return result
def re_average_z(args):
global gside_X, gside_Y
global device, drawer
# old_z = z.clone()
cur_z_image = drawer.to_image()
cur_z_image = cur_z_image.convert('RGB')
if overlay_image_rgba:
# print("applying overlay image")
cur_z_image.paste(overlay_image_rgba, (0, 0), overlay_image_rgba)
cur_z_image.save("overlaid.png")
cur_z_image = cur_z_image.resize((gside_X, gside_Y), Image.LANCZOS)
drawer.reapply_from_tensor(TF.to_tensor(cur_z_image).to(device).unsqueeze(0) * 2 - 1)
# torch.autograd.set_detect_anomaly(True)
def train(args, cur_it):
global drawer, opts
global best_loss, best_iter, best_z, num_loss_drop, max_loss_drops, iter_drop_delay
lossAll = None
if cur_it < args.iterations:
        # this is awkward, but train is also in charge of saving, so...
rebuild_opts_when_done = False
for opt in opts:
# opt.zero_grad(set_to_none=True)
opt.zero_grad()
# print("drops at ", args.learning_rate_drops)
# num_batches = args.batches * (num_loss_drop + 1)
num_batches = args.batches
for i in range(num_batches):
lossAll = ascend_txt(args)
if i == 0:
if cur_it in args.learning_rate_drops:
print("Dropping learning rate")
rebuild_opts_when_done = True
else:
did_drop = checkdrop(args, cur_it, lossAll)
if args.auto_stop is True:
                            rebuild_opts_when_done = did_drop
if i == 0 and cur_it % args.save_every == 0:
checkin(args, cur_it, lossAll)
loss = sum(lossAll)
loss.backward()
for opt in opts:
opt.step()
if args.overlay_every and cur_it != 0 and \
(cur_it % (args.overlay_every + args.overlay_offset)) == 0:
re_average_z(args)
drawer.clip_z()
if cur_it == args.iterations:
# this resetting to best is currently disabled
# drawer.set_z(best_z)
checkin(args, cur_it, lossAll)
return False
if rebuild_opts_when_done:
num_loss_drop = num_loss_drop + 1
# this resetting to best is currently disabled
# drawer.set_z(best_z)
# always checkin (and save) after resetting z
# checkin(args, cur_it, lossAll)
if num_loss_drop > max_loss_drops:
return False
best_iter = cur_it
best_loss = 1e20
opts = rebuild_optimisers(args)
return True
imagenet_templates = [
"itap of a {}.",
"a bad photo of the {}.",
"a origami {}.",
"a photo of the large {}.",
"a {} in a video game.",
"art of the {}.",
"a photo of the small {}.",
]
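# A sketch of how the templates above are consumed: the `--labels` handling
# earlier in this file formats each label into every template, encodes all of
# the resulting phrases with CLIP, and averages the normalized embeddings into
# a single prompt vector (standard CLIP prompt ensembling).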
def do_run(args):
global cur_iteration, cur_anim_index
global anim_cur_zs, anim_next_zs, anim_output_files
cur_iteration = 0
if args.animation_dir is not None:
# we already have z_targets. setup some sort of global ring
# we need something like
# copies of all the current z's (they can all start off all as copies)
# a list of all the output filenames
#
if not os.path.exists(args.animation_dir):
os.mkdir(args.animation_dir)
if args.target_images is not None:
filelist = real_glob(args.target_images)
else:
filelist = args.image_prompts
num_anim_frames = len(filelist)
for target_image in filelist:
basename = os.path.basename(target_image)
target_output = os.path.join(args.animation_dir, basename)
anim_output_files.append(target_output)
for i in range(num_anim_frames):
cur_z = drawer.get_z_copy()
anim_cur_zs.append(cur_z)
anim_next_zs.append(None)
step_iteration = 0
with tqdm() as pbar:
while True:
cur_images = []
for i in range(num_anim_frames):
# do merge frames here from cur->next when we are ready to be fancy
cur_anim_index = i
# anim_cur_zs[cur_anim_index] = anim_next_zs[cur_anim_index]
cur_iteration = step_iteration
drawer.set_z(anim_cur_zs[cur_anim_index])
for j in range(args.save_every):
keep_going = train(args, cur_iteration)
cur_iteration += 1
pbar.update()
# anim_next_zs[cur_anim_index] = drawer.get_z_copy()
cur_images.append(drawer.to_image())
step_iteration = step_iteration + args.save_every
if step_iteration >= args.iterations:
break
# compute the next round of cur_zs here from all the next_zs
for i in range(num_anim_frames):
prev_i = (i + num_anim_frames - 1) % num_anim_frames
base_image = cur_images[i].copy()
prev_image = cur_images[prev_i].copy().convert('RGBA')
prev_image.putalpha(args.animation_alpha)
base_image.paste(prev_image, (0, 0), prev_image)
# base_image.save(f"overlaid_{i:02d}.png")
drawer.reapply_from_tensor(TF.to_tensor(base_image).to(device).unsqueeze(0) * 2 - 1)
anim_cur_zs[i] = drawer.get_z_copy()
else:
try:
keep_going = True
with tqdm() as pbar:
while keep_going:
try:
keep_going = train(args, cur_iteration)
if cur_iteration == args.iterations:
break
cur_iteration += 1
pbar.update()
except RuntimeError as e:
print("Oops: runtime error: ", e)
print("Try reducing --num-cuts to save memory")
raise e
except KeyboardInterrupt:
pass
if args.make_video:
do_video(args)
def do_video(args):
global cur_iteration
# Video generation
init_frame = 1 # This is the frame where the video will start
last_frame = cur_iteration # You can change to the number of the last frame you want to generate. It will raise an error if that number of frames does not exist.
min_fps = 10
max_fps = 60
total_frames = last_frame-init_frame
length = 15 # Desired time of the video in seconds
frames = []
tqdm.write('Generating video...')
for i in range(init_frame,last_frame): #
frames.append(Image.open(f'./steps/frame_{i:04d}.png'))
#fps = last_frame/10
fps = np.clip(total_frames/length,min_fps,max_fps)
from subprocess import Popen, PIPE
import re
    output_file = re.compile(r'\.png$').sub('.mp4', args.output)
p = Popen(['ffmpeg',
'-y',
'-f', 'image2pipe',
'-vcodec', 'png',
'-r', str(fps),
'-i',
'-',
'-vcodec', 'libx264',
'-r', str(fps),
'-pix_fmt', 'yuv420p',
'-crf', '17',
'-preset', 'veryslow',
'-metadata', f'comment={args.prompts}',
output_file], stdin=PIPE)
for im in tqdm(frames):
im.save(p.stdin, 'PNG')
p.stdin.close()
p.wait()
# this dictionary is used for settings in the notebook
global_pixray_settings = {}
def setup_parser(vq_parser):
# Create the parser
# vq_parser = argparse.ArgumentParser(description='Image generation using VQGAN+CLIP')
# Add the arguments
vq_parser.add_argument("-p", "--prompts", type=str, help="Text prompts", default=[], dest='prompts')
vq_parser.add_argument("-sp", "--spot", type=str, help="Spot Text prompts", default=[], dest='spot_prompts')
vq_parser.add_argument("-spo", "--spot_off", type=str, help="Spot off Text prompts", default=[], dest='spot_prompts_off')
vq_parser.add_argument("-spf", "--spot_file", type=str, help="Custom spot file", default=None, dest='spot_file')
vq_parser.add_argument("-l", "--labels", type=str, help="ImageNet labels", default=[], dest='labels')
vq_parser.add_argument("-vp", "--vector_prompts", type=str, help="Vector prompts", default=[], dest='vector_prompts')
vq_parser.add_argument("-ip", "--image_prompts", type=str, help="Image prompts", default=[], dest='image_prompts')
vq_parser.add_argument("-ipw", "--image_prompt_weight", type=float, help="Weight for image prompt", default=None, dest='image_prompt_weight')
vq_parser.add_argument("-ips", "--image_prompt_shuffle", type=bool, help="Shuffle image prompts", default=False, dest='image_prompt_shuffle')
vq_parser.add_argument("-il", "--image_labels", type=str, help="Image prompts", default=None, dest='image_labels')
vq_parser.add_argument("-ilw", "--image_label_weight", type=float, help="Weight for image prompt", default=1.0, dest='image_label_weight')
vq_parser.add_argument("-i", "--iterations", type=int, help="Number of iterations", default=None, dest='iterations')
vq_parser.add_argument("-se", "--save_every", type=int, help="Save image iterations", default=10, dest='save_every')
vq_parser.add_argument("-de", "--display_every", type=int, help="Display image iterations", default=20, dest='display_every')
vq_parser.add_argument("-dc", "--display_clear", type=bool, help="Clear dispaly when updating", default=False, dest='display_clear')
vq_parser.add_argument("-ove", "--overlay_every", type=int, help="Overlay image iterations", default=None, dest='overlay_every')
vq_parser.add_argument("-ovo", "--overlay_offset", type=int, help="Overlay image iteration offset", default=0, dest='overlay_offset')
vq_parser.add_argument("-ovi", "--overlay_image", type=str, help="Overlay image (if not init)", default=None, dest='overlay_image')
vq_parser.add_argument("-qua", "--quality", type=str, help="draft, normal, best", default="normal", dest='quality')
vq_parser.add_argument("-asp", "--aspect", type=str, help="widescreen, square", default="widescreen", dest='aspect')
vq_parser.add_argument("-ezs", "--ezsize", type=str, help="small, medium, large", default=None, dest='ezsize')
vq_parser.add_argument("-sca", "--scale", type=float, help="scale (instead of ezsize)", default=None, dest='scale')
vq_parser.add_argument("-ova", "--overlay_alpha", type=int, help="Overlay alpha (0-255)", default=None, dest='overlay_alpha')
vq_parser.add_argument("-s", "--size", nargs=2, type=int, help="Image size (width height)", default=None, dest='size')
vq_parser.add_argument("-ii", "--init_image", type=str, help="Initial image", default=None, dest='init_image')
vq_parser.add_argument("-iia", "--init_image_alpha", type=int, help="Init image alpha (0-255)", default=200, dest='init_image_alpha')
vq_parser.add_argument("-in", "--init_noise", type=str, help="Initial noise image (pixels or gradient)", default="pixels", dest='init_noise')
vq_parser.add_argument("-ti", "--target_images", type=str, help="Target images", default=None, dest='target_images')
vq_parser.add_argument("-tiw", "--target_image_weight", type=float, help="Target images weight", default=1.0, dest='target_image_weight')
vq_parser.add_argument("-twp", "--target_weight_pix", type=float, help="Target weight pix loss", default=0., dest='target_weight_pix')
vq_parser.add_argument("-anim", "--animation_dir", type=str, help="Animation output dir", default=None, dest='animation_dir')
vq_parser.add_argument("-ana", "--animation_alpha", type=int, help="Forward blend for consistency", default=128, dest='animation_alpha')
vq_parser.add_argument("-iw", "--init_weight", type=float, help="Initial weight (main=spherical)", default=None, dest='init_weight')
vq_parser.add_argument("-iwd", "--init_weight_dist", type=float, help="Initial weight dist loss", default=0., dest='init_weight_dist')
vq_parser.add_argument("-iwc", "--init_weight_cos", type=float, help="Initial weight cos loss", default=0., dest='init_weight_cos')
vq_parser.add_argument("-iwp", "--init_weight_pix", type=float, help="Initial weight pix loss", default=0., dest='init_weight_pix')
vq_parser.add_argument("-m", "--clip_models", type=str, help="CLIP model", default=None, dest='clip_models')
vq_parser.add_argument("-nps", "--noise_prompt_seeds", nargs="*", type=int, help="Noise prompt seeds", default=[], dest='noise_prompt_seeds')
vq_parser.add_argument("-npw", "--noise_prompt_weights", nargs="*", type=float, help="Noise prompt weights", default=[], dest='noise_prompt_weights')
vq_parser.add_argument("-lr", "--learning_rate", type=float, help="Learning rate", default=0.2, dest='learning_rate')
vq_parser.add_argument("-lrd", "--learning_rate_drops", nargs="*", type=float, help="When to drop learning rate (relative to iterations)", default=[75], dest='learning_rate_drops')
vq_parser.add_argument("-as", "--auto_stop", type=bool, help="Auto stopping", default=False, dest='auto_stop')
vq_parser.add_argument("-cuts", "--num_cuts", type=int, help="Number of cuts", default=None, dest='num_cuts')
vq_parser.add_argument("-bats", "--batches", type=int, help="How many batches of cuts", default=1, dest='batches')
vq_parser.add_argument("-cutp", "--cut_power", type=float, help="Cut power", default=1., dest='cut_pow')
vq_parser.add_argument("-sd", "--seed", type=int, help="Seed", default=None, dest='seed')
vq_parser.add_argument("-opt", "--optimiser", type=str, help="Optimiser (Adam, AdamW, Adagrad, Adamax, DiffGrad, AdamP or RAdam)", default='Adam', dest='optimiser')
vq_parser.add_argument("-o", "--output", type=str, help="Output file", default="output.png", dest='output')
vq_parser.add_argument("-vid", "--video", type=bool, help="Create video frames?", default=False, dest='make_video')
vq_parser.add_argument("-d", "--deterministic", type=bool, help="Enable cudnn.deterministic?", default=False, dest='cudnn_determinism')
vq_parser.add_argument("-mo", "--do_mono", type=bool, help="Monochromatic", default=False, dest='do_mono')
vq_parser.add_argument("-epw", "--enforce_palette_annealing", type=int, help="enforce palette annealing, 0 -- skip", default=5000, dest='enforce_palette_annealing')
vq_parser.add_argument("-tp", "--target_palette", type=str, help="target palette", default=None, dest='target_palette')
vq_parser.add_argument("-tpl", "--target_palette_length", type=int, help="target palette length", default=16, dest='target_palette_length')
vq_parser.add_argument("-smo", "--smoothness", type=float, help="encourage smoothness, 0 -- skip", default=0, dest='smoothness')
vq_parser.add_argument("-est", "--smoothness_type", type=str, help="enforce smoothness type: default/clipped/log", default='default', dest='smoothness_type')
vq_parser.add_argument("-sat", "--saturation", type=float, help="encourage saturation, 0 -- skip", default=0, dest='saturation')
return vq_parser
square_size = [144, 144]
widescreen_size = [200, 112] # at the small size this becomes 192,112
def process_args(vq_parser, namespace=None):
global global_aspect_width
    global cur_iteration, cur_anim_index, anim_output_files, anim_cur_zs, anim_next_zs
global global_spot_file
global best_loss, best_iter, best_z, num_loss_drop, max_loss_drops, iter_drop_delay
    if namespace is None:
# command line: use ARGV to get args
args = vq_parser.parse_args()
elif isnotebook():
args = vq_parser.parse_args(args=[], namespace=namespace)
else:
# sometimes there are both settings and cmd line
args = vq_parser.parse_args(namespace=namespace)
if args.cudnn_determinism:
torch.backends.cudnn.deterministic = True
quality_to_clip_models_table = {
'draft': 'ViT-B/32',
'normal': 'ViT-B/32,ViT-B/16',
'better': 'RN50,ViT-B/32,ViT-B/16',
'best': 'RN50x4,ViT-B/32,ViT-B/16'
}
quality_to_iterations_table = {
'draft': 200,
'normal': 300,
'better': 400,
'best': 500
}
quality_to_scale_table = {
'draft': 1,
'normal': 2,
'better': 3,
'best': 4
}
    # this should be replaced with logic that does something
# smart based on available memory (eg: size, num_models, etc)
quality_to_num_cuts_table = {
'draft': 40,
'normal': 40,
'better': 40,
'best': 40
}
if args.quality not in quality_to_clip_models_table:
print("Qualitfy setting not understood, aborting -> ", args.quality)
exit(1)
if args.clip_models is None:
args.clip_models = quality_to_clip_models_table[args.quality]
if args.iterations is None:
args.iterations = quality_to_iterations_table[args.quality]
if args.num_cuts is None:
args.num_cuts = quality_to_num_cuts_table[args.quality]
if args.ezsize is None and args.scale is None:
args.scale = quality_to_scale_table[args.quality]
size_to_scale_table = {
'small': 1,
'medium': 2,
'large': 4
}
aspect_to_size_table = {
'square': [150, 150],
'widescreen': [200, 112]
}
if args.size is not None:
global_aspect_width = args.size[0] / args.size[1]
elif args.aspect == "widescreen":
global_aspect_width = 16/9
else:
global_aspect_width = 1
# determine size if not set
if args.size is None:
size_scale = args.scale
if size_scale is None:
if args.ezsize in size_to_scale_table:
size_scale = size_to_scale_table[args.ezsize]
else:
print("EZ Size not understood, aborting -> ", args.ezsize)
exit(1)
if args.aspect in aspect_to_size_table:
base_size = aspect_to_size_table[args.aspect]
base_width = int(size_scale * base_size[0])
base_height = int(size_scale * base_size[1])
args.size = [base_width, base_height]
else:
print("aspect not understood, aborting -> ", args.aspect)
exit(1)
if args.init_noise.lower() == "none":
args.init_noise = None
# Split text prompts using the pipe character
if args.prompts:
args.prompts = [phrase.strip() for phrase in args.prompts.split("|")]
# Split text prompts using the pipe character
if args.spot_prompts:
args.spot_prompts = [phrase.strip() for phrase in args.spot_prompts.split("|")]
# Split text prompts using the pipe character
if args.spot_prompts_off:
args.spot_prompts_off = [phrase.strip() for phrase in args.spot_prompts_off.split("|")]
# Split text labels using the pipe character
if args.labels:
args.labels = [phrase.strip() for phrase in args.labels.split("|")]
# Split target images using the pipe character
if args.image_prompts:
args.image_prompts = real_glob(args.image_prompts)
# Split text prompts using the pipe character
if args.vector_prompts:
args.vector_prompts = [phrase.strip() for phrase in args.vector_prompts.split("|")]
if args.target_palette is not None:
args.target_palette = palette_from_string(args.target_palette)
if args.overlay_every is not None and args.overlay_every <= 0:
args.overlay_every = None
clip_models = args.clip_models.split(",")
args.clip_models = [model.strip() for model in clip_models]
# Make video steps directory
if args.make_video:
if not os.path.exists('steps'):
os.mkdir('steps')
if args.learning_rate_drops is None:
args.learning_rate_drops = []
else:
args.learning_rate_drops = [int(map_number(n, 0, 100, 0, args.iterations-1)) for n in args.learning_rate_drops]
# reset global animation variables
cur_iteration=0
best_iter = cur_iteration
best_loss = 1e20
num_loss_drop = 0
max_loss_drops = len(args.learning_rate_drops)
iter_drop_delay = 12
best_z = None
cur_anim_index=None
anim_output_files=[]
anim_cur_zs=[]
anim_next_zs=[]
global_spot_file = args.spot_file
return args
def reset_settings():
global global_pixray_settings
global_pixray_settings = {}
def add_settings(**kwargs):
global global_pixray_settings
for k, v in kwargs.items():
if v is None:
# just remove the key if it is there
global_pixray_settings.pop(k, None)
else:
global_pixray_settings[k] = v
def get_settings():
global global_pixray_settings
return global_pixray_settings.copy()
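# Illustrative notebook-style use of the settings helpers above (a sketch only;
# it assumes this module is importable as `pixray`, and the prompt/option values
# are placeholders rather than anything taken from this file):
#
#   import pixray
#   pixray.reset_settings()
#   pixray.add_settings(prompts="a watercolor fox", quality="draft", iterations=100)
#   settings = pixray.apply_settings()
#   pixray.do_init(settings)
#   pixray.do_run(settings)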
def apply_settings():
global global_pixray_settings
settingsDict = None
# first pass - just get the drawer
# Create the parser
vq_parser = argparse.ArgumentParser(description='Image generation using VQGAN+CLIP')
vq_parser.add_argument("--drawer", type=str, help="clipdraw, pixeldraw, etc", default="vqgan", dest='drawer')
settingsDict = SimpleNamespace(**global_pixray_settings)
settings_core, unknown = vq_parser.parse_known_args(namespace=settingsDict)
vq_parser = setup_parser(vq_parser)
class_table[settings_core.drawer].add_settings(vq_parser)
if len(global_pixray_settings) > 0:
# check for any bogus entries in the settings
dests = [d.dest for d in vq_parser._actions]
for k in global_pixray_settings:
            if k not in dests:
raise ValueError(f"Requested setting not found, aborting: {k}={global_pixray_settings[k]}")
# convert dictionary to easyDict
# which can be used as an argparse namespace instead
# settingsDict = easydict.EasyDict(global_pixray_settings)
settingsDict = SimpleNamespace(**global_pixray_settings)
settings = process_args(vq_parser, settingsDict)
return settings
def command_line_override():
global global_pixray_settings
settingsDict = None
    vq_parser = argparse.ArgumentParser(description='Image generation using VQGAN+CLIP')
    vq_parser = setup_parser(vq_parser)
settings = process_args(vq_parser)
return settings
def main():
settings = apply_settings()
do_init(settings)
do_run(settings)
if __name__ == '__main__':
main() |
the-stack_0_4963 | import subprocess
import typer
from typer.testing import CliRunner
from docs_src.first_steps import tutorial004 as mod
runner = CliRunner()
app = typer.Typer()
app.command()(mod.main)
def test_help():
result = runner.invoke(app, ["--help"])
assert result.exit_code == 0
assert "Arguments:" in result.output
assert "NAME [required]" in result.output
assert "LASTNAME [required]" in result.output
assert "--formal / --no-formal" in result.output
def test_1():
result = runner.invoke(app, ["Camila", "Gutiérrez"])
assert result.exit_code == 0
assert "Hello Camila Gutiérrez" in result.output
def test_formal_1():
result = runner.invoke(app, ["Camila", "Gutiérrez", "--formal"])
assert result.exit_code == 0
assert "Good day Ms. Camila Gutiérrez." in result.output
def test_formal_2():
result = runner.invoke(app, ["Camila", "--formal", "Gutiérrez"])
assert result.exit_code == 0
assert "Good day Ms. Camila Gutiérrez." in result.output
def test_formal_3():
result = runner.invoke(app, ["--formal", "Camila", "Gutiérrez"])
assert result.exit_code == 0
assert "Good day Ms. Camila Gutiérrez." in result.output
def test_script():
result = subprocess.run(
["coverage", "run", mod.__file__, "--help"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
)
assert "Usage" in result.stdout
|
the-stack_0_4964 | # coding: utf-8
"""
Dyspatch API
# Introduction The Dyspatch API is based on the REST paradigm, and features resource based URLs with standard HTTP response codes to indicate errors. We use standard HTTP authentication and request verbs, and all responses are JSON formatted. See our [Implementation Guide](https://docs.dyspatch.io/development/implementing_dyspatch/) for more details on how to implement Dyspatch. ## API Client Libraries Dyspatch provides API Clients for popular languages and web frameworks. - [Java](https://github.com/getdyspatch/dyspatch-java) - [Javascript](https://github.com/getdyspatch/dyspatch-javascript) - [Python](https://github.com/getdyspatch/dyspatch-python) - [C#](https://github.com/getdyspatch/dyspatch-dotnet) - [Go](https://github.com/getdyspatch/dyspatch-golang) - [Ruby](https://github.com/getdyspatch/dyspatch-ruby) # noqa: E501
The version of the OpenAPI document: 2020.11
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from dyspatch_client.configuration import Configuration
class TemplateRead(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'name': 'str',
'description': 'str',
'url': 'str',
'compiled': 'CompiledRead',
'created_at': 'datetime',
'updated_at': 'datetime',
'localizations': 'list[LocalizationMetaRead]'
}
attribute_map = {
'id': 'id',
'name': 'name',
'description': 'description',
'url': 'url',
'compiled': 'compiled',
'created_at': 'createdAt',
'updated_at': 'updatedAt',
'localizations': 'localizations'
}
def __init__(self, id=None, name=None, description=None, url=None, compiled=None, created_at=None, updated_at=None, localizations=None, local_vars_configuration=None): # noqa: E501
"""TemplateRead - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._name = None
self._description = None
self._url = None
self._compiled = None
self._created_at = None
self._updated_at = None
self._localizations = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if description is not None:
self.description = description
if url is not None:
self.url = url
if compiled is not None:
self.compiled = compiled
if created_at is not None:
self.created_at = created_at
if updated_at is not None:
self.updated_at = updated_at
if localizations is not None:
self.localizations = localizations
@property
def id(self):
"""Gets the id of this TemplateRead. # noqa: E501
An opaque, unique identifier for a template # noqa: E501
:return: The id of this TemplateRead. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this TemplateRead.
An opaque, unique identifier for a template # noqa: E501
:param id: The id of this TemplateRead. # noqa: E501
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this TemplateRead. # noqa: E501
The name of a template # noqa: E501
:return: The name of this TemplateRead. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this TemplateRead.
The name of a template # noqa: E501
:param name: The name of this TemplateRead. # noqa: E501
:type: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this TemplateRead. # noqa: E501
A description of the template # noqa: E501
:return: The description of this TemplateRead. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this TemplateRead.
A description of the template # noqa: E501
:param description: The description of this TemplateRead. # noqa: E501
:type: str
"""
self._description = description
@property
def url(self):
"""Gets the url of this TemplateRead. # noqa: E501
The API url for a specific template # noqa: E501
:return: The url of this TemplateRead. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this TemplateRead.
The API url for a specific template # noqa: E501
:param url: The url of this TemplateRead. # noqa: E501
:type: str
"""
self._url = url
@property
def compiled(self):
"""Gets the compiled of this TemplateRead. # noqa: E501
:return: The compiled of this TemplateRead. # noqa: E501
:rtype: CompiledRead
"""
return self._compiled
@compiled.setter
def compiled(self, compiled):
"""Sets the compiled of this TemplateRead.
:param compiled: The compiled of this TemplateRead. # noqa: E501
:type: CompiledRead
"""
self._compiled = compiled
@property
def created_at(self):
"""Gets the created_at of this TemplateRead. # noqa: E501
The time of initial creation # noqa: E501
:return: The created_at of this TemplateRead. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this TemplateRead.
The time of initial creation # noqa: E501
:param created_at: The created_at of this TemplateRead. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def updated_at(self):
"""Gets the updated_at of this TemplateRead. # noqa: E501
The time of last update # noqa: E501
:return: The updated_at of this TemplateRead. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this TemplateRead.
The time of last update # noqa: E501
:param updated_at: The updated_at of this TemplateRead. # noqa: E501
:type: datetime
"""
self._updated_at = updated_at
@property
def localizations(self):
"""Gets the localizations of this TemplateRead. # noqa: E501
A list of the Template's available localizations # noqa: E501
:return: The localizations of this TemplateRead. # noqa: E501
:rtype: list[LocalizationMetaRead]
"""
return self._localizations
@localizations.setter
def localizations(self, localizations):
"""Sets the localizations of this TemplateRead.
A list of the Template's available localizations # noqa: E501
:param localizations: The localizations of this TemplateRead. # noqa: E501
:type: list[LocalizationMetaRead]
"""
self._localizations = localizations
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TemplateRead):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TemplateRead):
return True
return self.to_dict() != other.to_dict()
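# Minimal illustrative usage (a sketch; the id/name values are placeholders,
# not real Dyspatch data):
#
#   template = TemplateRead(id="tem_123", name="Welcome email")
#   template.to_dict()   # -> {'id': 'tem_123', 'name': 'Welcome email', ...}
#   str(template)        # pretty-printed via to_str()/__repr__()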
|
the-stack_0_4965 | import sys
from typing import ( # type: ignore
TYPE_CHECKING,
AbstractSet,
Any,
ClassVar,
Dict,
Generator,
List,
Mapping,
NewType,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
_eval_type,
cast,
get_type_hints,
)
from typing_extensions import Annotated, Literal
try:
from typing import _TypingBase as typing_base # type: ignore
except ImportError:
from typing import _Final as typing_base # type: ignore
try:
from typing import GenericAlias # type: ignore
except ImportError:
# python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] and so on)
GenericAlias = ()
if sys.version_info < (3, 7):
if TYPE_CHECKING:
class ForwardRef:
def __init__(self, arg: Any):
pass
def _eval_type(self, globalns: Any, localns: Any) -> Any:
pass
else:
from typing import _ForwardRef as ForwardRef
else:
from typing import ForwardRef
if sys.version_info < (3, 7):
def evaluate_forwardref(type_: ForwardRef, globalns: Any, localns: Any) -> Any:
return type_._eval_type(globalns, localns)
elif sys.version_info < (3, 9):
def evaluate_forwardref(type_: ForwardRef, globalns: Any, localns: Any) -> Any:
return type_._evaluate(globalns, localns)
else:
def evaluate_forwardref(type_: ForwardRef, globalns: Any, localns: Any) -> Any:
# Even though it is the right signature for python 3.9, mypy complains with
# `error: Too many arguments for "_evaluate" of "ForwardRef"` hence the cast...
return cast(Any, type_)._evaluate(globalns, localns, set())
if sys.version_info < (3, 9):
# Ensure we always get all the whole `Annotated` hint, not just the annotated type.
# For 3.6 to 3.8, `get_type_hints` doesn't recognize `typing_extensions.Annotated`,
# so it already returns the full annotation
get_all_type_hints = get_type_hints
else:
def get_all_type_hints(obj: Any, globalns: Any = None, localns: Any = None) -> Any:
return get_type_hints(obj, globalns, localns, include_extras=True)
if sys.version_info < (3, 7):
from typing import Callable as Callable
AnyCallable = Callable[..., Any]
NoArgAnyCallable = Callable[[], Any]
else:
from collections.abc import Callable as Callable
from typing import Callable as TypingCallable
AnyCallable = TypingCallable[..., Any]
NoArgAnyCallable = TypingCallable[[], Any]
# Annotated[...] is implemented by returning an instance of one of these classes, depending on
# python/typing_extensions version.
AnnotatedTypeNames = {'AnnotatedMeta', '_AnnotatedAlias'}
if sys.version_info < (3, 8):
def get_origin(t: Type[Any]) -> Optional[Type[Any]]:
if type(t).__name__ in AnnotatedTypeNames:
return cast(Type[Any], Annotated) # mypy complains about _SpecialForm in py3.6
return getattr(t, '__origin__', None)
else:
from typing import get_origin as _typing_get_origin
def get_origin(tp: Type[Any]) -> Type[Any]:
"""
We can't directly use `typing.get_origin` since we need a fallback to support
custom generic classes like `ConstrainedList`
It should be useless once https://github.com/cython/cython/issues/3537 is
solved and https://github.com/samuelcolvin/pydantic/pull/1753 is merged.
"""
if type(tp).__name__ in AnnotatedTypeNames:
return cast(Type[Any], Annotated) # mypy complains about _SpecialForm
return _typing_get_origin(tp) or getattr(tp, '__origin__', None)
if sys.version_info < (3, 7): # noqa: C901 (ignore complexity)
def get_args(t: Type[Any]) -> Tuple[Any, ...]:
"""Simplest get_args compatibility layer possible.
The Python 3.6 typing module does not have `_GenericAlias` so
this won't work for everything. In particular this will not
support the `generics` module (we don't support generic models in
python 3.6).
"""
if type(t).__name__ in AnnotatedTypeNames:
return t.__args__ + t.__metadata__
return getattr(t, '__args__', ())
elif sys.version_info < (3, 8): # noqa: C901
from typing import _GenericAlias
def get_args(t: Type[Any]) -> Tuple[Any, ...]:
"""Compatibility version of get_args for python 3.7.
Mostly compatible with the python 3.8 `typing` module version
and able to handle almost all use cases.
"""
if type(t).__name__ in AnnotatedTypeNames:
return t.__args__ + t.__metadata__
if isinstance(t, _GenericAlias):
res = t.__args__
if t.__origin__ is Callable and res and res[0] is not Ellipsis:
res = (list(res[:-1]), res[-1])
return res
return getattr(t, '__args__', ())
else:
from typing import get_args as _typing_get_args
def _generic_get_args(tp: Type[Any]) -> Tuple[Any, ...]:
"""
In python 3.9, `typing.Dict`, `typing.List`, ...
do have an empty `__args__` by default (instead of the generic ~T for example).
In order to still support `Dict` for example and consider it as `Dict[Any, Any]`,
we retrieve the `_nparams` value that tells us how many parameters it needs.
"""
if hasattr(tp, '_nparams'):
return (Any,) * tp._nparams
return ()
def get_args(tp: Type[Any]) -> Tuple[Any, ...]:
"""Get type arguments with all substitutions performed.
For unions, basic simplifications used by Union constructor are performed.
Examples::
get_args(Dict[str, int]) == (str, int)
get_args(int) == ()
get_args(Union[int, Union[T, int], str][int]) == (int, str)
get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
get_args(Callable[[], T][int]) == ([], int)
"""
if type(tp).__name__ in AnnotatedTypeNames:
return tp.__args__ + tp.__metadata__
# the fallback is needed for the same reasons as `get_origin` (see above)
return _typing_get_args(tp) or getattr(tp, '__args__', ()) or _generic_get_args(tp)
if TYPE_CHECKING:
from .fields import ModelField
TupleGenerator = Generator[Tuple[str, Any], None, None]
DictStrAny = Dict[str, Any]
DictAny = Dict[Any, Any]
SetStr = Set[str]
ListStr = List[str]
IntStr = Union[int, str]
AbstractSetIntStr = AbstractSet[IntStr]
DictIntStrAny = Dict[IntStr, Any]
MappingIntStrAny = Mapping[IntStr, Any]
CallableGenerator = Generator[AnyCallable, None, None]
ReprArgs = Sequence[Tuple[Optional[str], Any]]
__all__ = (
'ForwardRef',
'Callable',
'AnyCallable',
'NoArgAnyCallable',
'NoneType',
'NONE_TYPES',
'display_as_type',
'resolve_annotations',
'is_callable_type',
'is_literal_type',
'all_literal_values',
'is_namedtuple',
'is_typeddict',
'is_new_type',
'new_type_supertype',
'is_classvar',
'update_field_forward_refs',
'TupleGenerator',
'DictStrAny',
'DictAny',
'SetStr',
'ListStr',
'IntStr',
'AbstractSetIntStr',
'DictIntStrAny',
'CallableGenerator',
'ReprArgs',
'CallableGenerator',
'GenericAlias',
'get_args',
'get_origin',
'typing_base',
'get_all_type_hints',
)
NoneType = None.__class__
NONE_TYPES: Set[Any] = {None, NoneType, Literal[None]}
def display_as_type(v: Type[Any]) -> str:
if not isinstance(v, typing_base) and not isinstance(v, GenericAlias) and not isinstance(v, type):
v = v.__class__
if isinstance(v, GenericAlias):
# Generic alias are constructs like `list[int]`
return str(v).replace('typing.', '')
try:
return v.__name__
except AttributeError:
# happens with typing objects
return str(v).replace('typing.', '')
def resolve_annotations(raw_annotations: Dict[str, Type[Any]], module_name: Optional[str]) -> Dict[str, Type[Any]]:
"""
Partially taken from typing.get_type_hints.
Resolve string or ForwardRef annotations into type objects if possible.
"""
base_globals: Optional[Dict[str, Any]] = None
if module_name:
try:
module = sys.modules[module_name]
except KeyError:
# happens occasionally, see https://github.com/samuelcolvin/pydantic/issues/2363
pass
else:
base_globals = module.__dict__
annotations = {}
for name, value in raw_annotations.items():
if isinstance(value, str):
if sys.version_info >= (3, 7):
value = ForwardRef(value, is_argument=False)
else:
value = ForwardRef(value)
try:
value = _eval_type(value, base_globals, None)
except NameError:
# this is ok, it can be fixed with update_forward_refs
pass
annotations[name] = value
return annotations
def is_callable_type(type_: Type[Any]) -> bool:
return type_ is Callable or get_origin(type_) is Callable
if sys.version_info >= (3, 7):
def is_literal_type(type_: Type[Any]) -> bool:
return Literal is not None and get_origin(type_) is Literal
def literal_values(type_: Type[Any]) -> Tuple[Any, ...]:
return get_args(type_)
else:
def is_literal_type(type_: Type[Any]) -> bool:
return Literal is not None and hasattr(type_, '__values__') and type_ == Literal[type_.__values__]
def literal_values(type_: Type[Any]) -> Tuple[Any, ...]:
return type_.__values__
def all_literal_values(type_: Type[Any]) -> Tuple[Any, ...]:
"""
This method is used to retrieve all Literal values as
Literal can be used recursively (see https://www.python.org/dev/peps/pep-0586)
e.g. `Literal[Literal[Literal[1, 2, 3], "foo"], 5, None]`
"""
if not is_literal_type(type_):
return (type_,)
values = literal_values(type_)
return tuple(x for value in values for x in all_literal_values(value))
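# For instance (illustrative): all_literal_values(Literal[Literal[1, 2], 'a'])
# returns (1, 2, 'a') -- nested Literal values are flattened into one tuple.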
def is_namedtuple(type_: Type[Any]) -> bool:
"""
Check if a given class is a named tuple.
It can be either a `typing.NamedTuple` or `collections.namedtuple`
"""
from .utils import lenient_issubclass
return lenient_issubclass(type_, tuple) and hasattr(type_, '_fields')
def is_typeddict(type_: Type[Any]) -> bool:
"""
Check if a given class is a typed dict (from `typing` or `typing_extensions`)
In 3.10, there will be a public method (https://docs.python.org/3.10/library/typing.html#typing.is_typeddict)
"""
from .utils import lenient_issubclass
return lenient_issubclass(type_, dict) and hasattr(type_, '__total__')
test_type = NewType('test_type', str)
def is_new_type(type_: Type[Any]) -> bool:
"""
Check whether type_ was created using typing.NewType
"""
return isinstance(type_, test_type.__class__) and hasattr(type_, '__supertype__') # type: ignore
def new_type_supertype(type_: Type[Any]) -> Type[Any]:
while hasattr(type_, '__supertype__'):
type_ = type_.__supertype__
return type_
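# Illustrative example: given UserId = NewType('UserId', int), is_new_type(UserId)
# is True and new_type_supertype(UserId) is int.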
def _check_classvar(v: Optional[Type[Any]]) -> bool:
if v is None:
return False
return v.__class__ == ClassVar.__class__ and (sys.version_info < (3, 7) or getattr(v, '_name', None) == 'ClassVar')
def is_classvar(ann_type: Type[Any]) -> bool:
return _check_classvar(ann_type) or _check_classvar(get_origin(ann_type))
def update_field_forward_refs(field: 'ModelField', globalns: Any, localns: Any) -> None:
"""
Try to update ForwardRefs on fields based on this ModelField, globalns and localns.
"""
if field.type_.__class__ == ForwardRef:
field.type_ = evaluate_forwardref(field.type_, globalns, localns or None)
field.prepare()
if field.sub_fields:
for sub_f in field.sub_fields:
update_field_forward_refs(sub_f, globalns=globalns, localns=localns)
def get_class(type_: Type[Any]) -> Union[None, bool, Type[Any]]:
"""
Tries to get the class of a Type[T] annotation. Returns True if Type is used
without brackets. Otherwise returns None.
"""
try:
origin = get_origin(type_)
if origin is None: # Python 3.6
origin = type_
if issubclass(origin, Type): # type: ignore
if not get_args(type_) or not isinstance(get_args(type_)[0], type):
return True
return get_args(type_)[0]
except (AttributeError, TypeError):
pass
return None
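# Illustrative behaviour of get_class, matching its docstring:
#   get_class(Type[int]) -> int    (concrete class inside Type[...])
#   get_class(Type)      -> True   (bare Type used without brackets)
#   get_class(int)       -> None   (not a Type[...] annotation)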
|
the-stack_0_4966 | import math
import collections
class NaiveBayes:
classes = ['spam', 'ham']
# Word Lists
spam_list = []
ham_list = []
spam_file_count = 0
ham_file_count = 0
def __init__(self, spam_list, ham_list, spam_file_count, ham_file_count):
self.spam_list = spam_list
self.ham_list = ham_list
self.spam_file_count = spam_file_count
self.ham_file_count = ham_file_count
self.vocabulary = set(spam_list).union(set(ham_list))
self.spam_counter = collections.Counter(spam_list)
self.ham_counter = collections.Counter(ham_list)
def classify(self, test_file_word_list):
        '''
        Classify a test file's word list as spam or ham using multinomial Naive Bayes.

        log P(spam | test_file) is proportional to
            log P(spam) + log P(word1 | spam) + ... + log P(wordn | spam)
        where, as implemented below,
            log P(spam)        = log( len(spam_list) / (len(spam_list) + len(ham_list)) )
            log P(word | spam) = log( (count of word in spam_list + 1)
                                      / (len(spam_list) + len(vocabulary)) )
        The "+ 1" in the numerator and "+ len(vocabulary)" in the denominator are
        add-one (Laplace) smoothing, so unseen words never zero out a class.
        The ham side is computed symmetrically, and the larger log-probability wins.
        '''
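        # Worked example with made-up numbers (illustrative only): with a
        # spam_list of 8 words, a ham_list of 12 words and a vocabulary of 10
        # distinct words, a test word seen 3 times in spam contributes
        # log((3 + 1) / (8 + 10)) to log_prob_spam, while a word never seen in
        # spam still contributes log(1 / 18) thanks to the smoothing.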
log_prob_spam = math.log(len(self.spam_list)/(len(self.spam_list)+len(self.ham_list)))
denominator = len(self.spam_list) + len(self.vocabulary)
for word in test_file_word_list:
numerator = self.spam_counter.get(word.lower(), 0) + 1
log_prob_spam += math.log(numerator/denominator)
log_prob_ham = math.log(len(self.ham_list)/(len(self.spam_list)+len(self.ham_list)))
denominator = len(self.ham_list) + len(self.vocabulary)
for word in test_file_word_list:
numerator = self.ham_counter.get(word.lower(), 0) + 1
log_prob_ham += math.log(numerator/denominator)
if log_prob_spam > log_prob_ham:
return self.classes[0]
else:
return self.classes[1] |
the-stack_0_4968 | import torch
from torch import nn
import torch.nn.functional as F
"""
Differences with V-Net
Adding nn.Tanh in the end of the conv. to make the outputs in [-1, 1].
"""
class ConvBlock(nn.Module):
def __init__(self, n_stages, n_filters_in, n_filters_out, normalization='none'):
super(ConvBlock, self).__init__()
ops = []
for i in range(n_stages):
if i==0:
input_channel = n_filters_in
else:
input_channel = n_filters_out
ops.append(nn.Conv3d(input_channel, n_filters_out, 3, padding=1))
if normalization == 'batchnorm':
ops.append(nn.BatchNorm3d(n_filters_out))
elif normalization == 'groupnorm':
ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
elif normalization == 'instancenorm':
ops.append(nn.InstanceNorm3d(n_filters_out))
elif normalization != 'none':
assert False
ops.append(nn.ReLU(inplace=True))
self.conv = nn.Sequential(*ops)
def forward(self, x):
x = self.conv(x)
return x
class ResidualConvBlock(nn.Module):
def __init__(self, n_stages, n_filters_in, n_filters_out, normalization='none'):
super(ResidualConvBlock, self).__init__()
ops = []
for i in range(n_stages):
if i == 0:
input_channel = n_filters_in
else:
input_channel = n_filters_out
ops.append(nn.Conv3d(input_channel, n_filters_out, 3, padding=1))
if normalization == 'batchnorm':
ops.append(nn.BatchNorm3d(n_filters_out))
elif normalization == 'groupnorm':
ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
elif normalization == 'instancenorm':
ops.append(nn.InstanceNorm3d(n_filters_out))
elif normalization != 'none':
assert False
if i != n_stages-1:
ops.append(nn.ReLU(inplace=True))
self.conv = nn.Sequential(*ops)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = (self.conv(x) + x)
x = self.relu(x)
return x
class DownsamplingConvBlock(nn.Module):
def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
super(DownsamplingConvBlock, self).__init__()
ops = []
if normalization != 'none':
ops.append(nn.Conv3d(n_filters_in, n_filters_out, stride, padding=0, stride=stride))
if normalization == 'batchnorm':
ops.append(nn.BatchNorm3d(n_filters_out))
elif normalization == 'groupnorm':
ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
elif normalization == 'instancenorm':
ops.append(nn.InstanceNorm3d(n_filters_out))
else:
assert False
else:
ops.append(nn.Conv3d(n_filters_in, n_filters_out, stride, padding=0, stride=stride))
ops.append(nn.ReLU(inplace=True))
self.conv = nn.Sequential(*ops)
def forward(self, x):
x = self.conv(x)
return x
class UpsamplingDeconvBlock(nn.Module):
def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
super(UpsamplingDeconvBlock, self).__init__()
ops = []
if normalization != 'none':
ops.append(nn.ConvTranspose3d(n_filters_in, n_filters_out, stride, padding=0, stride=stride))
if normalization == 'batchnorm':
ops.append(nn.BatchNorm3d(n_filters_out))
elif normalization == 'groupnorm':
ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
elif normalization == 'instancenorm':
ops.append(nn.InstanceNorm3d(n_filters_out))
else:
assert False
else:
ops.append(nn.ConvTranspose3d(n_filters_in, n_filters_out, stride, padding=0, stride=stride))
ops.append(nn.ReLU(inplace=True))
self.conv = nn.Sequential(*ops)
def forward(self, x):
x = self.conv(x)
return x
class Upsampling(nn.Module):
def __init__(self, n_filters_in, n_filters_out, stride=2, normalization='none'):
super(Upsampling, self).__init__()
ops = []
ops.append(nn.Upsample(scale_factor=stride, mode='trilinear',align_corners=False))
ops.append(nn.Conv3d(n_filters_in, n_filters_out, kernel_size=3, padding=1))
if normalization == 'batchnorm':
ops.append(nn.BatchNorm3d(n_filters_out))
elif normalization == 'groupnorm':
ops.append(nn.GroupNorm(num_groups=16, num_channels=n_filters_out))
elif normalization == 'instancenorm':
ops.append(nn.InstanceNorm3d(n_filters_out))
elif normalization != 'none':
assert False
ops.append(nn.ReLU(inplace=True))
self.conv = nn.Sequential(*ops)
def forward(self, x):
x = self.conv(x)
return x
class VNet(nn.Module):
def __init__(self, n_channels=3, n_classes=2, n_filters=16, normalization='none', has_dropout=False, has_residual=False):
super(VNet, self).__init__()
self.has_dropout = has_dropout
convBlock = ConvBlock if not has_residual else ResidualConvBlock
self.block_one = convBlock(1, n_channels, n_filters, normalization=normalization)
self.block_one_dw = DownsamplingConvBlock(n_filters, 2 * n_filters, normalization=normalization)
self.block_two = convBlock(2, n_filters * 2, n_filters * 2, normalization=normalization)
self.block_two_dw = DownsamplingConvBlock(n_filters * 2, n_filters * 4, normalization=normalization)
self.block_three = convBlock(3, n_filters * 4, n_filters * 4, normalization=normalization)
self.block_three_dw = DownsamplingConvBlock(n_filters * 4, n_filters * 8, normalization=normalization)
self.block_four = convBlock(3, n_filters * 8, n_filters * 8, normalization=normalization)
self.block_four_dw = DownsamplingConvBlock(n_filters * 8, n_filters * 16, normalization=normalization)
self.block_five = convBlock(3, n_filters * 16, n_filters * 16, normalization=normalization)
self.block_five_up = UpsamplingDeconvBlock(n_filters * 16, n_filters * 8, normalization=normalization)
self.block_six = convBlock(3, n_filters * 8, n_filters * 8, normalization=normalization)
self.block_six_up = UpsamplingDeconvBlock(n_filters * 8, n_filters * 4, normalization=normalization)
self.block_seven = convBlock(3, n_filters * 4, n_filters * 4, normalization=normalization)
self.block_seven_up = UpsamplingDeconvBlock(n_filters * 4, n_filters * 2, normalization=normalization)
self.block_eight = convBlock(2, n_filters * 2, n_filters * 2, normalization=normalization)
self.block_eight_up = UpsamplingDeconvBlock(n_filters * 2, n_filters, normalization=normalization)
self.block_nine = convBlock(1, n_filters, n_filters, normalization=normalization)
self.out_conv = nn.Conv3d(n_filters, n_classes, 1, padding=0)
self.out_conv2 = nn.Conv3d(n_filters, n_classes, 1, padding=0)
self.tanh = nn.Tanh()
self.dropout = nn.Dropout3d(p=0.5, inplace=False)
# self.__init_weight()
def encoder(self, input):
x1 = self.block_one(input)
x1_dw = self.block_one_dw(x1)
x2 = self.block_two(x1_dw)
x2_dw = self.block_two_dw(x2)
x3 = self.block_three(x2_dw)
x3_dw = self.block_three_dw(x3)
x4 = self.block_four(x3_dw)
x4_dw = self.block_four_dw(x4)
x5 = self.block_five(x4_dw)
# x5 = F.dropout3d(x5, p=0.5, training=True)
if self.has_dropout:
x5 = self.dropout(x5)
res = [x1, x2, x3, x4, x5]
return res
def decoder(self, features):
x1 = features[0]
x2 = features[1]
x3 = features[2]
x4 = features[3]
x5 = features[4]
x5_up = self.block_five_up(x5)
x5_up = x5_up + x4
x6 = self.block_six(x5_up)
x6_up = self.block_six_up(x6)
x6_up = x6_up + x3
x7 = self.block_seven(x6_up)
x7_up = self.block_seven_up(x7)
x7_up = x7_up + x2
x8 = self.block_eight(x7_up)
x8_up = self.block_eight_up(x8)
x8_up = x8_up + x1
x9 = self.block_nine(x8_up)
# x9 = F.dropout3d(x9, p=0.5, training=True)
if self.has_dropout:
x9 = self.dropout(x9)
out = self.out_conv(x9)
out_tanh = self.tanh(out)
out_seg = self.out_conv2(x9)
return out_tanh, out_seg
def forward(self, input, turnoff_drop=False):
if turnoff_drop:
has_dropout = self.has_dropout
self.has_dropout = False
features = self.encoder(input)
out_tanh, out_seg = self.decoder(features)
if turnoff_drop:
self.has_dropout = has_dropout
return out_tanh, out_seg
# def __init_weight(self):
# for m in self.modules():
# if isinstance(m, nn.Conv3d):
# torch.nn.init.kaiming_normal_(m.weight)
# elif isinstance(m, nn.BatchNorm3d):
# m.weight.data.fill_(1)
if __name__ == '__main__':
# compute FLOPS & PARAMETERS
from thop import profile
from thop import clever_format
model = VNet(n_channels=1, n_classes=2)
input = torch.randn(4, 1, 112, 112, 80)
flops, params = profile(model, inputs=(input,))
macs, params = clever_format([flops, params], "%.3f")
print(macs, params)
print("VNet have {} paramerters in total".format(sum(x.numel() for x in model.parameters())))
# import ipdb; ipdb.set_trace() |
the-stack_0_4969 | """Support for IKEA Tradfri covers."""
import logging
from pytradfri.error import PytradfriError
from homeassistant.components.cover import (
CoverDevice,
ATTR_POSITION,
SUPPORT_OPEN,
SUPPORT_CLOSE,
SUPPORT_SET_POSITION,
)
from homeassistant.core import callback
from .const import DOMAIN, KEY_GATEWAY, KEY_API, CONF_GATEWAY_ID
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Load Tradfri covers based on a config entry."""
gateway_id = config_entry.data[CONF_GATEWAY_ID]
api = hass.data[KEY_API][config_entry.entry_id]
gateway = hass.data[KEY_GATEWAY][config_entry.entry_id]
devices_commands = await api(gateway.get_devices())
devices = await api(devices_commands)
covers = [dev for dev in devices if dev.has_blind_control]
if covers:
async_add_entities(TradfriCover(cover, api, gateway_id) for cover in covers)
class TradfriCover(CoverDevice):
"""The platform class required by Home Assistant."""
def __init__(self, cover, api, gateway_id):
"""Initialize a cover."""
self._api = api
self._unique_id = f"{gateway_id}-{cover.id}"
self._cover = None
self._cover_control = None
self._cover_data = None
self._name = None
self._available = True
self._gateway_id = gateway_id
self._refresh(cover)
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_SET_POSITION
@property
def unique_id(self):
"""Return unique ID for cover."""
return self._unique_id
@property
def device_info(self):
"""Return the device info."""
info = self._cover.device_info
return {
"identifiers": {(DOMAIN, self._cover.id)},
"name": self._name,
"manufacturer": info.manufacturer,
"model": info.model_number,
"sw_version": info.firmware_version,
"via_device": (DOMAIN, self._gateway_id),
}
async def async_added_to_hass(self):
"""Start thread when added to hass."""
self._async_start_observe()
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def should_poll(self):
"""No polling needed for tradfri cover."""
return False
@property
def name(self):
"""Return the display name of this cover."""
return self._name
@property
def current_cover_position(self):
"""Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
return 100 - self._cover_data.current_cover_position
async def async_set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
await self._api(self._cover_control.set_state(100 - kwargs[ATTR_POSITION]))
async def async_open_cover(self, **kwargs):
"""Open the cover."""
await self._api(self._cover_control.set_state(0))
async def async_close_cover(self, **kwargs):
"""Close cover."""
await self._api(self._cover_control.set_state(100))
@property
def is_closed(self):
"""Return if the cover is closed or not."""
return self.current_cover_position == 0
@callback
def _async_start_observe(self, exc=None):
"""Start observation of cover."""
if exc:
self._available = False
self.async_schedule_update_ha_state()
_LOGGER.warning("Observation failed for %s", self._name, exc_info=exc)
try:
cmd = self._cover.observe(
callback=self._observe_update,
err_callback=self._async_start_observe,
duration=0,
)
self.hass.async_create_task(self._api(cmd))
except PytradfriError as err:
_LOGGER.warning("Observation failed, trying again", exc_info=err)
self._async_start_observe()
def _refresh(self, cover):
"""Refresh the cover data."""
self._cover = cover
# Caching of BlindControl and cover object
self._available = cover.reachable
self._cover_control = cover.blind_control
self._cover_data = cover.blind_control.blinds[0]
self._name = cover.name
@callback
def _observe_update(self, tradfri_device):
"""Receive new state data for this cover."""
self._refresh(tradfri_device)
self.async_schedule_update_ha_state()
|
the-stack_0_4971 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""
Utilities for downloading and building data.
These can be replaced if your particular file system does not support them.
"""
import time
import datetime
import os
import requests
import shutil
from parlai.core.utils import ProgressLogger
def built(path, version_string=None):
"""Checks if '.built' flag has been set for that task.
If a version_string is provided, this has to match, or the version
is regarded as not built.
"""
if version_string:
fname = os.path.join(path, '.built')
if not os.path.isfile(fname):
return False
else:
with open(fname, 'r') as read:
text = read.read().split('\n')
return (len(text) > 1 and text[1] == version_string)
else:
return os.path.isfile(os.path.join(path, '.built'))
def mark_done(path, version_string=None):
"""Marks the path as done by adding a '.built' file with the current
timestamp plus a version description string if specified.
"""
with open(os.path.join(path, '.built'), 'w') as write:
write.write(str(datetime.datetime.today()))
if version_string:
write.write('\n' + version_string)
def download(url, path, fname, redownload=False):
"""Downloads file using `requests`. If ``redownload`` is set to false, then
will not download tar file again if it is present (default ``True``)."""
outfile = os.path.join(path, fname)
download = not os.path.isfile(outfile) or redownload
retry = 5
exp_backoff = [2 ** r for r in reversed(range(retry))]
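    # With retry = 5 this is [16, 8, 4, 2, 1]; because it is indexed by the
    # decreasing `retry` counter below, the sleep between attempts grows as
    # 1, 2, 4, 8, 16 seconds (exponential backoff).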
logger = ProgressLogger()
while download and retry >= 0:
resume_file = outfile + '.part'
resume = os.path.isfile(resume_file)
if resume:
resume_pos = os.path.getsize(resume_file)
mode = 'ab'
else:
resume_pos = 0
mode = 'wb'
response = None
with requests.Session() as session:
try:
header = {'Range': 'bytes=%d-' % resume_pos,
'Accept-Encoding': 'identity'} if resume else {}
response = session.get(url, stream=True, timeout=5, headers=header)
# negative reply could be 'none' or just missing
if resume and response.headers.get('Accept-Ranges', 'none') == 'none':
resume_pos = 0
mode = 'wb'
CHUNK_SIZE = 32768
total_size = int(response.headers.get('Content-Length', -1))
# server returns remaining size if resuming, so adjust total
total_size += resume_pos
done = resume_pos
with open(resume_file, mode) as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
if total_size > 0:
done += len(chunk)
if total_size < done:
# don't freak out if content-length was too small
total_size = done
logger.log(done, total_size)
break
except requests.exceptions.ConnectionError:
retry -= 1
print(''.join([' '] * 60), end='\r') # TODO Better way to clean progress bar?
if retry >= 0:
print('Connection error, retrying. (%d retries left)' % retry)
time.sleep(exp_backoff[retry])
else:
print('Retried too many times, stopped retrying.')
finally:
if response:
response.close()
if retry < 0:
raise RuntimeWarning('Connection broken too many times. Stopped retrying.')
if download and retry > 0:
logger.log(done, total_size, force=True)
print()
if done < total_size:
raise RuntimeWarning('Received less data than specified in ' +
'Content-Length header for ' + url + '.' +
' There may be a download problem.')
move(resume_file, outfile)
def make_dir(path):
"""Makes the directory and any nonexistent parent directories."""
os.makedirs(path, exist_ok=True)
def move(path1, path2):
"""Renames the given file."""
shutil.move(path1, path2)
def remove_dir(path):
"""Removes the given directory, if it exists."""
shutil.rmtree(path, ignore_errors=True)
def untar(path, fname, deleteTar=True):
"""Unpacks the given archive file to the same directory, then (by default)
deletes the archive file.
"""
print('unpacking ' + fname)
fullpath = os.path.join(path, fname)
shutil.unpack_archive(fullpath, path)
if deleteTar:
os.remove(fullpath)
def cat(file1, file2, outfile, deleteFiles=True):
with open(outfile, 'wb') as wfd:
for f in [file1, file2]:
with open(f,'rb') as fd:
shutil.copyfileobj(fd, wfd, 1024*1024*10)
#10MB per writing chunk to avoid reading big file into memory.
if deleteFiles:
os.remove(file1)
os.remove(file2)
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
def download_from_google_drive(gd_id, destination):
"""Uses the requests package to download a file from Google Drive."""
URL = 'https://docs.google.com/uc?export=download'
with requests.Session() as session:
response = session.get(URL, params={'id': gd_id}, stream=True)
token = _get_confirm_token(response)
if token:
response.close()
params = {'id': gd_id, 'confirm': token}
response = session.get(URL, params=params, stream=True)
CHUNK_SIZE = 32768
with open(destination, 'wb') as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
response.close()
def download_models(opt, fnames, model_folder, version='v1.0', path='aws', use_model_type=False):
"""Download models into the ParlAI model zoo from a url.
fnames -- list of filenames to download
model_folder -- models will be downloaded into models/model_folder/model_type
path -- url for downloading models; defaults to downloading from AWS
use_model_type -- whether models are categorized by type in AWS
"""
model_type = opt.get('model_type', None)
if model_type is not None:
dpath = os.path.join(opt['datapath'], 'models', model_folder, model_type)
else:
dpath = os.path.join(opt['datapath'], 'models', model_folder)
if not built(dpath, version):
for fname in fnames:
print('[building data: ' + dpath + '/' + fname + ']')
if built(dpath):
# An older version exists, so remove these outdated files.
remove_dir(dpath)
make_dir(dpath)
# Download the data.
for fname in fnames:
if path == 'aws':
if use_model_type:
url = 'https://s3.amazonaws.com/fair-data/parlai/_models/' + os.path.join(model_folder, model_type, fname)
else:
url = 'https://s3.amazonaws.com/fair-data/parlai/_models/' + os.path.join(model_folder, fname)
else:
url = path + '/' + fname
download(url, dpath, fname)
if '.tgz' in fname or '.gz' in fname:
untar(dpath, fname)
# Mark the data as built.
mark_done(dpath, version)
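# Typical build pattern (illustrative sketch only; the URL, directory name and
# version string below are placeholders, not real ParlAI resources):
#
#     version = 'v1.0'
#     dpath = os.path.join(opt['datapath'], 'MyTask')
#     if not built(dpath, version):
#         if built(dpath):
#             remove_dir(dpath)            # wipe an outdated build
#         make_dir(dpath)
#         download('https://example.com/mytask.tar.gz', dpath, 'mytask.tar.gz')
#         untar(dpath, 'mytask.tar.gz')    # also deletes the archive by default
#         mark_done(dpath, version)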
|
the-stack_0_4973 | '''
A Keras port of the original Caffe SSD300 network.
Copyright (C) 2018 Pierluigi Ferrari
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import division
import numpy as np
from keras.models import Model
from keras.layers import Input, Lambda, Activation, Conv2D, MaxPooling2D, ZeroPadding2D, Reshape, Concatenate, SeparableConv2D, Dropout, BatchNormalization
from keras.layers import DepthwiseConv2D, AveragePooling2D, Add
import keras.backend as K
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
import sys, os
import light_networks.shufflenetv2_relu6_se_no_shuffle.shufflenetv2 as shufflenet_v2
import light_networks.shufflenetv2_relu6_se_no_shuffle.utils as utils
def ssd_300(image_size,
n_classes,
input_tensor = None,
mode='training',
scale_factor=1,
min_scale=None,
max_scale=None,
scales=None,
aspect_ratios_global=None,
aspect_ratios_per_layer=[[1.0, 2.0, 0.5],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5, 3.0, 1.0/3.0],
[1.0, 2.0, 0.5],
[1.0, 2.0, 0.5]],
two_boxes_for_ar1=True,
steps=[8, 16, 32, 64, 100, 300],
offsets=None,
clip_boxes=False,
variances=[0.1, 0.1, 0.2, 0.2],
coords='centroids',
normalize_coords=True,
subtract_mean=[123, 117, 104],
divide_by_stddev=None,
swap_channels=[2, 1, 0],
confidence_thresh=0.01,
iou_threshold=0.45,
top_k=200,
nms_max_output_size=400,
return_predictor_sizes=False):
'''
Build a Keras model with SSD300 architecture, see references.
    The base network here is a ShuffleNetV2 (in place of the reduced atrous VGG-16 of the original SSD300), extended by the SSD architecture,
as described in the paper.
Most of the arguments that this function takes are only needed for the anchor
box layers. In case you're training the network, the parameters passed here must
be the same as the ones used to set up `SSDBoxEncoder`. In case you're loading
trained weights, the parameters passed here must be the same as the ones used
to produce the trained weights.
Some of these arguments are explained in more detail in the documentation of the
`SSDBoxEncoder` class.
Note: Requires Keras v2.0 or later. Currently works only with the
TensorFlow backend (v1.0 or later).
Arguments:
image_size (tuple): The input image size in the format `(height, width, channels)`.
input_tensor: Tensor with shape (batch, height, width, channels)
n_classes (int): The number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO.
mode (str, optional): One of 'training', 'inference' and 'inference_fast'. In 'training' mode,
the model outputs the raw prediction tensor, while in 'inference' and 'inference_fast' modes,
the raw predictions are decoded into absolute coordinates and filtered via confidence thresholding,
non-maximum suppression, and top-k filtering. The difference between latter two modes is that
'inference' follows the exact procedure of the original Caffe implementation, while
'inference_fast' uses a faster prediction decoding procedure.
min_scale (float, optional): The smallest scaling factor for the size of the anchor boxes as a fraction
of the shorter side of the input images.
max_scale (float, optional): The largest scaling factor for the size of the anchor boxes as a fraction
of the shorter side of the input images. All scaling factors between the smallest and the
largest will be linearly interpolated. Note that the second to last of the linearly interpolated
scaling factors will actually be the scaling factor for the last predictor layer, while the last
scaling factor is used for the second box for aspect ratio 1 in the last predictor layer
if `two_boxes_for_ar1` is `True`.
scales (list, optional): A list of floats containing scaling factors per convolutional predictor layer.
This list must be one element longer than the number of predictor layers. The first `k` elements are the
scaling factors for the `k` predictor layers, while the last element is used for the second box
for aspect ratio 1 in the last predictor layer if `two_boxes_for_ar1` is `True`. This additional
last scaling factor must be passed either way, even if it is not being used. If a list is passed,
this argument overrides `min_scale` and `max_scale`. All scaling factors must be greater than zero.
aspect_ratios_global (list, optional): The list of aspect ratios for which anchor boxes are to be
generated. This list is valid for all prediction layers.
aspect_ratios_per_layer (list, optional): A list containing one aspect ratio list for each prediction layer.
This allows you to set the aspect ratios for each predictor layer individually, which is the case for the
original SSD300 implementation. If a list is passed, it overrides `aspect_ratios_global`.
two_boxes_for_ar1 (bool, optional): Only relevant for aspect ratio lists that contain 1. Will be ignored otherwise.
If `True`, two anchor boxes will be generated for aspect ratio 1. The first will be generated
using the scaling factor for the respective layer, the second one will be generated using
geometric mean of said scaling factor and next bigger scaling factor.
steps (list, optional): `None` or a list with as many elements as there are predictor layers. The elements can be
either ints/floats or tuples of two ints/floats. These numbers represent for each predictor layer how many
pixels apart the anchor box center points should be vertically and horizontally along the spatial grid over
the image. If the list contains ints/floats, then that value will be used for both spatial dimensions.
If the list contains tuples of two ints/floats, then they represent `(step_height, step_width)`.
If no steps are provided, then they will be computed such that the anchor box center points will form an
equidistant grid within the image dimensions.
offsets (list, optional): `None` or a list with as many elements as there are predictor layers. The elements can be
either floats or tuples of two floats. These numbers represent for each predictor layer how many
            pixels from the top and left borders of the image the top-most and left-most anchor box center points should be
as a fraction of `steps`. The last bit is important: The offsets are not absolute pixel values, but fractions
of the step size specified in the `steps` argument. If the list contains floats, then that value will
be used for both spatial dimensions. If the list contains tuples of two floats, then they represent
`(vertical_offset, horizontal_offset)`. If no offsets are provided, then they will default to 0.5 of the step size.
clip_boxes (bool, optional): If `True`, clips the anchor box coordinates to stay within image boundaries.
variances (list, optional): A list of 4 floats >0. The anchor box offset for each coordinate will be divided by
its respective variance value.
coords (str, optional): The box coordinate format to be used internally by the model (i.e. this is not the input format
of the ground truth labels). Can be either 'centroids' for the format `(cx, cy, w, h)` (box center coordinates, width,
and height), 'minmax' for the format `(xmin, xmax, ymin, ymax)`, or 'corners' for the format `(xmin, ymin, xmax, ymax)`.
normalize_coords (bool, optional): Set to `True` if the model is supposed to use relative instead of absolute coordinates,
i.e. if the model predicts box coordinates within [0,1] instead of absolute coordinates.
subtract_mean (array-like, optional): `None` or an array-like object of integers or floating point values
of any shape that is broadcast-compatible with the image shape. The elements of this array will be
subtracted from the image pixel intensity values. For example, pass a list of three integers
to perform per-channel mean normalization for color images.
divide_by_stddev (array-like, optional): `None` or an array-like object of non-zero integers or
floating point values of any shape that is broadcast-compatible with the image shape. The image pixel
intensity values will be divided by the elements of this array. For example, pass a list
of three integers to perform per-channel standard deviation normalization for color images.
swap_channels (list, optional): Either `False` or a list of integers representing the desired order in which the input
image channels should be swapped.
confidence_thresh (float, optional): A float in [0,1), the minimum classification confidence in a specific
positive class in order to be considered for the non-maximum suppression stage for the respective class.
A lower value will result in a larger part of the selection process being done by the non-maximum suppression
stage, while a larger value will result in a larger part of the selection process happening in the confidence
thresholding stage.
iou_threshold (float, optional): A float in [0,1]. All boxes that have a Jaccard similarity of greater than `iou_threshold`
with a locally maximal box will be removed from the set of predictions for a given class, where 'maximal' refers
to the box's confidence score.
top_k (int, optional): The number of highest scoring predictions to be kept for each batch item after the
non-maximum suppression stage.
nms_max_output_size (int, optional): The maximal number of predictions that will be left over after the NMS stage.
return_predictor_sizes (bool, optional): If `True`, this function not only returns the model, but also
a list containing the spatial dimensions of the predictor layers. This isn't strictly necessary since
you can always get their sizes easily via the Keras API, but it's convenient and less error-prone
to get them this way. They are only relevant for training anyway (SSDBoxEncoder needs to know the
spatial dimensions of the predictor layers), for inference you don't need them.
Returns:
model: The Keras SSD300 model.
predictor_sizes (optional): A Numpy array containing the `(height, width)` portion
of the output tensor shape for each convolutional predictor layer. During
training, the generator function needs this in order to transform
the ground truth labels into tensors of identical structure as the
output tensors of the model, which is in turn needed for the cost
function.
References:
https://arxiv.org/abs/1512.02325v5
'''
n_predictor_layers = 6 # The number of predictor conv layers in the network is 6 for the original SSD300.
n_classes += 1 # Account for the background class.
img_height, img_width, img_channels = image_size[0], image_size[1], image_size[2]
############################################################################
# Get a few exceptions out of the way.
############################################################################
if aspect_ratios_global is None and aspect_ratios_per_layer is None:
raise ValueError("`aspect_ratios_global` and `aspect_ratios_per_layer` cannot both be None. At least one needs to be specified.")
if aspect_ratios_per_layer:
if len(aspect_ratios_per_layer) != n_predictor_layers:
raise ValueError("It must be either aspect_ratios_per_layer is None or len(aspect_ratios_per_layer) == {}, but len(aspect_ratios_per_layer) == {}.".format(n_predictor_layers, len(aspect_ratios_per_layer)))
if (min_scale is None or max_scale is None) and scales is None:
raise ValueError("Either `min_scale` and `max_scale` or `scales` need to be specified.")
if scales:
if len(scales) != n_predictor_layers+1:
raise ValueError("It must be either scales is None or len(scales) == {}, but len(scales) == {}.".format(n_predictor_layers+1, len(scales)))
else: # If no explicit list of scaling factors was passed, compute the list of scaling factors from `min_scale` and `max_scale`
scales = np.linspace(min_scale, max_scale, n_predictor_layers+1)
if len(variances) != 4:
raise ValueError("4 variance values must be pased, but {} values were received.".format(len(variances)))
variances = np.array(variances)
if np.any(variances <= 0):
raise ValueError("All variances must be >0, but the variances given are {}".format(variances))
if (not (steps is None)) and (len(steps) != n_predictor_layers):
raise ValueError("You must provide at least one step value per predictor layer.")
if (not (offsets is None)) and (len(offsets) != n_predictor_layers):
raise ValueError("You must provide at least one offset value per predictor layer.")
############################################################################
# Compute the anchor box parameters.
############################################################################
# Set the aspect ratios for each predictor layer. These are only needed for the anchor box layers.
if aspect_ratios_per_layer:
aspect_ratios = aspect_ratios_per_layer
else:
aspect_ratios = [aspect_ratios_global] * n_predictor_layers
# Compute the number of boxes to be predicted per cell for each predictor layer.
# We need this so that we know how many channels the predictor layers need to have.
if aspect_ratios_per_layer:
n_boxes = []
for ar in aspect_ratios_per_layer:
if (1 in ar) & two_boxes_for_ar1:
n_boxes.append(len(ar) + 1) # +1 for the second box for aspect ratio 1
else:
n_boxes.append(len(ar))
else: # If only a global aspect ratio list was passed, then the number of boxes is the same for each predictor layer
if (1 in aspect_ratios_global) & two_boxes_for_ar1:
n_boxes = len(aspect_ratios_global) + 1
else:
n_boxes = len(aspect_ratios_global)
n_boxes = [n_boxes] * n_predictor_layers
if steps is None:
steps = [None] * n_predictor_layers
if offsets is None:
offsets = [None] * n_predictor_layers
############################################################################
# Define functions for the Lambda layers below.
############################################################################
def identity_layer(tensor):
return tensor
def input_mean_normalization(tensor):
return tensor - np.array(subtract_mean)
def input_stddev_normalization(tensor):
return tensor / np.array(divide_by_stddev)
def input_channel_swap(tensor):
if len(swap_channels) == 3:
return K.stack([tensor[...,swap_channels[0]], tensor[...,swap_channels[1]], tensor[...,swap_channels[2]]], axis=-1)
elif len(swap_channels) == 4:
return K.stack([tensor[...,swap_channels[0]], tensor[...,swap_channels[1]], tensor[...,swap_channels[2]], tensor[...,swap_channels[3]]], axis=-1)
def relu6(x):
return K.relu(x, max_value=6)
############################################################################
# Build the network.
############################################################################
if input_tensor != None:
x = Input(tensor=input_tensor, shape=(img_height, img_width, img_channels))
else:
x = Input(shape=(img_height, img_width, img_channels))
# The following identity layer is only needed so that the subsequent lambda layers can be optional.
x1 = Lambda(identity_layer, output_shape=(img_height, img_width, img_channels), name='identity_layer')(x)
if not (divide_by_stddev is None):
x1 = Lambda(input_stddev_normalization, output_shape=(img_height, img_width, img_channels), name='input_stddev_normalization')(x1)
if not (subtract_mean is None):
x1 = Lambda(input_mean_normalization, output_shape=(img_height, img_width, img_channels), name='input_mean_normalization')(x1)
if swap_channels:
x1 = Lambda(input_channel_swap, output_shape=(img_height, img_width, img_channels), name='input_channel_swap')(x1)
num_shuffle_units = list([3,7,3])
out_dim_stage_two = {0.5:48, 1:116, 1.5:176, 2:244}
exp = np.insert(np.arange(len(num_shuffle_units), dtype=np.float32), 0, 0) # [0., 0., 1., 2.]
out_channels_in_stage = 2**exp
out_channels_in_stage *= out_dim_stage_two[scale_factor] # calculate output channels for each stage
out_channels_in_stage[0] = 24 # first stage has always 24 output channels
out_channels_in_stage = out_channels_in_stage.astype(int)
# change last conv
if scale_factor == 2:
k = 2048
else:
k = 1024
# elif scale_factor == 1.5:
# k = 768
# elif scale_factor == 1:
# k = 512
# else:
# k = 256
# Get shufflenet architecture
shufflenetv2 = shufflenet_v2.ShuffleNetV2(bottleneck_ratio=scale_factor,
input_shape=(img_height, img_width, img_channels),
include_top=False)
FeatureExtractor = Model(inputs=shufflenetv2.input, outputs=shufflenetv2.get_layer('stage3/block8/concat_1').output)
# Stage 3 last block unit
shuffle_unit13 = FeatureExtractor(x1)
layer = utils.shuffle_unit(shuffle_unit13, out_channels=out_channels_in_stage[4-1], strides=2,
bottleneck_ratio=scale_factor, stage=4, block=1)
conv18 = Conv2D(k, kernel_size=1, padding='same', strides=1, name='1x1conv5_out', activation=relu6)(layer)
conv19_2 = utils.shuffle_unit(conv18, out_channels=512, strides=2,
bottleneck_ratio=scale_factor, stage=5, block=1)
conv20_2 = utils.shuffle_unit(conv19_2, out_channels=256, strides=2,
bottleneck_ratio=scale_factor, stage=5, block=2)
conv21_2 = utils.shuffle_unit(conv20_2, out_channels=256, strides=2,
bottleneck_ratio=scale_factor, stage=5, block=3)
conv22_2 = utils.shuffle_unit(conv21_2, out_channels=128, strides=2,
bottleneck_ratio=scale_factor, stage=5, block=4)
### Build the convolutional predictor layers on top of the base network
    # We predict `n_classes` confidence values for each box, hence the confidence predictors have depth `n_boxes * n_classes`
# Output shape of the confidence layers: `(batch, height, width, n_boxes * n_classes)`
conv13_mbox_conf = Conv2D(n_boxes[0] * n_classes, (3, 3), padding='same', name='conv13_mbox_conf')(shuffle_unit13)
conv18_mbox_conf = Conv2D(n_boxes[1] * n_classes, (3, 3), padding='same', name='conv18_mbox_conf')(conv18)
conv19_2_mbox_conf = Conv2D(n_boxes[2] * n_classes, (3, 3), padding='same', name='conv19_2_mbox_conf')(conv19_2)
conv20_2_mbox_conf = Conv2D(n_boxes[3] * n_classes, (3, 3), padding='same', name='conv20_2_mbox_conf')(conv20_2)
conv21_2_mbox_conf = Conv2D(n_boxes[4] * n_classes, (3, 3), padding='same', name='conv21_2_mbox_conf')(conv21_2)
conv22_2_mbox_conf = Conv2D(n_boxes[5] * n_classes, (3, 3), padding='same', name='conv22_2_mbox_conf')(conv22_2)
# We predict 4 box coordinates for each box, hence the localization predictors have depth `n_boxes * 4`
# Output shape of the localization layers: `(batch, height, width, n_boxes * 4)`
conv13_mbox_loc = Conv2D(n_boxes[0] * 4, (3, 3), padding='same', name='conv13_mbox_loc')(shuffle_unit13)
conv18_mbox_loc = Conv2D(n_boxes[1] * 4, (3, 3), padding='same', name='conv18_mbox_loc')(conv18)
conv19_2_mbox_loc = Conv2D(n_boxes[2] * 4, (3, 3), padding='same', name='conv19_2_mbox_loc')(conv19_2)
conv20_2_mbox_loc = Conv2D(n_boxes[3] * 4, (3, 3), padding='same', name='conv20_2_mbox_loc')(conv20_2)
conv21_2_mbox_loc = Conv2D(n_boxes[4] * 4, (3, 3), padding='same', name='conv21_2_mbox_loc')(conv21_2)
conv22_2_mbox_loc = Conv2D(n_boxes[5] * 4, (3, 3), padding='same', name='conv22_2_mbox_loc')(conv22_2)
### Generate the anchor boxes (called "priors" in the original Caffe/C++ implementation, so I'll keep their layer names)
# Output shape of anchors: `(batch, height, width, n_boxes, 8)`
conv13_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[0], next_scale=scales[1], aspect_ratios=aspect_ratios[0],
two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[0], this_offsets=offsets[0], clip_boxes=clip_boxes,
variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv13_mbox_priorbox')(conv13_mbox_loc)
conv18_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[1], next_scale=scales[2], aspect_ratios=aspect_ratios[1],
two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[1], this_offsets=offsets[1], clip_boxes=clip_boxes,
variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv18_mbox_priorbox')(conv18_mbox_loc)
conv19_2_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[2], next_scale=scales[3], aspect_ratios=aspect_ratios[2],
two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[2], this_offsets=offsets[2], clip_boxes=clip_boxes,
variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv19_2_mbox_priorbox')(conv19_2_mbox_loc)
conv20_2_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[3], next_scale=scales[4], aspect_ratios=aspect_ratios[3],
two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[3], this_offsets=offsets[3], clip_boxes=clip_boxes,
variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv20_2_mbox_priorbox')(conv20_2_mbox_loc)
conv21_2_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[4], next_scale=scales[5], aspect_ratios=aspect_ratios[4],
two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[4], this_offsets=offsets[4], clip_boxes=clip_boxes,
variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv21_2_mbox_priorbox')(conv21_2_mbox_loc)
conv22_2_mbox_priorbox = AnchorBoxes(img_height, img_width, this_scale=scales[5], next_scale=scales[6], aspect_ratios=aspect_ratios[5],
two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[5], this_offsets=offsets[5], clip_boxes=clip_boxes,
variances=variances, coords=coords, normalize_coords=normalize_coords, name='conv22_2_mbox_priorbox')(conv22_2_mbox_loc)
### Reshape
# Reshape the class predictions, yielding 3D tensors of shape `(batch, height * width * n_boxes, n_classes)`
# We want the classes isolated in the last axis to perform softmax on them
conv13_mbox_conf_reshape = Reshape((-1, n_classes), name='conv13_mbox_conf_reshape')(conv13_mbox_conf)
conv18_mbox_conf_reshape = Reshape((-1, n_classes), name='conv18_mbox_conf_reshape')(conv18_mbox_conf)
conv19_2_mbox_conf_reshape = Reshape((-1, n_classes), name='conv19_2_mbox_conf_reshape')(conv19_2_mbox_conf)
conv20_2_mbox_conf_reshape = Reshape((-1, n_classes), name='conv20_2_mbox_conf_reshape')(conv20_2_mbox_conf)
conv21_2_mbox_conf_reshape = Reshape((-1, n_classes), name='conv21_2_mbox_conf_reshape')(conv21_2_mbox_conf)
conv22_2_mbox_conf_reshape = Reshape((-1, n_classes), name='conv22_2_mbox_conf_reshape')(conv22_2_mbox_conf)
# Reshape the box predictions, yielding 3D tensors of shape `(batch, height * width * n_boxes, 4)`
# We want the four box coordinates isolated in the last axis to compute the smooth L1 loss
conv13_mbox_loc_reshape = Reshape((-1, 4), name='conv13_mbox_loc_reshape')(conv13_mbox_loc)
conv18_mbox_loc_reshape = Reshape((-1, 4), name='conv18_mbox_loc_reshape')(conv18_mbox_loc)
conv19_2_mbox_loc_reshape = Reshape((-1, 4), name='conv19_2_mbox_loc_reshape')(conv19_2_mbox_loc)
conv20_2_mbox_loc_reshape = Reshape((-1, 4), name='conv20_2_mbox_loc_reshape')(conv20_2_mbox_loc)
conv21_2_mbox_loc_reshape = Reshape((-1, 4), name='conv21_2_mbox_loc_reshape')(conv21_2_mbox_loc)
conv22_2_mbox_loc_reshape = Reshape((-1, 4), name='conv22_2_mbox_loc_reshape')(conv22_2_mbox_loc)
# Reshape the anchor box tensors, yielding 3D tensors of shape `(batch, height * width * n_boxes, 8)`
conv13_mbox_priorbox_reshape = Reshape((-1, 8), name='conv13_mbox_priorbox_reshape')(conv13_mbox_priorbox)
conv18_mbox_priorbox_reshape = Reshape((-1, 8), name='conv18_mbox_priorbox_reshape')(conv18_mbox_priorbox)
conv19_2_mbox_priorbox_reshape = Reshape((-1, 8), name='conv19_2_mbox_priorbox_reshape')(conv19_2_mbox_priorbox)
conv20_2_mbox_priorbox_reshape = Reshape((-1, 8), name='conv20_2_mbox_priorbox_reshape')(conv20_2_mbox_priorbox)
conv21_2_mbox_priorbox_reshape = Reshape((-1, 8), name='conv21_2_mbox_priorbox_reshape')(conv21_2_mbox_priorbox)
conv22_2_mbox_priorbox_reshape = Reshape((-1, 8), name='conv22_2_mbox_priorbox_reshape')(conv22_2_mbox_priorbox)
### Concatenate the predictions from the different layers
# Axis 0 (batch) and axis 2 (n_classes or 4, respectively) are identical for all layer predictions,
# so we want to concatenate along axis 1, the number of boxes per layer
# Output shape of `mbox_conf`: (batch, n_boxes_total, n_classes)
mbox_conf = Concatenate(axis=1, name='mbox_conf')([conv13_mbox_conf_reshape,
conv18_mbox_conf_reshape,
conv19_2_mbox_conf_reshape,
conv20_2_mbox_conf_reshape,
conv21_2_mbox_conf_reshape,
conv22_2_mbox_conf_reshape])
# Output shape of `mbox_loc`: (batch, n_boxes_total, 4)
mbox_loc = Concatenate(axis=1, name='mbox_loc')([conv13_mbox_loc_reshape,
conv18_mbox_loc_reshape,
conv19_2_mbox_loc_reshape,
conv20_2_mbox_loc_reshape,
conv21_2_mbox_loc_reshape,
conv22_2_mbox_loc_reshape])
# Output shape of `mbox_priorbox`: (batch, n_boxes_total, 8)
mbox_priorbox = Concatenate(axis=1, name='mbox_priorbox')([conv13_mbox_priorbox_reshape,
conv18_mbox_priorbox_reshape,
conv19_2_mbox_priorbox_reshape,
conv20_2_mbox_priorbox_reshape,
conv21_2_mbox_priorbox_reshape,
conv22_2_mbox_priorbox_reshape])
# The box coordinate predictions will go into the loss function just the way they are,
# but for the class predictions, we'll apply a softmax activation layer first
mbox_conf_softmax = Activation('softmax', name='mbox_conf_softmax')(mbox_conf)
# Concatenate the class and box predictions and the anchors to one large predictions vector
# Output shape of `predictions`: (batch, n_boxes_total, n_classes + 4 + 8)
predictions = Concatenate(axis=2, name='predictions')([mbox_conf_softmax, mbox_loc, mbox_priorbox])
if mode == 'training':
model = Model(inputs=x, outputs=predictions)
elif mode == 'inference':
decoded_predictions = DecodeDetections(confidence_thresh=confidence_thresh,
iou_threshold=iou_threshold,
top_k=top_k,
nms_max_output_size=nms_max_output_size,
coords=coords,
# normalize_coords=normalize_coords, #change this parameter for inference
normalize_coords=False,
img_height=img_height,
img_width=img_width,
name='decoded_predictions')(predictions)
model = Model(inputs=x, outputs=decoded_predictions)
elif mode == 'inference_fast':
decoded_predictions = DecodeDetectionsFast(confidence_thresh=confidence_thresh,
iou_threshold=iou_threshold,
top_k=top_k,
nms_max_output_size=nms_max_output_size,
coords=coords,
normalize_coords=normalize_coords,
img_height=img_height,
img_width=img_width,
name='decoded_predictions')(predictions)
model = Model(inputs=x, outputs=decoded_predictions)
else:
raise ValueError("`mode` must be one of 'training', 'inference' or 'inference_fast', but received '{}'.".format(mode))
return model
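# Construction sketch (illustrative only; the scale list below is a placeholder,
# not a tuned setting for any particular dataset):
#
#     model = ssd_300(image_size=(300, 300, 3),
#                     n_classes=20,
#                     mode='training',
#                     scale_factor=1,
#                     scales=[0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05],
#                     normalize_coords=True)
#
# With the default `aspect_ratios_per_layer` this builds six predictor layers whose
# confidence, localization and prior tensors are concatenated into `predictions`.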
|
the-stack_0_4975 | #! /usr/bin/env python
"""
Copyright 2015-2018 Jacob M. Graving <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
import glob
class ImageReader:
'''Read images in batches.
Parameters
----------
path: str
Glob path to the images.
batch_size: int, default = 1
Batch size for reading frames
framerate: float, default = None
Video framerate for determining timestamps
for each frame. If None, timestamps will
equal frame number.
gray: bool, default = False
If gray, return only the middle channel
'''
def __init__(self, path, batch_size=1, framerate=None, gray=False):
#if isinstance(path, str):
# if os.path.exists(path):
# self.path = path
# else:
# raise ValueError('file or path does not exist')
#else:
# raise TypeError('path must be str')
self.path = path
self.image_paths = glob.glob(path)
self.batch_size = batch_size
self.n_frames = len(self.image_paths)
if framerate:
self.timestep = 1. / framerate
else:
self.timestep = 1.
test_images = cv2.imread(self.image_paths[0])
self.height = test_images.shape[0]
self.width = test_images.shape[1]
self.shape = (self.height, self.width)
self.gray = gray
self.idx = 0
def read(self, idx):
''' Read one frame
Returns
-------
        idx: int
            Index of the frame that was read.
        frame: array
            The image for that frame. When `gray` is set, only the
            middle channel is kept.
'''
frame = cv2.imread(self.image_paths[idx])
if self.gray:
frame = frame[..., 1][..., None]
return idx, frame
def read_batch(self, idx0, idx1):
''' Read in a batch of frames.
Returns
-------
        frames: array
            A batch of frames.
        frames_idx: array
            Frame indices corresponding to the batch.
        timestamps: array
            Timestamps for the batch, derived from the framerate.
'''
frames = []
frames_idx = []
for idx in range(idx0, idx1):
frame = self.read(idx)
frame_idx, frame = frame
frames.append(frame)
frames_idx.append(frame_idx)
if len(frames) == 1:
frames = frames[0][None,]
frames_idx = np.array(frames_idx)
timestamps = frames_idx * self.timestep
elif len(frames) > 1:
frames = np.stack(frames)
frames_idx = np.array(frames_idx)
timestamps = frames_idx * self.timestep
return frames, frames_idx, timestamps
def __len__(self):
return int(np.ceil(self.n_frames / float(self.batch_size)))
def __getitem__(self, index):
if isinstance(index, (int, np.integer)):
idx0 = index * self.batch_size
idx1 = (index + 1) * self.batch_size
else:
raise NotImplementedError
return self.read_batch(idx0, idx1)
def __next__(self):
if self.idx < len(self):
output = self.__getitem__(self.idx)
self.idx += 1
return output
else:
self.idx = 0
            raise StopIteration
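# Minimal usage sketch (illustrative; the glob pattern below is a placeholder):
#
#     reader = ImageReader('frames/*.png', batch_size=8, framerate=30.0, gray=True)
#     frames, frames_idx, timestamps = reader[0]      # first batch
#     frames, frames_idx, timestamps = next(reader)   # or step through batches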
|
the-stack_0_4977 | """
Web Map Tile Service time dimension demonstration
-------------------------------------------------
This example further demonstrates WMTS support within cartopy. Optional
keyword arguments can be supplied to the OGC WMTS 'gettile' method. This
allows for the specification of the 'time' dimension for a WMTS layer
which supports it.
The example shows satellite imagery retrieved from NASA's Global Imagery
Browse Services for 5th Feb 2016. A true color MODIS image is shown on
the left, with the MODIS false color 'snow RGB' shown on the right.
"""
__tags__ = ['Web services']
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
from owslib.wmts import WebMapTileService
import cartopy.crs as ccrs
def main():
# URL of NASA GIBS
URL = 'http://gibs.earthdata.nasa.gov/wmts/epsg4326/best/wmts.cgi'
wmts = WebMapTileService(URL)
# Layers for MODIS true color and snow RGB
layers = ['MODIS_Terra_SurfaceReflectance_Bands143',
'MODIS_Terra_CorrectedReflectance_Bands367']
date_str = '2016-02-05'
# Plot setup
plot_CRS = ccrs.Mercator()
geodetic_CRS = ccrs.Geodetic()
x0, y0 = plot_CRS.transform_point(4.6, 43.1, geodetic_CRS)
x1, y1 = plot_CRS.transform_point(11.0, 47.4, geodetic_CRS)
ysize = 8
xsize = 2 * ysize * (x1 - x0) / (y1 - y0)
fig = plt.figure(figsize=(xsize, ysize), dpi=100)
for layer, offset in zip(layers, [0, 0.5]):
ax = fig.add_axes([offset, 0, 0.5, 1], projection=plot_CRS)
ax.set_xlim((x0, x1))
ax.set_ylim((y0, y1))
ax.add_wmts(wmts, layer, wmts_kwargs={'time': date_str})
txt = ax.text(4.7, 43.2, wmts[layer].title, fontsize=18, color='wheat',
transform=geodetic_CRS)
txt.set_path_effects([PathEffects.withStroke(linewidth=5,
foreground='black')])
plt.show()
if __name__ == '__main__':
main()
|
the-stack_0_4978 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio ([email protected])`
tests.integration.shell.master
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
from __future__ import absolute_import
import os
import signal
import shutil
# Import 3rd-party libs
import yaml
# Import salt libs
import salt.utils
# Import salt test libs
import tests.integration.utils
from tests.support.case import ShellCase
from tests.support.paths import TMP
from tests.support.mixins import ShellCaseCommonTestsMixin
from tests.integration.utils import testprogram
class MasterTest(ShellCase, testprogram.TestProgramCase, ShellCaseCommonTestsMixin):
_call_binary_ = 'salt-master'
def test_issue_7754(self):
old_cwd = os.getcwd()
config_dir = os.path.join(TMP, 'issue-7754')
if not os.path.isdir(config_dir):
os.makedirs(config_dir)
os.chdir(config_dir)
config_file_name = 'master'
pid_path = os.path.join(config_dir, '{0}.pid'.format(config_file_name))
with salt.utils.fopen(self.get_config_file_path(config_file_name), 'r') as fhr:
config = yaml.load(fhr.read())
config['root_dir'] = config_dir
config['log_file'] = 'file:///tmp/log/LOG_LOCAL3'
config['ret_port'] = config['ret_port'] + 10
config['publish_port'] = config['publish_port'] + 10
with salt.utils.fopen(os.path.join(config_dir, config_file_name), 'w') as fhw:
fhw.write(
yaml.dump(config, default_flow_style=False)
)
ret = self.run_script(
self._call_binary_,
'--config-dir {0} --pid-file {1} -l debug'.format(
config_dir,
pid_path
),
timeout=5,
catch_stderr=True,
with_retcode=True
)
# Now kill it if still running
if os.path.exists(pid_path):
with salt.utils.fopen(pid_path) as fhr:
try:
os.kill(int(fhr.read()), signal.SIGKILL)
except OSError:
pass
try:
self.assertFalse(os.path.isdir(os.path.join(config_dir, 'file:')))
finally:
self.chdir(old_cwd)
if os.path.isdir(config_dir):
shutil.rmtree(config_dir)
def test_exit_status_unknown_user(self):
'''
Ensure correct exit status when the master is configured to run as an unknown user.
'''
master = testprogram.TestDaemonSaltMaster(
name='unknown_user',
configs={'master': {'map': {'user': 'some_unknown_user_xyz'}}},
parent_dir=self._test_dir,
)
# Call setup here to ensure config and script exist
master.setup()
stdout, stderr, status = master.run(
args=['-d'],
catch_stderr=True,
with_retcode=True,
)
try:
self.assert_exit_status(
status, 'EX_NOUSER',
message='unknown user not on system',
stdout=stdout,
stderr=tests.integration.utils.decode_byte_list(stderr)
)
finally:
# Although the start-up should fail, call shutdown() to set the
# internal _shutdown flag and avoid the registered atexit calls to
# cause timeout exeptions and respective traceback
master.shutdown()
# pylint: disable=invalid-name
def test_exit_status_unknown_argument(self):
'''
Ensure correct exit status when an unknown argument is passed to salt-master.
'''
master = testprogram.TestDaemonSaltMaster(
name='unknown_argument',
parent_dir=self._test_dir,
)
# Call setup here to ensure config and script exist
master.setup()
stdout, stderr, status = master.run(
args=['-d', '--unknown-argument'],
catch_stderr=True,
with_retcode=True,
)
try:
self.assert_exit_status(
status, 'EX_USAGE',
message='unknown argument',
stdout=stdout,
stderr=tests.integration.utils.decode_byte_list(stderr)
)
finally:
# Although the start-up should fail, call shutdown() to set the
# internal _shutdown flag and avoid the registered atexit calls to
            # cause timeout exceptions and respective traceback
master.shutdown()
def test_exit_status_correct_usage(self):
'''
Ensure correct exit status when salt-master starts correctly.
'''
master = testprogram.TestDaemonSaltMaster(
name='correct_usage',
parent_dir=self._test_dir,
)
# Call setup here to ensure config and script exist
master.setup()
stdout, stderr, status = master.run(
args=['-d'],
catch_stderr=True,
with_retcode=True,
)
try:
self.assert_exit_status(
status, 'EX_OK',
message='correct usage',
stdout=stdout,
stderr=tests.integration.utils.decode_byte_list(stderr)
)
finally:
master.shutdown(wait_for_orphans=3)
# Do the test again to check does master shut down correctly
# **Due to some underlying subprocessing issues with Minion._thread_return, this
# part of the test has been commented out. Once these underlying issues have
# been addressed, this part of the test should be uncommented. Work for this
# issue is being tracked in https://github.com/saltstack/salt-jenkins/issues/378
# stdout, stderr, status = master.run(
# args=['-d'],
# catch_stderr=True,
# with_retcode=True,
# )
# try:
# self.assert_exit_status(
# status, 'EX_OK',
# message='correct usage',
# stdout=stdout,
# stderr=tests.integration.utils.decode_byte_list(stderr)
# )
# finally:
# master.shutdown(wait_for_orphans=3)
|
the-stack_0_4979 | """Create a camera asset."""
import bpy
from openpype.pipeline import legacy_io
from openpype.hosts.blender.api import plugin, lib, ops
from openpype.hosts.blender.api.pipeline import AVALON_INSTANCES
class CreateCamera(plugin.Creator):
"""Polygonal static geometry"""
name = "cameraMain"
label = "Camera"
family = "camera"
icon = "video-camera"
def process(self):
""" Run the creator on Blender main thread"""
mti = ops.MainThreadItem(self._process)
ops.execute_in_main_thread(mti)
def _process(self):
# Get Instance Container or create it if it does not exist
instances = bpy.data.collections.get(AVALON_INSTANCES)
if not instances:
instances = bpy.data.collections.new(name=AVALON_INSTANCES)
bpy.context.scene.collection.children.link(instances)
# Create instance object
asset = self.data["asset"]
subset = self.data["subset"]
name = plugin.asset_name(asset, subset)
camera = bpy.data.cameras.new(subset)
camera_obj = bpy.data.objects.new(subset, camera)
instances.objects.link(camera_obj)
asset_group = bpy.data.objects.new(name=name, object_data=None)
asset_group.empty_display_type = 'SINGLE_ARROW'
instances.objects.link(asset_group)
self.data['task'] = legacy_io.Session.get('AVALON_TASK')
print(f"self.data: {self.data}")
lib.imprint(asset_group, self.data)
if (self.options or {}).get("useSelection"):
bpy.context.view_layer.objects.active = asset_group
selected = lib.get_selection()
for obj in selected:
obj.select_set(True)
selected.append(asset_group)
bpy.ops.object.parent_set(keep_transform=True)
else:
plugin.deselect_all()
camera_obj.select_set(True)
asset_group.select_set(True)
bpy.context.view_layer.objects.active = asset_group
bpy.ops.object.parent_set(keep_transform=True)
return asset_group
|
the-stack_0_4981 | """The tests for the WUnderground platform."""
import unittest
import unittest.mock
from homeassistant.components.sensor import wunderground
from homeassistant.const import TEMP_CELSIUS, LENGTH_INCHES
from tests.common import get_test_home_assistant
VALID_CONFIG_PWS = {
'platform': 'wunderground',
'api_key': 'foo',
'pws_id': 'bar',
'monitored_conditions': [
'weather', 'feelslike_c', 'alerts', 'elevation', 'location'
]
}
VALID_CONFIG = {
'platform': 'wunderground',
'api_key': 'foo',
'monitored_conditions': [
'weather', 'feelslike_c', 'alerts', 'elevation', 'location',
'weather_1d_metric', 'precip_1d_in'
]
}
INVALID_CONFIG = {
'platform': 'wunderground',
'api_key': 'BOB',
'pws_id': 'bar',
'lang': 'foo',
'monitored_conditions': [
'weather', 'feelslike_c', 'alerts'
]
}
FEELS_LIKE = '40'
WEATHER = 'Clear'
HTTPS_ICON_URL = 'https://icons.wxug.com/i/c/k/clear.gif'
ALERT_MESSAGE = 'This is a test alert message'
FORECAST_TEXT = 'Mostly Cloudy. Fog overnight.'
PRECIP_IN = 0.03
def mocked_requests_get(*args, **kwargs):
"""Mock requests.get invocations."""
class MockResponse:
"""Class to represent a mocked response."""
def __init__(self, json_data, status_code):
"""Initialize the mock response class."""
self.json_data = json_data
self.status_code = status_code
def json(self):
"""Return the json of the response."""
return self.json_data
if str(args[0]).startswith('http://api.wunderground.com/api/foo/'):
return MockResponse({
"response": {
"version": "0.1",
"termsofService":
"http://www.wunderground.com/weather/api/d/terms.html",
"features": {
"conditions": 1,
"alerts": 1,
"forecast": 1,
}
}, "current_observation": {
"image": {
"url":
'http://icons.wxug.com/graphics/wu2/logo_130x80.png',
"title": "Weather Underground",
"link": "http://www.wunderground.com"
},
"feelslike_c": FEELS_LIKE,
"weather": WEATHER,
"icon_url": 'http://icons.wxug.com/i/c/k/clear.gif',
"display_location": {
"city": "Holly Springs",
"country": "US",
"full": "Holly Springs, NC"
},
"observation_location": {
"elevation": "413 ft",
"full": "Twin Lake, Holly Springs, North Carolina"
},
}, "alerts": [
{
"type": 'FLO',
"description": "Areal Flood Warning",
"date": "9:36 PM CDT on September 22, 2016",
"expires": "10:00 AM CDT on September 23, 2016",
"message": ALERT_MESSAGE,
},
], "forecast": {
"txt_forecast": {
"date": "22:35 CEST",
"forecastday": [
{
"period": 0,
"icon_url":
"http://icons.wxug.com/i/c/k/clear.gif",
"title": "Tuesday",
"fcttext": FORECAST_TEXT,
"fcttext_metric": FORECAST_TEXT,
"pop": "0"
},
],
}, "simpleforecast": {
"forecastday": [
{
"date": {
"pretty": "19:00 CEST 4. Duben 2017",
},
"period": 1,
"high": {
"fahrenheit": "56",
"celsius": "13",
},
"low": {
"fahrenheit": "43",
"celsius": "6",
},
"conditions": "Možnost deště",
"icon_url":
"http://icons.wxug.com/i/c/k/chancerain.gif",
"qpf_allday": {
"in": PRECIP_IN,
"mm": 1,
},
"maxwind": {
"mph": 0,
"kph": 0,
"dir": "",
"degrees": 0,
},
"avewind": {
"mph": 0,
"kph": 0,
"dir": "severní",
"degrees": 0
}
},
],
},
},
}, 200)
else:
return MockResponse({
"response": {
"version": "0.1",
"termsofService":
"http://www.wunderground.com/weather/api/d/terms.html",
"features": {},
"error": {
"type": "keynotfound",
"description": "this key does not exist"
}
}
}, 200)
class TestWundergroundSetup(unittest.TestCase):
"""Test the WUnderground platform."""
# pylint: disable=invalid-name
DEVICES = []
def add_devices(self, devices):
"""Mock add devices."""
for device in devices:
self.DEVICES.append(device)
def setUp(self):
"""Initialize values for this testcase class."""
self.DEVICES = []
self.hass = get_test_home_assistant()
self.key = 'foo'
self.config = VALID_CONFIG_PWS
self.lat = 37.8267
self.lon = -122.423
self.hass.config.latitude = self.lat
self.hass.config.longitude = self.lon
def tearDown(self): # pylint: disable=invalid-name
"""Stop everything that was started."""
self.hass.stop()
@unittest.mock.patch('requests.get', side_effect=mocked_requests_get)
def test_setup(self, req_mock):
"""Test that the component is loaded if passed in PWS Id."""
self.assertTrue(
wunderground.setup_platform(self.hass, VALID_CONFIG_PWS,
self.add_devices, None))
self.assertTrue(
wunderground.setup_platform(self.hass, VALID_CONFIG,
self.add_devices, None))
self.assertTrue(
wunderground.setup_platform(self.hass, INVALID_CONFIG,
self.add_devices, None))
@unittest.mock.patch('requests.get', side_effect=mocked_requests_get)
def test_sensor(self, req_mock):
"""Test the WUnderground sensor class and methods."""
wunderground.setup_platform(self.hass, VALID_CONFIG, self.add_devices,
None)
for device in self.DEVICES:
device.update()
self.assertTrue(str(device.name).startswith('PWS_'))
if device.name == 'PWS_weather':
self.assertEqual(HTTPS_ICON_URL, device.entity_picture)
self.assertEqual(WEATHER, device.state)
self.assertIsNone(device.unit_of_measurement)
elif device.name == 'PWS_alerts':
self.assertEqual(1, device.state)
self.assertEqual(ALERT_MESSAGE,
device.device_state_attributes['Message'])
self.assertIsNone(device.entity_picture)
elif device.name == 'PWS_location':
self.assertEqual('Holly Springs, NC', device.state)
elif device.name == 'PWS_elevation':
self.assertEqual('413', device.state)
elif device.name == 'PWS_feelslike_c':
self.assertIsNone(device.entity_picture)
self.assertEqual(FEELS_LIKE, device.state)
self.assertEqual(TEMP_CELSIUS, device.unit_of_measurement)
elif device.name == 'PWS_weather_1d_metric':
self.assertEqual(FORECAST_TEXT, device.state)
else:
self.assertEqual(device.name, 'PWS_precip_1d_in')
self.assertEqual(PRECIP_IN, device.state)
self.assertEqual(LENGTH_INCHES, device.unit_of_measurement)
|
the-stack_0_4983 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
from six.moves.urllib import request
import glob
import os
import platform # Mac or Linux special for uncompress command
import errno
import sys
import numpy as np
import codecs
import re
import subprocess
import sys
import tarfile
import matplotlib.pyplot as plt
if sys.version_info[0] == 2:
import cPickle as pickle
else:
import pickle
class xerion(object):
"""Managing xerion datafiles.
Read datafiles ending with '-nsyl.ex' and '-syl.ex' from `xerion_prefix/datadir`, and
    save them to `pkl_dir` as pickle files.
Usage:
```python
print(xerion().Orthography) # for input data format
print(xerion().Phonology) # for output data format
X = xerion().input
y = xerion().output
```
The original datafiles can be obtained from http://www.cnbc.cmu.edu/~plaut/xerion/
"""
def __init__(self,
data='SM-nsyl',
datadir='./data/',
pkl_dir='./data/',
remake=False, readall=False, saveall=False,
forceDownload=False):
self.module_path = os.path.dirname(__file__)
self.xerion_prefix = 'nets/share/'
self.datadir = datadir # + self.xerion_prefix
self.pkl_dir = pkl_dir
self.url_base = 'http://www.cnbc.cmu.edu/~plaut/xerion/'
self.url_file = 'xerion-3.1-nets-share.tar.gz'
self.origfile_size = 1026691
self.syl_files = ['SM-syl.ex', 'besnerNW-syl.ex', 'bodies-syl.ex', 'bodiesNW-syl.ex',
'friedmanNW-syl.ex', 'glushkoNW-syl.ex', 'graphemes-syl.ex',
'jared1-syl.ex', 'jared2-syl.ex', 'megaNW-syl.ex',
'pureNW-syl.ex', 'surface-syl.ex', 'taraban-syl.ex',
'tarabanALL-syl.ex', 'tarabanEvN-syl.ex', 'tarabanNRE-syl.ex',
'vcoltheartNW-syl.ex']
self.nsyl_files = ['SM-nsyl.ex', 'besnerNW-nsyl.ex', 'glushkoNW-nsyl.ex',
'graphemes-nsyl.ex', 'jared1-nsyl.ex', 'jared2-nsyl.ex',
'markPH-nsyl.ex', 'megaNW-nsyl.ex', 'surface-nsyl.ex',
'taraban-nsyl.ex', 'tarabanALL-nsyl.ex', 'tarabanEvN-nsyl.ex',
'tarabanNRE-nsyl.ex']
self.datafilenames = [ *self.nsyl_files, *self.syl_files]
self._tags = ('#', 'seq', 'grapheme', 'phoneme', 'freq', 'tag', 'input', 'output')
self.Orthography={'onset':['Y', 'S', 'P', 'T', 'K', 'Q', 'C', 'B', 'D', 'G',
'F', 'V', 'J', 'Z', 'L', 'M', 'N', 'R', 'W', 'H',
'CH', 'GH', 'GN', 'PH', 'PS', 'RH', 'SH', 'TH', 'TS', 'WH'],
'vowel':['E', 'I', 'O', 'U', 'A', 'Y', 'AI', 'AU', 'AW', 'AY',
'EA', 'EE', 'EI', 'EU', 'EW', 'EY', 'IE', 'OA', 'OE', 'OI',
'OO', 'OU', 'OW', 'OY', 'UE', 'UI', 'UY'],
'coda':['H', 'R', 'L', 'M', 'N', 'B', 'D', 'G', 'C', 'X',
'F', 'V', '∫', 'S', 'Z', 'P', 'T', 'K', 'Q', 'BB',
'CH', 'CK', 'DD', 'DG', 'FF', 'GG', 'GH', 'GN', 'KS', 'LL',
'NG', 'NN', 'PH', 'PP', 'PS', 'RR', 'SH', 'SL', 'SS', 'TCH',
'TH', 'TS', 'TT', 'ZZ', 'U', 'E', 'ES', 'ED']}
self.Phonology={'onset':['s', 'S', 'C', 'z', 'Z', 'j', 'f', 'v', 'T', 'D',
'p', 'b', 't', 'd', 'k', 'g', 'm', 'n', 'h', 'I',
'r', 'w', 'y'],
'vowel': ['a', 'e', 'i', 'o', 'u', '@', '^', 'A', 'E', 'I',
'O', 'U', 'W', 'Y'],
'coda':['r', 'I', 'm', 'n', 'N', 'b', 'g', 'd', 'ps', 'ks',
'ts', 's', 'z', 'f', 'v', 'p', 'k', 't', 'S', 'Z',
'T', 'D', 'C', 'j']}
self.bibtex=[
'@article{SM89,',
'title={A Distributed, Developmental Model of Word Recognition and Naming},',
'auhor={Mark S. Seidenberg and James L. McClelland},',
'year={1989},',
'journal={psychological review},',
'volume={96},',
'number={4},',
'pages={523-568}}'
'}',
'@article{PMSP96,',
'title={Understanding Normal and Impaired Word Reading:',
' Computational Principles in Quasi-Regular Domains},',
'author={David C. Plaut and James L. McClelland and Mark S. Seidenberg and Karalyn Patterson},',
'year={1996},',
'volume={103},',
'number={1},',
'pages={56-115},',
'journal={psychological review}',
'}']
self.dbs = {}
if remake:
self.dbs = self.make_all()
saveall = True
if saveall == True:
self.save_all()
readall = True
if readall:
self.dbs = self.read_all()
self.dataname = data
pkl_file = self.pkl_dir + self.dataname + '.pkl'
self.db = self.load_pickle(filename=pkl_file)
self.input = self.db[self._tags.index('input')]
self.output = self.db[self._tags.index('output')]
self.freq = self.db[self._tags.index('freq')]
self.graph = self.db[self._tags.index('grapheme')]
self.phone = self.db[self._tags.index('phoneme')]
self.tag = self.db[self._tags.index('tag')]
self.dbs[self.dataname] = self.db
#def read_a_xerion_file(filename='SM-nsyl.pkl'):
# pass
    def read_all(self):
        """Read every pickled data file listed in datafilenames."""
dbs = {}
for dname in self.datafilenames:
dname_ = re.sub('.ex', '', dname)
filename = self.pkl_dir + dname_ + '.pkl'
if not os.path.isfile(filename):
raise ValueError('{0} could not found'.format(filename))
dbs[dname_] = self.load_pickle(filename=filename)
return dbs
    def save_all(self):
        """Save each dataset in self.dbs as a pickle file under pkl_dir."""
dirname = self.pkl_dir
if not os.path.exists(self.pkl_dir):
os.makedirs(self.pkl_dir)
if not os.path.exists(self.pkl_dir):
raise OSError('{} was not found'.format(self.pkl_dir))
for db in self.dbs:
dest_filename = self.pkl_dir + re.sub('.ex', '.pkl', db)
try:
with codecs.open(dest_filename, 'wb') as f:
pickle.dump(self.dbs[db], f)
except:
print('Error in processing {0}'.format(dest_filename))
def load_pickle(self, filename='SM-nsyl.pk'):
if not os.path.isfile(filename):
raise ValueError('Could not find {}'.format(filename))
with open(filename, 'rb') as f:
db = pickle.load(f)
return db
def make_all(self):
dbs = {}
for dname in self.datafilenames:
filename = self.datadir + self.xerion_prefix + dname
if not os.path.isfile(filename):
print('{0} could not found'.format(filename))
downfilename, h = self.download()
#print('downloaded file: {0}, {1}'.format(downfilename, h))
self.extractall()
inp, out, graph, phone, freq, tags = self.read_xerion(filename=filename)
dbs[dname] = [dname, '#', graph, phone, freq, tags, inp, out]
return dbs
def read_xerion(self, filename='../data/nets/share/SM-nsyl.ex'):
with codecs.open(filename,'r') as f:
lines = f.readlines()
inp_flag = False
inpbuff, outbuff, tags = {}, {}, {}
graph, phone, freq = {}, {}, {}
for i, line in enumerate(lines[1:]):
if len(line) == 0:
continue
a = line.strip().split(' ')
if line[0] == '#':
if a[0] == '#WARNING:':
continue
try:
seq = int(a[self._tags.index('seq')])
except:
continue
_graph = a[self._tags.index('grapheme')]
_phone = a[self._tags.index('phoneme')]
_freq = a[self._tags.index('freq')]
_tag = a[self._tags.index('tag')]
inp_flag = True
if not seq in inpbuff:
inpbuff[seq] = list()
outbuff[seq] = list()
graph[seq] = _graph
phone[seq] = _phone
freq[seq] = _freq
tags[seq] = _tag
continue
elif line[0] == ',':
inp_flag = False
continue
elif line[0] == ';':
inp_flag = True
continue
if inp_flag:
#print('hoge seq=', seq)
for x in a:
try:
inpbuff[seq].append(int(x))
except:
pass #print(x, end=', ')
else:
for x in a:
try:
outbuff[seq].append(int(x))
except:
pass
continue
ret_in = np.array([inpbuff[seq] for seq in inpbuff], dtype=np.int16)
ret_out = np.array([outbuff[seq] for seq in outbuff], dtype=np.int16)
ret_graph = np.array([graph[seq] for seq in graph], dtype=np.unicode_)
ret_phone = np.array([phone[seq] for seq in phone], dtype=np.unicode_)
ret_freq = np.array([freq[seq] for seq in freq], dtype=np.float32)
ret_tag = np.array([tags[seq] for seq in tags], dtype=np.unicode_)
return ret_in, ret_out, ret_graph, ret_phone, ret_freq, ret_tag
    def download(self, forcedownload=False, destdir=None):
if destdir is None:
destdir = self.datadir
if not os.path.exists(destdir):
os.mkdir(destdir)
dest_filename = destdir + self.url_file
if os.path.exists(dest_filename):
statinfo = os.stat(dest_filename)
if statinfo.st_size != self.origfile_size:
                forcedownload = True
print("File {} not expected size, forcing download".format(dest_filename))
        else:
            print("File '{}' already downloaded.".format(dest_filename))
if forcedownload == True or not os.path.exists(dest_filename):
print('Attempting to download: {}'.format(dest_filename))
print('From {}'.format(self.url_base + self.url_file))
fname, h = request.urlretrieve(self.url_base+self.url_file, dest_filename)
print("Downloaded '{}' successfully".format(dest_filename))
return fname, h
else:
return dest_filename, None
    def extractall(self, gzfile=None):
if gzfile is None:
gzfile, _ = self.download()
with tarfile.open(name=gzfile, mode='r:gz') as tar:
tar.extractall(path=self.datadir)
if platform.system() == 'Darwin':
cmd = '/usr/bin/uncompress'
args = self.datadir + self.xerion_prefix + '*.ex.Z'
files = glob.glob(args)
for file in sorted(files):
print(cmd, file)
try:
subprocess.Popen([cmd, file])
except:
print('cmd {0} {1} failed'.format(cmd, file))
sys.exit()
print('#extractall() completed. command:{}'.format(cmd))
else:
            print('You must be on Linux or Windows; please uncompress the .ex.Z files manually')
sys.exit()
self.pkl_dir = self.datadir + self.xerion_prefix
def note(self):
print('\n\n# xerion() is the data management tool for PMSP96')
print('# The original data will be found at:',
self.url_base + self.url_file)
print('# The data format is as following:')
for l in [self.Orthography, self.Phonology]:
for x in l:
print(x, l[x])
print('\n# The bibtex format of the original papers:')
for l in self.bibtex:
print(l)
@staticmethod
def usage():
print('```python')
print('import numpy')
print('import wbai_aphasia as handson')
print()
print('from sklearn.neural_network import MLPRegressor')
print()
print('data = handson.xerion()')
print('X = np.asarray(data.input, dtype=np.float32)')
print('y = np.asarray(data.output, dtype=np.float32)')
print()
print('model = MLPRegressor()')
print('model.fit(X,y)')
print('model.score(X,y)')
print('```')
def descr(self):
fdescr_name = os.path.join(self.module_path, 'descr', 'xerion.md')
print('self.module_path={}'.format(self.module_path))
print('fdescr_name={}'.format(fdescr_name))
with codecs.open(fdescr_name, 'r') as markdownfile:
fdescr = markdownfile.read()
print(fdescr)
|
the-stack_0_4986 | # -*- coding: UTF-8 -*-
# Copyright (c) 2018, Xycart
# License: MIT License
from __future__ import unicode_literals
import sys, os # standard modules
from dxfGenerator import dxfGenerator
import ConvertPingYin
SCRIPTS_DIR = os.path.dirname(os.path.abspath(__file__))
ATD_DIR = os.path.dirname(SCRIPTS_DIR)
CADINFOS_DIR = ATD_DIR + '/CadInfos/Allegro17.0'
LIBRARYS_DIR = ATD_DIR + '/Library/Allegro'
BATCH_LATEST_CMD = \
"""call @WORKDIR@/CustomVectorTextMechanicalSymbol.bat
"""
def SaveFile(string, fname):
with open(fname, "w") as textFile:
textFile.write(string)
def CreateFile(string, fname, overwrite=True):
if overwrite:
SaveFile(string, fname)
else:
if not os.path.exists(fname):
SaveFile(string, fname)
def scrGenerator(dxffile, symbolname):
scr_srcdir = CADINFOS_DIR + '/CustomVectorTextMechanicalSymbol.scr'
scr_dstdir = LIBRARYS_DIR + '/atd-' + symbolname + '/CustomVectorTextMechanicalSymbol.scr'
origfn = scr_srcdir
targfn = scr_dstdir
with open(origfn, 'r') as ifile, open(targfn, 'w') as ofile:
content = ifile.read().replace('@SYMBOL_NAME@', symbolname[:32]).replace('@IMPORT_DXFFILE@', dxffile)
ofile.write(content)
def batGenerator(scrname, symbolname):
scr_srcdir = CADINFOS_DIR + '/CustomVectorTextMechanicalSymbol.bat'
scr_dstdir = LIBRARYS_DIR + '/atd-' + symbolname + '/CustomVectorTextMechanicalSymbol.bat'
origfn = scr_srcdir
targfn = scr_dstdir
with open(origfn, 'r') as ifile, open(targfn, 'w') as ofile:
content = ifile.read().replace('@SYMBOL_NAME@', symbolname).replace('@SCR_NAME@', scrname)
ofile.write(content)
def draGenerator(dxffile, symbolname):
scrGenerator(dxffile, symbolname)
batGenerator('CustomVectorTextMechanicalSymbol', symbolname)
workdir = LIBRARYS_DIR + '/atd-' + symbolname
CreateFile(BATCH_LATEST_CMD.replace('@WORKDIR@', workdir),
LIBRARYS_DIR + '/RunLatestGenerator.bat',
overwrite=True)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
# Create Arguments
parser.add_argument('--text', '-t', dest='text',
help='The vector text you want')
args = parser.parse_args()
if args.text is None:
text = "博大精深"
else:
text = args.text.decode('gb2312')
#print sys.getdefaultencoding()
#print text
textpy = ConvertPingYin.CConvert().convert(text) # .replace('-','')
symbolname = textpy
dxf_dstdir = LIBRARYS_DIR + '/atd-' + symbolname
dxffn = dxf_dstdir + "/atd-" + symbolname + ".dxf"
expdxffn = dxffn.split('.')[0] + ".exp.dxf"
dxffile = expdxffn
dxferror = dxfGenerator(text)
if dxferror:
print ("#### Error on dxfGenerator(%s)" % text)
draGenerator(dxffile, symbolname)
|
the-stack_0_4989 | from pathlib import Path
from typing import List, Optional
from ipywidgets import HBox, SelectMultiple
from .core import JSONType
from .mixins import TextTrainerMixin, sample_from_iterable
from .widgets import Solver, GPUIndex, Engine
alpha = "abcdefghijklmnopqrstuvwxyz0123456789,;.!?:'\"/\\_@#$%^&*~`+-=<>()[]{}"
class Text(TextTrainerMixin):
def __init__(
self,
sname: str,
*,
mllib: str = "caffe",
engine: Engine = "CUDNN_SINGLE_HANDLE",
training_repo: Path,
testing_repo: Optional[Path] = None,
description: str = "Text service",
model_repo: Path = None,
host: str = "localhost",
port: int = 1234,
path: str = "",
gpuid: GPUIndex = 0,
# -- specific
regression: bool = False,
db: bool = True,
nclasses: int = -1,
ignore_label: Optional[int] = -1,
layers: List[str] = [],
dropout: float = .2,
iterations: int = 25000,
test_interval: int = 1000,
snapshot_interval: int = 1000,
base_lr: float = 0.001,
lr_policy: str = "fixed",
stepvalue: List[int] = [],
warmup_lr: float = 0.0001,
warmup_iter: int = 0,
resume: bool = False,
solver_type: Solver = "SGD",
sam : bool = False,
swa : bool = False,
lookahead : bool = False,
lookahead_steps : int = 6,
lookahead_alpha : float = 0.5,
rectified : bool = False,
decoupled_wd_periods : int = 4,
decoupled_wd_mult : float = 2.0,
lr_dropout : float = 1.0,
batch_size: int = 128,
test_batch_size: int = 32,
shuffle: bool = True,
tsplit: float = 0.2,
min_count: int = 10,
min_word_length: int = 5,
count: bool = False,
tfidf: bool = False,
sentences: bool = False,
characters: bool = False,
sequence: int = -1,
read_forward: bool = True,
alphabet: str = alpha,
sparse: bool = False,
template: Optional[str] = None,
activation: str = "relu",
embedding: bool = False,
objective: str = '',
class_weights: List[float] = [],
scale_pos_weight: float = 1.0,
autoencoder: bool = False,
lregression: bool = False,
finetune: bool = False,
weights: str = "",
iter_size: int = 1,
target_repository: str = "",
##-- new txt input conns stuff for bert and gpt2
ordered_words: bool = True,
wordpiece_tokens: bool = True,
punctuation_tokens: bool = True,
lower_case: bool =False,
word_start: str = "Ġ",
suffix_start: str = "",
##--end bert, gpt2 new stuff
embedding_size: int = 768,
freeze_traced: bool = False,
**kwargs
) -> None:
super().__init__(sname, locals())
self.train_labels = SelectMultiple(
options=[], value=[], description="Training labels", disabled=False
)
self.test_labels = SelectMultiple(
options=[], value=[], description="Testing labels", disabled=False
)
# self.testing_repo.observe(self.update_label_list, names="value")
self.training_repo.observe( # type: ignore
self.update_label_list, names="value"
)
self.train_labels.observe(self.update_train_file_list, names="value")
self.test_labels.observe(self.update_test_file_list, names="value")
self.file_list.observe(self.display_text, names="value")
self.update_label_list(())
self._img_explorer.children = [
HBox([HBox([self.train_labels, self.test_labels])]),
self.file_list,
self.output,
]
if self.characters: # type: ignore
self.db.value = True # type: ignore
if self.mllib.value == "torch":
self.db.value = False
def display_text(self, args):
self.output.clear_output()
with self.output:
for path in args["new"]:
with open(path, "r", encoding="utf-8", errors="ignore") as fh:
for i, x in enumerate(fh.readlines()):
if i == 20:
break
print(x.strip())
def update_train_file_list(self, *args):
with self.output:
if len(self.train_labels.value) == 0:
return
directory = (
Path(self.training_repo.value) / self.train_labels.value[0]
)
self.file_list.options = [
fh.as_posix()
for fh in sample_from_iterable(directory.glob("**/*"), 10)
]
self.test_labels.value = []
def update_test_file_list(self, *args):
with self.output:
if len(self.test_labels.value) == 0:
return
directory = (
Path(self.testing_repo.value) / self.test_labels.value[0]
)
self.file_list.options = [
fh.as_posix()
for fh in sample_from_iterable(directory.glob("**/*"), 10)
]
self.train_labels.value = []
def _create_parameters_input(self) -> JSONType:
return {
"connector": "txt",
"characters": self.characters.value,
"sequence": self.sequence.value,
"read_forward": self.read_forward.value,
"alphabet": self.alphabet.value,
"sparse": self.sparse.value,
"embedding": self.embedding.value,
"ordered_words": self.ordered_words.value,
"wordpiece_tokens": self.wordpiece_tokens.value,
"punctuation_tokens": self.punctuation_tokens.value,
"lower_case": self.lower_case.value,
"word_start": self.word_start.value,
"suffix_start": self.suffix_start.value,
}
def _create_parameters_mllib(self) -> JSONType:
dic = super()._create_parameters_mllib()
dic["embedding_size"] = self.embedding_size.value
dic["freeze_traced"] = self.freeze_traced.value
return dic
def _train_parameters_input(self) -> JSONType:
return {
"alphabet": self.alphabet.value,
"characters": self.characters.value,
"count": self.count.value,
"db": self.db.value,
"embedding": self.embedding.value,
"min_count": self.min_count.value,
"min_word_length": self.min_word_length.value,
"read_forward": self.read_forward.value,
"sentences": self.sentences.value,
"sequence": self.sequence.value,
"shuffle": self.shuffle.value,
"test_split": self.tsplit.value,
"tfidf": self.tfidf.value,
}
|
the-stack_0_4990 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from unittest import mock
import numpy as np
from ax.core.metric import Metric
from ax.core.objective import MultiObjective, Objective, ScalarizedObjective
from ax.core.observation import Observation, ObservationData, ObservationFeatures
from ax.core.optimization_config import OptimizationConfig
from ax.core.outcome_constraint import ComparisonOp, OutcomeConstraint
from ax.core.parameter import ChoiceParameter, ParameterType, RangeParameter
from ax.core.parameter_constraint import OrderConstraint, SumConstraint
from ax.core.search_space import SearchSpace
from ax.modelbridge.modelbridge_utils import get_bounds_and_task
from ax.modelbridge.numpy import NumpyModelBridge
from ax.models.numpy_base import NumpyModel
from ax.utils.common.testutils import TestCase
class NumpyModelBridgeTest(TestCase):
def setUp(self):
x = RangeParameter("x", ParameterType.FLOAT, lower=0, upper=1)
y = RangeParameter(
"y", ParameterType.FLOAT, lower=1, upper=2, is_fidelity=True, target_value=2
)
z = RangeParameter("z", ParameterType.FLOAT, lower=0, upper=5)
self.parameters = [x, y, z]
parameter_constraints = [
OrderConstraint(x, y),
SumConstraint([x, z], False, 3.5),
]
self.search_space = SearchSpace(self.parameters, parameter_constraints)
self.observation_features = [
ObservationFeatures(parameters={"x": 0.2, "y": 1.2, "z": 3}),
ObservationFeatures(parameters={"x": 0.4, "y": 1.4, "z": 3}),
ObservationFeatures(parameters={"x": 0.6, "y": 1.6, "z": 3}),
]
self.observation_data = [
ObservationData(
metric_names=["a", "b"],
means=np.array([1.0, -1.0]),
covariance=np.array([[1.0, 4.0], [4.0, 6.0]]),
),
ObservationData(
metric_names=["a", "b"],
means=np.array([2.0, -2.0]),
covariance=np.array([[2.0, 5.0], [5.0, 7.0]]),
),
ObservationData(
metric_names=["a"], means=np.array([3.0]), covariance=np.array([[3.0]])
),
]
self.observations = [
Observation(
features=self.observation_features[i],
data=self.observation_data[i],
arm_name=str(i),
)
for i in range(3)
]
self.pending_observations = {
"b": [ObservationFeatures(parameters={"x": 0.6, "y": 1.6, "z": 3})]
}
self.model_gen_options = {"option": "yes"}
@mock.patch("ax.modelbridge.numpy.NumpyModelBridge.__init__", return_value=None)
def testFitAndUpdate(self, mock_init):
sq_feat = ObservationFeatures({})
sq_data = self.observation_data[2]
sq_obs = Observation(
features=ObservationFeatures({}),
data=self.observation_data[2],
arm_name="status_quo",
)
ma = NumpyModelBridge()
ma._training_data = self.observations + [sq_obs]
model = mock.create_autospec(NumpyModel, instance=True)
# No out of design points allowed in direct calls to fit.
with self.assertRaises(ValueError):
ma._fit(
model,
self.search_space,
self.observation_features + [sq_feat],
self.observation_data + [sq_data],
)
ma._fit(
model, self.search_space, self.observation_features, self.observation_data
)
self.assertEqual(ma.parameters, ["x", "y", "z"])
self.assertEqual(sorted(ma.outcomes), ["a", "b"])
Xs = {
"a": np.array([[0.2, 1.2, 3.0], [0.4, 1.4, 3.0], [0.6, 1.6, 3.0]]),
"b": np.array([[0.2, 1.2, 3.0], [0.4, 1.4, 3.0]]),
}
Ys = {"a": np.array([[1.0], [2.0], [3.0]]), "b": np.array([[-1.0], [-2.0]])}
Yvars = {"a": np.array([[1.0], [2.0], [3.0]]), "b": np.array([[6.0], [7.0]])}
bounds = [(0.0, 1.0), (1.0, 2.0), (0.0, 5.0)]
model_fit_args = model.fit.mock_calls[0][2]
for i, x in enumerate(model_fit_args["Xs"]):
self.assertTrue(np.array_equal(x, Xs[ma.outcomes[i]]))
for i, y in enumerate(model_fit_args["Ys"]):
self.assertTrue(np.array_equal(y, Ys[ma.outcomes[i]]))
for i, v in enumerate(model_fit_args["Yvars"]):
self.assertTrue(np.array_equal(v, Yvars[ma.outcomes[i]]))
self.assertEqual(model_fit_args["bounds"], bounds)
self.assertEqual(model_fit_args["feature_names"], ["x", "y", "z"])
# And update
ma._update(
observation_features=self.observation_features,
observation_data=self.observation_data,
)
# Calling _update requires passing ALL data.
model_update_args = model.update.mock_calls[0][2]
for i, x in enumerate(model_update_args["Xs"]):
self.assertTrue(np.array_equal(x, Xs[ma.outcomes[i]]))
for i, y in enumerate(model_update_args["Ys"]):
self.assertTrue(np.array_equal(y, Ys[ma.outcomes[i]]))
for i, v in enumerate(model_update_args["Yvars"]):
self.assertTrue(np.array_equal(v, Yvars[ma.outcomes[i]]))
@mock.patch(
"ax.modelbridge.numpy.NumpyModelBridge._model_predict",
return_value=(
np.array([[1.0, -1], [2.0, -2]]),
np.stack(
(np.array([[1.0, 4.0], [4.0, 6]]), np.array([[2.0, 5.0], [5.0, 7]]))
),
),
autospec=True,
)
@mock.patch("ax.modelbridge.numpy.NumpyModelBridge.__init__", return_value=None)
def testPredict(self, mock_init, mock_predict):
ma = NumpyModelBridge()
ma.parameters = ["x", "y", "z"]
ma.outcomes = ["a", "b"]
observation_data = ma._predict(self.observation_features)
X = np.array([[0.2, 1.2, 3.0], [0.4, 1.4, 3.0], [0.6, 1.6, 3]])
self.assertTrue(np.array_equal(mock_predict.mock_calls[0][2]["X"], X))
for i, od in enumerate(observation_data):
self.assertEqual(od, self.observation_data[i])
@mock.patch(
"ax.modelbridge.numpy.NumpyModelBridge._model_gen",
autospec=True,
return_value=(
np.array([[1.0, 2.0, 3.0], [3.0, 4.0, 3.0]]),
np.array([1.0, 2.0]),
{},
[],
),
)
@mock.patch(
"ax.modelbridge.numpy.NumpyModelBridge._model_best_point",
autospec=True,
return_value=None,
)
@mock.patch("ax.modelbridge.numpy.NumpyModelBridge.__init__", return_value=None)
def testGen(self, mock_init, mock_best_point, mock_gen):
# Test with constraints
optimization_config = OptimizationConfig(
objective=Objective(Metric("a"), minimize=True),
outcome_constraints=[
OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
],
)
ma = NumpyModelBridge()
ma.parameters = ["x", "y", "z"]
ma.outcomes = ["a", "b"]
ma.transforms = OrderedDict()
observation_features, weights, best_obsf, _ = ma._gen(
n=3,
search_space=self.search_space,
optimization_config=optimization_config,
pending_observations=self.pending_observations,
fixed_features=ObservationFeatures({"z": 3.0}),
model_gen_options=self.model_gen_options,
)
gen_args = mock_gen.mock_calls[0][2]
self.assertEqual(gen_args["n"], 3)
self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0), (0.0, 5.0)])
self.assertTrue(
np.array_equal(gen_args["objective_weights"], np.array([-1.0, 0.0]))
)
self.assertTrue(
np.array_equal(gen_args["outcome_constraints"][0], np.array([[0.0, -1.0]]))
)
self.assertTrue(
np.array_equal(gen_args["outcome_constraints"][1], np.array([[-2]]))
)
self.assertTrue(
np.array_equal(
gen_args["linear_constraints"][0],
np.array([[1.0, -1, 0.0], [-1.0, 0.0, -1.0]]),
)
)
self.assertTrue(
np.array_equal(gen_args["linear_constraints"][1], np.array([[0.0], [-3.5]]))
)
self.assertEqual(gen_args["fixed_features"], {2: 3.0})
self.assertTrue(
np.array_equal(gen_args["pending_observations"][0], np.array([]))
)
self.assertTrue(
np.array_equal(
gen_args["pending_observations"][1], np.array([[0.6, 1.6, 3.0]])
)
)
self.assertEqual(gen_args["model_gen_options"], {"option": "yes"})
self.assertEqual(
observation_features[0].parameters, {"x": 1.0, "y": 2.0, "z": 3.0}
)
self.assertEqual(
observation_features[1].parameters, {"x": 3.0, "y": 4.0, "z": 3.0}
)
self.assertTrue(np.array_equal(weights, np.array([1.0, 2.0])))
# Test with multiple objectives.
oc2 = OptimizationConfig(
objective=ScalarizedObjective(
metrics=[Metric(name="a"), Metric(name="b")], minimize=True
)
)
observation_features, weights, best_obsf, _ = ma._gen(
n=3,
search_space=self.search_space,
optimization_config=oc2,
pending_observations=self.pending_observations,
fixed_features=ObservationFeatures({"z": 3.0}),
model_gen_options=self.model_gen_options,
)
gen_args = mock_gen.mock_calls[1][2]
self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0), (0.0, 5.0)])
self.assertIsNone(gen_args["outcome_constraints"])
self.assertTrue(
np.array_equal(gen_args["objective_weights"], np.array([-1.0, -1.0]))
)
# Test with MultiObjective (unweighted multiple objectives)
oc3 = OptimizationConfig(
objective=MultiObjective(
metrics=[Metric(name="a"), Metric(name="b", lower_is_better=True)],
minimize=True,
)
)
search_space = SearchSpace(self.parameters) # Unconstrained
observation_features, weights, best_obsf, _ = ma._gen(
n=3,
search_space=search_space,
optimization_config=oc3,
pending_observations=self.pending_observations,
fixed_features=ObservationFeatures({"z": 3.0}),
model_gen_options=self.model_gen_options,
)
gen_args = mock_gen.mock_calls[2][2]
self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0), (0.0, 5.0)])
self.assertIsNone(gen_args["outcome_constraints"])
self.assertTrue(
np.array_equal(gen_args["objective_weights"], np.array([1.0, -1.0]))
)
# Test with no constraints, no fixed feature, no pending observations
search_space = SearchSpace(self.parameters[:2])
optimization_config.outcome_constraints = []
ma.parameters = ["x", "y"]
ma._gen(3, search_space, {}, ObservationFeatures({}), None, optimization_config)
gen_args = mock_gen.mock_calls[3][2]
self.assertEqual(gen_args["bounds"], [(0.0, 1.0), (1.0, 2.0)])
self.assertIsNone(gen_args["outcome_constraints"])
self.assertIsNone(gen_args["linear_constraints"])
self.assertIsNone(gen_args["fixed_features"])
self.assertIsNone(gen_args["pending_observations"])
# Test validation
optimization_config = OptimizationConfig(
objective=Objective(Metric("a"), minimize=False),
outcome_constraints=[
OutcomeConstraint(Metric("b"), ComparisonOp.GEQ, 2, False)
],
)
with self.assertRaises(ValueError):
ma._gen(
n=3,
search_space=self.search_space,
optimization_config=optimization_config,
pending_observations={},
fixed_features=ObservationFeatures({}),
)
optimization_config.objective.minimize = True
optimization_config.outcome_constraints[0].relative = True
with self.assertRaises(ValueError):
ma._gen(
n=3,
search_space=self.search_space,
optimization_config=optimization_config,
pending_observations={},
fixed_features=ObservationFeatures({}),
)
@mock.patch(
"ax.modelbridge.numpy.NumpyModelBridge._model_cross_validate",
return_value=(
np.array([[1.0, -1], [2.0, -2]]),
np.stack(
(np.array([[1.0, 4.0], [4.0, 6]]), np.array([[2.0, 5.0], [5.0, 7]]))
),
),
autospec=True,
)
@mock.patch("ax.modelbridge.numpy.NumpyModelBridge.__init__", return_value=None)
def testCrossValidate(self, mock_init, mock_cv):
ma = NumpyModelBridge()
ma.parameters = ["x", "y", "z"]
ma.outcomes = ["a", "b"]
observation_data = ma._cross_validate(
self.observation_features, self.observation_data, self.observation_features
)
Xs = [
np.array([[0.2, 1.2, 3.0], [0.4, 1.4, 3.0], [0.6, 1.6, 3]]),
np.array([[0.2, 1.2, 3.0], [0.4, 1.4, 3.0]]),
]
Ys = [np.array([[1.0], [2.0], [3.0]]), np.array([[-1.0], [-2.0]])]
Yvars = [np.array([[1.0], [2.0], [3.0]]), np.array([[6.0], [7.0]])]
Xtest = np.array([[0.2, 1.2, 3.0], [0.4, 1.4, 3.0], [0.6, 1.6, 3]])
# Transform to arrays:
model_cv_args = mock_cv.mock_calls[0][2]
for i, x in enumerate(model_cv_args["Xs_train"]):
self.assertTrue(np.array_equal(x, Xs[i]))
for i, y in enumerate(model_cv_args["Ys_train"]):
self.assertTrue(np.array_equal(y, Ys[i]))
for i, v in enumerate(model_cv_args["Yvars_train"]):
self.assertTrue(np.array_equal(v, Yvars[i]))
self.assertTrue(np.array_equal(model_cv_args["X_test"], Xtest))
# Transform from arrays:
for i, od in enumerate(observation_data):
self.assertEqual(od, self.observation_data[i])
def testGetBoundsAndTask(self):
bounds, task_features, target_fidelities = get_bounds_and_task(
self.search_space, ["x", "y", "z"]
)
self.assertEqual(bounds, [(0.0, 1.0), (1.0, 2.0), (0.0, 5.0)])
self.assertEqual(task_features, [])
self.assertEqual(target_fidelities, {1: 2.0})
bounds, task_features, target_fidelities = get_bounds_and_task(
self.search_space, ["x", "z"]
)
self.assertEqual(target_fidelities, {})
# Test that Int param is treated as task feature
search_space = SearchSpace(self.parameters)
search_space._parameters["x"] = RangeParameter(
"x", ParameterType.INT, lower=1, upper=4
)
bounds, task_features, target_fidelities = get_bounds_and_task(
search_space, ["x", "y", "z"]
)
self.assertEqual(task_features, [0])
# Test validation
search_space._parameters["x"] = ChoiceParameter(
"x", ParameterType.FLOAT, [0.1, 0.4]
)
with self.assertRaises(ValueError):
get_bounds_and_task(search_space, ["x", "y", "z"])
search_space._parameters["x"] = RangeParameter(
"x", ParameterType.FLOAT, lower=1.0, upper=4.0, log_scale=True
)
with self.assertRaises(ValueError):
get_bounds_and_task(search_space, ["x", "y", "z"])
|
the-stack_0_4991 | import matplotlib.pyplot as plt
import numpy as np
with open("log1.txt",'r',encoding='utf-8') as f:
train_x = []
train_y = []
dev_x = []
dev_y = []
step = 0
log=f.readline()
while(log):
log = log.split()
if "Step" in log:
index = log.index("Step")
step = int(log[index + 1].split('/')[0])
if step>950:
acc = float(log[index + 3][:-1])
ppl = float(log[index + 5][:-1])
train_x.append(step)
train_y.append([acc,ppl])
if "perplexity:" in log:
dev_x.append(step)
ppl = float(log[-1])
log = f.readline().split()
acc = float(log[-1])
dev_y.append([acc,ppl])
log = f.readline()
y = 'acc'
if y == 'acc':
train_y = np.array(train_y)[:,0]
dev_y = np.array(dev_y)[:,0]
else:
train_y = np.array(train_y)[:,1]
dev_y = np.array(dev_y)[:,1]
y = 'ppl'
plt.plot(train_x, train_y, label = "train")
plt.plot(dev_x, dev_y, label = "test")
plt.xlabel("steps")
plt.ylabel(y)
plt.legend()
plt.show() |
the-stack_0_4992 | # Copyright (c) 2021 AllSeeingEyeTolledEweSew
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
"""BTN support for TVAF."""
import contextlib
import logging
import pathlib
import sqlite3
from typing import Any
from typing import AsyncIterator
from typing import Awaitable
from typing import Callable
from typing import cast
from typing import Dict
from typing import Iterator
from typing import Optional
from typing import Tuple
from btn_cache import metadata_db
from btn_cache import site as btn_site
from btn_cache import storage as btn_storage
import dbver
import libtorrent as lt
import requests
from tvaf import concurrency
from tvaf import config as config_lib
from tvaf import lifecycle
from tvaf import services
from tvaf import swarm as tvaf_swarm
from tvaf import torrent_info
from tvaf.swarm import ConfigureSwarm
_LOG = logging.getLogger(__name__)
@lifecycle.singleton()
def get_storage() -> btn_storage.Storage:
return btn_storage.Storage(pathlib.Path("btn"))
def get_auth_from(config: config_lib.Config) -> btn_site.UserAuth:
return btn_site.UserAuth(
user_id=config.get_int("btn_user_id"),
auth=config.get_str("btn_auth"),
authkey=config.get_str("btn_authkey"),
passkey=config.get_str("btn_passkey"),
api_key=config.get_str("btn_api_key"),
)
@lifecycle.asingleton()
@services.startup_plugin("50_btn")
async def get_auth() -> btn_site.UserAuth:
return get_auth_from(await services.get_config())
@lifecycle.asingleton()
async def get_requests_session() -> requests.Session:
session = requests.Session()
session.headers.update({"User-Agent": "tvaf-btn"})
return session
@lifecycle.asingleton()
async def get_access() -> btn_site.UserAccess:
return btn_site.UserAccess(
auth=await get_auth(), session=await get_requests_session()
)
@services.stage_config_plugin("50_btn")
@contextlib.asynccontextmanager
async def stage_config(config: config_lib.Config) -> AsyncIterator[None]:
get_auth_from(config)
yield
get_auth.cache_clear()
get_access.cache_clear()
METADATA_DB_VERSION_SUPPORTED = 1_000_000
def get_metadata_db_conn() -> sqlite3.Connection:
path = get_storage().metadata_db_path
path.parent.mkdir(exist_ok=True, parents=True)
return sqlite3.Connection(path, isolation_level=None)
metadata_db_pool = dbver.null_pool(get_metadata_db_conn)
@contextlib.contextmanager
def read_metadata_db() -> Iterator[Tuple[sqlite3.Connection, int]]:
with dbver.begin_pool(metadata_db_pool, dbver.LockMode.DEFERRED) as conn:
version = metadata_db.get_version(conn)
dbver.semver_check_breaking(version, METADATA_DB_VERSION_SUPPORTED)
yield (conn, version)
@contextlib.contextmanager
def write_metadata_db() -> Iterator[Tuple[sqlite3.Connection, int]]:
# TODO: should we set WAL? where?
with dbver.begin_pool(metadata_db_pool, dbver.LockMode.IMMEDIATE) as conn:
version = metadata_db.upgrade(conn)
dbver.semver_check_breaking(version, METADATA_DB_VERSION_SUPPORTED)
yield (conn, version)
async def get_fetcher(
torrent_entry_id: int,
) -> Optional[Callable[[], Awaitable[bytes]]]:
access = await get_access()
# TODO: should btn_cache do this validation?
if access._auth.passkey is None:
return None
async def fetch() -> bytes:
# TODO: change to aiohttp
resp = await concurrency.to_thread(access.get_torrent, torrent_entry_id)
resp.raise_for_status()
return await concurrency.to_thread(getattr, resp, "content")
return fetch
async def fetch_and_store(info_hashes: lt.info_hash_t) -> None:
torrent_entry_id = await concurrency.to_thread(get_torrent_entry_id, info_hashes)
fetch = await get_fetcher(torrent_entry_id)
if fetch is None:
return
bencoded = await fetch()
bdecoded = cast(Dict[bytes, Any], lt.bdecode(bencoded))
# TODO: top-level publish
await concurrency.to_thread(
receive_bdecoded_info, torrent_entry_id, bdecoded[b"info"]
)
def map_file_sync(info_hashes: lt.info_hash_t, file_index: int) -> Tuple[int, int]:
with read_metadata_db() as (conn, version):
if version == 0:
raise KeyError(info_hashes)
hexdigest = info_hashes.get_best().to_bytes().hex()
cur = conn.cursor().execute(
"select file_info.id from torrent_entry inner join file_info "
"on torrent_entry.id = file_info.id "
"where torrent_entry.info_hash = ?",
(hexdigest,),
)
if cur.fetchone() is None:
_LOG.debug("map_file: no cached file_info")
raise KeyError(info_hashes)
cur = conn.cursor().execute(
"select file_info.start, file_info.stop from torrent_entry "
"inner join file_info on torrent_entry.id = file_info.id "
"where torrent_entry.info_hash = ? and file_index = ?",
(hexdigest, file_index),
)
row = cur.fetchone()
if row is None:
_LOG.debug("map_file: not found")
raise IndexError()
return cast(Tuple[int, int], row)
@torrent_info.map_file_plugin("30_btn")
async def map_file(info_hashes: lt.info_hash_t, file_index: int) -> Tuple[int, int]:
return await concurrency.to_thread(map_file_sync, info_hashes, file_index)
@torrent_info.map_file_plugin("90_btn_fetch")
async def fetch_and_map_file(
info_hashes: lt.info_hash_t, file_index: int
) -> Tuple[int, int]:
await fetch_and_store(info_hashes)
return await map_file(info_hashes, file_index)
def get_torrent_entry_id(info_hashes: lt.info_hash_t) -> int:
digest = info_hashes.get_best().to_bytes()
with read_metadata_db() as (conn, version):
if version == 0:
_LOG.debug("get_torrent_entry_id: empty db")
raise KeyError(info_hashes)
cur = conn.cursor().execute(
"select id from torrent_entry where info_hash = ? and not deleted "
"order by id desc",
(digest.hex(),),
)
row = cur.fetchone()
if row is None:
_LOG.debug("get_torrent_entry_id: not found")
raise KeyError(info_hashes)
(torrent_entry_id,) = cast(Tuple[int], row)
return torrent_entry_id
@tvaf_swarm.access_swarm_plugin("btn")
async def access_swarm(info_hashes: lt.info_hash_t) -> ConfigureSwarm:
torrent_entry_id = await concurrency.to_thread(get_torrent_entry_id, info_hashes)
fetch = await get_fetcher(torrent_entry_id)
if fetch is None:
raise KeyError(info_hashes)
async def configure_swarm(atp: lt.add_torrent_params) -> None:
assert fetch is not None # helps mypy
bencoded = await fetch()
bdecoded = cast(Dict[bytes, Any], lt.bdecode(bencoded))
atp.ti = lt.torrent_info(bdecoded)
# TODO: top-level publish
await concurrency.to_thread(
receive_bdecoded_info, torrent_entry_id, bdecoded[b"info"]
)
return configure_swarm
def receive_bdecoded_info(torrent_entry_id: int, info: Dict[bytes, Any]) -> None:
# We expect the common case to fail to find any ids to update, so we don't
# bother preparing the update outside the lock
with write_metadata_db() as (conn, _):
cur = conn.cursor().execute(
"SELECT id FROM file_info WHERE id = ?", (torrent_entry_id,)
)
row = cur.fetchone()
if row is not None:
return
update = metadata_db.ParsedTorrentInfoUpdate(
info, torrent_entry_id=torrent_entry_id
)
update.apply(conn)
@torrent_info.is_private_plugin("50_btn")
async def is_private(info_hashes: lt.info_hash_t) -> bool:
await concurrency.to_thread(get_torrent_entry_id, info_hashes)
return True
|
the-stack_0_4993 | import sys
class Graph:
def __init__(self, v):
self.vertices_count = v
self.vertices = [i for i in range(v)]
self.adj_mat = [[0 for _ in range(v)] for _ in range(v)]
def connect_all(self):
self.adj_mat = []
for i in range(self.vertices_count):
raw_mat = []
for j in range(self.vertices_count):
raw_mat.append(0 if i == j else 1)
self.adj_mat.append(raw_mat)
def add_edge(self, u, v, weight=1, ordered=False):
#print("ADDING EDGE: (u: {}, v: {})".format(u, v))
self.adj_mat[u][v] = weight
if not ordered:
self.adj_mat[v][u] = weight
def print_graph(self):
for i in range(self.vertices_count):
for j in range(self.vertices_count):
print(self.adj_mat[i][j], end=' ')
print()
def breadth_first_search(self, src, dest):
visited = [False]*self.vertices_count
distance = [sys.maxsize]*self.vertices_count
previous_cell = [-1]*self.vertices_count
queue = []
visited[src] = True
distance[src] = 0
queue.append(src)
while not len(queue) == 0:
u = queue[0]
queue.remove(u)
for v in range(len(self.adj_mat[u])):
if not visited[v] and self.adj_mat[u][v] != 0:
visited[v] = True
distance[v] = distance[u] + 1
previous_cell[v] = u
queue.append(v)
return previous_cell
def get_shortest_path(self, src, dest):
return self.breadth_first_search(src, dest)
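# Hedged usage sketch (not part of the original file): build a small graph and walk the
# predecessor array returned by get_shortest_path() back from dest to src to recover
# the actual shortest path.
if __name__ == "__main__":
    g = Graph(5)
    g.add_edge(0, 1)
    g.add_edge(1, 2)
    g.add_edge(2, 3)
    g.add_edge(0, 4)
    previous_cell = g.get_shortest_path(0, 3)
    path, node = [], 3
    while node != -1:
        path.append(node)
        node = previous_cell[node]
    print(list(reversed(path)))  # expected: [0, 1, 2, 3]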
|
the-stack_0_4994 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ComputeManagementClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for ComputeManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Subscription credentials which uniquely identify Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2021-03-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(ComputeManagementClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop('api_version', "2021-03-01") # type: str
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-compute/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
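# Hedged usage sketch (not part of the generated client code): building the configuration
# with an async credential. DefaultAzureCredential is an assumption about the caller's
# environment; any AsyncTokenCredential works here.
def _example_build_configuration(subscription_id: str) -> ComputeManagementClientConfiguration:
    from azure.identity.aio import DefaultAzureCredential  # assumed to be installed
    return ComputeManagementClientConfiguration(
        credential=DefaultAzureCredential(),
        subscription_id=subscription_id,
    )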
|
the-stack_0_4997 | from typing import List, Optional, Set, Dict
import aiosqlite
from mint.protocols.wallet_protocol import CoinState
from mint.types.blockchain_format.coin import Coin
from mint.types.blockchain_format.sized_bytes import bytes32
from mint.types.coin_record import CoinRecord
from mint.util.db_wrapper import DBWrapper
from mint.util.ints import uint32, uint64
from mint.util.lru_cache import LRUCache
from time import time
import logging
log = logging.getLogger(__name__)
class CoinStore:
"""
This object handles CoinRecords in DB.
A cache is maintained for quicker access to recent coins.
"""
coin_record_db: aiosqlite.Connection
coin_record_cache: LRUCache
cache_size: uint32
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper, cache_size: uint32 = uint32(60000)):
self = cls()
self.cache_size = cache_size
self.db_wrapper = db_wrapper
self.coin_record_db = db_wrapper.db
# the coin_name is unique in this table because the CoinStore always
        # only represents a single peak
await self.coin_record_db.execute(
(
"CREATE TABLE IF NOT EXISTS coin_record("
"coin_name text PRIMARY KEY,"
" confirmed_index bigint,"
" spent_index bigint,"
" spent int,"
" coinbase int,"
" puzzle_hash text,"
" coin_parent text,"
" amount blob,"
" timestamp bigint)"
)
)
# Useful for reorg lookups
await self.coin_record_db.execute(
"CREATE INDEX IF NOT EXISTS coin_confirmed_index on coin_record(confirmed_index)"
)
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_spent_index on coin_record(spent_index)")
# earlier versions of mint created this index despite no lookups needing
# it. For now, just don't create it for new installs. In the future we
# may remove the index from existing installations as well
# await self.coin_record_db.execute("DROP INDEX IF EXISTS coin_spent")
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_puzzle_hash on coin_record(puzzle_hash)")
await self.coin_record_db.execute("CREATE INDEX IF NOT EXISTS coin_parent_index on coin_record(coin_parent)")
await self.coin_record_db.commit()
self.coin_record_cache = LRUCache(cache_size)
return self
async def new_block(
self,
height: uint32,
timestamp: uint64,
included_reward_coins: Set[Coin],
tx_additions: List[Coin],
tx_removals: List[bytes32],
) -> List[CoinRecord]:
"""
        Only called for transaction blocks (which have rewards and transactions).
Returns a list of the CoinRecords that were added by this block
"""
start = time()
additions = []
for coin in tx_additions:
record: CoinRecord = CoinRecord(
coin,
height,
uint32(0),
False,
False,
timestamp,
)
additions.append(record)
if height == 0:
assert len(included_reward_coins) == 0
else:
assert len(included_reward_coins) >= 2
for coin in included_reward_coins:
reward_coin_r: CoinRecord = CoinRecord(
coin,
height,
uint32(0),
False,
True,
timestamp,
)
additions.append(reward_coin_r)
await self._add_coin_records(additions)
await self._set_spent(tx_removals, height)
end = time()
log.log(
logging.WARNING if end - start > 10 else logging.DEBUG,
f"It took {end - start:0.2f}s to apply {len(tx_additions)} additions and "
+ f"{len(tx_removals)} removals to the coin store. Make sure "
+ "blockchain database is on a fast drive",
)
return additions
# Checks DB and DiffStores for CoinRecord with coin_name and returns it
async def get_coin_record(self, coin_name: bytes32) -> Optional[CoinRecord]:
cached = self.coin_record_cache.get(coin_name)
if cached is not None:
return cached
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE coin_name=?", (coin_name.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
coin = self.row_to_coin(row)
record = CoinRecord(coin, row[1], row[2], row[3], row[4], row[8])
self.coin_record_cache.put(record.coin.name(), record)
return record
return None
async def get_coins_added_at_height(self, height: uint32) -> List[CoinRecord]:
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE confirmed_index=?", (height,))
rows = await cursor.fetchall()
await cursor.close()
coins = []
for row in rows:
coin = self.row_to_coin(row)
coins.append(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return coins
async def get_coins_removed_at_height(self, height: uint32) -> List[CoinRecord]:
# Special case to avoid querying all unspent coins (spent_index=0)
if height == 0:
return []
cursor = await self.coin_record_db.execute("SELECT * from coin_record WHERE spent_index=?", (height,))
rows = await cursor.fetchall()
await cursor.close()
coins = []
for row in rows:
spent: bool = bool(row[3])
if spent:
coin = self.row_to_coin(row)
coin_record = CoinRecord(coin, row[1], row[2], spent, row[4], row[8])
coins.append(coin_record)
return coins
# Checks DB and DiffStores for CoinRecords with puzzle_hash and returns them
async def get_coin_records_by_puzzle_hash(
self,
include_spent_coins: bool,
puzzle_hash: bytes32,
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
coins = set()
cursor = await self.coin_record_db.execute(
f"SELECT * from coin_record INDEXED BY coin_puzzle_hash WHERE puzzle_hash=? "
f"AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
(puzzle_hash.hex(), start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = self.row_to_coin(row)
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def get_coin_records_by_puzzle_hashes(
self,
include_spent_coins: bool,
puzzle_hashes: List[bytes32],
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
if len(puzzle_hashes) == 0:
return []
coins = set()
puzzle_hashes_db = tuple([ph.hex() for ph in puzzle_hashes])
cursor = await self.coin_record_db.execute(
f"SELECT * from coin_record INDEXED BY coin_puzzle_hash "
f'WHERE puzzle_hash in ({"?," * (len(puzzle_hashes) - 1)}?) '
f"AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
puzzle_hashes_db + (start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = self.row_to_coin(row)
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def get_coin_records_by_names(
self,
include_spent_coins: bool,
names: List[bytes32],
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
if len(names) == 0:
return []
coins = set()
names_db = tuple([name.hex() for name in names])
cursor = await self.coin_record_db.execute(
f'SELECT * from coin_record WHERE coin_name in ({"?," * (len(names) - 1)}?) '
f"AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
names_db + (start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = self.row_to_coin(row)
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
def row_to_coin(self, row) -> Coin:
return Coin(bytes32(bytes.fromhex(row[6])), bytes32(bytes.fromhex(row[5])), uint64.from_bytes(row[7]))
def row_to_coin_state(self, row):
coin = self.row_to_coin(row)
spent_h = None
if row[3]:
spent_h = row[2]
return CoinState(coin, spent_h, row[1])
async def get_coin_states_by_puzzle_hashes(
self,
include_spent_coins: bool,
puzzle_hashes: List[bytes32],
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinState]:
if len(puzzle_hashes) == 0:
return []
coins = set()
puzzle_hashes_db = tuple([ph.hex() for ph in puzzle_hashes])
cursor = await self.coin_record_db.execute(
f'SELECT * from coin_record WHERE puzzle_hash in ({"?," * (len(puzzle_hashes) - 1)}?) '
f"AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
puzzle_hashes_db + (start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coins.add(self.row_to_coin_state(row))
return list(coins)
async def get_coin_records_by_parent_ids(
self,
include_spent_coins: bool,
parent_ids: List[bytes32],
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinRecord]:
if len(parent_ids) == 0:
return []
coins = set()
parent_ids_db = tuple([pid.hex() for pid in parent_ids])
cursor = await self.coin_record_db.execute(
f'SELECT * from coin_record WHERE coin_parent in ({"?," * (len(parent_ids) - 1)}?) '
f"AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
parent_ids_db + (start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coin = self.row_to_coin(row)
coins.add(CoinRecord(coin, row[1], row[2], row[3], row[4], row[8]))
return list(coins)
async def get_coin_state_by_ids(
self,
include_spent_coins: bool,
coin_ids: List[bytes32],
start_height: uint32 = uint32(0),
end_height: uint32 = uint32((2 ** 32) - 1),
) -> List[CoinState]:
if len(coin_ids) == 0:
return []
coins = set()
coin_ids_db = tuple([pid.hex() for pid in coin_ids])
cursor = await self.coin_record_db.execute(
f'SELECT * from coin_record WHERE coin_name in ({"?," * (len(coin_ids) - 1)}?) '
f"AND confirmed_index>=? AND confirmed_index<? "
f"{'' if include_spent_coins else 'AND spent=0'}",
coin_ids_db + (start_height, end_height),
)
rows = await cursor.fetchall()
await cursor.close()
for row in rows:
coins.add(self.row_to_coin_state(row))
return list(coins)
async def rollback_to_block(self, block_index: int) -> List[CoinRecord]:
"""
Note that block_index can be negative, in which case everything is rolled back
Returns the list of coin records that have been modified
"""
# Update memory cache
        delete_queue: List[bytes32] = []
for coin_name, coin_record in list(self.coin_record_cache.cache.items()):
if int(coin_record.spent_block_index) > block_index:
new_record = CoinRecord(
coin_record.coin,
coin_record.confirmed_block_index,
uint32(0),
False,
coin_record.coinbase,
coin_record.timestamp,
)
self.coin_record_cache.put(coin_record.coin.name(), new_record)
if int(coin_record.confirmed_block_index) > block_index:
delete_queue.append(coin_name)
for coin_name in delete_queue:
self.coin_record_cache.remove(coin_name)
coin_changes: Dict[bytes32, CoinRecord] = {}
cursor_deleted = await self.coin_record_db.execute(
"SELECT * FROM coin_record WHERE confirmed_index>?", (block_index,)
)
rows = await cursor_deleted.fetchall()
for row in rows:
coin = self.row_to_coin(row)
record = CoinRecord(coin, uint32(0), row[2], row[3], row[4], uint64(0))
coin_changes[record.name] = record
await cursor_deleted.close()
# Delete from storage
c1 = await self.coin_record_db.execute("DELETE FROM coin_record WHERE confirmed_index>?", (block_index,))
await c1.close()
cursor_unspent = await self.coin_record_db.execute(
"SELECT * FROM coin_record WHERE confirmed_index>?", (block_index,)
)
rows = await cursor_unspent.fetchall()
for row in rows:
coin = self.row_to_coin(row)
record = CoinRecord(coin, row[1], uint32(0), False, row[4], row[8])
if record.name not in coin_changes:
coin_changes[record.name] = record
await cursor_unspent.close()
c2 = await self.coin_record_db.execute(
"UPDATE coin_record SET spent_index = 0, spent = 0 WHERE spent_index>?",
(block_index,),
)
await c2.close()
return list(coin_changes.values())
# Store CoinRecord in DB and ram cache
async def _add_coin_records(self, records: List[CoinRecord]) -> None:
values = []
for record in records:
self.coin_record_cache.put(record.coin.name(), record)
values.append(
(
record.coin.name().hex(),
record.confirmed_block_index,
record.spent_block_index,
int(record.spent),
int(record.coinbase),
record.coin.puzzle_hash.hex(),
record.coin.parent_coin_info.hex(),
bytes(record.coin.amount),
record.timestamp,
)
)
cursor = await self.coin_record_db.executemany(
"INSERT INTO coin_record VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)",
values,
)
await cursor.close()
# Update coin_record to be spent in DB
async def _set_spent(self, coin_names: List[bytes32], index: uint32):
# if this coin is in the cache, mark it as spent in there
updates = []
for coin_name in coin_names:
r = self.coin_record_cache.get(coin_name)
if r is not None:
self.coin_record_cache.put(
r.name, CoinRecord(r.coin, r.confirmed_block_index, index, True, r.coinbase, r.timestamp)
)
updates.append((index, coin_name.hex()))
await self.coin_record_db.executemany(
"UPDATE OR FAIL coin_record SET spent=1,spent_index=? WHERE coin_name=?", updates
)
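# Hedged usage sketch (not part of the original module): open the store on an existing
# DBWrapper and look up a single coin record by name. Both "db_wrapper" and "coin_id"
# are assumed to be supplied by the caller.
async def _example_lookup_coin(db_wrapper: DBWrapper, coin_id: bytes32) -> Optional[CoinRecord]:
    store = await CoinStore.create(db_wrapper)
    return await store.get_coin_record(coin_id)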
|
the-stack_0_4999 | #
# Copyright (c) 2015, Adam Meily <[email protected]>
# Pypsi - https://github.com/ameily/pypsi
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import threading
import sys
from pypsi.ansi import AnsiCode, AnsiCodes
from pypsi.os import make_ansi_stream
class ThreadLocalStream(object):
'''
A stream wrapper that is thread-local. This class enables thread-based
pipes by wrapping :attr:`sys.stdout`, :attr:`sys.stderr`, and
:attr:`sys.stdin` and making access to them thread-local. This allows each
thread to, potentially, each thread to write to a different stream.
A single stream, such as stdout, is wrapped in Pypsi as such:
stdout -> thread local stream -> os-specific ansi stream
'''
DefaultAnsiStreamKwargs = dict()
def __init__(self, target, **kwargs):
'''
:param file target: the original target stream (typically either
:attr:`sys.stdout`, :attr:`sys.stderr`, and :attr:`sys.stdin`).
:param int width: the width of the stream in characters, this attribute
determines if word wrapping is enabled and how wide the lines are.
:param bool isatty: whether the underlying stream is a tty stream,
which supports ANSI escape cdes.
'''
if ThreadLocalStream.DefaultAnsiStreamKwargs:
kw = dict(ThreadLocalStream.DefaultAnsiStreamKwargs)
kw.update(kwargs)
kwargs = kw
self._target = make_ansi_stream(target, **kwargs)
self._proxies = {}
def _get_target(self):
'''
Get the target tuple for the current thread.
:returns tuple: (target, width, isatty).
'''
return self._proxies.get(threading.current_thread().ident,
self._target)
def __getattr__(self, name):
return getattr(self._get_target(), name)
def __hasattr__(self, name):
attrs = ('_proxy', '_unproxy', '_get_target', '_proxies', '_target')
return name in attrs or hasattr(self._get_target(), name)
def _proxy(self, target, **kwargs):
'''
Set a thread-local stream.
:param file target: the target stream.
:param int width: the stream width, in characters.
:param bool isatty: whether the target stream is a tty stream.
'''
self._proxies[threading.current_thread().ident] = make_ansi_stream(
target, **kwargs
)
def _unproxy(self, ident=None):
'''
Delete the proxy for a thread.
:param int ident: the thread's :attr:`~threading.Thread.ident`
attribute, or :const:`None` if the current thread's proxy is being
deleted.
'''
ident = ident or threading.current_thread().ident
if ident in self._proxies:
del self._proxies[ident]
def ansi_format(self, tmpl, **kwargs):
'''
Format a string that contains ansi code terms. This function allows
the following string to be the color red:
``sys.stdout.ansi_format("{red}Hello, {name}{reset}", name="Adam")``
The :data:`pypsi.ansi.AnsiCodesSingleton.codes` dict contains all
valid ansi escape code terms. If the current stream does not support
ansi escape codes, they are dropped from the template prior to
printing.
:param str tmpl: the string template
'''
atty = self._get_target().isatty()
for (name, value) in kwargs.items():
if isinstance(value, AnsiCode):
kwargs[name] = str(value) if atty else ''
for (name, code) in AnsiCodes.codes.items():
kwargs[name] = code.code if atty else ''
return tmpl.format(**kwargs)
def ansi_format_prompt(self, tmpl, **kwargs):
'''
        Format a string that contains ansi code terms. This function
        performs the same formatting as :meth:`ansi_format`, except this is
intended for formatting strings in prompt by calling
:meth:`pypsi.ansi.AnsiCode.prompt` for each code.
'''
atty = self._get_target().isatty()
for (name, value) in kwargs.items():
if isinstance(value, AnsiCode):
kwargs[name] = value.prompt() if atty else ''
for (name, code) in AnsiCodes.codes.items():
kwargs[name] = code.prompt() if atty else ''
return tmpl.format(**kwargs)
def render(self, parts, prompt=False):
'''
Render a list of objects as single string. This method is the
string version of the :meth:`print` method. Also, this method will
honor the current thread's :meth:`isatty` when rendering ANSI escape
codes.
:param list parts: list of object to render.
:param bool prompt: whether to render
:class:`~pypsi.ansi.AnsiCode` objects as prompts or not.
:returns str: the rendered string.
'''
r = []
target = self._get_target()
for part in parts:
if isinstance(part, AnsiCode):
if target.isatty():
if prompt:
r.append(part.prompt())
else:
r.append(str(part))
elif part.s:
r.append(part.s)
else:
r.append(str(part))
return ''.join(r)
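

# Illustrative sketch, not part of the original Pypsi source: a minimal example
# of how a worker thread might redirect its own output through the thread-local
# proxy while other threads keep writing to the shared target. The io.StringIO
# buffer and the helper name are assumptions made for this example only.
def _example_thread_local_capture(stream):
    import io

    buffer = io.StringIO()
    # Route this thread's writes into the buffer; other threads are unaffected
    # because proxies are keyed by threading.current_thread().ident.
    stream._proxy(buffer)
    try:
        stream.write("captured only for this thread\n")
    finally:
        # Drop the per-thread proxy so later writes fall back to the target.
        stream._unproxy()
    return buffer.getvalue()
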
class InvocationThread(threading.Thread):
'''
An invocation of a command from the command line interface.
'''
def __init__(self, shell, invoke, stdin=None, stdout=None, stderr=None):
'''
:param pypsi.shell.Shell shell: the active shell.
:param pypsi.cmdline.CommandInvocation invoke: the invocation to
execute.
:param stream stdin: override the invocation's stdin stream.
:param stream stdout: override the invocation's stdout stream.
        :param stream stderr: override the invocation's stderr stream.
'''
super(InvocationThread, self).__init__()
#: The active Shell
self.shell = shell
#: The :class:`~pypsi.cmdline.CommandInvocation` to execute.
self.invoke = invoke
#: Exception info, as returned by :meth:`sys.exc_info` if an exception
#: occurred.
self.exc_info = None
#: The invocation return code.
self.rc = None
if stdin:
self.invoke.stdin = stdin
if stdout:
self.invoke.stdout = stdout
if stderr:
self.invoke.stderr = stderr
def run(self):
'''
Run the command invocation.
'''
try:
self.rc = self.invoke(self.shell)
except:
self.exc_info = sys.exc_info()
self.rc = None
finally:
pass
def stop(self):
'''
        Attempt to stop the thread by explicitly closing the stdin, stdout, and
stderr streams.
'''
if self.is_alive():
try:
self.invoke.close_streams()
except:
pass
|
the-stack_0_5001 | import segmentation_models_pytorch as smp
import torch.optim
from .losses import CombinedLoss, BinaryFocalLoss
def get_optimizer(config, model):
"""
"""
optimizer_name = config.SOLVER.OPTIMIZER
if optimizer_name == 'adam':
return torch.optim.Adam(
model.parameters(),
lr=config.SOLVER.LR,
weight_decay=config.SOLVER.WEIGHT_DECAY
)
elif optimizer_name == 'adamw':
return torch.optim.AdamW(
model.parameters(),
lr=config.SOLVER.LR,
weight_decay=config.SOLVER.WEIGHT_DECAY
)
else:
raise ValueError()
def get_lr_scheduler(config, optimizer):
"""
"""
scheduler_name = config.SOLVER.LR_SCHEDULER
if scheduler_name == 'multistep':
return torch.optim.lr_scheduler.MultiStepLR(
optimizer,
milestones=config.SOLVER.LR_MULTISTEP_MILESTONES,
gamma=config.SOLVER.LR_MULTISTEP_GAMMA
)
elif scheduler_name == 'annealing':
return torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer,
T_max=config.SOLVER.LR_ANNEALING_T_MAX,
eta_min=config.SOLVER.LR_ANNEALING_ETA_MIN,
)
else:
raise ValueError()
def get_loss(config):
"""
"""
def _get_loss(config, loss_name):
if loss_name == 'bce':
return smp.utils.losses.BCELoss()
elif loss_name == 'dice':
return smp.utils.losses.DiceLoss()
elif loss_name == 'focal':
return BinaryFocalLoss(
gamma=config.SOLVER.FOCAL_LOSS_GAMMA
)
else:
raise ValueError()
loss_modules = []
for loss_name in config.SOLVER.LOSSES:
loss_modules.append(_get_loss(config, loss_name))
return CombinedLoss(
loss_modules,
config.SOLVER.LOSS_WEIGHTS
)
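

# Illustrative sketch, not part of the original module: a minimal stand-in
# config built with SimpleNamespace that exposes only the SOLVER fields read
# above. The real project presumably uses a structured config object; every
# value below is an assumption chosen for the example only.
def _example_build_training_objects(model):
    from types import SimpleNamespace

    cfg = SimpleNamespace(
        SOLVER=SimpleNamespace(
            OPTIMIZER='adamw',
            LR=1e-3,
            WEIGHT_DECAY=1e-4,
            LR_SCHEDULER='annealing',
            LR_ANNEALING_T_MAX=50,
            LR_ANNEALING_ETA_MIN=1e-6,
            LOSSES=['bce', 'dice'],
            LOSS_WEIGHTS=[1.0, 1.0],
        )
    )
    optimizer = get_optimizer(cfg, model)
    scheduler = get_lr_scheduler(cfg, optimizer)
    criterion = get_loss(cfg)
    return optimizer, scheduler, criterion
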
|
the-stack_0_5002 | import asyncio
import logging
import time
from typing import Callable
from covid.protocols.protocol_message_types import ProtocolMessageTypes
log = logging.getLogger(__name__)
async def time_out_assert_custom_interval(timeout: int, interval, function, value=True, *args, **kwargs):
start = time.time()
while time.time() - start < timeout:
if asyncio.iscoroutinefunction(function):
f_res = await function(*args, **kwargs)
else:
f_res = function(*args, **kwargs)
if value == f_res:
return None
await asyncio.sleep(interval)
assert False, "Timed assertion timed out"
async def time_out_assert(timeout: int, function, value=True, *args, **kwargs):
await time_out_assert_custom_interval(timeout, 0.05, function, value, *args, **kwargs)
async def time_out_assert_not_none(timeout: int, function, *args, **kwargs):
start = time.time()
while time.time() - start < timeout:
if asyncio.iscoroutinefunction(function):
f_res = await function(*args, **kwargs)
else:
f_res = function(*args, **kwargs)
if f_res is not None:
return None
await asyncio.sleep(0.05)
assert False, "Timed assertion timed out"
def time_out_messages(incoming_queue: asyncio.Queue, msg_name: str, count: int = 1) -> Callable:
async def bool_f():
if incoming_queue.qsize() < count:
return False
for _ in range(count):
response = (await incoming_queue.get())[0].type
if ProtocolMessageTypes(response).name != msg_name:
# log.warning(f"time_out_message: found {response} instead of {msg_name}")
return False
return True
return bool_f
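

# Illustrative usage sketch, not part of the original helpers: poll an async
# predicate until it returns True or the timeout expires. The node.is_synced
# coroutine is a made-up stand-in for whatever callable a real test would poll.
async def _example_wait_until_synced(node):
    # Fails the surrounding test if node.is_synced() has not returned True
    # within 30 seconds; the condition is re-checked every 50 ms.
    await time_out_assert(30, node.is_synced, True)
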
|
the-stack_0_5003 | # encoding: UTF-8
__author__ = 'CHENXY'
# Mapping from C++ type names to Python type names
type_dict = {
'int': 'int',
'char': 'string',
'double': 'float',
'short': 'int'
}
def process_line(line):
"""处理每行"""
if '///' in line: # 注释
py_line = process_comment(line)
elif 'typedef' in line: # 类型申明
py_line = process_typedef(line)
elif '#define' in line: # 定义常量
py_line = process_define(line)
elif line == '\n': # 空行
py_line = line
else:
py_line = ''
return py_line
def process_comment(line):
"""处理注释"""
# if line[3] == '/':
# py_line = ''
# else:
# py_line = '#' + line[3:]
py_line = '#' + line[3:]
return py_line
def process_typedef(line):
"""处理类型申明"""
content = line.split(' ')
type_ = type_dict[content[1]]
keyword = content[2]
if '[' in keyword:
i = keyword.index('[')
keyword = keyword[:i]
else:
        keyword = keyword.replace(';\n', '')  # strip the trailing semicolon
py_line = 'typedefDict["%s"] = "%s"\n' % (keyword, type_)
return py_line
def process_define(line):
"""处理定义常量"""
content = line.split(' ')
constant = content[1]
if len(content)>2:
value = content[-1]
py_line = 'defineDict["%s"] = %s' % (constant, value)
else:
py_line = ''
return py_line
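

# Illustrative check, not part of the original script: feeding a few sample
# header lines through process_line shows the kind of Python source it emits.
# The sample lines below are invented for the example and are not taken from
# the real SecurityFtdcL2MDUserApiDataType.h header.
def _example_process_lines():
    samples = [
        '///Price type\n',
        'typedef double TSecurityFtdcPriceType;\n',
        '#define SECURITY_FTDC_EXAMPLE 1\n',
    ]
    for line in samples:
        # e.g. the typedef line becomes: typedefDict["TSecurityFtdcPriceType"] = "float"
        print(process_line(line), end='')
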
def main():
"""主函数"""
try:
fcpp = open('SecurityFtdcL2MDUserApiDataType.h','r')
fpy = open('l2_data_type.py', 'w')
fpy.write('# encoding: UTF-8\n')
fpy.write('\n')
fpy.write('defineDict = {}\n')
fpy.write('typedefDict = {}\n')
fpy.write('\n')
for line in fcpp:
py_line = process_line(line)
if py_line:
fpy.write(py_line.decode('gbk').encode('utf-8'))
fcpp.close()
fpy.close()
        print('data_type.py generation finished')
except:
        print('data_type.py generation failed')
if __name__ == '__main__':
main()
|
the-stack_0_5004 | # coding=utf-8
# Copyright 2020, The T5 Authors and HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" T5 model configuration """
from collections import OrderedDict
from typing import Any, Dict, Iterable, Mapping, Optional
from transformers import PreTrainedTokenizer, TensorType
from ... import is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.T5Model` or a
:class:`~transformers.TFT5Model`. It is used to instantiate a T5 model according to the specified arguments,
defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration
to that of the T5 `t5-small <https://huggingface.co/t5-small>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.
Arguments:
vocab_size (:obj:`int`, `optional`, defaults to 32128):
Vocabulary size of the T5 model. Defines the number of different tokens that can be represented by the
:obj:`inputs_ids` passed when calling :class:`~transformers.T5Model` or :class:`~transformers.TFT5Model`.
d_model (:obj:`int`, `optional`, defaults to 512):
Size of the encoder layers and the pooler layer.
d_kv (:obj:`int`, `optional`, defaults to 64):
Size of the key, query, value projections per attention head. :obj:`d_kv` has to be equal to :obj:`d_model
// num_heads`.
d_ff (:obj:`int`, `optional`, defaults to 2048):
Size of the intermediate feed forward layer in each :obj:`T5Block`.
num_layers (:obj:`int`, `optional`, defaults to 6):
Number of hidden layers in the Transformer encoder.
num_decoder_layers (:obj:`int`, `optional`):
Number of hidden layers in the Transformer decoder. Will use the same value as :obj:`num_layers` if not
set.
num_heads (:obj:`int`, `optional`, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
relative_attention_num_buckets (:obj:`int`, `optional`, defaults to 32):
The number of buckets to use for each attention layer.
dropout_rate (:obj:`float`, `optional`, defaults to 0.1):
The ratio for all dropout layers.
layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-6):
The epsilon used by the layer normalization layers.
initializer_factor (:obj:`float`, `optional`, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
feed_forward_proj (:obj:`string`, `optional`, defaults to :obj:`"relu"`):
Type of feed forward layer to be used. Should be one of :obj:`"relu"` or :obj:`"gated-gelu"`. T5v1.1 uses
the :obj:`"gated-gelu"` feed forward projection. Original T5 uses :obj:`"relu"`.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return the last key/values attentions (not used by all models).
gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`):
If True, use gradient checkpointing to save memory at the expense of slower backward pass.
"""
model_type = "t5"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=32128,
d_model=512,
d_kv=64,
d_ff=2048,
num_layers=6,
num_decoder_layers=None,
num_heads=8,
relative_attention_num_buckets=32,
dropout_rate=0.1,
layer_norm_epsilon=1e-6,
initializer_factor=1.0,
feed_forward_proj="relu",
is_encoder_decoder=True,
use_cache=True,
pad_token_id=0,
eos_token_id=1,
gradient_checkpointing=False,
**kwargs
):
super().__init__(
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
**kwargs,
)
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.num_decoder_layers = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dropout_rate = dropout_rate
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.feed_forward_proj = feed_forward_proj
self.use_cache = use_cache
self.gradient_checkpointing = gradient_checkpointing
@property
def hidden_size(self):
return self.d_model
@property
def num_attention_heads(self):
return self.num_heads
@property
def num_hidden_layers(self):
return self.num_layers
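

# Illustrative sketch, not part of the transformers source: constructing a
# small T5Config by hand. The hyperparameter values below are arbitrary
# example numbers, not the published t5-small settings.
def _example_small_t5_config():
    config = T5Config(
        vocab_size=32128,
        d_model=256,
        d_kv=32,
        d_ff=1024,
        num_layers=4,
        num_heads=8,
        feed_forward_proj="gated-gelu",
    )
    # The convenience properties simply mirror the constructor arguments.
    assert config.hidden_size == config.d_model
    assert config.num_hidden_layers == config.num_layers
    return config
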
class T5OnnxConfig(OnnxConfigWithPast):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
common_inputs = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch"}),
("decoder_attention_mask", {0: "batch"}),
]
)
if self.use_past:
for i in range(0, self._config.num_layers):
common_inputs[f"past_key_values.{i}.decoder.key"] = {0: "batch", 2: "past_sequence"}
common_inputs[f"past_key_values.{i}.decoder.value"] = {0: "batch", 2: "past_sequence"}
common_inputs[f"past_key_values.{i}.encoder.key"] = {0: "batch", 2: "past_sequence"}
common_inputs[f"past_key_values.{i}.encoder.value"] = {0: "batch", 2: "past_sequence"}
return common_inputs
@property
def outputs(self) -> Mapping[str, Mapping[int, str]]:
common_outputs = super().outputs
if "last_hidden_state" in common_outputs:
common_outputs["last_hidden_state"] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
for i in range(self._config.num_layers):
common_outputs[f"present.{i}.decoder.key"] = {0: "batch", 2: "decoder_sequence"}
common_outputs[f"present.{i}.decoder.value"] = {0: "batch", 2: "decoder_sequence"}
common_outputs[f"present.{i}.encoder.key"] = {0: "batch", 2: "encoder_sequence"}
common_outputs[f"present.{i}.encoder.value"] = {0: "batch", 2: "encoder_sequence"}
if self.task == "default":
common_outputs["encoder_last_hidden_state"] = {0: "batch", 2: "encoder_sequence"}
return common_outputs
def generate_dummy_inputs(
self,
tokenizer: PreTrainedTokenizer,
batch_size: int = -1,
seq_length: int = -1,
is_pair: bool = False,
framework: Optional[TensorType] = None,
) -> Mapping[str, Any]:
# Generate encoder inputs
encoder_inputs = super().generate_dummy_inputs(tokenizer, batch_size, seq_length, is_pair, framework)
# Generate decoder inputs
decoder_inputs = super().generate_dummy_inputs(tokenizer, batch_size, 1, is_pair, framework)
decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
ordered_inputs = dict(**encoder_inputs, **decoder_inputs)
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
batch = encoder_inputs["input_ids"].shape[0]
encoder_seq_length = encoder_inputs["input_ids"].shape[1]
encoder_shape = (
batch,
self._config.num_heads,
encoder_seq_length,
self._config.hidden_size // self._config.num_heads,
)
decoder_shape = (batch, self._config.num_heads, 1, self._config.hidden_size // self._config.num_heads)
ordered_inputs["past_key_values"] = []
for _ in range(self._config.num_layers):
ordered_inputs["past_key_values"].append(
(
torch.zeros(decoder_shape),
torch.zeros(decoder_shape),
torch.zeros(encoder_shape),
torch.zeros(encoder_shape),
)
)
return ordered_inputs
@staticmethod
def flatten_output_collection_property(name: str, field: Iterable[Any]) -> Dict[str, Any]:
if name in ["present", "past_key_values"]:
flatten_output = {}
for idx, t in enumerate(field):
flatten_output[f"{name}.{idx}.decoder.key"] = t[0]
flatten_output[f"{name}.{idx}.decoder.value"] = t[1]
flatten_output[f"{name}.{idx}.encoder.key"] = t[2]
flatten_output[f"{name}.{idx}.encoder.value"] = t[3]
return flatten_output
return super().flatten_output_collection_property(name, field)
|
the-stack_0_5008 | import os
import sys
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
requires = ['opencv-python', 'numpy', 'gym']
setup(name='Flappy_Bird_with_Segmentation',
version='1.0',
description='Flappy bird environment with ground truth segmentation',
author='Xiangyu Chen',
author_email='[email protected]',
url='https://github.com/cxy1997/Flappy-Bird-with-Segmentation/',
keywords='Flappy Bird',
packages=find_packages(),
license='LICENSE',
install_requires=requires) |
the-stack_0_5009 | #!/usr/bin/env python
# coding: utf-8
import argparse
import logging
import os
import sys
import time
import numpy as np
import pandas as pd
import scanpy as sc
import torch
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from torch import nn, optim
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader, TensorDataset
import DaNN.mmd as mmd
import scanpypip.preprocessing as pp
import trainers as t
import utils as ut
from models import (AEBase, DaNN, PretrainedPredictor,
PretrainedVAEPredictor, VAEBase)
DATA_MAP={
"GSE117872":"data/GSE117872/GSE117872_good_Data_TPM.txt",
"GSE117309":'data/GSE117309/filtered_gene_bc_matrices_HBCx-22/hg19/',
"GSE117309_TAMR":'data/GSE117309/filtered_gene_bc_matrices_HBCx22-TAMR/hg19/',
"GSE121107":'data/GSE121107/GSM3426289_untreated_out_gene_exon_tagged.dge.txt',
"GSE121107_1H":'data/GSE121107/GSM3426290_entinostat_1hr_out_gene_exon_tagged.dge.txt',
"GSE121107_6H":'data/GSE121107/GSM3426291_entinostat_6hr_out_gene_exon_tagged.dge.txt',
"GSE111014":'data/GSE111014/',
"GSE110894":"data/GSE110894/GSE110894.csv",
"GSE122843":"data/GSE122843/GSE122843.txt",
"GSE112274":"data/GSE112274/GSE112274_cell_gene_FPKM.csv",
"GSE116237":"data/GSE116237/GSE116237_bulkRNAseq_expressionMatrix.txt",
"GSE108383":"data/GSE108383/GSE108383_Melanoma_fluidigm.txt",
"GSE140440":"data/GSE140440/GSE140440.csv",
"GSE129730":"data/GSE129730/GSE129730.h5ad",
"GSE149383":"data/GSE149383/erl_total_data_2K.csv",
"GSE110894_small":"data/GSE110894/GSE110894_small.h5ad"
}
def run_main(args):
################################################# START SECTION OF LOADING PARAMETERS #################################################
# Read parameters
t0 = time.time()
epochs = args.epochs
dim_au_out = args.bottleneck #8, 16, 32, 64, 128, 256,512
na = args.missing_value
if args.sc_data in DATA_MAP:
data_path = DATA_MAP[args.sc_data]
else:
data_path = args.sc_data
test_size = args.test_size
select_drug = args.drug
freeze = args.freeze_pretrain
valid_size = args.valid_size
g_disperson = args.var_genes_disp
min_n_genes = args.min_n_genes
max_n_genes = args.max_n_genes
source_model_path = args.bulk_model_path
target_model_path = args.sc_model_path
log_path = args.logging_file
batch_size = args.batch_size
encoder_hdims = args.bulk_h_dims.split(",")
encoder_hdims = list(map(int, encoder_hdims))
source_data_path = args.bulk_data
pretrain = args.pretrain
data_name = args.sc_data
label_path = args.label
reduce_model = args.dimreduce
predict_hdims = args.predictor_h_dims.split(",")
predict_hdims = list(map(int, predict_hdims))
leiden_res = args.cluster_res
load_model = bool(args.load_sc_model)
# Misc
now=time.strftime("%Y-%m-%d-%H-%M-%S")
# Initialize logging and std out
out_path = log_path+now+".err"
log_path = log_path+now+".log"
out=open(out_path,"w")
sys.stderr=out
    # Logging information
logging.basicConfig(level=logging.INFO,
filename=log_path,
filemode='a',
format=
'%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'
)
logging.getLogger('matplotlib.font_manager').disabled = True
logging.info(args)
logging.info("Start at " + str(t0))
# Save arguments
args_df = ut.save_arguments(args,now)
################################################# END SECTION OF LOADING PARAMETERS #################################################
################################################# START SECTION OF SINGLE CELL DATA REPROCESSING #################################################
# Load data and preprocessing
adata = pp.read_sc_file(data_path)
if data_name == 'GSE117872':
adata = ut.specific_process(adata,dataname=data_name,select_origin=args.batch_id)
elif data_name =='GSE122843':
adata = ut.specific_process(adata,dataname=data_name)
elif data_name =='GSE110894':
adata = ut.specific_process(adata,dataname=data_name)
elif data_name =='GSE112274':
adata = ut.specific_process(adata,dataname=data_name)
elif data_name =='GSE116237':
adata = ut.specific_process(adata,dataname=data_name)
elif data_name =='GSE108383':
adata = ut.specific_process(adata,dataname=data_name)
elif data_name =='GSE140440':
adata = ut.specific_process(adata,dataname=data_name)
elif data_name =='GSE129730':
adata = ut.specific_process(adata,dataname=data_name)
elif data_name =='GSE149383':
adata = ut.specific_process(adata,dataname=data_name)
else:
adata=adata
sc.pp.filter_cells(adata, min_genes=200)
sc.pp.filter_genes(adata, min_cells=3)
adata = pp.cal_ncount_ngenes(adata)
#Preprocess data by filtering
if data_name not in ['GSE112274','GSE140440']:
adata = pp.receipe_my(adata,l_n_genes=min_n_genes,r_n_genes=max_n_genes,filter_mincells=args.min_c,
filter_mingenes=args.min_g,normalize=True,log=True)
else:
adata = pp.receipe_my(adata,l_n_genes=min_n_genes,r_n_genes=max_n_genes,filter_mincells=args.min_c,percent_mito = args.percent_mito,
filter_mingenes=args.min_g,normalize=True,log=True)
# Select highly variable genes
sc.pp.highly_variable_genes(adata,min_disp=g_disperson,max_disp=np.inf,max_mean=6)
adata.raw = adata
adata = adata[:, adata.var.highly_variable]
    # Preprocess data if a specific process is required
data=adata.X
# PCA
# Generate neighbor graph
sc.tl.pca(adata,svd_solver='arpack')
sc.pp.neighbors(adata, n_neighbors=10)
# Generate cluster labels
sc.tl.leiden(adata,resolution=leiden_res)
sc.tl.umap(adata)
adata.obs['leiden_origin']= adata.obs['leiden']
adata.obsm['X_umap_origin']= adata.obsm['X_umap']
data_c = adata.obs['leiden'].astype("long").to_list()
################################################# END SECTION OF SINGLE CELL DATA REPROCESSING #################################################
################################################# START SECTION OF LOADING SC DATA TO THE TENSORS #################################################
    # Prepare to normalize and split target data
mmscaler = preprocessing.MinMaxScaler()
try:
data = mmscaler.fit_transform(data)
except:
        logging.warning("Scaling failed on sparse input; converting to a dense matrix")
# Process sparse data
data = data.todense()
data = mmscaler.fit_transform(data)
# Split data to train and valid set
# Along with the leiden conditions for CVAE propose
Xtarget_train, Xtarget_valid, Ctarget_train, Ctarget_valid = train_test_split(data,data_c, test_size=valid_size, random_state=42)
# Select the device of gpu
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Assuming that we are on a CUDA machine, this should print a CUDA device:
logging.info(device)
torch.cuda.set_device(device)
# Construct datasets and data loaders
Xtarget_trainTensor = torch.FloatTensor(Xtarget_train).to(device)
Xtarget_validTensor = torch.FloatTensor(Xtarget_valid).to(device)
# Use leiden label if CVAE is applied
Ctarget_trainTensor = torch.LongTensor(Ctarget_train).to(device)
Ctarget_validTensor = torch.LongTensor(Ctarget_valid).to(device)
X_allTensor = torch.FloatTensor(data).to(device)
C_allTensor = torch.LongTensor(data_c).to(device)
train_dataset = TensorDataset(Xtarget_trainTensor, Ctarget_trainTensor)
valid_dataset = TensorDataset(Xtarget_validTensor, Ctarget_validTensor)
Xtarget_trainDataLoader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
Xtarget_validDataLoader = DataLoader(dataset=valid_dataset, batch_size=batch_size, shuffle=True)
dataloaders_pretrain = {'train':Xtarget_trainDataLoader,'val':Xtarget_validDataLoader}
################################################# START SECTION OF LOADING SC DATA TO THE TENSORS #################################################
################################################# START SECTION OF LOADING BULK DATA #################################################
# Read source data
data_r=pd.read_csv(source_data_path,index_col=0)
label_r=pd.read_csv(label_path,index_col=0)
label_r=label_r.fillna(na)
# Extract labels
selected_idx = label_r.loc[:,select_drug]!=na
label = label_r.loc[selected_idx.index,select_drug]
data_r = data_r.loc[selected_idx.index,:]
label = label.values.reshape(-1,1)
le = preprocessing.LabelEncoder()
label = le.fit_transform(label)
dim_model_out = 2
# Process source data
mmscaler = preprocessing.MinMaxScaler()
source_data = mmscaler.fit_transform(data_r)
# Split source data
Xsource_train_all, Xsource_test, Ysource_train_all, Ysource_test = train_test_split(source_data,label, test_size=test_size, random_state=42)
Xsource_train, Xsource_valid, Ysource_train, Ysource_valid = train_test_split(Xsource_train_all,Ysource_train_all, test_size=valid_size, random_state=42)
# Transform source data
# Construct datasets and data loaders
Xsource_trainTensor = torch.FloatTensor(Xsource_train).to(device)
Xsource_validTensor = torch.FloatTensor(Xsource_valid).to(device)
Ysource_trainTensor = torch.LongTensor(Ysource_train).to(device)
Ysource_validTensor = torch.LongTensor(Ysource_valid).to(device)
sourcetrain_dataset = TensorDataset(Xsource_trainTensor, Ysource_trainTensor)
sourcevalid_dataset = TensorDataset(Xsource_validTensor, Ysource_validTensor)
Xsource_trainDataLoader = DataLoader(dataset=sourcetrain_dataset, batch_size=batch_size, shuffle=True)
Xsource_validDataLoader = DataLoader(dataset=sourcevalid_dataset, batch_size=batch_size, shuffle=True)
dataloaders_source = {'train':Xsource_trainDataLoader,'val':Xsource_validDataLoader}
################################################# END SECTION OF LOADING BULK DATA #################################################
    ################################################# START SECTION OF MODEL CONSTRUCTION #################################################
# Construct target encoder
if reduce_model == "AE":
encoder = AEBase(input_dim=data.shape[1],latent_dim=dim_au_out,h_dims=encoder_hdims)
loss_function_e = nn.MSELoss()
elif reduce_model == "VAE":
encoder = VAEBase(input_dim=data.shape[1],latent_dim=dim_au_out,h_dims=encoder_hdims)
if torch.cuda.is_available():
encoder.cuda()
logging.info("Target encoder structure is: ")
logging.info(encoder)
encoder.to(device)
optimizer_e = optim.Adam(encoder.parameters(), lr=1e-2)
loss_function_e = nn.MSELoss()
exp_lr_scheduler_e = lr_scheduler.ReduceLROnPlateau(optimizer_e)
dim_model_out = 2
# Load AE model
if reduce_model == "AE":
source_model = PretrainedPredictor(input_dim=Xsource_train.shape[1],latent_dim=dim_au_out,h_dims=encoder_hdims,
hidden_dims_predictor=predict_hdims,output_dim=dim_model_out,
pretrained_weights=None,freezed=freeze)
source_model.load_state_dict(torch.load(source_model_path))
source_encoder = source_model
# Load VAE model
elif reduce_model in ["VAE"]:
source_model = PretrainedVAEPredictor(input_dim=Xsource_train.shape[1],latent_dim=dim_au_out,h_dims=encoder_hdims,
hidden_dims_predictor=predict_hdims,output_dim=dim_model_out,
pretrained_weights=None,freezed=freeze,z_reparam=bool(args.VAErepram))
source_model.load_state_dict(torch.load(source_model_path))
source_encoder = source_model
logging.info("Load pretrained source model from: "+source_model_path)
source_encoder.to(device)
    ################################################# END SECTION OF MODEL CONSTRUCTION #################################################
    ################################################# START SECTION OF SC MODEL PRETRAINING #################################################
# Pretrain target encoder
    # Pretrain using an autoencoder if pretraining is not disabled
    if(str(pretrain)!='0'):
        # Pretrain the target encoder if there is no stored model file on disk
train_flag = True
pretrain = str(pretrain)
if(os.path.exists(pretrain)==True):
try:
encoder.load_state_dict(torch.load(pretrain))
logging.info("Load pretrained target encoder from "+pretrain)
train_flag = False
except:
logging.warning("Loading failed, procceed to re-train model")
if train_flag == True:
if reduce_model == "AE":
encoder,loss_report_en = t.train_AE_model(net=encoder,data_loaders=dataloaders_pretrain,
optimizer=optimizer_e,loss_function=loss_function_e,
n_epochs=epochs,scheduler=exp_lr_scheduler_e,save_path=pretrain)
elif reduce_model == "VAE":
encoder,loss_report_en = t.train_VAE_model(net=encoder,data_loaders=dataloaders_pretrain,
optimizer=optimizer_e,
n_epochs=epochs,scheduler=exp_lr_scheduler_e,save_path=pretrain)
logging.info("Pretrained finished")
# Before Transfer learning, we test the performance of using no transfer performance:
# Use vae result to predict
embeddings_pretrain = encoder.encode(X_allTensor)
pretrain_prob_prediction = source_model.predict(embeddings_pretrain).detach().cpu().numpy()
adata.obs["sens_preds_pret"] = pretrain_prob_prediction[:,1]
adata.obs["sens_label_pret"] = pretrain_prob_prediction.argmax(axis=1)
# Add embeddings to the adata object
embeddings_pretrain = embeddings_pretrain.detach().cpu().numpy()
adata.obsm["X_pre"] = embeddings_pretrain
    ################################################# END SECTION OF SC MODEL PRETRAINING #################################################
################################################# START SECTION OF TRANSFER LEARNING TRAINING #################################################
# Using ADDA transfer learning
# DaNN model
# Set predictor loss
loss_d = nn.CrossEntropyLoss()
optimizer_d = optim.Adam(encoder.parameters(), lr=1e-2)
exp_lr_scheduler_d = lr_scheduler.ReduceLROnPlateau(optimizer_d)
# Set DaNN model
DaNN_model = DaNN(source_model=source_encoder,target_model=encoder)
DaNN_model.to(device)
def loss(x,y,GAMMA=args.mmd_GAMMA):
result = mmd.mmd_loss(x,y,GAMMA)
return result
loss_disrtibution = loss
    # Train DaNN model
DaNN_model, report_ = t.train_DaNN_model(DaNN_model,
dataloaders_source,dataloaders_pretrain,
# Should here be all optimizer d?
optimizer_d, loss_d,
epochs,exp_lr_scheduler_d,
dist_loss=loss_disrtibution,
load=load_model,
weight = args.mmd_weight,
save_path=target_model_path+"_DaNN.pkl")
encoder = DaNN_model.target_model
source_model = DaNN_model.source_model
logging.info("Transfer DaNN finished")
    ################################################# END SECTION OF TRANSFER LEARNING TRAINING #################################################
################################################# START SECTION OF PREPROCESSING FEATURES #################################################
# Extract feature embeddings
# Extract prediction probabilities
embedding_tensors = encoder.encode(X_allTensor)
prediction_tensors = source_model.predictor(embedding_tensors)
embeddings = embedding_tensors.detach().cpu().numpy()
predictions = prediction_tensors.detach().cpu().numpy()
    # Transform prediction probabilities to 0-1 labels
adata.obs["sens_preds"] = predictions[:,1]
adata.obs["sens_label"] = predictions.argmax(axis=1)
adata.obs["sens_label"] = adata.obs["sens_label"].astype('category')
adata.obs["rest_preds"] = predictions[:,0]
################################################# END SECTION OF PREPROCESSING FEATURES #################################################
################################################# START SECTION OF ANALYSIS AND POST PROCESSING #################################################
################################################# END SECTION OF ANALYSIS AND POST PROCESSING #################################################
################################################# START SECTION OF ANALYSIS FOR BULK DATA #################################################
# Save adata
adata.write("saved/adata/"+data_name+now+".h5ad")
################################################# END SECTION OF ANALYSIS FOR BULK DATA #################################################
t1 = time.time()
logging.info("End at " + str(t1)+", takes :" )
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# data
parser.add_argument('--bulk_data', type=str, default='data/GDSC2_expression.csv',help='Path of the bulk RNA-Seq expression profile')
parser.add_argument('--label', type=str, default='data/GDSC2_label_9drugs_binary.csv',help='Path of the processed bulk RNA-Seq drug screening annotation')
parser.add_argument('--sc_data', type=str, default="GSE110894",help='Accession id for testing data, only support pre-built data.')
parser.add_argument('--drug', type=str, default='Cisplatin',help='Name of the selected drug, should be a column name in the input file of --label')
parser.add_argument('--missing_value', type=int, default=1,help='The value filled in the missing entry in the drug screening annotation, default: 1')
    parser.add_argument('--test_size', type=float, default=0.2,help='Size of the test set for the bulk model training, default: 0.2')
    parser.add_argument('--valid_size', type=float, default=0.2,help='Size of the validation set for the bulk model training, default: 0.2')
parser.add_argument('--var_genes_disp', type=float, default=0,help='Dispersion of highly variable genes selection when pre-processing the data. \
                        If None, all genes will be selected. Default: None')
    parser.add_argument('--min_n_genes', type=int, default=0,help="Minimum number of genes for a cell that have UMI counts >1 for filtering purposes, default: 0 ")
    parser.add_argument('--max_n_genes', type=int, default=20000,help="Maximum number of genes for a cell that have UMI counts >1 for filtering purposes, default: 20000 ")
    parser.add_argument('--min_g', type=int, default=200,help="Minimum number of genes for a cell >1 for filtering purposes, default: 200")
    parser.add_argument('--min_c', type=int, default=3,help="Minimum number of cells that each gene must be expressed in for filtering purposes, default: 3")
    parser.add_argument('--percent_mito', type=int, default=100,help="Percentage of expression level of mitochondrial genes of a cell for filtering purposes, default: 100")
parser.add_argument('--cluster_res', type=float, default=0.3,help="Resolution of Leiden clustering of scRNA-Seq data, default: 0.3")
parser.add_argument('--mmd_weight', type=float, default=0.25,help="Weight of the MMD loss of the transfer learning, default: 0.25")
parser.add_argument('--mmd_GAMMA', type=int, default=1000,help="Gamma parameter in the kernel of the MMD loss of the transfer learning, default: 1000")
# train
parser.add_argument('--bulk_model_path','-s', type=str, default='saved/models/predictor_bulk.pkl',help='Path of the trained predictor in the bulk level')
parser.add_argument('--sc_model_path', '-p', type=str, default='saved/models/predictor_sc_',help='Path (prefix) of the trained predictor in the single cell level')
parser.add_argument('--pretrain', type=str, default='saved/models/encoder_sc.pkl',help='Path of the pre-trained encoder in the single-cell level')
parser.add_argument('--lr', type=float, default=1e-2,help='Learning rate of model training. Default: 1e-2')
    parser.add_argument('--epochs', type=int, default=500,help='Number of epochs for training. Default: 500')
parser.add_argument('--batch_size', type=int, default=200,help='Number of batch size when training. Default: 200')
    parser.add_argument('--bottleneck', type=int, default=512,help='Size of the bottleneck layer of the model. Default: 512')
parser.add_argument('--dimreduce', type=str, default="AE",help='Encoder model type. Can be AE or VAE. Default: AE')
    parser.add_argument('--freeze_pretrain', type=int,default=0,help='Fix the parameters in the pretrained model. 0: do not freeze, 1: freeze. Default: 0')
    parser.add_argument('--bulk_h_dims', type=str, default="512,256",help='Shape of the source encoder. Each number represents the number of neurons in a layer. \
                        Layers are separated by a comma. Default: 512,256')
    parser.add_argument('--sc_h_dims', type=str, default="512,256",help='Shape of the encoder. Each number represents the number of neurons in a layer. \
                        Layers are separated by a comma. Default: 512,256')
    parser.add_argument('--predictor_h_dims', type=str, default="16,8",help='Shape of the predictor. Each number represents the number of neurons in a layer. \
                        Layers are separated by a comma. Default: 16,8')
parser.add_argument('--VAErepram', type=int, default=1)
parser.add_argument('--batch_id', type=str, default="HN137",help="Batch id only for testing")
parser.add_argument('--load_sc_model', type=int, default=0,help='Load a trained model or not. 0: do not load, 1: load. Default: 0')
# mis
parser.add_argument('--logging_file', '-l', type=str, default='saved/logs/transfer_',help='Path of training log')
#
args, unknown = parser.parse_known_args()
run_main(args)
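
# Example invocation (illustrative only; the script filename is not shown in
# this source and the arguments simply echo the argparse defaults above, so
# adapt paths and accession ids to your own data):
#
#   python <this_script>.py --sc_data GSE110894 --drug Cisplatin \
#       --dimreduce AE --bulk_h_dims 512,256 --predictor_h_dims 16,8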
|
the-stack_0_5010 | from cellpose import io, models, metrics, plot
from pathlib import Path
from subprocess import check_output, STDOUT
import os, shutil
def test_class_train(data_dir, image_names):
train_dir = str(data_dir.joinpath('2D').joinpath('train'))
model_dir = str(data_dir.joinpath('2D').joinpath('train').joinpath('models'))
shutil.rmtree(model_dir, ignore_errors=True)
output = io.load_train_test_data(train_dir, mask_filter='_cyto_masks')
images, labels, image_names, test_images, test_labels, image_names_test = output
model = models.CellposeModel(pretrained_model=None, diam_mean=30)
cpmodel_path = model.train(images, labels, train_files=image_names,
test_data=test_images, test_labels=test_labels, test_files=image_names_test,
channels=[2,1], save_path=train_dir, n_epochs=10)
print('>>>> model trained and saved to %s'%cpmodel_path)
def test_cli_train(data_dir, image_names):
train_dir = str(data_dir.joinpath('2D').joinpath('train'))
model_dir = str(data_dir.joinpath('2D').joinpath('train').joinpath('models'))
shutil.rmtree(model_dir, ignore_errors=True)
cmd = 'python -m cellpose --train --train_size --n_epochs 10 --dir %s --mask_filter _cyto_masks --pretrained_model cyto --chan 2 --chan2 1 --diameter 30'%train_dir
try:
cmd_stdout = check_output(cmd, stderr=STDOUT, shell=True).decode()
except Exception as e:
print(e)
raise ValueError(e)
|
the-stack_0_5011 | import torch
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
class Keypoints(object):
def __init__(self, keypoints, size, mode=None):
# FIXME remove check once we have better integration with device
# in my version this would consistently return a CPU tensor
device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device('cpu')
keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device)
num_keypoints = keypoints.shape[0]
if num_keypoints:
keypoints = keypoints.view(num_keypoints, -1, 3)
# TODO should I split them?
# self.visibility = keypoints[..., 2]
self.keypoints = keypoints# [..., :2]
self.size = size
self.mode = mode
self.extra_fields = {}
def crop(self, box):
raise NotImplementedError()
def resize(self, size, *args, **kwargs):
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(size, self.size))
ratio_w, ratio_h = ratios
resized_data = self.keypoints.clone()
resized_data[..., 0] *= ratio_w
resized_data[..., 1] *= ratio_h
keypoints = type(self)(resized_data, size, self.mode)
for k, v in self.extra_fields.items():
keypoints.add_field(k, v)
# print("keypoints resize!!")
return keypoints
def transpose(self, method):
if method not in (FLIP_LEFT_RIGHT,):
raise NotImplementedError(
"Only FLIP_LEFT_RIGHT implemented")
# flip_inds = type(self).FLIP_INDS
# flipped_data = self.keypoints[:, flip_inds]
# width = self.size[0]
# TO_REMOVE = 1
# # Flip x coordinates
# flipped_data[..., 0] = width - flipped_data[..., 0] - TO_REMOVE
# # Maintain COCO convention that if visibility == 0, then x, y = 0
# inds = flipped_data[..., 2] == 0
# flipped_data[inds] = 0
flipped_data=self.keypoints
keypoints = type(self)(flipped_data, self.size, self.mode)
for k, v in self.extra_fields.items():
keypoints.add_field(k, v)
return keypoints
def to(self, *args, **kwargs):
keypoints = type(self)(self.keypoints.to(*args, **kwargs), self.size, self.mode)
for k, v in self.extra_fields.items():
if hasattr(v, "to"):
v = v.to(*args, **kwargs)
keypoints.add_field(k, v)
return keypoints
def __getitem__(self, item):
keypoints = type(self)(self.keypoints[item], self.size, self.mode)
for k, v in self.extra_fields.items():
keypoints.add_field(k, v[item])
return keypoints
def add_field(self, field, field_data):
self.extra_fields[field] = field_data
def get_field(self, field):
return self.extra_fields[field]
def __repr__(self):
s = self.__class__.__name__ + '('
s += 'num_instances={}, '.format(len(self.keypoints))
s += 'image_width={}, '.format(self.size[0])
s += 'image_height={})'.format(self.size[1])
return s
def _create_flip_indices(names, flip_map):
full_flip_map = flip_map.copy()
full_flip_map.update({v: k for k, v in flip_map.items()})
flipped_names = [i if i not in full_flip_map else full_flip_map[i] for i in names]
flip_indices = [names.index(i) for i in flipped_names]
return torch.tensor(flip_indices)
class PersonKeypoints(Keypoints):
NAMES = [
'nose',
'left_eye',
'right_eye',
'left_ear',
'right_ear',
'left_shoulder',
'right_shoulder',
'left_elbow',
'right_elbow',
'left_wrist',
'right_wrist',
'left_hip',
'right_hip',
'left_knee',
'right_knee',
'left_ankle',
'right_ankle'
]
FLIP_MAP = {
'left_eye': 'right_eye',
'left_ear': 'right_ear',
'left_shoulder': 'right_shoulder',
'left_elbow': 'right_elbow',
'left_wrist': 'right_wrist',
'left_hip': 'right_hip',
'left_knee': 'right_knee',
'left_ankle': 'right_ankle'
}
# TODO this doesn't look great
PersonKeypoints.FLIP_INDS = _create_flip_indices(PersonKeypoints.NAMES, PersonKeypoints.FLIP_MAP)
def kp_connections(keypoints):
kp_lines = [
[keypoints.index('left_eye'), keypoints.index('right_eye')],
[keypoints.index('left_eye'), keypoints.index('nose')],
[keypoints.index('right_eye'), keypoints.index('nose')],
[keypoints.index('right_eye'), keypoints.index('right_ear')],
[keypoints.index('left_eye'), keypoints.index('left_ear')],
[keypoints.index('right_shoulder'), keypoints.index('right_elbow')],
[keypoints.index('right_elbow'), keypoints.index('right_wrist')],
[keypoints.index('left_shoulder'), keypoints.index('left_elbow')],
[keypoints.index('left_elbow'), keypoints.index('left_wrist')],
[keypoints.index('right_hip'), keypoints.index('right_knee')],
[keypoints.index('right_knee'), keypoints.index('right_ankle')],
[keypoints.index('left_hip'), keypoints.index('left_knee')],
[keypoints.index('left_knee'), keypoints.index('left_ankle')],
[keypoints.index('right_shoulder'), keypoints.index('left_shoulder')],
[keypoints.index('right_hip'), keypoints.index('left_hip')],
]
return kp_lines
PersonKeypoints.CONNECTIONS = kp_connections(PersonKeypoints.NAMES)
# TODO make this nicer, this is a direct translation from C2 (but removing the inner loop)
def keypoints_to_heat_map(keypoints, rois, heatmap_size):
if rois.numel() == 0:
return rois.new().long(), rois.new().long()
offset_x = rois[:, 0]
offset_y = rois[:, 1]
scale_x = heatmap_size / (rois[:, 2] - rois[:, 0])
scale_y = heatmap_size / (rois[:, 3] - rois[:, 1])
offset_x = offset_x[:, None]
offset_y = offset_y[:, None]
scale_x = scale_x[:, None]
scale_y = scale_y[:, None]
x = keypoints[..., 0]
y = keypoints[..., 1]
x_boundary_inds = x == rois[:, 2][:, None]
y_boundary_inds = y == rois[:, 3][:, None]
x = (x - offset_x) * scale_x
x = x.floor().long()
y = (y - offset_y) * scale_y
y = y.floor().long()
x[x_boundary_inds] = heatmap_size - 1
y[y_boundary_inds] = heatmap_size - 1
valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size)
vis = keypoints[..., 2] > 0
valid = (valid_loc & vis).long()
lin_ind = y * heatmap_size + x
heatmaps = lin_ind * valid
return heatmaps, valid
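

# Illustrative sketch, not part of the original repo: a tiny worked example of
# keypoints_to_heat_map on a single ROI with two keypoints. The coordinates are
# invented for the example only.
def _example_keypoints_to_heat_map():
    # One box spanning x in [0, 10) and y in [0, 20), heatmap of size 4.
    rois = torch.tensor([[0.0, 0.0, 10.0, 20.0]])
    # Two keypoints: one visible at (5, 10), one marked not visible.
    keypoints = torch.tensor([[[5.0, 10.0, 2.0], [1.0, 1.0, 0.0]]])
    heatmaps, valid = keypoints_to_heat_map(keypoints, rois, heatmap_size=4)
    # The visible keypoint lands in bin (x=2, y=2) -> linear index 2 * 4 + 2 = 10;
    # the invisible keypoint is masked out via `valid`.
    return heatmaps, valid
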
|
the-stack_0_5012 | """
make_bmap.py
Creates an image that can be used as a bump mapping texture.
Mahesh Venkitachalam
shader.in
"""
import numpy as np
from PIL import Image
from math import sqrt
def main():
NX, NY = 256, 256
nmap = np.zeros([NX, NY, 3], np.float32)
r = 32.0
rsq = r*r
centers = [(64, 64), (192, 64), (64, 192), (192, 192)]
for i in range(NX):
for j in range(NY):
inside = False
for C in centers:
x = (i-C[0])
y = (j-C[1])
if x*x + y*y < rsq :
nmap[i][j][0] = x / r
nmap[i][j][1] = y / r
nmap[i][j][2] = sqrt(rsq - (x*x + y*y))/ r
inside = True
if not inside:
nmap[i][j][0] = 0.0
nmap[i][j][1] = 0.0
nmap[i][j][2] = 1.0
# [-1, 1] to [0, 255]
nmap = 255.0*0.5*(nmap + 1.0)
img = np.array(nmap, np.uint8)
img = Image.fromarray(img)
img.save("bmap.png")
# call main
if __name__ == '__main__':
main()
|
the-stack_0_5014 | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.9 Python SDK
Pure Storage FlashBlade REST 1.9 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.9
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CertificateGroupUse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
#BEGIN_CUSTOM
# IR-51527: Prevent Pytest from attempting to collect this class based on name.
__test__ = False
#END_CUSTOM
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'name': 'str',
'use': 'FixedReferenceWithRemote'
}
attribute_map = {
'id': 'id',
'name': 'name',
'use': 'use'
}
def __init__(self, id=None, name=None, use=None): # noqa: E501
"""CertificateGroupUse - a model defined in Swagger""" # noqa: E501
self._id = None
self._name = None
self._use = None
self.discriminator = None
if id is not None:
self.id = id
if name is not None:
self.name = name
if use is not None:
self.use = use
@property
def id(self):
"""Gets the id of this CertificateGroupUse. # noqa: E501
A non-modifiable, globally unique ID chosen by the system. # noqa: E501
:return: The id of this CertificateGroupUse. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this CertificateGroupUse.
A non-modifiable, globally unique ID chosen by the system. # noqa: E501
:param id: The id of this CertificateGroupUse. # noqa: E501
:type: str
"""
self._id = id
@property
def name(self):
"""Gets the name of this CertificateGroupUse. # noqa: E501
The name of the object (e.g., a file system or snapshot). # noqa: E501
:return: The name of this CertificateGroupUse. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this CertificateGroupUse.
The name of the object (e.g., a file system or snapshot). # noqa: E501
:param name: The name of this CertificateGroupUse. # noqa: E501
:type: str
"""
self._name = name
@property
def use(self):
"""Gets the use of this CertificateGroupUse. # noqa: E501
A reference to an object using this certificate group. # noqa: E501
:return: The use of this CertificateGroupUse. # noqa: E501
:rtype: FixedReferenceWithRemote
"""
return self._use
@use.setter
def use(self, use):
"""Sets the use of this CertificateGroupUse.
A reference to an object using this certificate group. # noqa: E501
:param use: The use of this CertificateGroupUse. # noqa: E501
:type: FixedReferenceWithRemote
"""
self._use = use
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CertificateGroupUse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CertificateGroupUse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_0_5015 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# MicroPython documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 21 11:42:03 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# The members of the html_context dict are available inside topindex.html
micropy_version = os.getenv('MICROPY_VERSION') or 'latest'
micropy_all_versions = (os.getenv('MICROPY_ALL_VERSIONS') or 'latest').split(',')
url_pattern = '%s/en/%%s' % (os.getenv('MICROPY_URL_PREFIX') or '/',)
html_context = {
'cur_version':micropy_version,
'all_versions':[
(ver, url_pattern % ver) for ver in micropy_all_versions
],
'downloads':[
('PDF', url_pattern % micropy_version + '/micropython-docs.pdf'),
],
}
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'MicroPython'
copyright = '- The MicroPython Documentation is Copyright © 2014-2021, Damien P. George, Paul Sokolovsky, and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# We don't follow "The short X.Y version" vs "The full version, including alpha/beta/rc tags"
# breakdown, so use the same version identifier for both to avoid confusion.
version = release = '1.17'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build', '.venv']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Global include files. Sphinx docs suggest using rst_epilog in preference
# of rst_prolog, so we follow. Absolute paths below mean "from the base
# of the doctree".
rst_epilog = """
.. include:: /templates/replace.inc
"""
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
except:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '../../logo/trans-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%d %b %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {"index": "topindex.html"}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MicroPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Include 3 levels of headers in PDF ToC
'preamble': r'\setcounter{tocdepth}{2}',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MicroPython.tex', 'MicroPython Documentation',
'Damien P. George, Paul Sokolovsky, and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'micropython', 'MicroPython Documentation',
['Damien P. George, Paul Sokolovsky, and contributors'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MicroPython', 'MicroPython Documentation',
'Damien P. George, Paul Sokolovsky, and contributors', 'MicroPython', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/3.5', None)}
|
the-stack_0_5018 | from concurrent.futures import Future
from functools import wraps
from typing import Callable, Optional, TypeVar, Union
CallableReturnsInt = Callable[..., int]
IntOrBool = TypeVar(
"IntOrBool",
int,
bool,
)
CallableReturnsIntOrBool = TypeVar(
"CallableReturnsIntOrBool",
Callable[..., int],
Callable[..., bool],
)
IntCompatible = TypeVar(
"IntCompatible",
int,
Callable[..., int],
Future,
)
BoolCompatible = TypeVar(
"BoolCompatible",
int,
bool,
Callable[..., int],
Callable[..., bool],
Future,
)
class KiwoomOpenApiPlusError(Exception):
def __init__(self, message: Optional[str] = None):
if message is not None:
super().__init__(message)
else:
super().__init__()
self._message = message
@property
def message(self):
return self._message
@classmethod
def try_or_raise(
cls,
arg: IntCompatible,
message: Optional[str] = None,
except_callback: Optional[Callable] = None,
) -> IntCompatible:
return KiwoomOpenApiPlusNegativeReturnCodeError.try_or_raise(
arg, message, except_callback
)
@classmethod
def try_or_raise_boolean(
cls,
arg: BoolCompatible,
message: str,
except_callback: Optional[Callable] = None,
) -> BoolCompatible:
return KiwoomOpenApiPlusBooleanReturnCodeError.try_or_raise(
arg, message, except_callback
)
@classmethod
def get_error_message_by_code(cls, code: int, default: Optional[str] = None):
return KiwoomOpenApiPlusNegativeReturnCodeError.get_error_message_by_code(
code, default
)
class KiwoomOpenApiPlusNegativeReturnCodeError(KiwoomOpenApiPlusError):
OP_ERR_NONE = 0
OP_ERR_FAIL = -10
OP_ERR_COND_NOTFOUND = -11
OP_ERR_COND_MISMATCH = -12
OP_ERR_COND_OVERFLOW = -13
OP_ERR_TR_FAIL = -22
OP_ERR_LOGIN = -100
OP_ERR_CONNECT = -101
OP_ERR_VERSION = -102
OP_ERR_FIREWALL = -103
OP_ERR_MEMORY = -104
OP_ERR_INPUT = -105
OP_ERR_SOCKET_CLOSED = -106
OP_ERR_SISE_OVERFLOW = -200
OP_ERR_RQ_STRUCT_FAIL = -201
OP_ERR_RQ_STRING_FAIL = -202
OP_ERR_NO_DATA = -203
OP_ERR_OVER_MAX_DATA = -204
OP_ERR_DATA_RCV_FAIL = -205
OP_ERR_OVER_MAX_FID = -206
OP_ERR_REAL_CANCEL = -207
OP_ERR_ORD_WRONG_INPUT = -300
OP_ERR_ORD_WRONG_ACCTNO = -301
OP_ERR_OTHER_ACC_USE = -302
OP_ERR_MIS_2BILL_EXC = -303
OP_ERR_MIS_5BILL_EXC = -304
OP_ERR_MIS_1PER_EXC = -305
OP_ERR_MIS_3PER_EXC = -306
OP_ERR_SEND_FAIL = -307
OP_ERR_ORD_OVERFLOW = -308
OP_ERR_ORD_OVERFLOW2 = -311
OP_ERR_MIS_300CNT_EXC = -309
OP_ERR_MIS_500CNT_EXC = -310
OP_ERR_ORD_WRONG_ACCTINFO = -340
OP_ERR_ORD_SYMCODE_EMPTY = -500
MSG_ERR_NONE = "정상처리"
MSG_ERR_FAIL = "실패"
MSG_ERR_COND_NOTFOUND = "조건번호 없음"
MSG_ERR_COND_MISMATCH = "조건번호와 조건식 틀림"
MSG_ERR_COND_OVERFLOW = "조건검색 조회요청 초과"
MSG_ERR_TR_FAIL = "전문 처리 실패"
MSG_ERR_LOGIN = "사용자정보 교환 실패"
MSG_ERR_CONNECT = "서버접속 실패"
MSG_ERR_VERSION = "버전처리 실패"
MSG_ERR_FIREWALL = "개인방화벽 실패"
MSG_ERR_MEMORY = "메모리보호 실패"
MSG_ERR_INPUT = "함수입력값 오류"
MSG_ERR_SOCKET_CLOSED = "통신 연결종료"
MSG_ERR_SISE_OVERFLOW = "시세조회 과부하"
MSG_ERR_RQ_STRUCT_FAIL = "전문작성 초기화 실패"
MSG_ERR_RQ_STRING_FAIL = "전문작성 입력값 오류"
MSG_ERR_NO_DATA = "데이터 없음"
MSG_ERR_OVER_MAX_DATA = "조회 가능한 종목수 초과"
MSG_ERR_DATA_RCV_FAIL = "데이터수신 실패"
MSG_ERR_OVER_MAX_FID = "조회 가능한 FID수 초과"
MSG_ERR_REAL_CANCEL = "실시간 해제 오류"
MSG_ERR_ORD_WRONG_INPUT = "입력값 오류"
MSG_ERR_ORD_WRONG_ACCTNO = "계좌 비밀번호 없음"
MSG_ERR_OTHER_ACC_USE = "타인계좌사용 오류"
MSG_ERR_MIS_2BILL_EXC = "주문가격이 20억원을 초과"
MSG_ERR_MIS_5BILL_EXC = "주문가격이 50억원을 초과"
MSG_ERR_MIS_1PER_EXC = "주문수량이 총발행주수의 1%초과오류"
MSG_ERR_MIS_3PER_EXC = "주문수량이 총발행주수의 3%초과오류"
MSG_ERR_SEND_FAIL = "주문전송 실패"
MSG_ERR_ORD_OVERFLOW = "주문전송 과부하"
MSG_ERR_ORD_OVERFLOW2 = "주문전송 과부하"
MSG_ERR_MIS_300CNT_EXC = "주문수량 300계약 초과"
MSG_ERR_MIS_500CNT_EXC = "주문수량 500계약 초과"
MSG_ERR_ORD_WRONG_ACCTINFO = "계좌정보없음"
MSG_ERR_ORD_SYMCODE_EMPTY = "종목코드없음"
ERROR_MESSAGE_BY_CODE = {
OP_ERR_NONE: MSG_ERR_NONE,
OP_ERR_FAIL: MSG_ERR_FAIL,
OP_ERR_COND_NOTFOUND: MSG_ERR_COND_NOTFOUND,
OP_ERR_COND_MISMATCH: MSG_ERR_COND_MISMATCH,
OP_ERR_COND_OVERFLOW: MSG_ERR_COND_OVERFLOW,
OP_ERR_TR_FAIL: MSG_ERR_TR_FAIL,
OP_ERR_LOGIN: MSG_ERR_LOGIN,
OP_ERR_CONNECT: MSG_ERR_CONNECT,
OP_ERR_VERSION: MSG_ERR_VERSION,
OP_ERR_FIREWALL: MSG_ERR_FIREWALL,
OP_ERR_MEMORY: MSG_ERR_MEMORY,
OP_ERR_INPUT: MSG_ERR_INPUT,
OP_ERR_SOCKET_CLOSED: MSG_ERR_SOCKET_CLOSED,
OP_ERR_SISE_OVERFLOW: MSG_ERR_SISE_OVERFLOW,
OP_ERR_RQ_STRUCT_FAIL: MSG_ERR_RQ_STRUCT_FAIL,
OP_ERR_RQ_STRING_FAIL: MSG_ERR_RQ_STRING_FAIL,
OP_ERR_NO_DATA: MSG_ERR_NO_DATA,
OP_ERR_OVER_MAX_DATA: MSG_ERR_OVER_MAX_DATA,
OP_ERR_DATA_RCV_FAIL: MSG_ERR_DATA_RCV_FAIL,
OP_ERR_OVER_MAX_FID: MSG_ERR_OVER_MAX_FID,
OP_ERR_REAL_CANCEL: MSG_ERR_REAL_CANCEL,
OP_ERR_ORD_WRONG_INPUT: MSG_ERR_ORD_WRONG_INPUT,
OP_ERR_ORD_WRONG_ACCTNO: MSG_ERR_ORD_WRONG_ACCTNO,
OP_ERR_OTHER_ACC_USE: MSG_ERR_OTHER_ACC_USE,
OP_ERR_MIS_2BILL_EXC: MSG_ERR_MIS_2BILL_EXC,
OP_ERR_MIS_5BILL_EXC: MSG_ERR_MIS_5BILL_EXC,
OP_ERR_MIS_1PER_EXC: MSG_ERR_MIS_1PER_EXC,
OP_ERR_MIS_3PER_EXC: MSG_ERR_MIS_3PER_EXC,
OP_ERR_SEND_FAIL: MSG_ERR_SEND_FAIL,
OP_ERR_ORD_OVERFLOW: MSG_ERR_ORD_OVERFLOW,
OP_ERR_ORD_OVERFLOW2: MSG_ERR_ORD_OVERFLOW2,
OP_ERR_MIS_300CNT_EXC: MSG_ERR_MIS_300CNT_EXC,
OP_ERR_MIS_500CNT_EXC: MSG_ERR_MIS_500CNT_EXC,
OP_ERR_ORD_WRONG_ACCTINFO: MSG_ERR_ORD_WRONG_ACCTINFO,
OP_ERR_ORD_SYMCODE_EMPTY: MSG_ERR_ORD_SYMCODE_EMPTY,
}
@classmethod
def get_error_message_by_code(cls, code: int, default: Optional[str] = None):
return cls.ERROR_MESSAGE_BY_CODE.get(code, default)
@classmethod
def check_code_or_raise(cls, code: int):
if code < 0:
raise cls(code)
return code
@classmethod
def wrap_to_check_code_or_raise(
cls, func: CallableReturnsInt
) -> CallableReturnsInt:
@wraps(func)
def wrapper(*args, **kwargs):
return cls.check_code_or_raise(func(*args, **kwargs))
return wrapper
@classmethod
def try_or_raise(
cls,
arg: IntCompatible,
message: Optional[str] = None,
except_callback: Optional[Callable] = None,
) -> IntCompatible:
if isinstance(arg, Future):
def callback(future):
exc = future.exception()
if exc:
if except_callback:
except_callback(exc)
else:
raise exc
result = future.result()
try:
cls.try_or_raise(result, message)
except cls as e:
if except_callback:
except_callback(e)
else:
raise
arg.add_done_callback(callback)
return arg
elif isinstance(arg, int):
return cls.check_code_or_raise(arg)
elif callable(arg):
return cls.wrap_to_check_code_or_raise(arg)
else:
raise TypeError(
"Expected 'int', 'callable' or 'Future' but %s found" % type(arg)
)
def __init__(self, code: int, message: Optional[str] = None):
if message is None:
message = self.get_error_message_by_code(code)
super().__init__(message)
self._code = code
self._message = message
def __str__(self):
return self._message
def __repr__(self):
return "{}({!r}, {!r})".format(
self.__class__.__name__, self._code, self._message
)
@property
def code(self):
return self._code
class KiwoomOpenApiPlusBooleanReturnCodeError(KiwoomOpenApiPlusError):
OP_ERR_SUCCESS = 1
OP_ERR_FAILURE = 0
@classmethod
def check_code_or_raise(
cls, code: IntOrBool, message: Optional[str] = None
) -> IntOrBool:
if not code:
raise cls(code, message)
return code
@classmethod
def wrap_to_check_code_or_raise(
cls, func: CallableReturnsIntOrBool, message: Optional[str] = None
) -> CallableReturnsIntOrBool:
@wraps(func)
def wrapper(*args, **kwargs):
return cls.check_code_or_raise(func(*args, **kwargs), message)
return wrapper
@classmethod
def try_or_raise(
cls,
arg: BoolCompatible,
message: Optional[str] = None,
except_callback: Optional[Callable] = None,
) -> BoolCompatible:
if isinstance(arg, Future):
def callback(future):
exc = future.exception()
if exc:
if except_callback:
except_callback(exc)
else:
raise exc
result = future.result()
try:
cls.try_or_raise(result, message)
except cls as e:
if except_callback:
except_callback(e)
else:
raise
arg.add_done_callback(callback)
return arg
elif isinstance(arg, (int, bool)):
return cls.check_code_or_raise(arg, message)
elif callable(arg):
return cls.wrap_to_check_code_or_raise(arg, message)
else:
raise TypeError(
"Expected 'int', 'bool', 'callable' or 'Future' but %s found"
% type(arg)
)
def __init__(self, code: Union[int, bool], message: Optional[str] = None):
super().__init__(message)
self._code = code
self._message = message
def __str__(self):
if self._message:
return self._message
else:
return self.__repr__()
def __repr__(self):
return "{}({!r}, {!r})".format(
self.__class__.__name__, self._code, self._message
)
@property
def code(self):
return self._code
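# Illustrative usage sketch (not part of the original module): _stub_call is a
# hypothetical callable that simply returns OP_ERR_NONE; it shows how
# try_or_raise wraps a callable so that a negative return code would raise
# KiwoomOpenApiPlusNegativeReturnCodeError with the mapped message.
if __name__ == "__main__":
    def _stub_call():
        return KiwoomOpenApiPlusNegativeReturnCodeError.OP_ERR_NONE
    checked = KiwoomOpenApiPlusError.try_or_raise(_stub_call)
    assert checked() == KiwoomOpenApiPlusNegativeReturnCodeError.OP_ERR_NONE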
|
the-stack_0_5019 | import pytest
from mixer.backend.django import mixer
from .. import forms
pytestmark = pytest.mark.django_db
class TestPostForm:
def test_form(self):
form = forms.PostForm(data={})
assert form.is_valid() is False, ('Should be invalid if no data is given')
data = {'body': 'Hello'}
form = forms.PostForm(data=data)
assert form.is_valid() is False, ('Should be invalid if body text is less than 10 characters')
assert 'body' in form.errors, 'Should return field error for `body`'
data = {'body': 'Hello World!'}
form = forms.PostForm(data=data)
assert form.is_valid() is True, 'Should be valid when data is given' |
the-stack_0_5020 | """
This file offers the methods to automatically retrieve the graph Janthinobacterium sp. Marseille.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def JanthinobacteriumSpMarseille(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Janthinobacterium sp. Marseille graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Wether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Janthinobacterium sp. Marseille graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="JanthinobacteriumSpMarseille",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
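# Illustrative usage sketch (not part of the original module): retrieval needs
# network access to the STRING repository, so the call is left commented out.
# graph = JanthinobacteriumSpMarseille(directed=False, version="links.v11.5")
# print(graph)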
|
the-stack_0_5026 | # -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
from json.decoder import JSONDecodeError
from time import time
from typing import Union
from urllib.parse import urlparse
import requests
from multipledispatch import dispatch
from iconsdk.exception import JSONRPCException, URLException
from iconsdk.providers.provider import Provider
from iconsdk.utils import to_dict
class HTTPProvider(Provider):
"""
The HTTPProvider takes the full URI where the server can be found.
For local development this would be something like 'http://localhost:9000'.
"""
@dispatch(str, int, dict=None)
def __init__(self, base_domain_url: str, version: int, request_kwargs: dict = None):
"""
The initializer to be set with base domain URL and version.
        :param base_domain_url: base domain URL such as <scheme>://<host>:<port>
        :param version: version of the RPC server API
        :param request_kwargs: kwargs applied to each request (e.g. headers)
"""
uri = urlparse(base_domain_url)
if uri.path != '':
raise URLException('Path is not allowed')
self._serverUri = f'{uri.scheme}://{uri.netloc}'
self._channel = ''
self._version = version
self._request_kwargs = request_kwargs or {}
self._generate_url_map()
@dispatch(str, dict=None)
def __init__(self, full_path_url: str, request_kwargs: dict = None):
"""
        The initializer to be set with a full path URL such as <scheme>://<host>:<port>/api/v3.
        If you need to use a channel, append it to the path, e.g. <scheme>://<host>:<port>/api/v3/{channel}.
        :param full_path_url: full path URL such as <scheme>://<host>:<port>/api/v3
        :param request_kwargs: kwargs applied to each request (e.g. headers)
"""
uri = urlparse(full_path_url)
self._serverUri = f'{uri.scheme}://{uri.netloc}'
self._channel = self._get_channel(uri.path)
self._version = 3
self._request_kwargs = request_kwargs or {}
self._generate_url_map()
def _generate_url_map(self):
def _add_channel_path(url: str):
if self._channel:
return f"{url}/{self._channel}"
return url
self._URL_MAP = {
'icx': _add_channel_path(f"{self._serverUri}/api/v{self._version}"),
'debug': _add_channel_path(f"{self._serverUri}/api/debug/v{self._version}")
}
@staticmethod
def _get_channel(path: str):
tokens = re.split("/(?=[^/]+$)", path.rstrip('/'))
if tokens[0] == '/api/v3':
return tokens[1]
elif tokens == ['/api', 'v3']:
return ''
raise URLException('Invalid URI path')
def __str__(self):
return "RPC connection to {0}".format(self._serverUri)
@to_dict
def _get_request_kwargs(self) -> dict:
if 'headers' not in self._request_kwargs:
yield 'headers', {'Content-Type': 'application/json'}
for key, value in self._request_kwargs.items():
yield key, value
@staticmethod
def _make_post_request(request_url: str, data: dict, **kwargs) -> requests.Response:
kwargs.setdefault('timeout', 10)
with requests.Session() as session:
response = session.post(url=request_url, data=json.dumps(data), **kwargs)
return response
def _make_id(self) -> int:
return int(time())
def make_request(self, method: str, params=None, full_response: bool = False) -> Union[str, list, dict]:
rpc_dict = {
'jsonrpc': '2.0',
'method': method,
'id': self._make_id()
}
if params:
rpc_dict['params'] = params
req_key = method.split('_')[0]
retry_count = 2
raw_response = ''
while retry_count > 0:
request_url = self._URL_MAP.get(req_key)
response = self._make_post_request(request_url, rpc_dict, **self._get_request_kwargs())
try:
return self._return_custom_response(response, full_response)
except JSONDecodeError:
retry_count -= 1
raw_response = response.content.decode()
if req_key == 'debug':
self._URL_MAP['debug'] = "{0}/api/v{1}d/{2}".format(self._serverUri, self._version, self._channel)
else:
break
raise JSONRPCException(f'Unknown response: {raw_response}')
@staticmethod
def _return_custom_response(response: requests.Response, full_response: bool = False) -> Union[str, list, dict]:
content = json.loads(response.content)
if full_response:
return content
if response.ok:
return content['result']
raise JSONRPCException(content["error"])
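# Illustrative usage sketch (not part of the original module): the endpoint URL
# is a placeholder and "icx_getLastBlock" is assumed to be a JSON-RPC method
# exposed by the target node.
# provider = HTTPProvider("https://example-node.invalid/api/v3")
# last_block = provider.make_request("icx_getLastBlock")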
|
the-stack_0_5029 | import xml.etree.ElementTree as ET
from nltk.tokenize import WordPunctTokenizer
from sentence_splitter import SentenceSplitter
from src.parser import Word, Dataset
class Aspect(object):
def __init__(self, begin=0, end=0, target="", polarity=1, category="", aspect_type=0, mark=0):
self.type_values = {
'explicit': 0,
'implicit': 1,
'fct': 2
}
self.rev_type_values = {value: key for key, value in self.type_values.items()}
self.sentiment_values = {
'positive': 3,
'neutral': 1,
'negative': 0,
'both': 2
}
self.rev_sentiment_values = {value: key for key, value in self.sentiment_values.items()}
self.mark_values = {
'Rel': 0,
'Irr': 1,
'Cmpr': 2,
'Prev': 3,
'Irn': 4
}
self.rev_mark_values = {value: key for key, value in self.mark_values.items()}
self.begin = begin
self.end = end
self.target = target
self.polarity = polarity
self.category = category
self.type = aspect_type
self.mark = mark
self.words = []
def parse(self, node):
self.begin = int(node.get('from'))
self.end = int(node.get('to'))
self.target = node.get('term')
self.polarity = self.sentiment_values[node.get('sentiment')]
self.category = node.get('category')
self.type = self.type_values[node.get('type')]
self.mark = self.mark_values[node.get('mark')]
def is_empty(self):
return self.target == ""
def inflate_target(self):
self.target = " ".join([word.text for word in self.words]).replace('"', "'").replace('&', '#')
def to_xml(self):
return '<aspect mark="{mark}" category="{category}" type="{aspect_type}" from="{begin}" to="{end}" sentiment="{polarity}" term="{term}"/>\n'.format(
begin=self.begin, end=self.end, term=self.target, mark=self.rev_mark_values[self.mark],
aspect_type=self.rev_type_values[self.type], category=self.category,
polarity=self.rev_sentiment_values[self.polarity])
def __repr__(self):
return "<Aspect {begin}:{end} {t} {category} {polarity} at {hid}>".format(
begin=self.begin,
end=self.end,
category=self.category,
polarity=self.polarity,
t=self.type,
hid=hex(id(self))
)
class Review(object):
def __init__(self, text="", rid=0):
self.text = text
self.rid = rid
self.aspects = []
self.sentences = []
self.categories = {}
self.sentiment_values = {
'positive': 3,
'neutral': 1,
'negative': 0,
'both': 2,
'absence': 4
}
self.rev_sentiment_values = {value: key for key, value in self.sentiment_values.items()}
def parse(self, node):
self.text = node.find(".//text").text
self.rid = node.get("id")
self.aspects = []
for aspect_node in node.findall(".//aspect"):
aspect = Aspect()
aspect.parse(aspect_node)
self.aspects.append(aspect)
for category_node in node.findall(".//category"):
category_name = category_node.get('name')
sentiment = category_node.get('sentiment')
self.categories[category_name] = self.sentiment_values[sentiment]
def to_xml(self):
aspects_xml = "".join([aspect.to_xml() for aspect in self.aspects])
categories_xml = ''
for name, sentiment_num in self.categories.items():
categories_xml += '<category name="{name}" sentiment="{sentiment}"/>\n'.format(
name=name,
sentiment=self.rev_sentiment_values[sentiment_num]
)
return '<review id="{rid}">\n<categories>\n{categories}</categories>\n<text>{text}</text>\n<aspects>\n{aspects}</aspects>\n</review>\n'.format(
rid=self.rid,
text=self.text.replace("&", "#"),
aspects=aspects_xml,
categories=categories_xml)
class SentiRuEvalDataset(Dataset):
def __init__(self):
super().__init__()
self.language = "ru"
def parse(self, filename, vectorizer=None, **kwargs):
assert filename.endswith('xml')
tree = ET.parse(filename)
root = tree.getroot()
self.reviews = []
for review_node in root.findall(".//review"):
review = Review()
review.parse(review_node)
self.reviews.append(review)
self.tokenize()
self.pos_tag()
def tokenize(self):
sentence_splitter = SentenceSplitter(language='ru')
for i, review in enumerate(self.reviews):
text = review.text
sentences = sentence_splitter.split(text)
words_borders = list(WordPunctTokenizer().span_tokenize(text))
for sentence in sentences:
tokenized_sentence = []
sentence_begin = text.find(sentence)
sentence_end = sentence_begin + len(sentence)
for word_begin, word_end in words_borders:
if word_begin >= sentence_begin and word_end <= sentence_end:
word_text = text[word_begin: word_end]
word = Word(word_text, word_begin, word_end)
for opinion in review.aspects:
if word.begin >= opinion.begin and word.end <= opinion.end:
word.add_opinion(opinion)
opinion.words.append(word)
tokenized_sentence.append(word)
self.reviews[i].sentences.append(tokenized_sentence)
def print_stat(self):
print("Num of reviews: " + str(len(self.reviews)))
print("Num of opinions: " + str(self.get_opinion_count()))
print("Max review length: " + str(max(self.get_lengths())))
        print(self.reviews[0].sentences[0])
def get_aspect_categories(self):
categories = set()
for review in self.reviews:
for aspect in review.aspects:
categories.add(aspect.category)
categories = list(sorted(list(categories)))
return {category: i for i, category in enumerate(categories)}
def get_review_categories(self):
categories = set()
for review in self.reviews:
for category in review.categories.keys():
categories.add(category)
categories = list(sorted(list(categories)))
return {category: i for i, category in enumerate(categories)}
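# Illustrative usage sketch (not part of the original module): the file name is
# a placeholder; parse() expects SentiRuEval-style XML markup and tokenizes and
# POS-tags the reviews while loading.
# dataset = SentiRuEvalDataset()
# dataset.parse("sentirueval_markup.xml")
# dataset.print_stat()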
|
the-stack_0_5030 | import json
import pytest
from unittest import mock
from asynctest import patch
from blebox_uniapi.box import Box
from blebox_uniapi import error
pytestmark = pytest.mark.asyncio
@pytest.fixture
def mock_session():
return mock.MagicMock(host="172.1.2.3", port=80)
@pytest.fixture
def data():
return {
"id": "abcd1234ef",
"type": "airSensor",
"deviceName": "foobar",
"fv": "1.23",
"hv": "4.56",
"apiLevel": "20180403",
}
async def test_json_paths(mock_session, data):
box = Box(mock_session, data)
assert "foo" == box.follow(json.loads("""["foo"]"""), "[0]")
assert 4 == box.follow(
json.loads("""[{"foo":"3", "value":4}]"""), "[foo='3']/value"
)
assert 4 == box.follow(json.loads("""[{"foo":3, "value":4}]"""), "[foo=3]/value")
with pytest.raises(error.JPathFailed, match=r"with: foo=bc at .* within .*"):
box.follow(json.loads("""[{"foo":"ab", "value":4}]"""), "[foo='bc']/value")
with pytest.raises(
error.JPathFailed, match=r"with value at index 1 at .* within .*"
):
box.follow(json.loads("""[{"value":4}]"""), "[1]/value")
with pytest.raises(
error.JPathFailed, match=r"with value at index 1 at .* within .*"
):
box.follow(json.loads("""{"value":4}"""), "[1]/value")
with pytest.raises(error.JPathFailed, match=r"with: foo=7 at .* within .*"):
box.follow(json.loads("""[{"foo":3, "value":4}]"""), "[foo=7]/value")
with pytest.raises(
error.JPathFailed, match=r"item 'foo' not among \['value'\] at .* within .*"
):
box.follow(json.loads("""{"value":4}"""), "foo")
with pytest.raises(
error.JPathFailed,
match=r"unexpected item type: 'foo' not in: \[4\] at .* within .*",
):
box.follow(json.loads("""[4]"""), "foo")
with pytest.raises(
error.JPathFailed,
match=r"list expected but got {'foo': \[4\]} at .* within .*",
):
box.follow(json.loads("""{"foo": [4]}"""), "[bar=0]/value")
async def test_without_id(mock_session, data):
with pytest.raises(
error.UnsupportedBoxResponse, match="Device at 172.1.2.3:80 has no id"
):
del data["id"]
Box(mock_session, data)
async def test_without_type(mock_session, data):
with pytest.raises(
error.UnsupportedBoxResponse,
match="Device:abcd1234ef at 172.1.2.3:80 has no type",
):
del data["type"]
Box(mock_session, data)
async def test_with_unknown_type(mock_session, data):
with pytest.raises(
error.UnsupportedBoxResponse,
match=r"'foobar' \(unknownBox:abcd1234ef/1.23 at 172.1.2.3:80\) is not a supported type",
):
data["type"] = "unknownBox"
Box(mock_session, data)
async def test_without_name(mock_session, data):
with pytest.raises(
error.UnsupportedBoxResponse,
match="airSensor:abcd1234ef at 172.1.2.3:80 has no name",
):
del data["deviceName"]
Box(mock_session, data)
async def test_without_firmware_version(mock_session, data):
with pytest.raises(
error.UnsupportedBoxResponse,
match=r"'foobar' \(airSensor:abcd1234ef at 172.1.2.3:80\) has no firmware version",
):
del data["fv"]
Box(mock_session, data)
async def test_without_hardware_version(mock_session, data):
with pytest.raises(
error.UnsupportedBoxResponse,
match=r"'foobar' \(airSensor:abcd1234ef/1.23 at 172.1.2.3:80\) has no hardware version",
):
del data["hv"]
Box(mock_session, data)
async def test_without_api_level(mock_session, data):
with pytest.raises(
error.UnsupportedBoxVersion,
match=r"'foobar' \(airSensor:abcd1234ef/1.23 at 172.1.2.3:80\) has unsupported version",
):
del data["apiLevel"]
Box(mock_session, data)
async def test_with_init_failure(mock_session, data):
with patch(
"blebox_uniapi.box.AirQuality", spec_set=True, autospec=True
) as mock_sensor:
mock_sensor.side_effect = KeyError
with pytest.raises(
error.UnsupportedBoxResponse,
match=r"'foobar' \(airSensor:abcd1234ef/1.23 at 172.1.2.3:80\) failed to initialize: ",
):
Box(mock_session, data)
async def test_properties(mock_session, data):
box = Box(mock_session, data)
assert "foobar" == box.name
assert None is box.last_data
assert "airSensor" == box.type
assert "airSensor" == box.model
assert "abcd1234ef" == box.unique_id
assert "1.23" == box.firmware_version
assert "4.56" == box.hardware_version
assert "BleBox" == box.brand
assert 20180403 == box.api_version
async def test_validations(mock_session, data):
box = Box(mock_session, data)
with pytest.raises(
error.BadFieldExceedsMax,
match=r"foobar.field1 is 123 which exceeds max \(100\)",
):
box.check_int_range(123, "field1", 100, 0)
with pytest.raises(
error.BadFieldLessThanMin,
match=r"foobar.field1 is 123 which is less than minimum \(200\)",
):
box.check_int_range(123, "field1", 300, 200)
with pytest.raises(error.BadFieldMissing, match=r"foobar.field1 is missing"):
box.check_int(None, "field1", 300, 200)
with pytest.raises(
error.BadFieldNotANumber, match=r"foobar.field1 is '123' which is not a number"
):
box.check_int("123", "field1", 300, 200)
with pytest.raises(error.BadFieldMissing, match=r"foobar.field1 is missing"):
box.check_hex_str(None, "field1", 300, 200)
with pytest.raises(
error.BadFieldNotAString, match=r"foobar.field1 is 123 which is not a string"
):
box.check_hex_str(123, "field1", 300, 200)
with pytest.raises(error.BadFieldMissing, match=r"foobar.field1 is missing"):
box.check_rgbw(None, "field1")
with pytest.raises(
error.BadFieldNotAString, match=r"foobar.field1 is 123 which is not a string"
):
box.check_rgbw(123, "field1")
with pytest.raises(
error.BadFieldNotRGBW, match=r"foobar.field1 is 123 which is not a rgbw string"
):
box.check_rgbw("123", "field1")
|
the-stack_0_5031 | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2019 Megvii Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# This file has been modified by Megvii ("Megvii Modifications").
# All Megvii Modifications are Copyright (C) 2014-2019 Megvii Inc. All rights reserved.
# ------------------------------------------------------------------------------
import megengine.functional as F
import megengine.hub as hub
import megengine.module as M
class ShuffleV2Block(M.Module):
def __init__(self, inp, oup, mid_channels, *, ksize, stride):
super().__init__()
self.stride = stride
assert stride in [1, 2]
self.mid_channels = mid_channels
self.ksize = ksize
pad = ksize // 2
self.pad = pad
self.inp = inp
outputs = oup - inp
branch_main = [
# pw
M.Conv2d(inp, mid_channels, 1, 1, 0, bias=False),
M.BatchNorm2d(mid_channels),
M.ReLU(),
# dw
M.Conv2d(
mid_channels, mid_channels, ksize, stride, pad,
groups=mid_channels, bias=False,
),
M.BatchNorm2d(mid_channels),
# pw-linear
M.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False),
M.BatchNorm2d(outputs),
M.ReLU(),
]
self.branch_main = M.Sequential(*branch_main)
if stride == 2:
branch_proj = [
# dw
M.Conv2d(inp, inp, ksize, stride, pad, groups=inp, bias=False),
M.BatchNorm2d(inp),
# pw-linear
M.Conv2d(inp, inp, 1, 1, 0, bias=False),
M.BatchNorm2d(inp),
M.ReLU(),
]
self.branch_proj = M.Sequential(*branch_proj)
else:
self.branch_proj = None
def forward(self, old_x):
if self.stride == 1:
x_proj, x = self.channel_shuffle(old_x)
return F.concat((x_proj, self.branch_main(x)), 1)
elif self.stride == 2:
x_proj = old_x
x = old_x
return F.concat((self.branch_proj(x_proj), self.branch_main(x)), 1)
else:
raise ValueError("use stride 1 or 2, current stride {}".format(self.stride))
def channel_shuffle(self, x):
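        # Added note: the reshape/transpose below deinterleaves the channels so
        # that x[0] holds the even-indexed and x[1] the odd-indexed channels,
        # i.e. the channel shuffle applied between the two branches in forward().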
batchsize, num_channels, height, width = x.shape
# assert (num_channels % 4 == 0)
x = x.reshape(batchsize * num_channels // 2, 2, height * width)
x = F.transpose(x, (1, 0, 2))
x = x.reshape(2, -1, num_channels // 2, height, width)
return x[0], x[1]
class ShuffleNetV2(M.Module):
def __init__(self, num_classes=1000, model_size="1.5x"):
super().__init__()
self.stage_repeats = [4, 8, 4]
self.model_size = model_size
if model_size == "0.5x":
self.stage_out_channels = [-1, 24, 48, 96, 192, 1024]
elif model_size == "1.0x":
self.stage_out_channels = [-1, 24, 116, 232, 464, 1024]
elif model_size == "1.5x":
self.stage_out_channels = [-1, 24, 176, 352, 704, 1024]
elif model_size == "2.0x":
self.stage_out_channels = [-1, 24, 244, 488, 976, 2048]
else:
raise NotImplementedError
# building first layer
input_channel = self.stage_out_channels[1]
self.first_conv = M.Sequential(
M.Conv2d(3, input_channel, 3, 2, 1, bias=False), M.BatchNorm2d(input_channel), M.ReLU(),
)
self.maxpool = M.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.features = []
for idxstage in range(len(self.stage_repeats)):
numrepeat = self.stage_repeats[idxstage]
output_channel = self.stage_out_channels[idxstage + 2]
for i in range(numrepeat):
if i == 0:
self.features.append(
ShuffleV2Block(
input_channel, output_channel,
mid_channels=output_channel // 2, ksize=3, stride=2,
)
)
else:
self.features.append(
ShuffleV2Block(
input_channel // 2, output_channel,
mid_channels=output_channel // 2, ksize=3, stride=1,
)
)
input_channel = output_channel
self.features = M.Sequential(*self.features)
self.conv_last = M.Sequential(
M.Conv2d(input_channel, self.stage_out_channels[-1], 1, 1, 0, bias=False),
M.BatchNorm2d(self.stage_out_channels[-1]),
M.ReLU(),
)
self.globalpool = M.AvgPool2d(7)
if self.model_size == "2.0x":
self.dropout = M.Dropout(0.2)
self.classifier = M.Sequential(
M.Linear(self.stage_out_channels[-1], num_classes, bias=False)
)
self._initialize_weights()
def forward(self, x):
x = self.first_conv(x)
x = self.maxpool(x)
x = self.features(x)
x = self.conv_last(x)
x = self.globalpool(x)
if self.model_size == "2.0x":
x = self.dropout(x)
x = x.reshape(-1, self.stage_out_channels[-1])
x = self.classifier(x)
return x
def _initialize_weights(self):
for name, m in self.named_modules():
if isinstance(m, M.Conv2d):
if "first" in name:
M.init.normal_(m.weight, 0, 0.01)
else:
M.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])
if m.bias is not None:
M.init.fill_(m.bias, 0)
elif isinstance(m, M.BatchNorm2d):
M.init.fill_(m.weight, 1)
if m.bias is not None:
M.init.fill_(m.bias, 0.0001)
M.init.fill_(m.running_mean, 0)
elif isinstance(m, M.BatchNorm1d):
M.init.fill_(m.weight, 1)
if m.bias is not None:
M.init.fill_(m.bias, 0.0001)
M.init.fill_(m.running_mean, 0)
elif isinstance(m, M.Linear):
M.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
M.init.fill_(m.bias, 0)
@hub.pretrained("https://data.megengine.org.cn/models/weights/snetv2_x2_0_75115_497d4601.pkl")
def shufflenet_v2_x2_0(num_classes=1000):
return ShuffleNetV2(num_classes=num_classes, model_size="2.0x")
@hub.pretrained("https://data.megengine.org.cn/models/weights/snetv2_x1_5_72775_38ac4273.pkl")
def shufflenet_v2_x1_5(num_classes=1000):
return ShuffleNetV2(num_classes=num_classes, model_size="1.5x")
@hub.pretrained("https://data.megengine.org.cn/models/weights/snetv2_x1_0_69369_daf9dba0.pkl")
def shufflenet_v2_x1_0(num_classes=1000):
return ShuffleNetV2(num_classes=num_classes, model_size="1.0x")
@hub.pretrained("https://data.megengine.org.cn/models/weights/snetv2_x0_5_60750_c28db1a2.pkl")
def shufflenet_v2_x0_5(num_classes=1000):
return ShuffleNetV2(num_classes=num_classes, model_size="0.5x")
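# Illustrative inference sketch (not part of the original module): the
# 1x3x224x224 input shape is an assumption consistent with the 7x7 global
# average pool used above.
# import numpy as np
# import megengine as mge
# model = shufflenet_v2_x1_0()
# model.eval()
# logits = model(mge.tensor(np.random.rand(1, 3, 224, 224).astype("float32")))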
|
the-stack_0_5033 | # qubit number=3
# total number=12
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[0]) # number=1
prog.y(input_qubit[2]) # number=9
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.x(input_qubit[2]) # number=6
prog.h(input_qubit[3]) # number=4
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.y(input_qubit[2]) # number=7
prog.y(input_qubit[2]) # number=8
prog.swap(input_qubit[2],input_qubit[0]) # number=10
prog.swap(input_qubit[2],input_qubit[0]) # number=11
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
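    # gamma/beta are taken from the first grid point at which F1 attains its
    # maximum; multiplying the grid indices by step_size converts them back
    # into the angle values consumed by make_circuit below.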
prog = make_circuit(4)
    sample_shot = 5600
writefile = open("../data/startQiskit_noisy500.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
the-stack_0_5034 | import logging
from enum import Enum
import json
from iota import Address, TryteString
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
vendor_address = "OPMGOSBITOTGSZRESXAO9SGPAOOFEQ9OIPEMY9DEHPVOUULUHXIHHWBNFNMKXPEZWIMHB9JPEXSE9SFLA"
class ChargingStationStatus(Enum):
FREE = "free"
OCCUPIED = "occupied"
CLOSED = "closed"
class ChargingStation:
def __init__(self, longitude, latitude, price, id, owner,
status=ChargingStationStatus.FREE.value):
self.longitude = longitude
self.latitude = latitude
self.price = price
self.id = id
self.owner = owner
self.status = status
def get_message(self):
return {
"long": self.longitude,
"lat": self.latitude,
"owner": self.owner,
"price": self.price,
"id": self.id,
"status": self.status
}
# Advertise charging station (FREE or OCCUPIED OR CLOSED)
# Return: bundle hash
# Params:
# iota:IotaWrapper, the instance to use for sending
def advertise(self, iota):
msg = json.dumps(self.get_message())
bundle = iota.send_transfer(
transfers=iota.create_transfers(self.owner, msg, 0),
inputs=[Address(self.owner, key_index=0, security_level=0)]
)
return bundle["bundle"].as_json_compatible()
def __str__(self):
return ("Station %s (%s) at (%f, %f) " %
(self.id, self.status, self.latitude, self.longitude))
# This is a monopoly
five_stations_data = [
[18.5772788, 54.4060541, 3, "ExpensiveStation", vendor_address,
ChargingStationStatus.FREE.value],
[18.5772656, 54.404569, 1, "BestStation", vendor_address,
ChargingStationStatus.OCCUPIED.value],
[18.578795, 54.406126, 1.3, "FriendlyGarage", vendor_address,
ChargingStationStatus.FREE.value],
[18.578126, 54.404454, 1.3, "CoolCharger", vendor_address,
ChargingStationStatus.FREE.value],
[18.577074, 54.405355, 1.3, "Favourite", vendor_address,
ChargingStationStatus.CLOSED.value],
]
FIVE_STATIONS = [ChargingStation(*stnd) for stnd in five_stations_data]
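# Illustrative usage sketch (not part of the original module): advertise()
# requires an IOTA wrapper exposing create_transfers()/send_transfer() with the
# signatures assumed above, so only a side-effect-free print is shown here.
if __name__ == "__main__":
    for station in FIVE_STATIONS:
        print(station)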
|
the-stack_0_5035 | # IMPORTATION STANDARD
import gzip
import json
# IMPORTATION THIRDPARTY
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.cryptocurrency.defi import llama_view
def filter_json_data(response):
"""To reduce cassette size."""
headers = response["headers"]
if "FILTERED" in headers:
return response
limit = 10
content = response["body"]["string"]
if content.decode().startswith("H4sI"):
content = gzip.decompress(content).decode()
content = json.loads(content)
else:
content = json.loads(content)
if isinstance(content, list):
new_content = content[:limit]
elif isinstance(content, dict):
new_content = {k: content[k] for k in list(content)[:limit]}
else:
raise AttributeError(f"Content type not supported : {content}")
new_content_json = json.dumps(new_content)
new_content_gz = gzip.compress(new_content_json.encode())
response["body"]["string"] = new_content_gz
response["headers"]["Content-Encoding"] = ["gzip"]
response["headers"]["FILTERED"] = ["TRUE"]
return response
def gzip_data(response):
"""To reduce cassette size."""
headers = response["headers"]
if "COMPRESSED" in headers:
return response
content = response["body"]["string"].decode()
if content.startswith("H4sI"):
content = gzip.decompress(content)
new_content_gz = gzip.compress(content.encode())
response["body"]["string"] = new_content_gz
response["headers"]["Content-Encoding"] = ["gzip"]
response["headers"]["COMPRESSED"] = ["TRUE"]
return response
@pytest.mark.vcr(before_record_response=filter_json_data)
@pytest.mark.record_stdout
def test_display_defi_protocols():
llama_view.display_defi_protocols(20, "tvl", False, False)
@pytest.mark.vcr(before_record_response=gzip_data)
@pytest.mark.record_stdout
def test_display_defi_tvl(mocker):
# MOCK EXPORT_DATA
mocker.patch(target="openbb_terminal.cryptocurrency.defi.llama_view.export_data")
# MOCK VISUALIZE_OUTPUT
mocker.patch(target="openbb_terminal.helper_classes.TerminalStyle.visualize_output")
llama_view.display_defi_tvl(20)
@pytest.mark.vcr(before_record_response=filter_json_data)
@pytest.mark.record_stdout
def test_display_grouped_defi_protocols(mocker):
# MOCK EXPORT_DATA
mocker.patch(target="openbb_terminal.cryptocurrency.defi.llama_view.export_data")
# MOCK VISUALIZE_OUTPUT
mocker.patch(target="openbb_terminal.helper_classes.TerminalStyle.visualize_output")
llama_view.display_grouped_defi_protocols(20)
@pytest.mark.vcr(before_record_response=gzip_data)
@pytest.mark.record_stdout
def test_display_historical_tvl(mocker):
# MOCK EXPORT_DATA
mocker.patch(target="openbb_terminal.cryptocurrency.defi.llama_view.export_data")
# MOCK VISUALIZE_OUTPUT
mocker.patch(target="openbb_terminal.helper_classes.TerminalStyle.visualize_output")
llama_view.display_historical_tvl("anchor")
|
the-stack_0_5037 | from bs4 import BeautifulSoup
from inspect import getmembers
import urllib.request
import urllib.error
import urllib.parse
import threading
import requests
import sys
import pprint
import string
import time
import threading
import hashlib
import psycopg2
class Novoterm(threading.Thread):
item_links = list()
category_links = []
i = 0
running = True
conn = None
user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.19 (KHTML, like Gecko) Ubuntu/12.04 Chromium/18.0.1025.168 Chrome/18.0.1025.168 Safari/535.19'
def __init__(self, id):
self.id = str(id)
threading.Thread.__init__(self)
@staticmethod
def getCategoryLinks(self):
self.dbConnection(self)
try:
url = "http://novoterm.pl/kerra/"
url = "http://novoterm.pl/loge/"
response = urllib.request.urlopen(urllib.request.Request(url, headers={'User-Agent': self.user_agent}))
soup = BeautifulSoup(response)
x = soup.find('div', {"id":"nasze-produkty"})
cat_name = x.select('div.bottommargin-sm h3')
cat_link = x.select('div.bottommargin-sm > a')
i = 0
for cn in cat_name:
cname = str(cn).lstrip('<h3>').rstrip('</h3>')
# insert category to db
cur = self.conn.cursor()
sql = "INSERT INTO categories (name, description, created_at, updated_at) VALUES ("
sql += "'"+cname +"', 'Desc', now(), now())"
cur.execute(sql)
self.conn.commit()
# get inserted row id
cur.execute("SELECT currval('categories_id_seq')")
response = urllib.request.urlopen(urllib.request.Request(cat_link[i]['href'], headers={'User-Agent': self.user_agent}))
soup = BeautifulSoup(response)
x = soup.find('div', {"id":"isotope-container"}).select('div > a')
i = i + 1
cat_id = cur.fetchone()[0]
j = 0
for link in x:
self.item_links.append((link['href'], int(cat_id)))
j += 1
print(cname+": "+str(j))
except urllib.error.HTTPError:
print('err')
@staticmethod
def getItemLinks(self):
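        # Added note: this helper appends bare URL strings to item_links,
        # whereas getItemsDataFromNovoterm expects (url, category_id) tuples
        # as produced by getCategoryLinks, so the two paths are not
        # interchangeable.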
try:
url = 'http://novoterm.pl/kerra/kategoria-produktu/stelaze-podtynkowe-pl-pl/'
items_stelaze_podtynkowe = list()
#response = urllib.request.urlopen(url).read()
response = urllib.request.urlopen(urllib.request.Request(url, headers={'User-Agent': self.user_agent}))
soup = BeautifulSoup(response)
x = soup.find('div', {"id":"isotope-container"}).select('div > a')
for link in x:
self.item_links.append(link['href'])
except urllib.error.HTTPError:
print('err')
def getItemsDataFromNovoterm(self):
if self.item_links:
items = self.item_links.pop()
item_url = items[0]
try:
response = urllib.request.urlopen(urllib.request.Request(item_url, headers={'User-Agent': self.user_agent}))
soup = BeautifulSoup(response)
                # download the main product image
item = soup.find('a', {"class":"fancybox"})
t = item['href'].split('.');
tl = len(t)
image_name = hashlib.md5(str(time.time()).replace('.', '').encode('utf-8')).hexdigest()+"."+t[tl-1]
urllib.request.urlretrieve(item['href'], '/home/error/kod/hiveware/storage/shop/items/'+image_name)
                # download the schematic
# schema_src = soup.find('div', {"class":"product-content"}).select('div.topmargin-sm > div > img')[0]['src']
# t = schema_src.split('.');
# tl = len(t)
# urllib.request.urlretrieve(schema_src, "schema/"+image_name+"."+t[tl-1])
                # get the product name
item_name = str(soup.find('div', {"class":"product-head-info"}).select('h2')[0]).lstrip('<h2>').rstrip('</h2>')
                # get the description (including HTML - strong tags)
item_desc = str(soup.find('div', {"class":"product-content"}).select('div.topmargin-sm > div > p')[0])
self.dbInsert(item_name, item_desc, "items/"+image_name, items[1])
except urllib.error.HTTPError:
print('error in get item')
else:
self.running = False
def dbConnection(self):
conn_string = "host='localhost' dbname='hive' user='postgres' password='123123'"
#conn_string = "host='148.251.156.146' dbname='qwer34_test' user='qwer34_test' password='aWXkNlaDJk'"
self.conn = psycopg2.connect(conn_string)
def dbInsert(self, name, desc, img, cat_id):
cur = self.conn.cursor()
sql = "INSERT INTO items (name, description, image_path, price, weight, count, created_at, updated_at) VALUES ("
sql += "'"+name +"', '"+desc+"', '"+img+"', 1, 1, 1, now(), now())"
cur.execute(sql)
self.conn.commit()
cur.execute("SELECT currval('items_id_seq')")
item_id = cur.fetchone()[0]
sql = "INSERT INTO category_item (item_id, category_id) VALUES ("
sql += str(item_id) + ", " + str(cat_id) + ")"
cur.execute(sql)
self.conn.commit()
def run(self):
while self.running:
self.getItemsDataFromNovoterm()
# ========================================================================================================= #
# ========================================================================================================= #
# collect the item links so the data on those pages can later be fetched in multiple threads and inserted into the database
Novoterm.getCategoryLinks(Novoterm)
i = 0
threads = [Novoterm(i) for i in range(0, 8)]
for t in threads:
try:
t.start()
except:
exit()
|
the-stack_0_5038 | """Provides RootCauseAnalysis class for computing RCA."""
import warnings
from itertools import combinations
from math import isclose
from textwrap import wrap
from typing import Dict, List, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from chaos_genius.core.rca.constants import TIME_RANGES_BY_KEY
from chaos_genius.core.rca.rca_utils.string_helpers import (
convert_df_dims_to_query_strings,
convert_query_string_to_user_string,
)
from chaos_genius.core.rca.rca_utils.waterfall_utils import (
get_best_subgroups_using_superset_algo,
get_waterfall_ylims,
waterfall_plot_mpl,
)
from chaos_genius.core.utils.round import round_df, round_number
SUPPORTED_AGGREGATIONS = ["mean", "sum", "count"]
EPSILON = 1e-8
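# Illustrative usage sketch (not part of the original module): the dataframes
# and column names are placeholders. The class compares a baseline dataframe
# against a focus dataframe across the given dimensions and builds impact and
# waterfall tables for the chosen aggregation.
# rca = RootCauseAnalysis(
#     grp1_df=baseline_df,
#     grp2_df=focus_df,
#     dims=["country", "device"],
#     metric="revenue",
#     agg="sum",
# )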
class RootCauseAnalysis:
"""RCA Processor class which computes the RCA."""
def __init__(
self,
grp1_df: pd.DataFrame,
grp2_df: pd.DataFrame,
dims: List[str],
metric: str,
num_dim_combs: List[int] = None,
agg: str = "mean",
preaggregated: bool = False,
preaggregated_count_col: str = "count",
) -> None:
"""Initialize the RCA class.
:param grp1_df: baseline dataframe
:type grp1_df: pd.DataFrame
:param grp2_df: rca/focus dataframe
:type grp2_df: pd.DataFrame
:param dims: list of dimensions to consider
:type dims: List[str]
:param metric: name of metric column
:type metric: str
:param num_dim_combs: which number of dimension combinations to
consider, defaults to None
:type num_dim_combs: List[int], optional
:param agg: aggregation to use, defaults to "mean"
:type agg: str, optional
:param preaggregated: whether the dataframes are preaggregated,
defaults to False
:type preaggregated: bool, optional
:param preaggregated_count_col: name of the column containing the
count of the aggregated dataframe, defaults to "count"
:type preaggregated_count_col: str, optional
"""
self._grp1_df = grp1_df
self._grp2_df = grp2_df
self._preprocess_rca_dfs()
self._full_df = pd.concat([self._grp1_df, self._grp2_df])
self._check_columns(dims)
self._dims = dims
self._check_columns(metric)
self._metric = metric
self._metric_is_cat = self._full_df[metric].dtype == object
if agg not in SUPPORTED_AGGREGATIONS:
raise ValueError(f"Aggregation {agg} is not supported.")
self._agg = agg
if num_dim_combs is None or not dims:
num_dim_combs = list(range(1, len(dims) + 1))
else:
if max(num_dim_combs) > len(self._dims) or min(num_dim_combs) < 1:
raise ValueError(f"n {num_dim_combs} is out of range.")
if len(set(num_dim_combs)) != len(num_dim_combs):
raise ValueError(f"n {num_dim_combs} has duplicates.")
if len(num_dim_combs) > 4:
warnings.warn(
"Passing more than 4 values for n will take a while."
)
self._num_dim_combs_to_consider = num_dim_combs
self._impact_table = None
self._waterfall_table = None
self._max_waterfall_columns = 5
self._max_subgroups_considered = 100
self._preaggregated = preaggregated
self._preaggregated_count_col = preaggregated_count_col
def _initialize_impact_table(self):
self._create_binned_columns()
dim_combs_list = self._generate_all_dim_combinations()
impacts = []
for dim_comb in dim_combs_list:
dim_comb_impact = self._compare_subgroups(dim_comb)
impacts.append(dim_comb_impact)
impact_table = pd.concat(impacts)
# sort by absolute impact values
impact_table = impact_table.sort_values(
by="impact",
ascending=False,
key=lambda x: x.abs(),
ignore_index=True,
)
# add query string
impact_table.loc[:, "string"] = impact_table[self._dims].apply(
lambda inp: convert_df_dims_to_query_strings(inp), axis=1
)
# keeping only relevant features
# impact_table.drop(self._dims, axis= 1, inplace= True)
metric_columns = [
"impact",
"val_g1",
"val_g2",
"size_g1",
"size_g2",
"count_g1",
"count_g2",
]
impact_table = impact_table[["string"] + self._dims + metric_columns]
return impact_table
def _get_single_dim_impact_table(self, single_dim):
if self._impact_table is None:
self._impact_table = self._initialize_impact_table()
impact_table = self._impact_table.copy()
other_dims = set(self._dims)
other_dims.remove(single_dim)
impact_table = impact_table[
(~impact_table[single_dim].isna())
& (impact_table[other_dims].isna().sum(axis=1) == len(other_dims))
]
impact_table = impact_table.reset_index(drop=True)
return impact_table
def _initialize_waterfall_table(self, single_dim=None):
if self._impact_table is None:
self._impact_table = self._initialize_impact_table()
# get impact values
if single_dim is not None:
impact_table = self._get_single_dim_impact_table(single_dim)
else:
impact_table = self._impact_table.copy()
# getting subgroups for waterfall
best_subgroups = get_best_subgroups_using_superset_algo(
impact_table,
self._max_waterfall_columns,
self._max_subgroups_considered,
)
best_subgroups = best_subgroups[
best_subgroups["ignored"] == False # noqa E712
]
best_subgroups = best_subgroups.merge(
impact_table[["string", "impact"]], how="inner", on="string"
)
best_subgroups["impact_non_overlap"] = best_subgroups["impact"]
best_subgroups.rename(
columns={"impact": "impact_full_group"}, inplace=True
)
best_subgroups[["indices_in_group", "non_overlap_indices"]] = 0
# calculate overlap values
best_subgroups = self._get_overlap_values_for_waterfall(best_subgroups)
return best_subgroups
def _preprocess_rca_dfs(self):
"""Preprocess dataframes for RCA Analysis."""
self._grp1_df = self._grp1_df.reset_index(drop=True)
self._grp2_df = self._grp2_df.reset_index(drop=True)
self._grp2_df.index = self._grp2_df.index + len(self._grp1_df)
def _check_columns(self, cols):
if isinstance(cols, str):
cols = [cols]
for col in cols:
if col not in self._full_df.columns:
raise ValueError(f"Column {col} not in data.")
def _create_binned_columns(self):
non_cat_cols = self._full_df.dtypes[self._dims][
self._full_df.dtypes[self._dims] != object
]
for col in non_cat_cols.index:
binned_values = pd.qcut(
self._full_df[col], 4, duplicates="drop"
).astype(str)
self._full_df[col] = binned_values
self._grp1_df = self._full_df.loc[self._grp1_df.index]
self._grp2_df = self._full_df.loc[self._grp2_df.index]
def _generate_all_dim_combinations(self) -> List[List[str]]:
"""Create a dictionary of all possible combinations of dims.
Returns:
List[List[str]]: Returns a list of all possible subgroups
"""
list_subgroups = []
for i in self._num_dim_combs_to_consider:
list_subgroups_of_level = list(
map(list, combinations(self._dims, i))
)
list_subgroups.extend(list_subgroups_of_level)
return list_subgroups
def _calculate_subgroup_values(self, data, suffix):
agg_name = self._agg + suffix
count_name = "count" + suffix
if self._agg == "mean":
value_numerator = data[agg_name] * data[count_name]
value_denominator = data[count_name].sum() + EPSILON
value = value_numerator / value_denominator
elif self._agg in ["sum", "count"]:
value = data[agg_name]
else:
raise ValueError(f"Aggregation {self._agg} is not defined.")
size = data[count_name] * 100 / (data[count_name].sum() + EPSILON)
return value, size
def _compare_subgroups(self, dim_comb: List[str]) -> pd.DataFrame:
if self._preaggregated:
if self._agg == "count":
# if agg is count, sum across the count column
# to get the correct count
grp1_df = self._grp1_df.groupby(dim_comb)[
self._preaggregated_count_col
].agg(["sum"]).reset_index().rename(columns={"sum": "count"})
grp2_df = self._grp2_df.groupby(dim_comb)[
self._preaggregated_count_col
].agg(["sum"]).reset_index().rename(columns={"sum": "count"})
elif self._agg == "sum":
# if agg is sum, sum across the sum and count column
# to get the correct values
grp1_df = self._grp1_df.groupby(dim_comb)[
[self._metric, self._preaggregated_count_col]
].sum().reset_index().rename(columns={
self._metric: "sum",
self._preaggregated_count_col: "count"
})
grp2_df = self._grp2_df.groupby(dim_comb)[
[self._metric, self._preaggregated_count_col]
].sum().reset_index().rename(columns={
self._metric: "sum",
self._preaggregated_count_col: "count"
})
else:
raise ValueError(
f"Unsupported aggregation: {self._agg} for preaggregated data."
)
else:
agg_list = [self._agg, "count"] if self._agg != "count" else ["count"]
grp1_df = (
self._grp1_df.groupby(dim_comb)[self._metric]
.agg(agg_list)
.reset_index()
)
grp2_df = (
self._grp2_df.groupby(dim_comb)[self._metric]
.agg(agg_list)
.reset_index()
)
combined_df = grp1_df.merge(
grp2_df, how="outer", on=dim_comb, suffixes=["_g1", "_g2"]
).fillna(0)
for i, suffix in enumerate(["_g1", "_g2"]):
agg_name = self._agg + suffix
count_name = "count" + suffix
if self._agg == "mean":
value_numerator = (
combined_df[agg_name] * combined_df[count_name]
)
value_denominator = combined_df[count_name].sum() + EPSILON
value = value_numerator / value_denominator
elif self._agg in ["sum", "count"]:
value = combined_df[agg_name]
else:
raise ValueError(f"Aggregation {self._agg} is not defined.")
combined_df["val" + suffix] = value
combined_df["size" + suffix] = combined_df[count_name] * 100
if i == 0:
combined_df["size" + suffix] /= len(self._grp1_df) + EPSILON
elif i == 1:
combined_df["size" + suffix] /= len(self._grp2_df) + EPSILON
(
combined_df["val_g1"],
combined_df["size_g1"],
) = self._calculate_subgroup_values(combined_df, "_g1")
(
combined_df["val_g2"],
combined_df["size_g2"],
) = self._calculate_subgroup_values(combined_df, "_g2")
combined_df["impact"] = combined_df["val_g2"] - combined_df["val_g1"]
return combined_df
def _get_overlap_values_for_waterfall(
self,
subgroups_df: pd.DataFrame,
):
subgroups_df_output = subgroups_df.copy()
len_d1 = self._grp1_df[self._metric].count()
len_d2 = self._grp2_df[self._metric].count()
for subgroup in subgroups_df_output["string"]:
all_indices = set()
# others are all subgroups minus the current subgroup
other_subgroups = subgroups_df_output["string"].values.tolist()
other_subgroups.remove(subgroup)
other_combinations = {
i: combinations(other_subgroups, i)
for i in range(1, len(subgroups_df_output))
}
d1_idxs = set(self._grp1_df.query(subgroup).index)
d2_idxs = set(self._grp2_df.query(subgroup).index)
overlap_indices_count = 0
curr_loc = 0
for i in range(1, len(subgroups_df_output)):
for combo in other_combinations[i]:
query = " and ".join(combo)
d1_combo = set(self._grp1_df.query(query).index)
d2_combo = set(self._grp2_df.query(query).index)
overlap_points_d1 = (
d1_idxs.intersection(d1_combo) - all_indices
)
overlap_points_d2 = (
d2_idxs.intersection(d2_combo) - all_indices
)
overlap_indices_count += len(overlap_points_d1) + len(
overlap_points_d2
)
t_d1 = self._grp1_df.loc[overlap_points_d1]
t_d2 = self._grp2_df.loc[overlap_points_d2]
if self._agg == "mean":
grp1_val = (
t_d1[self._metric].mean()
* t_d1[self._metric].count()
/ len_d1
)
grp2_val = (
t_d2[self._metric].mean()
* t_d2[self._metric].count()
/ len_d2
)
elif self._agg == "sum":
grp1_val = t_d1[self._metric].sum()
grp2_val = t_d2[self._metric].sum()
elif self._agg == "count":
grp1_val = t_d1[self._metric].count()
grp2_val = t_d2[self._metric].count()
overlap_impact = grp2_val - grp1_val
if np.isnan(overlap_impact):
overlap_impact = 0
curr_loc = subgroups_df_output[
subgroups_df_output["string"] == subgroup
].index[0]
subgroups_df_output.loc[
curr_loc, "impact_non_overlap"
] = subgroups_df_output.loc[
curr_loc, "impact_non_overlap"
] - (
overlap_impact * len(combo) / (len(combo) + 1)
)
all_indices = all_indices.union(overlap_points_d1).union(
overlap_points_d2
)
subgroups_df_output.loc[curr_loc, "indices_in_group"] = len(
d1_idxs
) + len(d2_idxs)
subgroups_df_output.loc[curr_loc, "non_overlap_indices"] = (
len(d1_idxs) + len(d2_idxs) - overlap_indices_count
)
return subgroups_df_output
def _get_waterfall_output_data(
self,
df_subgroups: pd.DataFrame,
word_wrap_num: int,
plot_in_mpl: bool,
) -> Tuple[Tuple[float, float], pd.DataFrame]:
if self._preaggregated:
if self._agg == "count":
d1_agg = self._grp1_df[self._preaggregated_count_col].sum()
d2_agg = self._grp2_df[self._preaggregated_count_col].sum()
elif self._agg == "sum":
d1_agg = self._grp1_df[self._metric].sum()
d2_agg = self._grp2_df[self._metric].sum()
else:
raise ValueError(
f"Unsupported aggregation {self._agg} for preaggregated data."
)
else:
d1_agg = self._grp1_df[self._metric].agg(self._agg)
d2_agg = self._grp2_df[self._metric].agg(self._agg)
d1_agg = 0 if pd.isna(d1_agg) else d1_agg
d2_agg = 0 if pd.isna(d2_agg) else d2_agg
impact = d2_agg - d1_agg
non_overlap_impact = df_subgroups["impact_non_overlap"].sum()
waterfall_df = df_subgroups[["string", "impact_non_overlap"]].copy()
others_impact = impact - non_overlap_impact
        # add an "others" bar only if its impact is not close to 0
if not isclose(others_impact, 0, rel_tol=0.0001, abs_tol=EPSILON):
waterfall_df = waterfall_df.append(
{"string": "others", "impact_non_overlap": others_impact},
ignore_index=True,
)
col_names_for_mpl = [
"start",
*[
"\n".join(wrap(i, word_wrap_num))
for i in waterfall_df["string"].values.tolist()
],
]
col_values = [
d1_agg,
*waterfall_df["impact_non_overlap"].values.tolist(),
]
col_names_for_mpl.append("end")
col_values.append(d2_agg)
y_axis_lims = get_waterfall_ylims(
pd.DataFrame(
data={self._metric: col_values}, index=col_names_for_mpl
),
self._metric,
)
if plot_in_mpl:
print("plot")
waterfall_plot_mpl(
pd.DataFrame(
data={self._metric: col_values}, index=col_names_for_mpl
),
self._metric,
y_axis_lims,
)
plt.show()
# Calculate steps for each subgroup
col_values = (
col_values[:1]
+ [sum(col_values[: i + 1]) for i in range(1, len(col_values) - 1)]
+ col_values[-1:]
)
js_df = pd.DataFrame(
data={
"value": col_values,
"category": ["start"]
+ waterfall_df["string"].values.tolist()
+ ["end"],
"stepValue": col_values,
}
)
js_df["open"] = js_df["value"].shift(1, fill_value=0)
js_df["color"] = [
"#FA5252" if val <= 0 else "#05A677"
for val in [0]
+ waterfall_df["impact_non_overlap"].values.tolist()
+ [0]
]
js_df.loc[[0, len(js_df) - 1], ["open", "color"]] = [
[0, "#778CA3"],
[0, "#778CA3"],
]
js_df["displayValue"] = js_df["value"] - js_df["open"]
return y_axis_lims, js_df
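    # Worked example of the step computation above (illustrative only): with
    # d1_agg = 100, non-overlap impacts [20, -5] and d2_agg = 115, the raw
    # col_values are [100, 20, -5, 115]; the cumulative-sum step turns them
    # into [100, 120, 115, 115], so each bar starts where the previous one
    # ended and the final bar lands on the group-2 aggregate.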
def _get_best_subgroups_waterfall(
self,
single_dim,
max_waterfall_columns,
max_subgroups_considered,
):
recalc = False
if (
max_waterfall_columns is not None
and max_waterfall_columns != self._max_waterfall_columns
):
recalc = True
self._max_waterfall_columns = max_waterfall_columns
if (
max_subgroups_considered is not None
and max_subgroups_considered != self._max_subgroups_considered
):
recalc = True
self._max_subgroups_considered = max_subgroups_considered
if single_dim is None:
if self._waterfall_table is None or recalc:
self._waterfall_table = self._initialize_waterfall_table(
single_dim
)
best_subgroups = self._waterfall_table.copy()
else:
best_subgroups = self._initialize_waterfall_table(single_dim)
best_subgroups.drop("ignored", axis=1, inplace=True)
return best_subgroups
def get_panel_metrics(self) -> Dict[str, float]:
"""Return panel metrics for the KPI.
:return: Dictionary with metrics
:rtype: Dict[str, float]
"""
if self._preaggregated:
if self._agg == "count":
g1_agg = self._grp1_df[self._preaggregated_count_col].sum()
g2_agg = self._grp2_df[self._preaggregated_count_col].sum()
elif self._agg == "sum":
g1_agg = self._grp1_df[self._metric].sum()
g2_agg = self._grp2_df[self._metric].sum()
else:
raise ValueError(
f"Unsupported aggregation: {self._agg} for preaggregated data."
)
else:
g1 = self._grp1_df[self._metric]
g2 = self._grp2_df[self._metric]
# set aggregations to 0 if data is empty
g1_agg = g1.agg(self._agg) if len(g1) > 0 else 0
g2_agg = g2.agg(self._agg) if len(g2) > 0 else 0
impact = g2_agg - g1_agg
perc_diff = (impact / g1_agg) * 100 if g1_agg != 0 else np.inf
panel_metrics = {
"group1_value": round_number(g1_agg),
"group2_value": round_number(g2_agg),
"difference": round_number(impact),
"perc_change": round_number(perc_diff)
if not np.isinf(perc_diff)
else "inf",
}
# Check for None or NaN values in output
for k, v in panel_metrics.items():
if v is None or pd.isna(v):
raise ValueError(f"{k} with value: {v} is either None or NaN")
return panel_metrics
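    # Example of the returned structure (illustrative values only):
    #   {"group1_value": 120.5, "group2_value": 150.0,
    #    "difference": 29.5, "perc_change": 24.48}
    # "perc_change" is reported as the string "inf" when group 1 aggregates to 0.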
def get_impact_rows(
self, single_dim: str = None
) -> List[Dict[str, object]]:
"""Return impact dataframe as a list.
:param single_dim: dimension to use, defaults to None
:type single_dim: str, optional
:return: list with rows of impact table
:rtype: List[Dict[str, object]]
"""
if self._impact_table is None:
self._impact_table = self._initialize_impact_table()
impact_table = self._impact_table.copy()
if single_dim is not None:
impact_table = impact_table[~impact_table[single_dim].isna()]
impact_table = impact_table.reset_index(drop=True)
impact_table.drop(self._dims, axis=1, inplace=True)
impact_table["string"] = impact_table["string"].apply(
convert_query_string_to_user_string
)
# Check for any nan values in impact values and raise ValueError if found
self._check_nan(
impact_table, f"Impact table for dimension {single_dim}"
)
return round_df(impact_table).to_dict("records")
def get_impact_column_map(
self, timeline: str = "last_30_days"
) -> List[Dict[str, str]]:
"""Return a mapping of column names to values for UI.
:param timeline: timeline to use, defaults to "last_30_days"
:type timeline: str, optional
:return: List of mappings
:rtype: List[Dict[str, str]]
"""
prev_timestr = TIME_RANGES_BY_KEY[timeline]["last_period_name"]
curr_timestr = TIME_RANGES_BY_KEY[timeline]["current_period_name"]
mapping = [
("subgroup", "Subgroup Name"),
("g1_agg", f"{prev_timestr} Value"),
("g1_count", f"{prev_timestr} Count (#)"),
("g1_size", f"{prev_timestr} Size (%)"),
("g2_agg", f"{curr_timestr} Value"),
("g2_count", f"{curr_timestr} Count (#)"),
("g2_size", f"{curr_timestr} Size (%)"),
("impact", "Impact"),
]
mapping = [{"title": v, "field": k} for k, v in mapping]
return mapping
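    # Example entry produced for timeline="last_30_days" (illustrative; the
    # exact period names come from TIME_RANGES_BY_KEY):
    #   {"title": "Impact", "field": "impact"}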
def get_waterfall_table_rows(
self,
single_dim: str = None,
max_waterfall_columns: int = None, # defaults to 5 or last value
max_subgroups_considered: int = None, # defaults to 100 or last value
) -> List[Dict]:
"""Return rows for the waterfall table.
:param single_dim: dimension to use, defaults to None
:type single_dim: str, optional
:param max_waterfall_columns: max columns in waterfall, defaults to
None
        :type max_waterfall_columns: int, optional
        :param max_subgroups_considered: max number of subgroups to consider,
            defaults to None
        :type max_subgroups_considered: int, optional
:return: list of all rows in table
:rtype: List[Dict]
"""
best_subgroups = self._get_best_subgroups_waterfall(
single_dim, max_waterfall_columns, max_subgroups_considered
)
best_subgroups["string"] = best_subgroups["string"].apply(
convert_query_string_to_user_string
)
# Check for any nan values in best subgroups and raise ValueError if found
self._check_nan(
best_subgroups, f"Waterfall table for dimension {single_dim}"
)
return round_df(best_subgroups).to_dict("records")
def get_waterfall_plot_data(
self,
single_dim: str = None,
plot_in_mpl: bool = False,
word_wrap_num: int = 15,
max_waterfall_columns: int = None, # defaults to 5 or last value
max_subgroups_considered: int = None, # defaults to 100 or last value
) -> Tuple[List[Dict], List[float]]:
"""Return plot data for waterfall chart.
:param single_dim: dimension to use, defaults to None
:type single_dim: str, optional
:param plot_in_mpl: flag to plot in matplotlib, defaults to False
:type plot_in_mpl: bool, optional
:param word_wrap_num: wordwrapping for columns, defaults to 15
:type word_wrap_num: int, optional
:param max_waterfall_columns: max columns in waterfall, defaults to
None
        :type max_waterfall_columns: int, optional
        :param max_subgroups_considered: max number of subgroups to consider,
            defaults to None
        :type max_subgroups_considered: int, optional
        :return: plot data for waterfall chart
        :rtype: Tuple[List[Dict], List[float]]
"""
best_subgroups = self._get_best_subgroups_waterfall(
single_dim, max_waterfall_columns, max_subgroups_considered
)
# get waterfall chart data
y_axis_lims, waterfall_df = self._get_waterfall_output_data(
best_subgroups, word_wrap_num, plot_in_mpl
)
# convert query strings to user strings
waterfall_df["category"] = waterfall_df["category"].apply(
convert_query_string_to_user_string
)
# Check for any nan values in waterfall df and raise ValueError if found
self._check_nan(
waterfall_df, f"Waterfall chart for dimension {single_dim}"
)
return (
round_df(waterfall_df).to_dict("records"),
[round_number(i) for i in y_axis_lims],
)
def get_hierarchical_table(
self,
single_dim: str,
max_depth: int = 3,
max_children: int = 5,
max_parents: int = 5,
) -> List[Dict]:
"""Return rows for hierarchical table.
:param single_dim: dimension to use
:type single_dim: str
:param max_depth: maximum depth for the hierarchy, defaults to 3
:type max_depth: int, optional
:param max_children: max children per row, defaults to 5
:type max_children: int, optional
:param max_parents: max first level rows, defaults to 5
:type max_parents: int, optional
:return: list of rows for the table
:rtype: List[Dict]
"""
other_dims = self._dims[:]
other_dims.remove(single_dim)
impact_table = self._initialize_impact_table()
impact_table["parentId"] = None
# impact_table["id"] = impact_table.index
impact_table["depth"] = None
output_table = self._get_single_dim_impact_table(single_dim)
output_table = output_table.iloc[:max_parents]
output_table["depth"] = 1
for depth in range(1, max_depth):
parents = output_table[output_table["depth"] == depth]
for index, row in parents.iterrows():
string = row["string"]
filters = string.split(" and ")
children = impact_table
for filter_string in filters:
children = children[
children["string"].str.contains(
filter_string, regex=False
)
]
children = children[
children[other_dims].isna().sum(axis=1)
== len(other_dims) - depth
]
children = children.iloc[:max_children]
children["depth"] = depth + 1
children["parentId"] = index
output_table = output_table.append(children, ignore_index=True)
output_table.drop(self._dims, axis=1, inplace=True)
output_table = output_table.reset_index().rename(
columns={"index": "id"}
)
output_table["string"] = output_table["string"].apply(
convert_query_string_to_user_string
)
# Check for any nan values in output table and raise ValueError if found
self._check_nan(
output_table.drop("parentId", axis=1),
f"Hierarchical table for dimension {single_dim}",
)
return round_df(output_table).to_dict("records")
def _check_nan(self, df: pd.DataFrame, message: str) -> None:
"""Check if NaN values in dataframe."""
nan_df = df.isna().sum()
nan_dict: dict = nan_df[nan_df > 0].to_dict()
if nan_dict:
raise ValueError(f"{message} contains NaN values. {nan_dict}")
|
the-stack_0_5040 | import torch.nn as nn
import torch.nn.functional as F
import torch
from einops.layers.torch import Rearrange
from einops import rearrange
import numpy as np
from typing import Any, List
import math
import warnings
from collections import OrderedDict
__all__ = ['ConTBlock', 'ConTNet']
r""" The following trunc_normal method is pasted from timm https://github.com/rwightman/pytorch-image-models/tree/master/timm
"""
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def fixed_padding(inputs, kernel_size, dilation):
kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padded_inputs = F.pad(inputs, (pad_beg, pad_end, pad_beg, pad_end))
return padded_inputs
class ConvBN(nn.Sequential):
def __init__(self, in_planes, out_planes, kernel_size, stride=1, groups=1, bn=True):
padding = (kernel_size - 1) // 2
if bn:
super(ConvBN, self).__init__(OrderedDict([
('conv', nn.Conv2d(in_planes, out_planes, kernel_size, stride,
padding=padding, groups=groups, bias=False)),
('bn', nn.BatchNorm2d(out_planes))
]))
else:
super(ConvBN, self).__init__(OrderedDict([
('conv', nn.Conv2d(in_planes, out_planes, kernel_size, stride,
padding=padding, groups=groups, bias=False)),
]))
class MHSA(nn.Module):
r"""
Build a Multi-Head Self-Attention:
- https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
def __init__(self,
planes,
head_num,
dropout,
patch_size,
qkv_bias,
relative):
super(MHSA, self).__init__()
self.head_num = head_num
head_dim = planes // head_num
self.qkv = nn.Linear(planes, 3*planes, bias=qkv_bias)
self.relative = relative
self.patch_size = patch_size
self.scale = head_dim ** -0.5
if self.relative:
# print('### relative position embedding ###')
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * patch_size - 1) * (2 * patch_size - 1), head_num))
coords_w = coords_h = torch.arange(patch_size)
coords = torch.stack(torch.meshgrid([coords_h, coords_w]))
coords_flatten = torch.flatten(coords, 1)
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
relative_coords = relative_coords.permute(1, 2, 0).contiguous()
relative_coords[:, :, 0] += patch_size - 1
relative_coords[:, :, 1] += patch_size - 1
relative_coords[:, :, 0] *= 2 * patch_size - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.attn_drop = nn.Dropout(p=dropout)
self.proj = nn.Linear(planes, planes)
self.proj_drop = nn.Dropout(p=dropout)
def forward(self, x):
B, N, C, H = *x.shape, self.head_num
# print(x.shape)
qkv = self.qkv(x).reshape(B, N, 3, H, C // H).permute(2, 0, 3, 1, 4) # x: (3, B, H, N, C//H)
        q, k, v = qkv[0], qkv[1], qkv[2] # x: (B, H, N, C//H)
q = q * self.scale
attn = (q @ k.transpose(-2, -1)) # attn: (B, H, N, N)
if self.relative:
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.patch_size ** 2, self.patch_size ** 2, -1)
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
attn = attn + relative_position_bias.unsqueeze(0)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
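# Shape sanity-check for MHSA (illustrative sketch, not part of the original
# model definition). A 7x7 patch of a 64-channel feature map is flattened to
# 49 tokens before entering the attention block:
#
#   attn = MHSA(planes=64, head_num=4, dropout=0.0, patch_size=7,
#               qkv_bias=True, relative=True)
#   tokens = torch.randn(2, 49, 64)      # (batch*patches, tokens, channels)
#   out = attn(tokens)                   # -> torch.Size([2, 49, 64])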
class MLP(nn.Module):
r"""
Build a Multi-Layer Perceptron
- https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
def __init__(self,
planes,
mlp_dim,
dropout):
super(MLP, self).__init__()
self.fc1 = nn.Linear(planes, mlp_dim)
self.act = nn.GELU()
self.fc2 = nn.Linear(mlp_dim, planes)
self.drop = nn.Dropout(dropout)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class STE(nn.Module):
r"""
Build a Standard Transformer Encoder(STE)
input: Tensor (b, c, h, w)
output: Tensor (b, c, h, w)
"""
def __init__(self,
planes: int,
mlp_dim: int,
head_num: int,
dropout: float,
patch_size: int,
relative: bool,
qkv_bias: bool,
pre_norm: bool,
**kwargs):
super(STE, self).__init__()
self.patch_size = patch_size
self.pre_norm = pre_norm
self.relative = relative
self.flatten = nn.Sequential(
Rearrange('b c pnh pnw psh psw -> (b pnh pnw) psh psw c'),
)
if not relative:
self.pe = nn.ParameterList(
[nn.Parameter(torch.zeros(1, patch_size, 1, planes//2)), nn.Parameter(torch.zeros(1, 1, patch_size, planes//2))]
)
self.attn = MHSA(planes, head_num, dropout, patch_size, qkv_bias=qkv_bias, relative=relative)
self.mlp = MLP(planes, mlp_dim, dropout=dropout)
self.norm1 = nn.LayerNorm(planes)
self.norm2 = nn.LayerNorm(planes)
def forward(self, x):
bs, c, h, w = x.shape
patch_size = self.patch_size
patch_num_h, patch_num_w = h // patch_size, w // patch_size
x = (
x.unfold(2, self.patch_size, self.patch_size)
.unfold(3, self.patch_size, self.patch_size)
) # x: (b, c, patch_num, patch_num, patch_size, patch_size)
x = self.flatten(x) # x: (b, patch_size, patch_size, c)
### add 2d position embedding ###
if not self.relative:
x_h, x_w = x.split(c // 2, dim=3)
x = torch.cat((x_h + self.pe[0], x_w + self.pe[1]), dim=3) # x: (b, patch_size, patch_size, c)
x = rearrange(x, 'b psh psw c -> b (psh psw) c')
if self.pre_norm:
x = x + self.attn(self.norm1(x))
x = x + self.mlp(self.norm2(x))
else:
x = self.norm1(x + self.attn(x))
x = self.norm2(x + self.mlp(x))
x = rearrange(x, '(b pnh pnw) (psh psw) c -> b c (pnh psh) (pnw psw)', pnh=patch_num_h, pnw=patch_num_w, psh=patch_size, psw=patch_size)
return x
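# Illustrative shape flow through STE (not part of the original file): the
# input feature map is split into non-overlapping patches, attention and MLP
# run per patch, and the patches are folded back to the original resolution:
#
#   ste = STE(planes=64, mlp_dim=128, head_num=4, dropout=0.0, patch_size=7,
#             relative=True, qkv_bias=True, pre_norm=True)
#   x = torch.randn(2, 64, 56, 56)       # H and W must be divisible by patch_size
#   y = ste(x)                           # -> torch.Size([2, 64, 56, 56])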
class ConTBlock(nn.Module):
r"""
Build a ConTBlock
"""
def __init__(self,
planes: int,
out_planes: int,
mlp_dim: int,
head_num: int,
dropout: float,
patch_size: List[int],
downsample: nn.Module = None,
stride: int=1,
last_dropout: float=0.3,
**kwargs):
super(ConTBlock, self).__init__()
self.downsample = downsample
self.identity = nn.Identity()
self.dropout = nn.Identity()
self.bn = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.ste1 = STE(planes=planes, mlp_dim=mlp_dim, head_num=head_num, dropout=dropout, patch_size=patch_size[0], **kwargs)
self.ste2 = STE(planes=planes, mlp_dim=mlp_dim, head_num=head_num, dropout=dropout, patch_size=patch_size[1], **kwargs)
if stride == 1 and downsample is not None:
self.dropout = nn.Dropout(p=last_dropout)
kernel_size = 1
else:
kernel_size = 3
self.out_conv = ConvBN(planes, out_planes, kernel_size, stride, bn=False)
def forward(self, x):
x_preact = self.relu(self.bn(x))
identity = self.identity(x)
if self.downsample is not None:
identity = self.downsample(x_preact)
residual = self.ste1(x_preact)
residual = self.ste2(residual)
out = self.out_conv(residual)
#out = self.dropout(residual+identity)
return out
class ConTNet(nn.Module):
r"""
Build a ConTNet backbone
"""
def __init__(self,
block,
layers: List[int],
mlp_dim: List[int],
head_num: List[int],
dropout: List[float],
in_channels: int=3,
inplanes: int=64,
num_classes: int=1000,
init_weights: bool=True,
first_embedding: bool=False,
tweak_C: bool=False,
**kwargs):
r"""
Args:
block: ConT Block
layers: number of blocks at each layer
mlp_dim: dimension of mlp in each stage
head_num: number of head in each stage
dropout: dropout in the last two stage
relative: if True, relative Position Embedding is used
            groups: number of groups at each conv layer in the network
depthwise: if True, depthwise convolution is adopted
in_channels: number of channels of input image
inplanes: channel of the first convolution layer
num_classes: number of classes for classification task
only useful when `with_classifier` is True
with_avgpool: if True, an average pooling is added at the end of resnet stage5
with_classifier: if True, FC layer is registered for classification task
            first_embedding: if True, a patch-embedding conv layer with kernel size and stride of 4 is used as the stem
            tweak_C: if True, the original first conv layer is replaced with the ResNet-C style stem (three 3x3 convolutions)
"""
super(ConTNet, self).__init__()
self.inplanes = inplanes
self.block = block
# build the top layer
if tweak_C:
self.layer0 = nn.Sequential(OrderedDict([
('conv_bn1', ConvBN(in_channels, inplanes//2, kernel_size=3, stride=2)),
('relu1', nn.ReLU(inplace=True)),
('conv_bn2', ConvBN(inplanes//2, inplanes//2, kernel_size=3, stride=1)),
('relu2', nn.ReLU(inplace=True)),
('conv_bn3', ConvBN(inplanes//2, inplanes, kernel_size=3, stride=1)),
('relu3', nn.ReLU(inplace=True)),
('maxpool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
]))
elif first_embedding:
self.layer0 = nn.Sequential(OrderedDict([
('conv', nn.Conv2d(in_channels, inplanes, kernel_size=4, stride=4)),
('norm', nn.LayerNorm(inplanes))
]))
else:
self.layer0 = nn.Sequential(OrderedDict([
('conv', ConvBN(in_channels, inplanes, kernel_size=7, stride=2, bn=False)),
# ('relu', nn.ReLU(inplace=True)),
('maxpool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1))
]))
# build cont layers
self.cont_layers = []
self.out_channels = OrderedDict()
for i in range(len(layers)):
            stride = 2
patch_size = [7,14]
if i == len(layers)-1:
stride, patch_size[1] = 1, 7 # the last stage does not conduct downsampling
cont_layer = self._make_layer(inplanes * 2**i, layers[i], stride=stride, mlp_dim=mlp_dim[i], head_num=head_num[i], dropout=dropout[i], patch_size=patch_size, **kwargs)
layer_name = 'layer{}'.format(i + 1)
self.add_module(layer_name, cont_layer)
self.cont_layers.append(layer_name)
self.out_channels[layer_name] = 2 * inplanes * 2**i
self.last_out_channels = next(reversed(self.out_channels.values()))
self.fc = nn.Linear(self.last_out_channels, num_classes)
if init_weights:
self._initialize_weights()
def _make_layer(self,
planes: int,
blocks: int,
stride: int,
mlp_dim: int,
head_num: int,
dropout: float,
patch_size: List[int],
use_avgdown: bool=False,
**kwargs):
layers = OrderedDict()
for i in range(0, blocks-1):
layers[f'{self.block.__name__}{i}'] = self.block(
planes, planes, mlp_dim, head_num, dropout, patch_size, **kwargs)
downsample = None
if stride != 1:
if use_avgdown:
downsample = nn.Sequential(OrderedDict([
('avgpool', nn.AvgPool2d(kernel_size=2, stride=2)),
('conv', ConvBN(planes, planes * 2, kernel_size=1, stride=1, bn=False))]))
else:
downsample = ConvBN(planes, planes * 2, kernel_size=1,
stride=2, bn=False)
else:
downsample = ConvBN(planes, planes * 2, kernel_size=1, stride=1, bn=False)
layers[f'{self.block.__name__}{blocks-1}'] = self.block(
planes, planes*2, mlp_dim, head_num, dropout, patch_size, downsample, stride, **kwargs)
return nn.Sequential(layers)
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.LayerNorm):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.layer0(x)
for _, layer_name in enumerate(self.cont_layers):
cont_layer = getattr(self, layer_name)
x = cont_layer(x)
x = x.mean([2, 3])
x = self.fc(x)
return x
def create_ConTNet_Ti(kwargs):
return ConTNet(block=ConTBlock,
mlp_dim=[196, 392, 768, 768],
head_num=[1, 2, 4, 8],
dropout=[0,0,0,0],
inplanes=48,
layers=[1,1,1,1],
last_dropout=0,
**kwargs)
def create_ConTNet_S(kwargs):
return ConTNet(block=ConTBlock,
mlp_dim=[256, 512, 1024, 1024],
head_num=[1, 2, 4, 8],
dropout=[0,0,0,0],
inplanes=64,
layers=[1,1,1,1],
last_dropout=0,
**kwargs)
def create_ConTNet_M(kwargs):
return ConTNet(block=ConTBlock,
mlp_dim=[256, 512, 1024, 1024],
head_num=[1, 2, 4, 8],
dropout=[0,0,0,0],
inplanes=64,
layers=[2,2,2,2],
last_dropout=0,
**kwargs)
def create_ConTNet_B(kwargs):
return ConTNet(block=ConTBlock,
mlp_dim=[256, 512, 1024, 1024],
head_num=[1, 2, 4, 8],
dropout=[0,0,0.1,0.1],
inplanes=64,
layers=[3,4,6,3],
last_dropout=0.2,
**kwargs)
def build_model(arch, use_avgdown, relative, qkv_bias, pre_norm):
type = arch.split('-')[-1]
func = eval(f'create_ConTNet_{type}')
kwargs = dict(use_avgdown=use_avgdown, relative=relative, qkv_bias=qkv_bias, pre_norm=pre_norm)
return func(kwargs)
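# ---------------------------------------------------------------------------
# Minimal usage sketch (an assumption added for illustration, not part of the
# original training code). The flag values below are arbitrary; `build_model`
# only needs an arch string ending in Ti/S/M/B plus the four booleans.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    model = build_model(
        arch='ConT-Ti', use_avgdown=True, relative=True,
        qkv_bias=True, pre_norm=True)
    dummy = torch.randn(1, 3, 224, 224)   # a single fake RGB image
    logits = model(dummy)                 # stem + 4 ConT stages + global pool + fc
    print(logits.shape)                   # expected: torch.Size([1, 1000]) with default num_classes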
|
the-stack_0_5043 | # -*- coding: utf-8 -*-
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Turbinia task."""
from __future__ import unicode_literals
from copy import deepcopy
from datetime import datetime, timedelta
from enum import IntEnum
import getpass
import json
import logging
import os
import pickle
import platform
import pprint
import subprocess
import sys
import tempfile
import traceback
import uuid
import turbinia
import filelock
from turbinia import config
from turbinia.config import DATETIME_FORMAT
from turbinia.evidence import evidence_decode
from turbinia.processors import resource_manager
from turbinia import output_manager
from turbinia import state_manager
from turbinia import task_utils
from turbinia import TurbiniaException
from turbinia import log_and_report
from turbinia.lib import docker_manager
from prometheus_client import Gauge
from prometheus_client import Histogram
METRICS = {}
log = logging.getLogger('turbinia')
turbinia_worker_tasks_started_total = Gauge(
'turbinia_worker_tasks_started_total',
'Total number of started worker tasks')
turbinia_worker_tasks_completed_total = Gauge(
'turbinia_worker_tasks_completed_total',
'Total number of completed worker tasks')
turbinia_worker_tasks_queued_total = Gauge(
'turbinia_worker_tasks_queued_total', 'Total number of queued worker tasks')
turbinia_worker_tasks_failed_total = Gauge(
'turbinia_worker_tasks_failed_total', 'Total number of failed worker tasks')
turbinia_worker_tasks_timeout_total = Gauge(
'turbinia_worker_tasks_timeout_total',
'Total number of worker tasks timed out.')
class Priority(IntEnum):
"""Reporting priority enum to store common values.
Priorities can be anything in the range of 0-100, where 0 is the highest
priority.
"""
LOW = 80
MEDIUM = 50
HIGH = 20
CRITICAL = 10
class TurbiniaTaskResult:
"""Object to store task results to be returned by a TurbiniaTask.
Attributes:
base_output_dir: Base path for local output
closed: Boolean indicating whether this result is closed
output_dir: Full path for local output
error: Dict of error data ('error' and 'traceback' are some valid keys)
evidence: List of newly created Evidence objects.
id: Unique Id of result (string of hex)
input_evidence: The evidence this task processed.
job_id (str): The ID of the Job that generated this Task/TaskResult
report_data (string): Markdown data that can be used in a Turbinia report.
report_priority (int): Value between 0-100 (0 is the highest priority) to
be used to order report sections.
request_id: The id of the initial request to process this evidence.
run_time: Length of time the task ran for.
saved_paths: Paths where output has been saved.
start_time: Datetime object of when the task was started
status: A one line descriptive task status.
successful: Bool indicating success status.
task_id: Task ID of the parent task.
task_name: Name of parent task.
requester: The user who requested the task.
state_manager: (DatastoreStateManager|RedisStateManager): State manager
object to handle syncing with storage.
worker_name: Name of worker task executed on.
_log: A list of log messages
"""
# The list of attributes that we will persist into storage
STORED_ATTRIBUTES = [
'worker_name', 'report_data', 'report_priority', 'run_time', 'status',
'saved_paths', 'successful'
]
def __init__(
self, evidence=None, input_evidence=None, base_output_dir=None,
request_id=None, job_id=None, no_output_manager=False):
"""Initialize the TurbiniaTaskResult object."""
self.closed = False
self.evidence = evidence if evidence else []
self.input_evidence = input_evidence
self.id = uuid.uuid4().hex
self.job_id = job_id
self.base_output_dir = base_output_dir
self.request_id = request_id
self.task_id = None
self.task_name = None
self.requester = None
self.output_dir = None
self.report_data = None
self.report_priority = Priority.MEDIUM
self.start_time = datetime.now()
self.run_time = None
self.saved_paths = []
self.successful = None
self.status = None
self.error = {}
self.worker_name = platform.node()
self.state_manager = None
# TODO(aarontp): Create mechanism to grab actual python logging data.
self._log = []
self.no_output_manager = no_output_manager
def __str__(self):
return pprint.pformat(vars(self), depth=3)
def setup(self, task):
"""Handles initializing task based attributes, after object creation.
Args:
task (TurbiniaTask): The calling Task object
Raises:
TurbiniaException: If the Output Manager is not setup.
"""
self.task_id = task.id
self.task_name = task.name
self.requester = task.requester
self.state_manager = state_manager.get_state_manager()
if not self.no_output_manager:
if task.output_manager.is_setup:
ldirs = task.output_manager.get_local_output_dirs()
_, self.output_dir = ldirs
else:
raise TurbiniaException('Output Manager is not setup yet.')
def close(self, task, success, status=None):
"""Handles closing of this result and writing logs.
Normally this should be called by the Run method to make sure that the
status, etc are set correctly, but if there is an exception thrown when the
task executes, then run_wrapper will call this with default arguments
indicating a failure.
Args:
task (TurbiniaTask): The calling Task object
success: Bool indicating task success
status: One line descriptive task status.
"""
if self.closed:
# Don't try to close twice.
return
self.successful = success
self.run_time = datetime.now() - self.start_time
if success:
turbinia_worker_tasks_completed_total.inc()
else:
turbinia_worker_tasks_failed_total.inc()
if not status and self.successful:
status = 'Completed successfully in {0:s} on {1:s}'.format(
str(self.run_time), self.worker_name)
elif not status and not self.successful:
status = 'Run failed in {0:s} on {1:s}'.format(
str(self.run_time), self.worker_name)
self.log(status)
self.status = status
for evidence in self.evidence:
if evidence.source_path:
if os.path.exists(evidence.source_path):
self.saved_paths.append(evidence.source_path)
if evidence.copyable:
task.output_manager.save_evidence(evidence, self)
else:
self.log(
'Evidence {0:s} has missing file at source_path {1!s} so '
'not saving.'.format(evidence.name, evidence.source_path))
else:
self.log(
'Evidence {0:s} has empty source_path so '
'not saving.'.format(evidence.name))
if not evidence.request_id:
evidence.request_id = self.request_id
if self.input_evidence:
try:
self.input_evidence.postprocess(task_id=self.task_id)
# Adding a broad exception here because we want to try post-processing
# to clean things up even after other failures in the task, so this could
# also fail.
# pylint: disable=broad-except
except Exception as exception:
message = 'Evidence post-processing for {0!s} failed: {1!s}'.format(
self.input_evidence.name, exception)
self.log(message, level=logging.ERROR)
with filelock.FileLock(config.RESOURCE_FILE_LOCK):
resource_manager.PostProcessResourceState(
self.input_evidence.resource_id, self.task_id)
else:
self.log(
'No input evidence attached to the result object so post-processing '
'cannot be run. This usually means there were previous failures '
'during Task execution and this may result in resources (e.g. '
'mounted disks) accumulating on the Worker.', level=logging.WARNING)
# Now that we've post-processed the input_evidence, we can unset it
# because we don't need to return it.
self.input_evidence = None
if not self.no_output_manager:
# Write result log info to file
logfile = os.path.join(self.output_dir, 'worker-log.txt')
# Create default log text just so that the worker log is created to
# avoid confusion if it doesn't exist.
if not self._log:
self._log.append('No worker messages were logged.')
if self.output_dir and os.path.exists(self.output_dir):
with open(logfile, 'w') as f:
f.write('\n'.join(self._log))
f.write('\n')
task.output_manager.save_local_file(logfile, self)
self.closed = True
log.debug('Result close successful. Status is [{0:s}]'.format(self.status))
def log(self, message, level=logging.INFO, traceback_=None):
"""Log Task messages.
Logs to both the result and the normal logging mechanism.
Args:
message (string): Message to log.
level (int): Log level as defined by logging enums (e.g. logging.INFO)
traceback (string): Trace message to log
"""
self._log.append(message)
if level == logging.DEBUG:
log.debug(message)
elif level == logging.INFO:
log.info(message)
elif level == logging.WARN:
log.warn(message)
elif level == logging.ERROR:
log.error(message)
elif level == logging.CRITICAL:
log.critical(message)
if traceback_:
      self.set_error(message, traceback_)
def update_task_status(self, task, status=None):
"""Updates the task status and pushes it directly to datastore.
Args:
task (TurbiniaTask): The calling Task object
status (str): Brief word or phrase for Task state. If not supplied, the
existing Task status will be used.
"""
if status:
task.result.status = 'Task {0!s} is {1!s} on {2!s}'.format(
self.task_name, status, self.worker_name)
if self.state_manager:
self.state_manager.update_task(task)
else:
self.log(
'No state_manager initialized, not updating Task info', logging.DEBUG)
def add_evidence(self, evidence, evidence_config):
"""Populate the results list.
Args:
evidence: Evidence object
evidence_config (dict): The evidence config we want to associate with
this object. This will be passed in with the original evidence that
was supplied to the task, so likely the caller will always want to
use evidence_.config for this parameter.
"""
if (evidence.source_path and os.path.exists(evidence.source_path) and
os.path.getsize(evidence.source_path) == 0):
self.log(
'Evidence source path [{0:s}] for [{1:s}] exists but is empty. Not '
'adding empty Evidence.'.format(evidence.source_path, evidence.name),
logging.WARNING)
return
# We want to enforce this here to make sure that any new Evidence objects
# created also contain the config. We could create a closure to do this
# automatically, but the real fix is to attach this to a separate object.
# See https://github.com/google/turbinia/issues/211 for more details.
evidence.config = evidence_config
if evidence.context_dependent:
evidence.set_parent(self.input_evidence)
self.evidence.append(evidence)
def set_error(self, error, traceback_):
"""Add error and traceback.
Args:
error: Short string describing the error.
traceback_: Traceback of the error.
"""
self.error['error'] = str(error)
self.error['traceback'] = str(traceback_)
def serialize(self):
"""Creates serialized result object.
Returns:
dict: Object dictionary that is JSON serializable.
"""
self.state_manager = None
result_copy = deepcopy(self.__dict__)
if self.run_time:
result_copy['run_time'] = self.run_time.total_seconds()
else:
result_copy['run_time'] = None
result_copy['start_time'] = self.start_time.strftime(DATETIME_FORMAT)
if self.input_evidence:
result_copy['input_evidence'] = None
result_copy['evidence'] = [x.serialize() for x in self.evidence]
return result_copy
@classmethod
def deserialize(cls, input_dict):
"""Converts an input dictionary back into a TurbiniaTaskResult object.
Args:
input_dict (dict): TurbiniaTaskResult object dictionary.
Returns:
TurbiniaTaskResult: Deserialized object.
"""
result = TurbiniaTaskResult()
result.__dict__.update(input_dict)
if result.state_manager:
result.state_manager = None
if result.run_time:
result.run_time = timedelta(seconds=result.run_time)
result.start_time = datetime.strptime(result.start_time, DATETIME_FORMAT)
if result.input_evidence:
result.input_evidence = evidence_decode(result.input_evidence)
result.evidence = [evidence_decode(x) for x in result.evidence]
return result
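# Round-trip sketch for TurbiniaTaskResult serialization (illustrative only;
# real results are created by TurbiniaTask.create_result() with an output
# manager attached):
#
#   result = TurbiniaTaskResult(request_id='abc123', no_output_manager=True)
#   result.status = 'Completed'
#   restored = TurbiniaTaskResult.deserialize(result.serialize())
#   assert restored.request_id == 'abc123'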
class TurbiniaTask:
"""Base class for Turbinia tasks.
Attributes:
base_output_dir (str): The base directory that output will go into.
Per-task directories will be created under this.
id (str): Unique Id of task (string of hex)
is_finalize_task (bool): Whether this is a finalize Task or not.
job_id (str): Job ID the Task was created by.
job_name (str): The name of the Job.
last_update (datetime): A datetime object with the last time the task was
updated.
name (str): Name of task
output_dir (str): The directory output will go into (including per-task
folder).
output_manager (OutputManager): The object that manages saving output.
result (TurbiniaTaskResult): A TurbiniaTaskResult object.
request_id (str): The id of the initial request to process this evidence.
state_key (str): A key used to manage task state
stub (psq.task.TaskResult|celery.app.Task): The task manager
implementation specific task stub that exists server side to keep a
reference to the remote task objects. For PSQ this is a task result
object, but other implementations have their own stub objects.
tmp_dir (str): Temporary directory for Task to write to.
requester (str): The user who requested the task.
_evidence_config (dict): The config that we want to pass to all new
evidence created from this task.
recipe (dict): Validated recipe to be used as the task configuration.
task_config (dict): Default task configuration, in effect if
no recipe is explicitly provided for the task.
"""
# The list of attributes that we will persist into storage
STORED_ATTRIBUTES = [
'id', 'job_id', 'last_update', 'name', 'request_id', 'requester'
]
# The list of evidence states that are required by a Task in order to run.
# See `evidence.Evidence.preprocess()` docstrings for more details.
REQUIRED_STATES = []
# The default configuration variables used by Tasks. Recipe data will
# override these parameters at run time.
TASK_CONFIG = {}
def __init__(
self, name=None, base_output_dir=None, request_id=None, requester=None):
"""Initialization for TurbiniaTask."""
if base_output_dir:
self.base_output_dir = base_output_dir
else:
self.base_output_dir = config.OUTPUT_DIR
self.id = uuid.uuid4().hex
self.is_finalize_task = False
self.job_id = None
self.job_name = None
self.last_update = datetime.now()
self.name = name if name else self.__class__.__name__
self.output_dir = None
self.output_manager = output_manager.OutputManager()
self.result = None
self.request_id = request_id
self.state_key = None
self.stub = None
self.tmp_dir = None
self.turbinia_version = turbinia.__version__
self.requester = requester if requester else 'user_unspecified'
self._evidence_config = {}
self.recipe = {}
self.task_config = {}
def serialize(self):
"""Converts the TurbiniaTask object into a serializable dict.
Returns:
Dict: Dictionary representing this object, ready to be serialized.
"""
task_copy = deepcopy(self.__dict__)
task_copy['output_manager'] = self.output_manager.__dict__
task_copy['last_update'] = self.last_update.strftime(DATETIME_FORMAT)
return task_copy
@classmethod
def deserialize(cls, input_dict):
"""Converts an input dictionary back into a TurbiniaTask object.
Args:
input_dict (dict): TurbiniaTask object dictionary.
Returns:
TurbiniaTask: Deserialized object.
"""
return task_utils.task_deserialize(input_dict)
@classmethod
def check_worker_role(cls):
"""Checks whether the execution context is within a worker or nosetests.
Returns:
bool: If the current execution is in a worker or nosetests.
"""
if config.TURBINIA_COMMAND in ('celeryworker', 'psqworker'):
return True
for arg in sys.argv:
if 'nosetests' in arg:
return True
return False
def evidence_setup(self, evidence):
"""Validates and processes the evidence.
Args:
evidence(Evidence): The Evidence to setup.
Raises:
TurbiniaException: If the Evidence can't be validated or the current
state does not meet the required state.
"""
evidence.validate()
evidence.preprocess(
self.id, tmp_dir=self.tmp_dir, required_states=self.REQUIRED_STATES)
# Final check to make sure that the required evidence state has been met
# for Evidence types that have those capabilities.
for state in self.REQUIRED_STATES:
if state in evidence.POSSIBLE_STATES and not evidence.state.get(state):
raise TurbiniaException(
'Evidence {0!s} being processed by Task {1:s} requires Evidence '
'to be in state {2:s}, but earlier pre-processors may have '
'failed. Current state is {3:s}. See previous logs for more '
'information.'.format(
evidence, self.name, state.name, evidence.format_state()))
def validate_task_conf(self, proposed_conf):
"""Checks if the provided recipe contains exclusively allowed fields.
Args:
proposed_conf (dict): Dict to override the default dynamic task conf.
Returns:
bool: False if a field not present in the default dynamic task config
is found.
"""
if not proposed_conf:
return False
for k in proposed_conf.keys():
if k == 'task':
continue
if k not in self.TASK_CONFIG:
self.result.log(
'Recipe key "{0:s}" is not found in task {1:s} default config: {2!s}'
.format(k, self.name, self.TASK_CONFIG))
return False
return True
def get_metrics(self):
"""Gets histogram metric for current Task.
Returns:
prometheus_client.Historgram: For the current task,
or None if they are not initialized.
Raises:
TurbiniaException: If no metric is found for the given Task.
"""
global METRICS
metric = METRICS.get(self.name.lower())
if not metric:
message = (
'No metric found for Task {0:s}. client.TASK_MAP may be out of '
          'date.'.format(self.name.lower()))
raise TurbiniaException(message)
return metric
def execute(
self, cmd, result, save_files=None, log_files=None, new_evidence=None,
close=False, shell=False, stderr_file=None, stdout_file=None,
success_codes=None, cwd=None):
"""Executes a given binary and saves output.
Args:
cmd (list|string): Command arguments to run
result (TurbiniaTaskResult): The result object to put data into.
save_files (list): A list of files to save (files referenced by Evidence
objects are automatically saved, so no need to include them).
log_files (list): A list of files to save even if execution fails.
new_evidence (list): These are new evidence objects created by the task.
If the task is successful, they will be added to the result.
close (bool): Whether to close out the result.
shell (bool): Whether the cmd is in the form of a string or a list.
success_codes (list(int)): Which return codes are considered successful.
stderr_file (str): Path to location to save stderr.
stdout_file (str): Path to location to save stdout.
cwd (str): Sets the current directory before the process is executed.
Returns:
Tuple of the return code, and the TurbiniaTaskResult object
"""
# Avoid circular dependency.
from turbinia.jobs import manager as job_manager
save_files = save_files if save_files else []
log_files = log_files if log_files else []
new_evidence = new_evidence if new_evidence else []
success_codes = success_codes if success_codes else [0]
stdout = None
stderr = None
# Get timeout value.
timeout_limit = job_manager.JobsManager.GetTimeoutValue(self.job_name)
# Execute the job via docker.
docker_image = job_manager.JobsManager.GetDockerImage(self.job_name)
if docker_image:
ro_paths = []
for path in ['local_path', 'source_path', 'device_path', 'mount_path']:
if hasattr(result.input_evidence, path):
path_string = getattr(result.input_evidence, path)
if path_string:
ro_paths.append(path_string)
rw_paths = [self.output_dir, self.tmp_dir]
container_manager = docker_manager.ContainerManager(docker_image)
stdout, stderr, ret = container_manager.execute_container(
cmd, shell, ro_paths=ro_paths, rw_paths=rw_paths,
timeout_limit=timeout_limit)
# Execute the job on the host system.
else:
try:
if shell:
proc = subprocess.Popen(
cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
cwd=cwd)
proc.wait(timeout_limit)
else:
proc = subprocess.Popen(
cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=cwd)
proc.wait(timeout_limit)
except subprocess.TimeoutExpired as exception:
# Log error and close result.
message = (
'Execution of [{0!s}] failed due to job timeout of '
'{1:d} seconds has been reached.'.format(cmd, timeout_limit))
result.log(message)
result.close(self, success=False, status=message)
# Increase timeout metric and raise exception
turbinia_worker_tasks_timeout_total.inc()
raise TurbiniaException(message)
stdout, stderr = proc.communicate()
ret = proc.returncode
result.error['stdout'] = str(stdout)
result.error['stderr'] = str(stderr)
if stderr_file and not stderr:
result.log(
'Attempting to save stderr to {0:s}, but no stderr found during '
'execution'.format(stderr_file))
elif stderr:
if not stderr_file:
_, stderr_file = tempfile.mkstemp(
suffix='.txt', prefix='stderr-', dir=self.output_dir)
result.log(
'Writing stderr to {0:s}'.format(stderr_file), level=logging.DEBUG)
with open(stderr_file, 'wb') as fh:
fh.write(stderr)
log_files.append(stderr_file)
if stdout_file and not stdout:
result.log(
'Attempting to save stdout to {0:s}, but no stdout found during '
'execution'.format(stdout_file))
elif stdout:
if not stdout_file:
_, stdout_file = tempfile.mkstemp(
suffix='.txt', prefix='stdout-', dir=self.output_dir)
result.log(
'Writing stdout to {0:s}'.format(stdout_file), level=logging.DEBUG)
with open(stdout_file, 'wb') as fh:
fh.write(stdout)
log_files.append(stdout_file)
log_files = list(set(log_files))
for file_ in log_files:
if not os.path.exists(file_):
result.log(
'Log file {0:s} does not exist to save'.format(file_),
level=logging.DEBUG)
continue
if os.path.getsize(file_) == 0:
result.log(
'Log file {0:s} is empty. Not saving'.format(file_),
level=logging.DEBUG)
continue
result.log('Output log file found at {0:s}'.format(file_))
self.output_manager.save_local_file(file_, result)
if ret not in success_codes:
message = 'Execution of [{0!s}] failed with status {1:d}'.format(cmd, ret)
result.log(message)
if close:
result.close(self, success=False, status=message)
else:
result.log('Execution of [{0!s}] succeeded'.format(cmd))
for file_ in save_files:
if os.path.getsize(file_) == 0:
result.log(
'Output file {0:s} is empty. Not saving'.format(file_),
level=logging.DEBUG)
continue
result.log('Output save file at {0:s}'.format(file_))
self.output_manager.save_local_file(file_, result)
for evidence in new_evidence:
# If the local path is set in the Evidence, we check to make sure that
# the path exists and is not empty before adding it.
if evidence.source_path and not os.path.exists(evidence.source_path):
message = (
'Evidence {0:s} source_path {1:s} does not exist. Not returning '
'empty Evidence.'.format(evidence.name, evidence.source_path))
result.log(message, level=logging.WARN)
elif (evidence.source_path and os.path.exists(evidence.source_path) and
os.path.getsize(evidence.source_path) == 0):
message = (
'Evidence {0:s} source_path {1:s} is empty. Not returning '
'empty new Evidence.'.format(evidence.name, evidence.source_path))
result.log(message, level=logging.WARN)
else:
result.add_evidence(evidence, self._evidence_config)
if close:
result.close(self, success=True)
return ret, result
def setup(self, evidence):
"""Perform common setup operations and runtime environment.
Even though TurbiniaTasks are initially instantiated by the Jobs under the
Task Manager, this setup method needs to be run from the task on the worker
because it handles setting up the task runtime environment.
Args:
evidence: An Evidence object to process.
Returns:
A TurbiniaTaskResult object.
Raises:
TurbiniaException: If the evidence can not be found.
"""
self.setup_metrics()
self.output_manager.setup(self.name, self.id, self.request_id)
self.tmp_dir, self.output_dir = self.output_manager.get_local_output_dirs()
if not self.result:
self.result = self.create_result(input_evidence=evidence)
if evidence.copyable and not config.SHARED_FILESYSTEM:
self.output_manager.retrieve_evidence(evidence)
if evidence.source_path and not os.path.exists(evidence.source_path):
raise TurbiniaException(
'Evidence source path {0:s} does not exist'.format(
evidence.source_path))
return self.result
def setup_metrics(self, task_list=None):
"""Sets up the application metrics.
Returns early with metrics if they are already setup.
Arguments:
task_list(list): List of Task names
Returns:
Dict: Mapping of task names to metrics objects.
"""
global METRICS
if METRICS:
return METRICS
if not task_list:
task_loader = task_utils.TaskLoader()
task_list = task_loader.get_task_names()
for task_name in task_list:
task_name = task_name.lower()
if task_name in METRICS:
continue
metric = Histogram(
'{0:s}_duration_seconds'.format(task_name),
'Seconds to run {0:s}'.format(task_name))
METRICS[task_name] = metric
log.debug('Registered {0:d} task metrics'.format(len(METRICS)))
return METRICS
def touch(self):
"""Updates the last_update time of the task."""
self.last_update = datetime.now()
def create_result(
self, input_evidence=None, status=None, message=None, trace=None,
no_output_manager=False):
"""Creates a new TurbiniaTaskResults and instantiates the result.
Args:
input_evidence(Evidence): The evidence being processed by this Task.
status(str): A one line descriptive task status.
message(str): An error message to show when returning the result.
      trace: Stack traceback for errors.
      no_output_manager(bool): If True, do not set up an output manager for
        the result.
    Returns:
      TurbiniaTaskResult: The newly created result object.
    """
result = TurbiniaTaskResult(
base_output_dir=self.base_output_dir, request_id=self.request_id,
job_id=self.job_id, input_evidence=input_evidence,
no_output_manager=no_output_manager)
result.setup(self)
if message:
if status:
result.status = '{0:s}. Previous status: [{1:s}]'.format(
message, status)
else:
result.status = message
result.set_error(message, trace)
return result
def validate_result(self, result):
"""Checks to make sure that the result is valid.
We occasionally get something added into a TurbiniaTaskResult that makes
it unpickleable. We don't necessarily know what caused it to be in that
state, so we need to create a new, mostly empty result so that the client
is able to get the error message (otherwise the task will stay pending
indefinitely).
Args:
result (TurbiniaTaskResult): Result object to check
Returns:
The original result object if it is OK, otherwise an empty result object
indicating a failure.
"""
message = None
check_status = 'Successful'
if isinstance(result, TurbiniaTaskResult):
try:
log.debug('Checking TurbiniaTaskResult for pickle serializability')
pickle.dumps(result.serialize())
except (TypeError, pickle.PicklingError) as exception:
message = (
'Error pickling TurbiniaTaskResult object. Returning a new result '
'with the pickling error, and all previous result data will be '
'lost. Pickle Error: {0!s}'.format(exception))
try:
log.debug('Checking TurbiniaTaskResult for JSON serializability')
json.dumps(result.serialize())
except (TypeError) as exception:
message = (
'Error JSON serializing TurbiniaTaskResult object. Returning a new '
'result with the JSON error, and all previous result data will '
'be lost. JSON Error: {0!s}'.format(exception))
else:
message = (
'Task returned type [{0!s}] instead of TurbiniaTaskResult.').format(
type(result))
if message:
log.error(message)
if result and hasattr(result, 'status') and result.status:
status = result.status
else:
status = 'No previous status'
result = self.create_result(
status=status, message=message, trace=traceback.format_exc())
result.close(self, success=False, status=message)
check_status = 'Failed, but replaced with empty result'
log.info('Result check: {0:s}'.format(check_status))
return result
def get_task_recipe(self, recipe):
"""Creates and validates a recipe for the specified task.
Args:
recipe (dict): The full request recipe data.
Returns:
Dict: Recipe data specific to the current Task
"""
recipe_data = deepcopy(self.TASK_CONFIG)
for _, task_recipe in recipe.items():
if isinstance(task_recipe, dict):
task = task_recipe.get('task', None)
if task and task == self.name and self.validate_task_conf(task_recipe):
log.debug(
'Setting recipe data for task {0:s}: {1!s}'.format(
task, task_recipe))
recipe_data.update(task_recipe)
recipe_data.pop('task')
break
recipe_data.update(recipe['globals'])
return recipe_data
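  # Illustrative recipe resolution (an assumption, not from the original docs):
  # if TASK_CONFIG were {'keyword': None} and the request recipe contained
  #   {'globals': {'jobs_denylist': []},
  #    'my_task': {'task': '<this task name>', 'keyword': 'foo'}},
  # then get_task_recipe() would return
  #   {'keyword': 'foo', 'jobs_denylist': []}
  # for this task.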
def run_wrapper(self, evidence):
"""Wrapper to manage TurbiniaTaskResults and exception handling.
This wrapper should be called to invoke the run() methods so it can handle
the management of TurbiniaTaskResults and the exception handling. Otherwise
details from exceptions in the worker cannot be propagated back to the
Turbinia TaskManager.
This method should handle (in no particular order):
- Exceptions thrown from run()
- Verifying valid TurbiniaTaskResult object is returned
- Check for bad results (non TurbiniaTaskResults) returned from run()
- Auto-close results that haven't been closed
- Verifying that the results are serializeable
- Locking to make sure only one task is active at a time
Args:
evidence (dict): To be decoded into Evidence object
Returns:
A TurbiniaTaskResult object
"""
# Avoid circular dependency.
from turbinia.jobs import manager as job_manager
log.debug('Task {0:s} {1:s} awaiting execution'.format(self.name, self.id))
evidence = evidence_decode(evidence)
try:
self.result = self.setup(evidence)
self.result.update_task_status(self, 'queued')
turbinia_worker_tasks_queued_total.inc()
task_runtime_metrics = self.get_metrics()
except TurbiniaException as exception:
message = (
'{0:s} Task setup failed with exception: [{1!s}]'.format(
self.name, exception))
# Logging explicitly here because the result is in an unknown state
trace = traceback.format_exc()
log.error(message)
log.error(trace)
if self.result:
if hasattr(exception, 'message'):
self.result.set_error(exception.message, traceback.format_exc())
else:
self.result.set_error(exception.__class__, traceback.format_exc())
self.result.status = message
else:
self.result = self.create_result(
message=message, trace=traceback.format_exc())
return self.result.serialize()
log.info('Starting Task {0:s} {1:s}'.format(self.name, self.id))
original_result_id = None
turbinia_worker_tasks_started_total.inc()
with task_runtime_metrics.time():
try:
original_result_id = self.result.id
# Check if Task's job is available for the worker.
active_jobs = list(job_manager.JobsManager.GetJobNames())
if self.job_name.lower() not in active_jobs:
message = (
'Task will not run because the job {0:s} is disabled '
'on the worker.'.format(self.job_name))
self.result.log(message, level=logging.ERROR)
self.result.status = message
return self.result.serialize()
self.evidence_setup(evidence)
if self.turbinia_version != turbinia.__version__:
message = (
'Worker and Server versions do not match: {0:s} != {1:s}'.format(
self.turbinia_version, turbinia.__version__))
self.result.log(message, level=logging.ERROR)
self.result.status = message
self.result.successful = False
return self.result.serialize()
self.result.update_task_status(self, 'running')
self._evidence_config = evidence.config
self.task_config = self.get_task_recipe(evidence.config)
self.result = self.run(evidence, self.result)
# pylint: disable=broad-except
except Exception as exception:
message = (
'{0:s} Task failed with exception: [{1!s}]'.format(
self.name, exception))
# Logging explicitly here because the result is in an unknown state
trace = traceback.format_exc()
log_and_report(message, trace)
if self.result:
self.result.log(message, level=logging.ERROR)
self.result.log(trace)
if hasattr(exception, 'message'):
self.result.set_error(exception.message, traceback.format_exc())
else:
self.result.set_error(exception.__class__, traceback.format_exc())
self.result.status = message
else:
log.error('No TurbiniaTaskResult object found after task execution.')
self.result = self.validate_result(self.result)
if self.result:
self.result.update_task_status(self)
# Trying to close the result if possible so that we clean up what we can.
# This has a higher likelihood of failing because something must have gone
# wrong as the Task should have already closed this.
if self.result and not self.result.closed:
message = 'Making a last-ditch attempt to close the result'
log.warning(message)
self.result.log(message)
if self.result.status:
status = self.result.status
else:
status = 'No previous status'
message = (
'Task Result was auto-closed by the task executor on {0:s}, likely '
'due to previous failures. Previous status: [{1:s}]'.format(
self.result.worker_name, status))
self.result.log(message)
try:
self.result.close(self, False, message)
# Using broad except here because lots can go wrong due to the reasons
# listed above.
# pylint: disable=broad-except
except Exception as exception:
log.error('TurbiniaTaskResult close failed: {0!s}'.format(exception))
if not self.result.status:
self.result.status = message
# Check the result again after closing to make sure it's still good.
self.result = self.validate_result(self.result)
if original_result_id != self.result.id:
log.debug(
'Result object {0:s} is different from original {1!s} after task '
'execution, which indicates errors during execution'.format(
self.result.id, original_result_id))
else:
log.debug(
'Returning original result object {0:s} after task execution'.format(
self.result.id))
return self.result.serialize()
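# Illustrative only (assumed caller pattern, not shown in this module): the task
# manager side is expected to hand serialized Evidence to run_wrapper() and to
# receive a serialized (dict) TurbiniaTaskResult back, roughly:
#
#   serialized_result = task.run_wrapper(evidence.serialize())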
def run(self, evidence, result):
"""Entry point to execute the task.
Args:
evidence: Evidence object.
result: A TurbiniaTaskResult object to place task results into.
Returns:
TurbiniaTaskResult object.
"""
raise NotImplementedError
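# A minimal sketch (added for illustration, not part of the original Turbinia
# module): a concrete task subclasses TurbiniaTask and implements run(), using
# the supplied TurbiniaTaskResult for logging and status. The task below and its
# status text are assumptions, not a real Turbinia task.
class ExampleNoopTask(TurbiniaTask):
  """Example task that records evidence metadata and closes its result."""

  def run(self, evidence, result):
    result.log('ExampleNoopTask inspecting evidence {0!s}'.format(evidence.name))
    result.close(self, success=True, status='ExampleNoopTask completed')
    return result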
|
the-stack_0_5044 | # coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.configuration import Configuration
class TpdmTeacherPreparationProviderProgramGradeLevel(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'grade_level_descriptor': 'str'
}
attribute_map = {
'grade_level_descriptor': 'gradeLevelDescriptor'
}
def __init__(self, grade_level_descriptor=None, _configuration=None): # noqa: E501
"""TpdmTeacherPreparationProviderProgramGradeLevel - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._grade_level_descriptor = None
self.discriminator = None
self.grade_level_descriptor = grade_level_descriptor
@property
def grade_level_descriptor(self):
"""Gets the grade_level_descriptor of this TpdmTeacherPreparationProviderProgramGradeLevel. # noqa: E501
The grade levels served at the TPP Program. # noqa: E501
:return: The grade_level_descriptor of this TpdmTeacherPreparationProviderProgramGradeLevel. # noqa: E501
:rtype: str
"""
return self._grade_level_descriptor
@grade_level_descriptor.setter
def grade_level_descriptor(self, grade_level_descriptor):
"""Sets the grade_level_descriptor of this TpdmTeacherPreparationProviderProgramGradeLevel.
The grade levels served at the TPP Program. # noqa: E501
:param grade_level_descriptor: The grade_level_descriptor of this TpdmTeacherPreparationProviderProgramGradeLevel. # noqa: E501
:type: str
"""
if self._configuration.client_side_validation and grade_level_descriptor is None:
raise ValueError("Invalid value for `grade_level_descriptor`, must not be `None`") # noqa: E501
if (self._configuration.client_side_validation and
grade_level_descriptor is not None and len(grade_level_descriptor) > 306):
raise ValueError("Invalid value for `grade_level_descriptor`, length must be less than or equal to `306`") # noqa: E501
self._grade_level_descriptor = grade_level_descriptor
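# Illustrative only (hypothetical values): with client-side validation enabled,
# the setter above rejects missing or over-long descriptors, e.g.
#
#   level.grade_level_descriptor = None        # raises ValueError
#   level.grade_level_descriptor = 'x' * 400   # raises ValueError (max is 306)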
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TpdmTeacherPreparationProviderProgramGradeLevel, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TpdmTeacherPreparationProviderProgramGradeLevel):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TpdmTeacherPreparationProviderProgramGradeLevel):
return True
return self.to_dict() != other.to_dict()
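# A minimal usage sketch (added for illustration; the descriptor URI below is an
# assumption and may not match a real Ed-Fi descriptor value):
if __name__ == '__main__':
    level = TpdmTeacherPreparationProviderProgramGradeLevel(
        grade_level_descriptor='uri://ed-fi.org/GradeLevelDescriptor#Ninth grade')
    print(level.to_dict())  # {'grade_level_descriptor': 'uri://...#Ninth grade'}
    print(level.to_str())   # pretty-printed form of the same dict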
|
the-stack_0_5045 | from flask import Flask
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
class HelloWorld(Resource):
def get(self):
return {'hello': 'world'}
api.add_resource(HelloWorld, '/')
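# Illustrative usage only (host and port assumed from the app.run() call below):
# with the server running, the root resource answers GET requests, e.g. via the
# requests library:
#
#   import requests
#   resp = requests.get('http://localhost:5000/')
#   assert resp.json() == {'hello': 'world'}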
if __name__ == '__main__':
app.run(debug=True, host='0.0.0.0') |