ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3–1.04M)
---|---|---
py | 1a516d5ffb0369d7a1b70fe34e3eb8b8372ac05f | # Generated by Django 3.1.7 on 2021-05-10 06:50
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('gyoithon', '0011_auto_20210510_1545'),
]
operations = [
migrations.RemoveField(
model_name='domain',
name='organization_id',
),
migrations.AddField(
model_name='domain',
name='related_organization_id',
field=models.IntegerField(default=0, verbose_name='Related Organization ID'),
preserve_default=False,
),
migrations.AlterField(
model_name='domain',
name='registration_date',
field=models.DateTimeField(default=datetime.datetime(2021, 5, 10, 6, 50, 14, 780669, tzinfo=utc)),
),
migrations.AlterField(
model_name='organization',
name='registration_date',
field=models.DateTimeField(default=datetime.datetime(2021, 5, 10, 6, 50, 14, 780063, tzinfo=utc)),
),
migrations.AlterField(
model_name='subdomain',
name='registration_date',
field=models.DateTimeField(default=datetime.datetime(2021, 5, 10, 6, 50, 14, 781293, tzinfo=utc)),
),
]
|
py | 1a516f3d944ecf89703351bc6e54a4c52c2c7daf | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import codecs
import sys
from stacker.session_cache import get_session
from . import LookupHandler
from ...util import read_value_from_path
TYPE_NAME = "kms"
class KmsLookup(LookupHandler):
@classmethod
def handle(cls, value, **kwargs):
"""Decrypt the specified value with a master key in KMS.
kmssimple field types should be in the following format:
[<region>@]<base64 encrypted value>
Note: The region is optional, and defaults to the environment's
`AWS_DEFAULT_REGION` if not specified.
For example:
# We use the aws cli to get the encrypted value for the string
# "PASSWORD" using the master key called "myStackerKey" in
# us-east-1
$ aws --region us-east-1 kms encrypt --key-id alias/myStackerKey \
--plaintext "PASSWORD" --output text --query CiphertextBlob
CiD6bC8t2Y<...encrypted blob...>
# In stacker we would reference the encrypted value like:
conf_key: ${kms us-east-1@CiD6bC8t2Y<...encrypted blob...>}
You can optionally store the encrypted value in a file, ie:
kms_value.txt
us-east-1@CiD6bC8t2Y<...encrypted blob...>
and reference it within stacker (NOTE: the path should be relative
to the stacker config file):
conf_key: ${kms file://kms_value.txt}
# Both of the above would resolve to
conf_key: PASSWORD
"""
value = read_value_from_path(value)
region = None
if "@" in value:
region, value = value.split("@", 1)
kms = get_session(region).client('kms')
# encode str value as an utf-8 bytestring for use with codecs.decode.
value = value.encode('utf-8')
# get raw but still encrypted value from base64 version.
decoded = codecs.decode(value, 'base64')
# check python version in your system
python3_or_later = sys.version_info[0] >= 3
# decrypt and return the plain text raw value.
if python3_or_later:
return kms.decrypt(CiphertextBlob=decoded)["Plaintext"]\
.decode('utf-8')
else:
return kms.decrypt(CiphertextBlob=decoded)["Plaintext"]
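# Illustrative sketch (not part of the original module; the import path and ciphertext
# below are placeholders): how this handler might be invoked directly.
#
#   from stacker.lookups.handlers.kms import KmsLookup
#   plaintext = KmsLookup.handle("us-east-1@CiD6bC8t2Y<...encrypted blob...>")
#
# `handle` strips the optional "<region>@" prefix, base64-decodes the remainder and
# calls KMS Decrypt using the session credentials for that region.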
|
py | 1a516f3f82a0a145d2fd63acc463c6935ff186b3 | # -*- coding: utf-8 -*-
from difference import main
if __name__ == '__main__':
main()
|
py | 1a516f92bd203d3c9d047cb5be5ba1809a4d3f1d | import datetime
from functools import partial
import numpy as np
import regex as re
import toolz
from multipledispatch import Dispatcher
import ibis
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.lineage as lin
import ibis.expr.operations as ops
import ibis.expr.types as ir
import ibis.sql.compiler as comp
from ibis.bigquery.datatypes import ibis_type_to_bigquery_type
from ibis.impala import compiler as impala_compiler
from ibis.impala.compiler import (
ImpalaSelect,
ImpalaTableSetFormatter,
_reduction,
fixed_arity,
unary,
)
class BigQueryUDFNode(ops.ValueOp):
pass
class BigQuerySelectBuilder(comp.SelectBuilder):
@property
def _select_class(self):
return BigQuerySelect
class BigQueryUDFDefinition(comp.DDL):
def __init__(self, expr, context):
self.expr = expr
self.context = context
def compile(self):
return self.expr.op().js
class BigQueryUnion(comp.Union):
@staticmethod
def keyword(distinct):
return 'UNION DISTINCT' if distinct else 'UNION ALL'
def find_bigquery_udf(expr):
if isinstance(expr.op(), BigQueryUDFNode):
result = expr
else:
result = None
return lin.proceed, result
class BigQueryQueryBuilder(comp.QueryBuilder):
select_builder = BigQuerySelectBuilder
union_class = BigQueryUnion
def generate_setup_queries(self):
queries = map(
partial(BigQueryUDFDefinition, context=self.context),
lin.traverse(find_bigquery_udf, self.expr),
)
# UDFs are uniquely identified by the name of the Node subclass we
# generate.
return list(
toolz.unique(queries, key=lambda x: type(x.expr.op()).__name__)
)
def build_ast(expr, context):
builder = BigQueryQueryBuilder(expr, context=context)
return builder.get_result()
def to_sql(expr, context):
query_ast = build_ast(expr, context)
compiled = query_ast.compile()
return compiled
class BigQueryContext(comp.QueryContext):
def _to_sql(self, expr, ctx):
return to_sql(expr, context=ctx)
def _extract_field(sql_attr):
def extract_field_formatter(translator, expr):
op = expr.op()
arg = translator.translate(op.args[0])
return 'EXTRACT({} from {})'.format(sql_attr, arg)
return extract_field_formatter
bigquery_cast = Dispatcher('bigquery_cast')
@bigquery_cast.register(str, dt.Timestamp, dt.Integer)
def bigquery_cast_timestamp_to_integer(compiled_arg, from_, to):
return 'UNIX_MICROS({})'.format(compiled_arg)
@bigquery_cast.register(str, dt.DataType, dt.DataType)
def bigquery_cast_generate(compiled_arg, from_, to):
sql_type = ibis_type_to_bigquery_type(to)
return 'CAST({} AS {})'.format(compiled_arg, sql_type)
def _cast(translator, expr):
op = expr.op()
arg, target_type = op.args
arg_formatted = translator.translate(arg)
return bigquery_cast(arg_formatted, arg.type(), target_type)
def _struct_field(translator, expr):
arg, field = expr.op().args
arg_formatted = translator.translate(arg)
return '{}.`{}`'.format(arg_formatted, field)
def _array_concat(translator, expr):
return 'ARRAY_CONCAT({})'.format(
', '.join(map(translator.translate, expr.op().args))
)
def _array_index(translator, expr):
# SAFE_OFFSET returns NULL if out of bounds
return '{}[SAFE_OFFSET({})]'.format(
*map(translator.translate, expr.op().args)
)
def _string_find(translator, expr):
haystack, needle, start, end = expr.op().args
if start is not None:
raise NotImplementedError('start not implemented for string find')
if end is not None:
raise NotImplementedError('end not implemented for string find')
return 'STRPOS({}, {}) - 1'.format(
translator.translate(haystack), translator.translate(needle)
)
def _translate_pattern(translator, pattern):
# add 'r' to string literals to indicate to BigQuery this is a raw string
return 'r' * isinstance(pattern.op(), ops.Literal) + translator.translate(
pattern
)
def _regex_search(translator, expr):
arg, pattern = expr.op().args
regex = _translate_pattern(translator, pattern)
result = 'REGEXP_CONTAINS({}, {})'.format(translator.translate(arg), regex)
return result
def _regex_extract(translator, expr):
arg, pattern, index = expr.op().args
regex = _translate_pattern(translator, pattern)
result = 'REGEXP_EXTRACT_ALL({}, {})[SAFE_OFFSET({})]'.format(
translator.translate(arg), regex, translator.translate(index)
)
return result
def _regex_replace(translator, expr):
arg, pattern, replacement = expr.op().args
regex = _translate_pattern(translator, pattern)
result = 'REGEXP_REPLACE({}, {}, {})'.format(
translator.translate(arg), regex, translator.translate(replacement)
)
return result
def _string_concat(translator, expr):
return 'CONCAT({})'.format(
', '.join(map(translator.translate, expr.op().arg))
)
def _string_join(translator, expr):
sep, args = expr.op().args
return 'ARRAY_TO_STRING([{}], {})'.format(
', '.join(map(translator.translate, args)), translator.translate(sep)
)
def _string_ascii(translator, expr):
(arg,) = expr.op().args
return 'TO_CODE_POINTS({})[SAFE_OFFSET(0)]'.format(
translator.translate(arg)
)
def _string_right(translator, expr):
arg, nchars = map(translator.translate, expr.op().args)
return 'SUBSTR({arg}, -LEAST(LENGTH({arg}), {nchars}))'.format(
arg=arg, nchars=nchars
)
def _array_literal_format(expr):
return str(list(expr.op().value))
def _log(translator, expr):
op = expr.op()
arg, base = op.args
arg_formatted = translator.translate(arg)
if base is None:
return 'ln({})'.format(arg_formatted)
base_formatted = translator.translate(base)
return 'log({}, {})'.format(arg_formatted, base_formatted)
def _literal(translator, expr):
if isinstance(expr, ir.NumericValue):
value = expr.op().value
if not np.isfinite(value):
return 'CAST({!r} AS FLOAT64)'.format(str(value))
# special case literal timestamp, date, and time scalars
if isinstance(expr.op(), ops.Literal):
value = expr.op().value
if isinstance(expr, ir.DateScalar):
if isinstance(value, datetime.datetime):
raw_value = value.date()
else:
raw_value = value
return "DATE '{}'".format(raw_value)
elif isinstance(expr, ir.TimestampScalar):
return "TIMESTAMP '{}'".format(value)
elif isinstance(expr, ir.TimeScalar):
# TODO: define extractors on TimeValue expressions
return "TIME '{}'".format(value)
try:
return impala_compiler._literal(translator, expr)
except NotImplementedError:
if isinstance(expr, ir.ArrayValue):
return _array_literal_format(expr)
raise NotImplementedError(type(expr).__name__)
def _arbitrary(translator, expr):
arg, how, where = expr.op().args
if where is not None:
arg = where.ifelse(arg, ibis.NA)
if how not in (None, 'first'):
raise com.UnsupportedOperationError(
'{!r} value not supported for arbitrary in BigQuery'.format(how)
)
return 'ANY_VALUE({})'.format(translator.translate(arg))
_date_units = {
'Y': 'YEAR',
'Q': 'QUARTER',
'W': 'WEEK',
'M': 'MONTH',
'D': 'DAY',
}
_timestamp_units = {
'us': 'MICROSECOND',
'ms': 'MILLISECOND',
's': 'SECOND',
'm': 'MINUTE',
'h': 'HOUR',
}
_time_units = _timestamp_units.copy()
_timestamp_units.update(_date_units)
def _truncate(kind, units):
def truncator(translator, expr):
arg, unit = expr.op().args
trans_arg = translator.translate(arg)
valid_unit = units.get(unit)
if valid_unit is None:
raise com.UnsupportedOperationError(
'BigQuery does not support truncating {} values to unit '
'{!r}'.format(arg.type(), unit)
)
return '{}_TRUNC({}, {})'.format(kind, trans_arg, valid_unit)
return truncator
def _timestamp_op(func, units):
def _formatter(translator, expr):
op = expr.op()
arg, offset = op.args
unit = offset.type().unit
if unit not in units:
raise com.UnsupportedOperationError(
'BigQuery does not allow binary operation '
'{} with INTERVAL offset {}'.format(func, unit)
)
formatted_arg = translator.translate(arg)
formatted_offset = translator.translate(offset)
result = '{}({}, {})'.format(func, formatted_arg, formatted_offset)
return result
return _formatter
STRFTIME_FORMAT_FUNCTIONS = {
dt.Date: 'DATE',
dt.Time: 'TIME',
dt.Timestamp: 'TIMESTAMP',
}
_operation_registry = impala_compiler._operation_registry.copy()
_operation_registry.update(
{
ops.ExtractYear: _extract_field('year'),
ops.ExtractMonth: _extract_field('month'),
ops.ExtractDay: _extract_field('day'),
ops.ExtractHour: _extract_field('hour'),
ops.ExtractMinute: _extract_field('minute'),
ops.ExtractSecond: _extract_field('second'),
ops.ExtractMillisecond: _extract_field('millisecond'),
ops.StringReplace: fixed_arity('REPLACE', 3),
ops.StringSplit: fixed_arity('SPLIT', 2),
ops.StringConcat: _string_concat,
ops.StringJoin: _string_join,
ops.StringAscii: _string_ascii,
ops.StringFind: _string_find,
ops.StrRight: _string_right,
ops.Repeat: fixed_arity('REPEAT', 2),
ops.RegexSearch: _regex_search,
ops.RegexExtract: _regex_extract,
ops.RegexReplace: _regex_replace,
ops.GroupConcat: _reduction('STRING_AGG'),
ops.IfNull: fixed_arity('IFNULL', 2),
ops.Cast: _cast,
ops.StructField: _struct_field,
ops.ArrayCollect: unary('ARRAY_AGG'),
ops.ArrayConcat: _array_concat,
ops.ArrayIndex: _array_index,
ops.ArrayLength: unary('ARRAY_LENGTH'),
ops.HLLCardinality: _reduction('APPROX_COUNT_DISTINCT'),
ops.Log: _log,
ops.Sign: unary('SIGN'),
ops.Modulus: fixed_arity('MOD', 2),
ops.Date: unary('DATE'),
# BigQuery doesn't have these operations built in.
# ops.ArrayRepeat: _array_repeat,
# ops.ArraySlice: _array_slice,
ops.Literal: _literal,
ops.Arbitrary: _arbitrary,
ops.TimestampTruncate: _truncate('TIMESTAMP', _timestamp_units),
ops.DateTruncate: _truncate('DATE', _date_units),
        ops.TimeTruncate: _truncate('TIME', _time_units),
ops.Time: unary('TIME'),
ops.TimestampAdd: _timestamp_op(
'TIMESTAMP_ADD', {'h', 'm', 's', 'ms', 'us'}
),
        ops.TimestampSub: _timestamp_op(
            'TIMESTAMP_SUB', {'h', 'm', 's', 'ms', 'us'}
        ),
ops.DateAdd: _timestamp_op('DATE_ADD', {'D', 'W', 'M', 'Q', 'Y'}),
ops.DateSub: _timestamp_op('DATE_SUB', {'D', 'W', 'M', 'Q', 'Y'}),
ops.TimestampNow: fixed_arity('CURRENT_TIMESTAMP', 0),
}
)
_invalid_operations = {
ops.Translate,
ops.FindInSet,
ops.Capitalize,
ops.DateDiff,
ops.TimestampDiff,
}
_operation_registry = {
k: v
for k, v in _operation_registry.items()
if k not in _invalid_operations
}
class BigQueryExprTranslator(impala_compiler.ImpalaExprTranslator):
_registry = _operation_registry
_rewrites = impala_compiler.ImpalaExprTranslator._rewrites.copy()
context_class = BigQueryContext
def _trans_param(self, expr):
op = expr.op()
if op not in self.context.params:
raise KeyError(op)
return '@{}'.format(expr.get_name())
compiles = BigQueryExprTranslator.compiles
rewrites = BigQueryExprTranslator.rewrites
@compiles(ops.DayOfWeekIndex)
def bigquery_day_of_week_index(t, e):
arg = e.op().args[0]
arg_formatted = t.translate(arg)
return 'MOD(EXTRACT(DAYOFWEEK FROM {}) + 5, 7)'.format(arg_formatted)
@rewrites(ops.DayOfWeekName)
def bigquery_day_of_week_name(e):
arg = e.op().args[0]
return arg.strftime('%A')
@compiles(ops.Divide)
def bigquery_compiles_divide(t, e):
return 'IEEE_DIVIDE({}, {})'.format(*map(t.translate, e.op().args))
@compiles(ops.Strftime)
def compiles_strftime(translator, expr):
arg, format_string = expr.op().args
arg_type = arg.type()
strftime_format_func_name = STRFTIME_FORMAT_FUNCTIONS[type(arg_type)]
fmt_string = translator.translate(format_string)
arg_formatted = translator.translate(arg)
if isinstance(arg_type, dt.Timestamp):
return 'FORMAT_{}({}, {}, {!r})'.format(
strftime_format_func_name,
fmt_string,
arg_formatted,
arg_type.timezone if arg_type.timezone is not None else 'UTC',
)
return 'FORMAT_{}({}, {})'.format(
strftime_format_func_name, fmt_string, arg_formatted
)
@compiles(ops.StringToTimestamp)
def compiles_string_to_timestamp(translator, expr):
arg, format_string, timezone_arg = expr.op().args
fmt_string = translator.translate(format_string)
arg_formatted = translator.translate(arg)
if timezone_arg is not None:
timezone_str = translator.translate(timezone_arg)
return 'PARSE_TIMESTAMP({}, {}, {})'.format(
fmt_string, arg_formatted, timezone_str
)
return 'PARSE_TIMESTAMP({}, {})'.format(fmt_string, arg_formatted)
class BigQueryTableSetFormatter(ImpalaTableSetFormatter):
def _quote_identifier(self, name):
if re.match(r'^[A-Za-z][A-Za-z_0-9]*$', name):
return name
return '`{}`'.format(name)
class BigQuerySelect(ImpalaSelect):
translator = BigQueryExprTranslator
@property
def table_set_formatter(self):
return BigQueryTableSetFormatter
@rewrites(ops.IdenticalTo)
def identical_to(expr):
left, right = expr.op().args
return (left.isnull() & right.isnull()) | (left == right)
@rewrites(ops.Log2)
def log2(expr):
(arg,) = expr.op().args
return arg.log(2)
@rewrites(ops.Sum)
def bq_sum(expr):
arg = expr.op().args[0]
where = expr.op().args[1]
if isinstance(arg, ir.BooleanColumn):
return arg.cast('int64').sum(where=where)
else:
return expr
@rewrites(ops.Mean)
def bq_mean(expr):
arg = expr.op().args[0]
where = expr.op().args[1]
if isinstance(arg, ir.BooleanColumn):
return arg.cast('int64').mean(where=where)
else:
return expr
UNIT_FUNCS = {'s': 'SECONDS', 'ms': 'MILLIS', 'us': 'MICROS'}
@compiles(ops.TimestampFromUNIX)
def compiles_timestamp_from_unix(t, e):
value, unit = e.op().args
return 'TIMESTAMP_{}({})'.format(UNIT_FUNCS[unit], t.translate(value))
@compiles(ops.Floor)
def compiles_floor(t, e):
bigquery_type = ibis_type_to_bigquery_type(e.type())
arg = e.op().arg
return 'CAST(FLOOR({}) AS {})'.format(t.translate(arg), bigquery_type)
@compiles(ops.CMSMedian)
def compiles_approx(translator, expr):
expr = expr.op()
arg = expr.arg
where = expr.where
if where is not None:
arg = where.ifelse(arg, ibis.NA)
return 'APPROX_QUANTILES({}, 2)[OFFSET(1)]'.format(
translator.translate(arg)
)
@compiles(ops.Covariance)
def compiles_covar(translator, expr):
expr = expr.op()
left = expr.left
right = expr.right
where = expr.where
if expr.how == 'sample':
how = 'SAMP'
elif expr.how == 'pop':
how = 'POP'
else:
        raise ValueError(
            "Covariance with how={!r} is not supported.".format(expr.how)
        )
if where is not None:
left = where.ifelse(left, ibis.NA)
right = where.ifelse(right, ibis.NA)
return "COVAR_{}({}, {})".format(how, left, right)
@rewrites(ops.Any)
@rewrites(ops.All)
@rewrites(ops.NotAny)
@rewrites(ops.NotAll)
def bigquery_any_all_no_op(expr):
return expr
@compiles(ops.Any)
def bigquery_compile_any(translator, expr):
return "LOGICAL_OR({})".format(*map(translator.translate, expr.op().args))
@compiles(ops.NotAny)
def bigquery_compile_notany(translator, expr):
return "LOGICAL_AND(NOT ({}))".format(
*map(translator.translate, expr.op().args)
)
@compiles(ops.All)
def bigquery_compile_all(translator, expr):
return "LOGICAL_AND({})".format(*map(translator.translate, expr.op().args))
@compiles(ops.NotAll)
def bigquery_compile_notall(translator, expr):
return "LOGICAL_OR(NOT ({}))".format(
*map(translator.translate, expr.op().args)
)
class BigQueryDialect(impala_compiler.ImpalaDialect):
translator = BigQueryExprTranslator
dialect = BigQueryDialect
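# Illustrative sketch (not in the original module; the table schema is a placeholder and
# the context construction is an assumption that varies across ibis versions): compiling
# an expression through this dialect.
#
#   t = ibis.table([('ts', 'timestamp'), ('value', 'double')], name='events')
#   expr = t.value.sum()
#   context = BigQueryDialect.make_context()   # assumed helper from ibis.sql.compiler
#   print(to_sql(expr, context))
#
# `to_sql` builds the query AST with BigQueryQueryBuilder and renders it through
# BigQueryExprTranslator, so the operation registry above decides the emitted SQL.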
|
py | 1a517008090c3fb4dd37e66901e010facfa6850a | # -*- coding: utf-8 -*-
'''
Created on 31.10.2014
@author: Simon Gwerder
'''
import xml.etree.ElementTree as ET
import requests
from ordered_set import OrderedSet
from utilities import utils
from utilities.configloader import ConfigLoader
from utilities.retry import retry
from vocabularybase import VocabularyBase
class OpenThesaurus(VocabularyBase):
cl = ConfigLoader()
apiPrefix = cl.getOpenThesaurusAPIString('API_URL')
apiSuffix = cl.getOpenThesaurusAPIString('API_URL_SUFFIX')
relatedSet = OrderedSet()
broaderSet = OrderedSet()
narrowerSet = OrderedSet()
def __init__(self, searchTerm, language):
VocabularyBase.__init__(self, searchTerm, language)
self.relatedSet = OrderedSet()
self.broaderSet = OrderedSet()
self.narrowerSet = OrderedSet()
self.supportedLang.append('de')
if language in self.supportedLang:
for word in self.searchTerms:
result = self.apiCall(word, language)
if result.status_code < 400:
xmlString = result.text
self.parseXML(xmlString)
if len(self.relatedSet) > 0:
break
@retry(Exception, tries=3)
def apiCall(self, word, apiLang):
return requests.get(self.apiPrefix + word + self.apiSuffix)
def parseXML(self, xmlString):
root = ET.fromstring(xmlString)
for levelOne in root:
if levelOne.tag == 'synset':
synsetOne = levelOne
for levelTwo in synsetOne:
if levelTwo.tag == 'term':
synonym = levelTwo.attrib['term']
self.relatedSet.append(utils.eszettToSS(synonym))
elif levelTwo.tag == 'supersynsets':
for levelThree in levelTwo:
if levelThree.tag == 'synset':
for levelFour in levelThree:
if levelFour.tag == 'term':
broader = levelFour.attrib['term']
self.broaderSet.append(utils.eszettToSS(broader))
elif levelTwo.tag == 'subsynsets':
for levelThree in levelTwo:
if levelThree.tag == 'synset':
for levelFour in levelThree:
if levelFour.tag == 'term':
narrower = levelFour.attrib['term']
self.narrowerSet.append(utils.eszettToSS(narrower))
def getRelated(self):
return self.relatedSet
def getNarrower(self):
return self.narrowerSet
def getBroader(self):
return self.broaderSet
def checkConnection(self):
response = self.apiCall('test', 'de')
if response is not None and response.status_code < 400:
return True
return False
if __name__ == '__main__':
ot = OpenThesaurus('Coiffeur', 'de')
print "Related: "
for related in ot.getRelated():
print related
print "\nNarrower: "
for narrower in ot.getNarrower():
print narrower
print "\nBroader: "
for broader in ot.getBroader():
print broader
|
py | 1a5170c0c09550921c5adfa22e1026f74ed0951c | """This module contains the general information for SwatTarget ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class SwatTargetConsts:
pass
class SwatTarget(ManagedObject):
"""This is SwatTarget class."""
consts = SwatTargetConsts()
naming_props = set([u'varName', u'varValue'])
mo_meta = MoMeta("SwatTarget", "swatTarget", "target-[var_name]-[var_value]", VersionMeta.Version101e, "InputOutput", 0x7f, [], ["admin"], [u'swatAction'], [], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"var_name": MoPropertyMeta("var_name", "varName", "string", VersionMeta.Version101e, MoPropertyMeta.NAMING, 0x20, None, None, r"""[\-\.:_a-zA-Z0-9]{1,16}""", [], []),
"var_value": MoPropertyMeta("var_value", "varValue", "string", VersionMeta.Version101e, MoPropertyMeta.NAMING, 0x40, None, None, r"""[\-\.:_a-zA-Z0-9]{1,16}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"varName": "var_name",
"varValue": "var_value",
}
def __init__(self, parent_mo_or_dn, var_name, var_value, **kwargs):
self._dirty_mask = 0
self.var_name = var_name
self.var_value = var_value
self.child_action = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "SwatTarget", parent_mo_or_dn, **kwargs)
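# Illustrative sketch (not part of the generated module; the parent DN and naming values
# are placeholders): constructing an instance under a parent swatAction object.
#
#   mo = SwatTarget(parent_mo_or_dn="sys/swat-action", var_name="var1", var_value="42")
#
# The rn is derived from the naming properties as "target-[var_name]-[var_value]".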
|
py | 1a51726f0e8ef9fd60c28957b5350b8ca65abbfd | """
Gathers all models in one place for convenient imports
"""
# flake8: noqa
from deepchem.models.models import Model
from deepchem.models.multitask import SingletaskToMultitask
from deepchem.models.wandblogger import WandbLogger
from deepchem.models.callbacks import ValidationCallback
# Tensorflow Depedency Models
try:
from deepchem.models.keras_model import KerasModel
from deepchem.models.IRV import MultitaskIRVClassifier
from deepchem.models.robust_multitask import RobustMultitaskClassifier
from deepchem.models.robust_multitask import RobustMultitaskRegressor
from deepchem.models.progressive_multitask import ProgressiveMultitaskRegressor, ProgressiveMultitaskClassifier
from deepchem.models.graph_models import WeaveModel, DTNNModel, DAGModel, GraphConvModel, MPNNModel
from deepchem.models.scscore import ScScoreModel
from deepchem.models.seqtoseq import SeqToSeq
from deepchem.models.gan import GAN, WGAN
from deepchem.models.molgan import BasicMolGANModel
from deepchem.models.cnn import CNN
from deepchem.models.text_cnn import TextCNNModel
from deepchem.models.atomic_conv import AtomicConvModel
from deepchem.models.chemnet_models import Smiles2Vec, ChemCeption
except ModuleNotFoundError:
pass
# scikit-learn model
from deepchem.models.sklearn_models import SklearnModel
from deepchem.models.gbdt_models import GBDTModel
# PyTorch models
try:
from deepchem.models.torch_models import TorchModel
from deepchem.models.torch_models import AttentiveFP, AttentiveFPModel
from deepchem.models.torch_models import CGCNN, CGCNNModel
from deepchem.models.torch_models import GAT, GATModel
from deepchem.models.torch_models import GCN, GCNModel
from deepchem.models.torch_models import LCNN, LCNNModel
from deepchem.models.torch_models import Pagtn, PagtnModel
from deepchem.models.fcnet import MultitaskRegressor, MultitaskClassifier, MultitaskFitTransformRegressor
from deepchem.models.torch_models import MEGNetModel
except ModuleNotFoundError:
pass
# Jax models
try:
from deepchem.models.jax_models import JaxModel
from deepchem.models.jax_models import PINNModel
except ModuleNotFoundError:
pass
#####################################################################################
# Compatibility imports for renamed XGBoost models. Remove below with DeepChem 3.0.
#####################################################################################
from deepchem.models.gbdt_models.gbdt_model import XGBoostModel
########################################################################################
# Compatibility imports for renamed TensorGraph models. Remove below with DeepChem 3.0.
########################################################################################
try:
from deepchem.models.text_cnn import TextCNNTensorGraph
from deepchem.models.graph_models import WeaveTensorGraph, DTNNTensorGraph, DAGTensorGraph, GraphConvTensorGraph, MPNNTensorGraph
from deepchem.models.IRV import TensorflowMultitaskIRVClassifier
except ModuleNotFoundError:
pass
|
py | 1a5172a0a41356438f05a769e9aaba9e42591e73 | import re
# io.open is needed for projects that support Python 2.7
# It ensures open() defaults to text mode with universal newlines,
# and accepts an argument to specify the text encoding
# Python 3 only projects can skip this import and use built-in open()
from io import open as io_open
from os import path
from setuptools import setup
here = path.abspath(path.dirname(__file__))
def readall(*args):
with io_open(path.join(here, *args), encoding="utf-8") as fp:
return fp.read()
metadata = dict(
re.findall(r"""__([a-z]+)__ = "([^"]+)""",
readall("websocket_commands", "__init__.py"))
)
setup(
name='websocket-commands',
version=metadata['version'],
packages=['websocket_commands'],
url='http://github.com/en-lofty/websocket-commands.git',
license='',
author='raphael',
author_email='[email protected]',
description='A library that makes communicating between frontend and '
'backend websockets simple.',
install_requires=['deprecation', ]
)
|
py | 1a5172bcf5600f46236bd811222327081bb92633 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetPeeringResult',
'AwaitableGetPeeringResult',
'get_peering',
'get_peering_output',
]
@pulumi.output_type
class GetPeeringResult:
def __init__(__self__, authorized_network=None, create_time=None, domain_resource=None, labels=None, name=None, state=None, status_message=None, update_time=None):
if authorized_network and not isinstance(authorized_network, str):
raise TypeError("Expected argument 'authorized_network' to be a str")
pulumi.set(__self__, "authorized_network", authorized_network)
if create_time and not isinstance(create_time, str):
raise TypeError("Expected argument 'create_time' to be a str")
pulumi.set(__self__, "create_time", create_time)
if domain_resource and not isinstance(domain_resource, str):
raise TypeError("Expected argument 'domain_resource' to be a str")
pulumi.set(__self__, "domain_resource", domain_resource)
if labels and not isinstance(labels, dict):
raise TypeError("Expected argument 'labels' to be a dict")
pulumi.set(__self__, "labels", labels)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if status_message and not isinstance(status_message, str):
raise TypeError("Expected argument 'status_message' to be a str")
pulumi.set(__self__, "status_message", status_message)
if update_time and not isinstance(update_time, str):
raise TypeError("Expected argument 'update_time' to be a str")
pulumi.set(__self__, "update_time", update_time)
@property
@pulumi.getter(name="authorizedNetwork")
def authorized_network(self) -> str:
"""
The full names of the Google Compute Engine [networks](/compute/docs/networks-and-firewalls#networks) to which the instance is connected. Caller needs to make sure that CIDR subnets do not overlap between networks, else peering creation will fail.
"""
return pulumi.get(self, "authorized_network")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> str:
"""
The time the instance was created.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter(name="domainResource")
def domain_resource(self) -> str:
"""
Full domain resource path for the Managed AD Domain involved in peering. The resource path should be in the form: `projects/{project_id}/locations/global/domains/{domain_name}`
"""
return pulumi.get(self, "domain_resource")
@property
@pulumi.getter
def labels(self) -> Mapping[str, str]:
"""
Optional. Resource labels to represent user-provided metadata.
"""
return pulumi.get(self, "labels")
@property
@pulumi.getter
def name(self) -> str:
"""
Unique name of the peering in this scope including projects and location using the form: `projects/{project_id}/locations/global/peerings/{peering_id}`.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def state(self) -> str:
"""
The current state of this Peering.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="statusMessage")
def status_message(self) -> str:
"""
Additional information about the current status of this peering, if available.
"""
return pulumi.get(self, "status_message")
@property
@pulumi.getter(name="updateTime")
def update_time(self) -> str:
"""
Last update time.
"""
return pulumi.get(self, "update_time")
class AwaitableGetPeeringResult(GetPeeringResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPeeringResult(
authorized_network=self.authorized_network,
create_time=self.create_time,
domain_resource=self.domain_resource,
labels=self.labels,
name=self.name,
state=self.state,
status_message=self.status_message,
update_time=self.update_time)
def get_peering(peering_id: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPeeringResult:
"""
Gets details of a single Peering.
"""
__args__ = dict()
__args__['peeringId'] = peering_id
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:managedidentities/v1:getPeering', __args__, opts=opts, typ=GetPeeringResult).value
return AwaitableGetPeeringResult(
authorized_network=__ret__.authorized_network,
create_time=__ret__.create_time,
domain_resource=__ret__.domain_resource,
labels=__ret__.labels,
name=__ret__.name,
state=__ret__.state,
status_message=__ret__.status_message,
update_time=__ret__.update_time)
@_utilities.lift_output_func(get_peering)
def get_peering_output(peering_id: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPeeringResult]:
"""
Gets details of a single Peering.
"""
...
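# Illustrative sketch (not part of the generated SDK file; the peering id and project are
# placeholders): both forms of the lookup.
#
#   result = get_peering(peering_id="my-peering", project="my-project")
#   pulumi.export("peeringState", result.state)
#
#   # Output-based variant, usable when the peering id is itself a pulumi.Output:
#   state = get_peering_output(peering_id=peering_id_output).state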
|
py | 1a517376682469ed7677ca7cc6bb584a8fd4605f | """
This package contains some JSON specific panels.
"""
from .navigation import NavigationPanel
__all__ = [
'NavigationPanel'
]
|
py | 1a51738cb77c278e6d15b4a2507792161d988adc | """Support for OASA Telematics from telematics.oasa.gr."""
from datetime import timedelta
import logging
from operator import itemgetter
import oasatelematics
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME, DEVICE_CLASS_TIMESTAMP
import homeassistant.helpers.config_validation as cv
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_STOP_ID = "stop_id"
ATTR_STOP_NAME = "stop_name"
ATTR_ROUTE_ID = "route_id"
ATTR_ROUTE_NAME = "route_name"
ATTR_NEXT_ARRIVAL = "next_arrival"
ATTR_SECOND_NEXT_ARRIVAL = "second_next_arrival"
ATTR_NEXT_DEPARTURE = "next_departure"
ATTRIBUTION = "Data retrieved from telematics.oasa.gr"
CONF_STOP_ID = "stop_id"
CONF_ROUTE_ID = "route_id"
DEFAULT_NAME = "OASA Telematics"
ICON = "mdi:bus"
SCAN_INTERVAL = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_STOP_ID): cv.string,
vol.Required(CONF_ROUTE_ID): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the OASA Telematics sensor."""
name = config[CONF_NAME]
stop_id = config[CONF_STOP_ID]
route_id = config.get(CONF_ROUTE_ID)
data = OASATelematicsData(stop_id, route_id)
add_entities([OASATelematicsSensor(data, stop_id, route_id, name)], True)
class OASATelematicsSensor(SensorEntity):
"""Implementation of the OASA Telematics sensor."""
def __init__(self, data, stop_id, route_id, name):
"""Initialize the sensor."""
self.data = data
self._name = name
self._stop_id = stop_id
self._route_id = route_id
self._name_data = self._times = self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_class(self):
"""Return the class of this sensor."""
return DEVICE_CLASS_TIMESTAMP
@property
def native_value(self):
"""Return the state of the sensor."""
return self._state
@property
def extra_state_attributes(self):
"""Return the state attributes."""
params = {}
if self._times is not None:
next_arrival_data = self._times[0]
if ATTR_NEXT_ARRIVAL in next_arrival_data:
next_arrival = next_arrival_data[ATTR_NEXT_ARRIVAL]
params.update({ATTR_NEXT_ARRIVAL: next_arrival.isoformat()})
if len(self._times) > 1:
second_next_arrival_time = self._times[1][ATTR_NEXT_ARRIVAL]
if second_next_arrival_time is not None:
second_arrival = second_next_arrival_time
params.update(
{ATTR_SECOND_NEXT_ARRIVAL: second_arrival.isoformat()}
)
params.update(
{
ATTR_ROUTE_ID: self._times[0][ATTR_ROUTE_ID],
ATTR_STOP_ID: self._stop_id,
ATTR_ATTRIBUTION: ATTRIBUTION,
}
)
params.update(
{
ATTR_ROUTE_NAME: self._name_data[ATTR_ROUTE_NAME],
ATTR_STOP_NAME: self._name_data[ATTR_STOP_NAME],
}
)
return {k: v for k, v in params.items() if v}
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
def update(self):
"""Get the latest data from OASA API and update the states."""
self.data.update()
self._times = self.data.info
self._name_data = self.data.name_data
next_arrival_data = self._times[0]
if ATTR_NEXT_ARRIVAL in next_arrival_data:
self._state = next_arrival_data[ATTR_NEXT_ARRIVAL].isoformat()
class OASATelematicsData:
"""The class for handling data retrieval."""
def __init__(self, stop_id, route_id):
"""Initialize the data object."""
self.stop_id = stop_id
self.route_id = route_id
self.info = self.empty_result()
self.oasa_api = oasatelematics
self.name_data = {
ATTR_ROUTE_NAME: self.get_route_name(),
ATTR_STOP_NAME: self.get_stop_name(),
}
def empty_result(self):
"""Object returned when no arrivals are found."""
return [{ATTR_ROUTE_ID: self.route_id}]
def get_route_name(self):
"""Get the route name from the API."""
try:
route = self.oasa_api.getRouteName(self.route_id)
if route:
return route[0].get("route_departure_eng")
except TypeError:
_LOGGER.error("Cannot get route name from OASA API")
return None
def get_stop_name(self):
"""Get the stop name from the API."""
try:
name_data = self.oasa_api.getStopNameAndXY(self.stop_id)
if name_data:
return name_data[0].get("stop_descr_matrix_eng")
except TypeError:
_LOGGER.error("Cannot get stop name from OASA API")
return None
def update(self):
"""Get the latest arrival data from telematics.oasa.gr API."""
self.info = []
results = self.oasa_api.getStopArrivals(self.stop_id)
if not results:
self.info = self.empty_result()
return
# Parse results
results = [r for r in results if r.get("route_code") in self.route_id]
current_time = dt_util.utcnow()
for result in results:
if (btime2 := result.get("btime2")) is not None:
arrival_min = int(btime2)
timestamp = current_time + timedelta(minutes=arrival_min)
arrival_data = {
ATTR_NEXT_ARRIVAL: timestamp,
ATTR_ROUTE_ID: self.route_id,
}
self.info.append(arrival_data)
if not self.info:
_LOGGER.debug("No arrivals with given parameters")
self.info = self.empty_result()
return
# Sort the data by time
sort = sorted(self.info, key=itemgetter(ATTR_NEXT_ARRIVAL))
self.info = sort
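# Illustrative sketch (not part of the original integration; stop and route ids are
# placeholders): a configuration.yaml entry that would load this platform.
#
#   sensor:
#     - platform: oasa_telematics
#       name: Home bus stop
#       stop_id: "400080"
#       route_id: "2525"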
|
py | 1a5173c908eaa663e59cda493dd2444df575b247 | import enum
from typing import Any
# See equality/wallet/puzzles/condition_codes.clvm
class ConditionOpcode(bytes, enum.Enum):
# UNKNOWN is ascii "0"
UNKNOWN = bytes([48])
# AGG_SIG is ascii "1"
# the conditions below require bls12-381 signatures
AGG_SIG_UNSAFE = bytes([49])
AGG_SIG_ME = bytes([50])
# the conditions below reserve coin amounts and have to be accounted for in output totals
CREATE_COIN = bytes([51])
RESERVE_FEE = bytes([52])
# the conditions below deal with announcements, for inter-coin communication
CREATE_COIN_ANNOUNCEMENT = bytes([60])
ASSERT_COIN_ANNOUNCEMENT = bytes([61])
CREATE_PUZZLE_ANNOUNCEMENT = bytes([62])
ASSERT_PUZZLE_ANNOUNCEMENT = bytes([63])
# the conditions below let coins inquire about themselves
ASSERT_MY_COIN_ID = bytes([70])
ASSERT_MY_PARENT_ID = bytes([71])
ASSERT_MY_PUZZLEHASH = bytes([72])
ASSERT_MY_AMOUNT = bytes([73])
# the conditions below ensure that we're "far enough" in the future
# wall-clock time
ASSERT_SECONDS_RELATIVE = bytes([80])
ASSERT_SECONDS_ABSOLUTE = bytes([81])
# block index
ASSERT_HEIGHT_RELATIVE = bytes([82])
ASSERT_HEIGHT_ABSOLUTE = bytes([83])
def __bytes__(self) -> bytes:
return bytes(self.value)
@classmethod
def from_bytes(cls: Any, blob: bytes) -> Any:
assert len(blob) == 1
return cls(blob)
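# Illustrative sketch (not part of the original module): opcodes round-trip through their
# one-byte encoding.
#
#   assert bytes(ConditionOpcode.CREATE_COIN) == bytes([51])
#   assert ConditionOpcode.from_bytes(bytes([51])) is ConditionOpcode.CREATE_COIN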
|
py | 1a5174216fc4ffdc224e9cd6bd1483322005fe01 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import json
import logging
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import apex # pylint: disable=import-error
from apex.parallel import DistributedDataParallel # pylint: disable=import-error
from .mutator import RegularizedDartsMutator, RegularizedMutatorParallel, DartsDiscreteMutator # pylint: disable=wrong-import-order
from nni.nas.pytorch.utils import AverageMeterGroup # pylint: disable=wrong-import-order
from .utils import CyclicIterator, TorchTensorEncoder, accuracy, reduce_metrics
PHASE_SMALL = "small"
PHASE_LARGE = "large"
class InteractiveKLLoss(nn.Module):
def __init__(self, temperature):
super().__init__()
self.temperature = temperature
# self.kl_loss = nn.KLDivLoss(reduction = 'batchmean')
self.kl_loss = nn.KLDivLoss()
def forward(self, student, teacher):
return self.kl_loss(F.log_softmax(student / self.temperature, dim=1),
F.softmax(teacher / self.temperature, dim=1))
class CdartsTrainer(object):
"""
CDARTS trainer.
Parameters
----------
model_small : nn.Module
PyTorch model to be trained. This is the search network of CDARTS.
model_large : nn.Module
PyTorch model to be trained. This is the evaluation network of CDARTS.
criterion : callable
Receives logits and ground truth label, return a loss tensor, e.g., ``nn.CrossEntropyLoss()``.
loaders : list of torch.utils.data.DataLoader
List of train data and valid data loaders, for training weights and architecture weights respectively.
samplers : list of torch.utils.data.Sampler
List of train data and valid data samplers. This can be PyTorch standard samplers if not distributed.
In distributed mode, sampler needs to have ``set_epoch`` method. Refer to data utils in CDARTS example for details.
logger : logging.Logger
The logger for logging. Will use nni logger by default (if logger is ``None``).
regular_coeff : float
The coefficient of regular loss.
regular_ratio : float
The ratio of regular loss.
warmup_epochs : int
The epochs to warmup the search network
fix_head : bool
``True`` if fixing the paramters of auxiliary heads, else unfix the paramters of auxiliary heads.
epochs : int
Number of epochs planned for training.
steps_per_epoch : int
Steps of one epoch.
loss_alpha : float
The loss coefficient.
loss_T : float
The loss coefficient.
distributed : bool
``True`` if using distributed training, else non-distributed training.
log_frequency : int
Step count per logging.
grad_clip : float
Gradient clipping for weights.
interactive_type : string
``kl`` or ``smoothl1``.
output_path : string
Log storage path.
w_lr : float
Learning rate of the search network parameters.
w_momentum : float
Momentum of the search and the evaluation network.
w_weight_decay : float
The weight decay the search and the evaluation network parameters.
alpha_lr : float
Learning rate of the architecture parameters.
alpha_weight_decay : float
The weight decay the architecture parameters.
nasnet_lr : float
Learning rate of the evaluation network parameters.
local_rank : int
The number of thread.
share_module : bool
``True`` if sharing the stem and auxiliary heads, else not sharing these modules.
"""
def __init__(self, model_small, model_large, criterion, loaders, samplers, logger=None,
regular_coeff=5, regular_ratio=0.2, warmup_epochs=2, fix_head=True,
epochs=32, steps_per_epoch=None, loss_alpha=2, loss_T=2, distributed=True,
log_frequency=10, grad_clip=5.0, interactive_type='kl', output_path='./outputs',
w_lr=0.2, w_momentum=0.9, w_weight_decay=3e-4, alpha_lr=0.2, alpha_weight_decay=1e-4,
nasnet_lr=0.2, local_rank=0, share_module=True):
if logger is None:
logger = logging.getLogger(__name__)
train_loader, valid_loader = loaders
train_sampler, valid_sampler = samplers
self.train_loader = CyclicIterator(train_loader, train_sampler, distributed)
self.valid_loader = CyclicIterator(valid_loader, valid_sampler, distributed)
self.regular_coeff = regular_coeff
self.regular_ratio = regular_ratio
self.warmup_epochs = warmup_epochs
self.fix_head = fix_head
self.epochs = epochs
self.steps_per_epoch = steps_per_epoch
if self.steps_per_epoch is None:
self.steps_per_epoch = min(len(self.train_loader), len(self.valid_loader))
self.loss_alpha = loss_alpha
self.grad_clip = grad_clip
if interactive_type == "kl":
self.interactive_loss = InteractiveKLLoss(loss_T)
elif interactive_type == "smoothl1":
self.interactive_loss = nn.SmoothL1Loss()
self.loss_T = loss_T
self.distributed = distributed
self.log_frequency = log_frequency
self.main_proc = not distributed or local_rank == 0
self.logger = logger
self.checkpoint_dir = output_path
if self.main_proc:
os.makedirs(self.checkpoint_dir, exist_ok=True)
if distributed:
torch.distributed.barrier()
self.model_small = model_small
self.model_large = model_large
if self.fix_head:
for param in self.model_small.aux_head.parameters():
param.requires_grad = False
for param in self.model_large.aux_head.parameters():
param.requires_grad = False
self.mutator_small = RegularizedDartsMutator(self.model_small).cuda()
self.mutator_large = DartsDiscreteMutator(self.model_large, self.mutator_small).cuda()
self.criterion = criterion
self.optimizer_small = torch.optim.SGD(self.model_small.parameters(), w_lr,
momentum=w_momentum, weight_decay=w_weight_decay)
self.optimizer_large = torch.optim.SGD(self.model_large.parameters(), nasnet_lr,
momentum=w_momentum, weight_decay=w_weight_decay)
self.optimizer_alpha = torch.optim.Adam(self.mutator_small.parameters(), alpha_lr,
betas=(0.5, 0.999), weight_decay=alpha_weight_decay)
if distributed:
apex.parallel.convert_syncbn_model(self.model_small)
apex.parallel.convert_syncbn_model(self.model_large)
self.model_small = DistributedDataParallel(self.model_small, delay_allreduce=True)
self.model_large = DistributedDataParallel(self.model_large, delay_allreduce=True)
self.mutator_small = RegularizedMutatorParallel(self.mutator_small, delay_allreduce=True)
if share_module:
self.model_small.callback_queued = True
self.model_large.callback_queued = True
# mutator large never gets optimized, so do not need parallelized
def _warmup(self, phase, epoch):
assert phase in [PHASE_SMALL, PHASE_LARGE]
if phase == PHASE_SMALL:
model, optimizer = self.model_small, self.optimizer_small
elif phase == PHASE_LARGE:
model, optimizer = self.model_large, self.optimizer_large
model.train()
meters = AverageMeterGroup()
for step in range(self.steps_per_epoch):
x, y = next(self.train_loader)
x, y = x.cuda(), y.cuda()
optimizer.zero_grad()
logits_main, _ = model(x)
loss = self.criterion(logits_main, y)
loss.backward()
self._clip_grad_norm(model)
optimizer.step()
prec1, prec5 = accuracy(logits_main, y, topk=(1, 5))
metrics = {"prec1": prec1, "prec5": prec5, "loss": loss}
metrics = reduce_metrics(metrics, self.distributed)
meters.update(metrics)
if self.main_proc and (step % self.log_frequency == 0 or step + 1 == self.steps_per_epoch):
self.logger.info("Epoch [%d/%d] Step [%d/%d] (%s) %s", epoch + 1, self.epochs,
step + 1, self.steps_per_epoch, phase, meters)
def _clip_grad_norm(self, model):
if isinstance(model, DistributedDataParallel):
nn.utils.clip_grad_norm_(model.module.parameters(), self.grad_clip)
else:
nn.utils.clip_grad_norm_(model.parameters(), self.grad_clip)
def _reset_nan(self, parameters):
with torch.no_grad():
for param in parameters:
for i, p in enumerate(param):
if p != p: # equivalent to `isnan(p)`
param[i] = float("-inf")
def _joint_train(self, epoch):
self.model_large.train()
self.model_small.train()
meters = AverageMeterGroup()
for step in range(self.steps_per_epoch):
trn_x, trn_y = next(self.train_loader)
val_x, val_y = next(self.valid_loader)
trn_x, trn_y = trn_x.cuda(), trn_y.cuda()
val_x, val_y = val_x.cuda(), val_y.cuda()
# step 1. optimize architecture
self.optimizer_alpha.zero_grad()
self.optimizer_large.zero_grad()
reg_decay = max(self.regular_coeff * (1 - float(epoch - self.warmup_epochs) / (
(self.epochs - self.warmup_epochs) * self.regular_ratio)), 0)
loss_regular = self.mutator_small.reset_with_loss()
if loss_regular:
loss_regular *= reg_decay
logits_search, emsemble_logits_search = self.model_small(val_x)
logits_main, emsemble_logits_main = self.model_large(val_x)
loss_cls = (self.criterion(logits_search, val_y) + self.criterion(logits_main, val_y)) / self.loss_alpha
loss_interactive = self.interactive_loss(emsemble_logits_search, emsemble_logits_main) * (self.loss_T ** 2) * self.loss_alpha
loss = loss_cls + loss_interactive + loss_regular
loss.backward()
self._clip_grad_norm(self.model_large)
self.optimizer_large.step()
self.optimizer_alpha.step()
# NOTE: need to call here `self._reset_nan(self.mutator_small.parameters())` if `cut_choices`
# step 2. optimize op weights
self.optimizer_small.zero_grad()
with torch.no_grad():
# resample architecture since parameters have been changed
self.mutator_small.reset_with_loss()
logits_search_train, _ = self.model_small(trn_x)
loss_weight = self.criterion(logits_search_train, trn_y)
loss_weight.backward()
self._clip_grad_norm(self.model_small)
self.optimizer_small.step()
metrics = {"loss_cls": loss_cls, "loss_interactive": loss_interactive,
"loss_regular": loss_regular, "loss_weight": loss_weight}
metrics = reduce_metrics(metrics, self.distributed)
meters.update(metrics)
if self.main_proc and (step % self.log_frequency == 0 or step + 1 == self.steps_per_epoch):
self.logger.info("Epoch [%d/%d] Step [%d/%d] (joint) %s", epoch + 1, self.epochs,
step + 1, self.steps_per_epoch, meters)
def train(self):
for epoch in range(self.epochs):
if epoch < self.warmup_epochs:
with torch.no_grad(): # otherwise grads will be retained on the architecture params
self.mutator_small.reset_with_loss()
self._warmup(PHASE_SMALL, epoch)
else:
with torch.no_grad():
self.mutator_large.reset()
self._warmup(PHASE_LARGE, epoch)
self._joint_train(epoch)
self.export(os.path.join(self.checkpoint_dir, "epoch_{:02d}.json".format(epoch)),
os.path.join(self.checkpoint_dir, "epoch_{:02d}.genotypes".format(epoch)))
def export(self, file, genotype_file):
if self.main_proc:
mutator_export, genotypes = self.mutator_small.export(self.logger)
with open(file, "w") as f:
json.dump(mutator_export, f, indent=2, sort_keys=True, cls=TorchTensorEncoder)
with open(genotype_file, "w") as f:
f.write(str(genotypes))
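# Illustrative sketch (not part of the original module; the models, loaders and samplers
# are placeholders that the caller must build, e.g. from the CDARTS example): wiring up
# the trainer for a non-distributed run.
#
#   criterion = nn.CrossEntropyLoss().cuda()
#   trainer = CdartsTrainer(model_small, model_large, criterion,
#                           loaders=(train_loader, valid_loader),
#                           samplers=(train_sampler, valid_sampler),
#                           epochs=32, distributed=False, local_rank=0)
#   trainer.train()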
|
py | 1a517438539dd2aece834a9fa81fa84784c25c52 | import tensorflow as tf
from tensorflow.keras import Model
import tensorflow_addons as tfa
from tensorflow.keras.layers import Dense, Dropout, LayerNormalization, Layer
def create_padding_mask(input):
"""
Creates mask for input to Transformer based on the average of all elements = 0
:param input: input sequence
:return: mask
"""
input = tf.pad(input, paddings=[[0, 0], [1, 0], [0, 0]], constant_values=1)
input = tf.cast(tf.math.equal(tf.keras.backend.mean(input, axis=-1), 0), tf.float32)
# add extra dimensions to add the padding to the attention logits.
return input[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)
class MultiHeadAttention(Layer):
"""
This is the standard multi-head attention layer
"""
def __init__(self, d_model, num_heads=8):
super(MultiHeadAttention, self).__init__()
self.d_model = d_model
self.num_heads = num_heads
if d_model % num_heads != 0:
raise ValueError(
f'embedding dimension = {d_model} should be divisible by number of heads = {num_heads}'
)
self.depth = d_model // num_heads
self.wq = Dense(d_model)
self.wk = Dense(d_model)
self.wv = Dense(d_model)
self.dense = Dense(d_model)
def split_heads(self, x, batch_size):
x = tf.reshape(
x, (batch_size, -1, self.num_heads, self.depth)
)
return tf.transpose(x, perm=[0, 2, 1, 3])
def scaled_dot_product_attention(self, query, key, value, mask):
matmul_qk = tf.matmul(query, key, transpose_b=True)
dim_key = tf.cast(tf.shape(key)[-1], tf.float32)
scaled_score = matmul_qk / tf.math.sqrt(dim_key)
if mask is not None:
scaled_score += (mask * -1e9)
weights = tf.nn.softmax(scaled_score, axis=-1)
output = tf.matmul(weights, value)
return output, weights
def call(self, inputs, mask):
batch_size = tf.shape(inputs)[0]
query = self.wq(inputs)
key = self.wk(inputs)
value = self.wv(inputs)
query = self.split_heads(query, batch_size)
key = self.split_heads(key, batch_size)
value = self.split_heads(value, batch_size)
attention, weights = self.scaled_dot_product_attention(query, key, value, mask)
attention = tf.transpose(attention, perm=[0, 2, 1, 3])
concat_attention = tf.reshape(
attention, (batch_size, -1, self.d_model)
)
output = self.dense(concat_attention)
return output, weights
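# Illustrative sketch (not part of the original module): shape check for the attention
# layer on a dummy batch; d_model must be divisible by num_heads.
#
#   mha = MultiHeadAttention(d_model=64, num_heads=8)
#   x = tf.random.uniform((2, 10, 64))    # (batch, seq_len, d_model)
#   out, w = mha(x, mask=None)            # out: (2, 10, 64), w: (2, 8, 10, 10)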
class TransformerBlock(Layer):
"""
This is the standard Transformer block
"""
def __init__(self, d_model, num_heads, dff, dropout=0.1):
super(TransformerBlock, self).__init__()
self.mha = MultiHeadAttention(d_model, num_heads)
self.ffn = tf.keras.Sequential(
[Dense(dff, activation="relu"),
Dense(d_model),]
)
self.layernorm1 = LayerNormalization(epsilon=1e-6)
self.layernorm2 = LayerNormalization(epsilon=1e-6)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
def call(self, x, training, mask):
attn_output, attention_weigths = self.mha(x, mask)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(out1 + ffn_output)
return out2
class VideoQualityTransformer(Model):
"""
Transformer for video quality assessment using the standard Transformer,
the maximum_position_encoding should cover the maximal clip number in the databases
"""
def __init__(
self,
num_layers,
d_model,
num_heads,
mlp_dim,
dropout=0.1,
maximum_position_encoding=6000
):
super(VideoQualityTransformer, self).__init__()
self.d_model = d_model
self.num_layers = num_layers
# positional embedding is predefined with a sufficient length
self.pos_emb = self.add_weight('pos_emb', shape=(1, maximum_position_encoding, d_model))
# add video quality token
self.quality_emb = self.add_weight('quality_emb', shape=(1, 1, d_model))
# normal Transformer architecture
self.feature_proj = Dense(d_model)
self.dropout = Dropout(dropout)
self.enc_layers = [
TransformerBlock(d_model, num_heads, mlp_dim, dropout)
for _ in range(num_layers)
]
# MLP head
self.mlp_head = tf.keras.Sequential(
[
Dense(mlp_dim, activation=tfa.activations.gelu),
Dropout(dropout),
Dense(1),
]
)
def call(self, x, training):
batch_size = tf.shape(x)[0]
mask = create_padding_mask(x)
frame_length = tf.shape(x)[1]
x = self.feature_proj(x)
quality_emb = tf.broadcast_to(self.quality_emb, [batch_size, 1, self.d_model])
x = tf.concat([quality_emb, x], axis=1)
# truncate the positional embedding for shorter videos
x = x + self.pos_emb[:, : frame_length + 1, :]
x = self.dropout(x, training=training)
for layer in self.enc_layers:
x = layer(x, training, mask)
# First (CLS) is used for VQA
x = self.mlp_head(x[:, 0])
        return x
|
py | 1a517651642743ce8939196c26b8c0e24d1d8e92 | from lstm import *
from relu import *
|
py | 1a517785d6f071378897761964cf51016d26a8b5 | import unittest
import json
from md.auction_json import *
from md.auction import *
# Tests encoding and decoding objects as json.
class JsonTests(unittest.TestCase):
def test_bid_json(self):
self.encode_then_decode(Bid(-3, {'a': 1}))
self.encode_then_decode(Bid(-3, {'a': 1}, label='mybid'))
def encode_then_decode(self, bid):
bid_json = json.dumps(bid, indent=4, cls=ObjectEncoder)
bid_dct = json.loads(bid_json)
decoded_bid = decode_bid(bid_dct)
self.assertBidsEqual(bid, decoded_bid)
def assertBidsEqual(self, a, b):
self.assertIsInstance(a, Bid)
self.assertIsInstance(b, Bid)
self.assertEqual(a.v, b.v)
self.assertEqual(a.q, b.q)
self.assertEqual(a.label, b.label)
self.assertEqual(a.xor_group, b.xor_group)
self.assertEqual(a.winning, b.winning)
def assertBiddersEqual(self, a, b):
self.assertIsInstance(a, Bidder)
self.assertIsInstance(b, Bidder)
self.assertEqual(a.name, b.name)
self.assertEqual(len(a.bids), len(b.bids))
for i in range(len(a.bids)):
self.assertBidsEqual(a.bids[i], b.bids[i])
def assertProblemsEqual(self, a, b):
self.assertIsInstance(a, Problem)
self.assertIsInstance(b, Problem)
self.assertEqual(a.description, b.description)
self.assertEqual(a.free_disposal, b.free_disposal)
self.assertEqual(a.goods, b.goods)
self.assertEqual(len(a.bidders), len(a.bidders))
for i, bidder_a in enumerate(a.bidders):
bidder_b = b.bidders[i]
self.assertBiddersEqual(bidder_a, bidder_b)
def test_bidder_json(self):
b = Bidder('seller')
b.add_bid(-3, {'a': 1})
b.add_bid(-5, {'b': 1})
json_str = json.dumps(b, indent=4, cls=ObjectEncoder)
dct = json.loads(json_str)
decoded_bidder = decode_bidder(dct)
self.assertBiddersEqual(b, decoded_bidder)
def test_problem_json(self):
bidders = [Bidder('seller').add_bid(-3, {'a': 1})]
p = Problem(bidders=bidders, description='Test case', free_disposal=True)
json_str = json.dumps(p, indent=4, cls=ObjectEncoder)
p_dct = json.loads(json_str)
decoded_problem = decode_problem(p_dct)
self.assertProblemsEqual(p, decoded_problem)
if __name__ == '__main__':
unittest.main()
|
py | 1a5177eab090131cbb904c5003d1d7b326c98308 | import simplejson
import string
import time
import traceback
import logging
import requests
ID="api" #this is our command identifier, so with conventional commands, this is the command name
permission=0 #Min permission required to run the command (needs to be 0 as our lowest command is 0)
import collections
def update(d, u):
for k, v in u.iteritems():
if isinstance(v, collections.Mapping):
r = update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
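# Illustrative sketch (not part of the original module): `update` merges nested dicts
# recursively instead of replacing whole sub-dicts.
#
#   base = {"a": {"x": 1, "y": 2}, "b": 3}
#   update(base, {"a": {"y": 20}})   # -> {"a": {"x": 1, "y": 20}, "b": 3}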
class ModDotaAPI:
def __init__(self):
self.requests_session = requests.Session()
self.requests_session.headers = {
'User-agent': 'ModDota_API/1.X (+http://github.com/SinZ163/ModDotaFAQ)'
}
self.ReadDump()
def fetch_page(self, url, timeout=10, decode_json=True):
request = self.requests_session.get(url, timeout=timeout)
if decode_json:
return request.json()
else:
return request.text
def ReadDump(self):
serverInfo = self.fetch_page("https://raw.githubusercontent.com/ModDota/API/master/_data/lua_server.json")
#serverInfo = self.fetch_page("https://raw.githubusercontent.com/SinZ163/TestTracking/master/lua_server.json")
communityInfo = self.fetch_page("https://raw.githubusercontent.com/ModDota/API/master/_data/override_lua_server.json")
self.lua_server = serverInfo.copy()
self.lua_server = update(self.lua_server, communityInfo)
#TODO: add community db here and inject into lua_server
MDAPI_logger = logging.getLogger("MDAPI_Reborn")
modDotaAPI = ModDotaAPI()
#called when the bot has loaded everything and is connected
def __initialize__(self, Startup):
pass
#the command entry point from '=api" or something
def execute(self, name, params, channel, userdata, rank):
msg = " ".join(params)
functions = []
output = channel
#TODO: add logic to figure out which dump we want
for Class, ClassInfo in modDotaAPI.lua_server.iteritems():
for FunctionName, FunctionInfo in ClassInfo["functions"].iteritems():
#print(FunctionName)
if msg.lower() in FunctionName.lower():
MDAPI_logger.info("Found a method, "+FunctionName)
functions.append((Class, FunctionName))
if len(functions) == 0:
self.sendMessage(channel, "No results found.")
if len(functions) > 5:
#pm it
if name == "DB" or len(functions) > 20:
self.sendMessage(channel, "Too many functions matched ("+str(len(functions))+"). Please refine your search.")
return
else:
output = name
self.sendMessage(channel, "Too many functions matched ("+str(len(functions))+"). replying privately.")
colBold = chr(2)
colItalics = chr(29)
colGreen = chr(3)+"03"
colBlue = chr(3)+"02"
colBrown = chr(3)+"07"
colEnd = chr(3)
for function in functions:
className = function[0]
functionName = function[1]
functionInfo = modDotaAPI.lua_server[className]["functions"][functionName]
argInfo = ""
description = ""
if "args" in functionInfo:
if len(functionInfo["args"]) > 0:
#We have argument info
for index, arg in enumerate(functionInfo["args"]):
if index > 0:
argInfo = argInfo + ", "
if "arg_names" in functionInfo:
if len(functionInfo["arg_names"]) > 0:
#we have argument info with named variables
argInfo = argInfo + u"{nullable}{colBrown}{argType}{colBrown}{nullable} {colBlue}{argName}{colEnd}".format(
colBrown = colBrown,
colBlue = colBlue,
colEnd = colEnd,
argType = arg,
argName = functionInfo["arg_names"][index],
nullable = colItalics if "?" in arg else ""
)
continue
argInfo = argInfo + u"{nullable}{colBrown}{argType}{colEnd}{nullable}".format(
colBrown = colBrown,
colEnd = colEnd,
argType = arg,
nullable = colItalics if "?" in arg else ""
)
if argInfo != "":
argInfo = " " + argInfo + " "
if "description" in functionInfo:
description = "{colGreen} -- {description}{colEnd}".format(
description = functionInfo["description"],
colGreen = colGreen,
colEnd = colEnd
)
#self.sendMessage(output, "["+method[0]+"] "+modDotaAPI.db[method[0]]["methods"][method[1]]["return"] + " " + method[1] + colBold+"(" + colBold + msg + colBold+")" + colBold + comment)
self.sendMessage(output, "[{colBlue}{className}{colEnd}] {colBrown}{returnType}{colEnd} {name}{bold}({bold}{argInfo}{bold}){bold} {description}".format(
bold = colBold,
italic = colItalics,
colBlue = colBlue,
colBrown = colBrown,
colEnd = colEnd,
className = className,
name = functionName,
returnType = functionInfo["return"],
argInfo = argInfo,
description = description
))
|
py | 1a5179c578a67a4a0cf99041b53c3f76936a11d3 | from django.contrib import admin
from .models import Contact
from .models import Project
from .models import Info
from .models import StudyDay
# Register your models here.
admin.site.register(Contact)
admin.site.register(Project)
admin.site.register(Info)
admin.site.register(StudyDay)
|
py | 1a517bbf912c99bb738d660de818d91398708823 | # -*- coding: utf-8 -*-
#!/usr/bin/python
import os
import sys
import json
import argparse
import re
import requests
import codecs
from configparser import ConfigParser
from distutils.version import LooseVersion
# Hackety Hack. I can keep prestapyt as a submodule and load the lib from inside it.
# git submodule add https://github.com/prestapyt/prestapyt.git
# The package available on pip is not new enough for PrestaShop 1.7.
sys.path.insert(1, 'prestapyt/')
from prestapyt import PrestaShopWebServiceDict
def get_fb_catalog(ps, f, c):
plist = ps.get('products',options={'filter[active]': '1'})
lang_id = c.get('ps','lang_id')
base_url = c.get('ps', 'base_url')
print("PROCESSING: {}".format(len(plist['products']['product'])))
# field header
f.write(u'id\ttitle\tdescription\tlink\timage_link\tavailability\tprice\tcurrency\tgoogle_product_category\tbrand\tage_group\tgender\tcondition\n')
for product in plist['products']['product']:
prod = ps.get('products/'+product['attrs']['id'])
if prod['product']['active'] == '0':
print("Product not active: "+product['attrs']['id'])
continue
# id - prod['product']['reference']
id = prod['product']['reference']
# title - for name in prod['product']['name']['language']: name['value'] if lang == 'ES' else next
if isinstance(prod['product']['name']['language'], list):
for name in prod['product']['name']['language']:
if name['attrs']['id'] == lang_id:
title = name['value']
else:
title = prod['product']['name']['language']['value']
# description - for desc prod['product']['description_short']['language']: desc['value'] if lang == 'ES' else next
if isinstance(prod['product']['description_short']['language'], list):
for name in prod['product']['description_short']['language']:
if name['attrs']['id'] == lang_id:
description = re.sub('<[^>]+?>', '', name['value'])
else:
description = re.sub('<[^>]+?>', '', prod['product']['description_short']['language']['value'])
# link -
if isinstance(prod['product']['link_rewrite']['language'], list):
for ln in prod['product']['link_rewrite']['language']:
if ln['attrs']['id'] == lang_id:
link = "{0}/{1}-{2}.html".format(base_url, product['attrs']['id'], ln['value'])
else:
link = "{0}/{1}-{2}.html".format(base_url, product['attrs']['id'], prod['product']['link_rewrite']['language']['value'])
# image_link
r = requests.get("{0}/get-image.php?imageid={1}".format(base_url, prod['product']['id_default_image']['value']))
image_link = r.text
# availability -
        # TODO: handle available stock when there is more than one combination
        # if stock_available is a list it means the product has more than one combination.
        # for now we assume stock = len of the list
if isinstance(prod['product']['associations']['stock_availables']['stock_available'], list):
            # more than one combination: build the quantity dict directly (stocks_avail is not yet defined on this branch)
            stocks_avail = {'stock_available': {'quantity': str(len(prod['product']['associations']['stock_availables']['stock_available']))}}
else:
stocks_avail = ps.get('stock_availables/'+prod['product']['associations']['stock_availables']['stock_available']['id'])
print("ID: "+id+" Quantity: "+stocks_avail['stock_available']['quantity'])
if int(stocks_avail['stock_available']['quantity']) > 0:
print("in stock")
#if lang_id == '1':
avail = 'in stock'
#else:
# avail = 'disponible'
else:
print("out of stock")
#if lang_id == '1':
avail = 'out of stock'
#else:
# avail = 'agotado'
# price
price = "{:.2f}".format(float(prod['product']['price'])*1.21)
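        # (assumption: the 1.21 factor adds 21% VAT on top of the tax-excluded price returned by the API)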
currency = "EUR"
# google_product_category
catemap = dict(c.items('catemap'))
try:
gpc = catemap[ prod['product']['id_category_default'] ]
except KeyError:
print("Key ERROR - Product ID: {0} Category ID: {1}".format(prod['product']['id'], prod['product']['id_category_default']))
quit()
# brand - from config
brand = c.get('general', 'brand')
# age_group - adult
age_group = 'adult'
# TODO: color
#color = ''
# gender - female
gender = 'female'
# TODO: shipping
# condition - new
condition = 'new'
# with shipping info
#print("{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\t{10}\t{11}\t{12}\t{13}".format(id, title, description, link, image_link, avail, price, gpc, brand, age_group, color, gender, shipping, condition))
# without shipping info, color
f.write(u'{0}\t"{1}"\t"{2}"\t{3}\t{4}\t{5}\t{6}\t{7}\t{8}\t{9}\t{10}\t{11}\t{12}\n'.format(id, title, description, link, image_link, avail, price, currency, gpc, brand, age_group, gender, condition))
return
if __name__ == '__main__':
try:
basedir = sys.argv[1]
except IndexError:
basedir = '.'
config = ConfigParser()
config.read(basedir+'/config.ini')
file = codecs.open("{0}/{1}-{2}.tsv".format(config.get('report','folder_name'), config.get('report','file_name'), config.get('report','lang')), "w", "utf-8-sig")
ps = PrestaShopWebServiceDict(config.get('ps', 'api_url'), config.get('ps', 'token'))
get_fb_catalog(ps, file, config)
file.close() |
py | 1a517cb07a7579d87a5b4c75d4a014fcfcd5408a | import requests
import urllib.request
import time
import urllib
import re
import csv
import sys
from bs4 import BeautifulSoup
def uni_montreal():
url = "https://diro.umontreal.ca/english/departement-directory/professors/"
r = requests.get(url) # request to url
# getting the soup by parsing the html parsel to text to request r
soup = BeautifulSoup(r.text, "html5lib")
# print(soup.prettify)
# file initialization to write
file_name = sys.argv[0]
# file_name = file_name[4:]
txt_file = file_name.replace(".py", ".txt")
f = open(txt_file, "w")
csv_file = file_name.replace(".py", ".csv")
f2 = open(csv_file, "w")
csvwriter = csv.writer(f2)
overall_file = "all_emails.csv"
f3 = open(overall_file, "a")
csvwriter2 = csv.writer(f3)
u_name = "University of Montreal"
country = "Canada"
grabage_emails = []
var = [f, csvwriter, csvwriter2, u_name, country, grabage_emails]
# d gives the array of all profs on the dept homepage
dd = soup.find('div', {'class':'list_individus'})
d = dd.find_all('div', {'class':'individu with-affiliations with-expertises'})
#iterating for every prof
for i in d:
h4 = i.find('h4', {'class':"nom-prenom"})
a = h4.find('a')
if a == None:
continue
link = "https://diro.umontreal.ca"+a.get('href')
name = (a.get_text()).strip()
name = " ".join(name.split())
# print(name, link)
# check if link is valid on Not
try:
prof_resp = requests.get(link)
except:
continue
div_mail = i.find('div', {'class':'courriel'})
a_mail = div_mail.find('a')
if a_mail != None:
email = a_mail.get_text()
else:
email = "Not Found"
print(name, link)
filterandgetEmail(var, grabage_emails, name, link, email, prof_resp)
f.close()
f2.close()
f3.close()
print("Finished")
def filterandgetEmail(var, grabage_emails, name, link, email, prof_resp):
f = var[0]
csvwriter = var[1]
csvwriter2 = var[2]
u_name = var[3]
country = var[4]
keyword_list = ['Computer Architecture','hardware and system architecture', 'hardware and architecture', 'embedded system', 'computer organization','VLSI Design', 'Computer and System',
'multiprocessor architecture']
flag = 1
prof_soup = BeautifulSoup(prof_resp.text, "html.parser")
research_text = prof_soup.text
for pattern in keyword_list:
if re.search(pattern, research_text, re.IGNORECASE):
flag = 0
if email != 'Not Found':
f.write(link + '\n' + name + "\t"+ email + "\n")
csvwriter.writerow([u_name, country, name, email, link])
csvwriter2.writerow([u_name, country, name, email, link])
else:
new_emails = set(re.findall(r"[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4}", prof_resp.text))
for eemail in grabage_emails:
if eemail in new_emails:
new_emails.remove(eemail)
if len(new_emails) == 0:
email = "Email Not Found"
f.write(link + '\n' + name + "\t"+ email + "\n")
csvwriter.writerow([u_name, country, name, email, link])
csvwriter2.writerow([u_name, country, name, email, link])
else:
# f.write(link + '\n' + name)
for email in new_emails:
f.write(link + '\n' + name + '\t\t' + email + '\n')
csvwriter.writerow([u_name, country, name, email, link])
csvwriter2.writerow([u_name, country, name, email, link])
# f.write("\n")
f.write(pattern)
f.write('\n\n')
break
if __name__ == '__main__':
uni_montreal()
|
py | 1a517d65869f9a42035ade0bd7fe5498894da077 | # Friends again #
# March 15, 2019
# By Robin Nash
import sys
def getCircle(friend, pairs, circle):
circle.append(pairs[circle[-1]])
last = circle[-1]
if last == circle[0]:
return circle[:-1]
if last in circle[:-1]:
return circle[circle.index(last):-1]
return getCircle(friend, pairs, circle)
def getDistance(a,b,circle):
ai = circle.index(a)
bi = circle.index(b)
d1 = bi-ai-1
d2 = len(circle) - bi + ai -1
return min([d1, d2])
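# Worked example for getDistance (hypothetical circle, values are strings as elsewhere in
# this script): with circle = ['1', '2', '3', '4'], getDistance('1', '3', circle) compares
# the gap going forward (d1 = 1 friend in between) with the gap going the other way
# (d2 = 1) and returns the smaller of the two, here 1.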
data = ['9', '2 3', '1 2', '3 1', '10 11', '100 10', '11 100', '12 100', '13 14','14 100', '1 100', '2 3', '12 14']
##data = sys.stdin.read().strip().split('\n')[:-1]
friendsNum = int(data.pop(0))
pairs = {f:ff for f,ff in [pair.split() for pair in data[:friendsNum]]}
checkPairs = [pair.split() for pair in data[friendsNum:]]
data.clear()
circles = []
append = circles.append
for f,ff in pairs.items():
if circles == [] or True not in [f in c and ff in c for c in circles]:
circle = getCircle(f,pairs,[f])
append(circle)
##sample = [(1,2,[2,3,4,1]), (1,2,[4,5,1,0,0,0,2])]
##for a,b,circle in sample:
## a,b = sorted([circle.index(a),circle.index(b)])
## a,b = circle[a],circle[b]
## print(getDistance(a,b,circle))
for a,b in checkPairs:
try:
        circle = [c for c in circles if a in c and b in c][0]
distance = getDistance(a,b,circle)
print('Yes',distance)
except ValueError:
print('No')
except IndexError:
print("No")
print(circles)
#1552666028.0 |
py | 1a517e37d84b845776fc84489ee9eed084c96a99 | from rest_framework.permissions import BasePermission
from environments.models import Environment
from environments.permissions.constants import UPDATE_FEATURE_STATE
from projects.models import Project
ACTION_PERMISSIONS_MAP = {
"retrieve": "VIEW_PROJECT",
"destroy": "DELETE_FEATURE",
"list": "VIEW_PROJECT",
"create": "CREATE_FEATURE",
"add_owners": "CREATE_FEATURE",
"remove_owners": "CREATE_FEATURE",
"update": "CREATE_FEATURE",
"partial_update": "CREATE_FEATURE",
}
class FeaturePermissions(BasePermission):
def has_permission(self, request, view):
try:
project_id = view.kwargs.get("project_pk") or request.data.get("project")
project = Project.objects.get(id=project_id)
if view.action in ACTION_PERMISSIONS_MAP:
return request.user.has_project_permission(
ACTION_PERMISSIONS_MAP.get(view.action), project
)
# move on to object specific permissions
return view.detail
except Project.DoesNotExist:
return False
def has_object_permission(self, request, view, obj):
# map of actions and their required permission
if view.action in ACTION_PERMISSIONS_MAP:
return request.user.has_project_permission(
ACTION_PERMISSIONS_MAP[view.action], obj.project
)
if view.action == "segments":
return request.user.is_project_admin(obj.project)
return False
class FeatureStatePermissions(BasePermission):
def has_permission(self, request, view):
try:
if view.action == "create" and request.data.get("environment"):
environment = Environment.objects.get(id=request.data["environment"])
return request.user.has_environment_permission(
UPDATE_FEATURE_STATE, environment
)
# - detail view means we can just defer to object permissions
# - list view means we just need to filter the objects based on permissions
return view.detail or view.action == "list"
except Environment.DoesNotExist:
return False
def has_object_permission(self, request, view, obj):
return request.user.has_environment_permission(
UPDATE_FEATURE_STATE, environment=obj.environment
)
class EnvironmentFeatureStatePermissions(BasePermission):
def has_permission(self, request, view):
if view.action == "create":
environment_api_key = view.kwargs.get("environment_api_key")
if not environment_api_key:
return False
environment = Environment.objects.get(api_key=environment_api_key)
return request.user.has_environment_permission(
permission=UPDATE_FEATURE_STATE, environment=environment
)
if view.action == "list":
return True
# move on to object specific permissions
return view.detail
def has_object_permission(self, request, view, obj):
return request.user.has_environment_permission(
permission=UPDATE_FEATURE_STATE, environment=obj.environment
)
class IdentityFeatureStatePermissions(EnvironmentFeatureStatePermissions):
pass
|
py | 1a517f18a179520f38a98bc060d388497b74d525 | def get_error_message(attribute_name: str, attempted_source: str, allowed_source: str) -> str:
return rf"Attempting to access attribute '{attribute_name}' from invalid scope." \
rf" Access only allowed from {allowed_source} but access from {attempted_source} \([0-9\-]*\) attempted"
def get_access_child_from_parent_message(attribute_name: str) -> str:
return get_error_message(attribute_name, "Parent", "Child")
def get_access_parent_from_child_message(attribute_name: str) -> str:
return get_error_message(attribute_name, "Child", "Parent")
|
py | 1a517f9d73566510fdb86e62403c47e38e10a19b | import unittest
import sys
sys.path.append("../src/")
from merge_sort_without_sentinel import merge_sort
class TestMergeSortWithoutSentinel(unittest.TestCase):
def test_merge_sort_already_sorted(self):
A = [1, 2, 3, 4, 5, 6]
merge_sort(A)
self.assertEqual(A, [1, 2, 3, 4, 5, 6])
def test_merge_sort(self):
A = [5, 2, 4, 6, 1, 3]
merge_sort(A)
self.assertEqual(A, [1, 2, 3, 4, 5, 6])
if __name__ == "__main__":
unittest.main()
|
py | 1a517ffa5d7f5204cd03f9893e5a38148e11354a | # -*- coding: utf-8 -*-
import sys
import warnings
from pathlib import Path
PROJECT_DIR = Path(__file__).resolve().parent
if str(PROJECT_DIR.parent) not in sys.path:
sys.path.insert(0, str(PROJECT_DIR.parent))
warnings.filterwarnings(
"ignore", category=FutureWarning, module="sklearn.utils.deprecation"
)
from common import *
warnings.filterwarnings(
"always", category=FutureWarning, module="sklearn.utils.deprecation"
)
figure_saver = PaperFigureSaver(
directories=Path("~") / "tmp" / PROJECT_DIR.parent.name / PROJECT_DIR.name,
debug=True,
)
map_figure_saver = figure_saver(**map_figure_saver_kwargs)
for fig_saver in (figure_saver, map_figure_saver):
fig_saver.experiment = PROJECT_DIR.name
memory = get_memory("__".join((PROJECT_DIR.parent.name, PROJECT_DIR.name)), verbose=100)
CACHE_DIR = Path(DATA_DIR) / ".pickle" / PROJECT_DIR.parent.name / PROJECT_DIR.name
data_split_cache = SimpleCache("data_split", cache_dir=CACHE_DIR)
save_ale_2d_and_get_importance = partial(
save_ale_2d_and_get_importance, figure_saver=figure_saver
)
save_pdp_plot_2d = partial(save_pdp_plot_2d, figure_saver=figure_saver)
save_ale_plot_1d_with_ptp = partial(
save_ale_plot_1d_with_ptp, figure_saver=figure_saver
)
save_pdp_plot_1d = partial(
save_pdp_plot_1d, CACHE_DIR=CACHE_DIR, figure_saver=figure_saver
)
multi_ale_plot_1d = partial(multi_ale_plot_1d, figure_saver=figure_saver)
# Number of SHAP jobs.
try:
X_train, X_test, y_train, y_test = data_split_cache.load()
# Maximum job array index (inclusive).
shap_params["max_index"] = math.floor(X_train.shape[0] / shap_params["job_samples"])
# Upper bound only.
shap_params["total_samples"] = (shap_params["max_index"] + 1) * shap_params[
"job_samples"
]
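    # Worked example (hypothetical sizes): with X_train.shape[0] == 1050 and
    # job_samples == 100, max_index = floor(1050 / 100) = 10, so the 11 array jobs
    # (indices 0..10) cover (10 + 1) * 100 = 1100 samples, an upper bound on the 1050 present.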
except NoCachedDataError:
warnings.warn(
"Processed data not found, not calculating 'max_index' or 'total_samples'."
)
# Upper bound only.
shap_interact_params["total_samples"] = (
shap_interact_params["max_index"] + 1
) * shap_interact_params["job_samples"]
# SHAP cache.
shap_cache = SimpleCache("shap_cache", cache_dir=CACHE_DIR / Path("shap"))
shap_interact_cache = SimpleCache(
"shap_interact_cache", cache_dir=CACHE_DIR / Path("shap_interaction")
)
interact_data_cache = SimpleCache("SHAP_interact_data", cache_dir=CACHE_DIR)
# Redefine the common functionality for our use-case - no shifted variables.
_common_get_data = get_data
_common_get_offset_data = get_offset_data
selected_features = (
"Dry Day Period",
"FAPAR 50P 4k",
"Max Temp",
"VOD Ku-band 50P 4k -3 Month",
"LAI 50P 4k -1 Month",
"Dry Day Period -1 Month",
"Dry Day Period -3 Month",
"SIF 50P 4k",
"LAI 50P 4k -3 Month",
"VOD Ku-band 50P 4k -1 Month",
"VOD Ku-band 50P 4k",
"FAPAR 50P 4k -1 Month",
"pftCrop",
"SIF 50P 4k -9 Month",
"popd",
)
@wraps(_common_get_data)
def get_data(*args, **kwargs):
(
endog_data,
exog_data,
master_mask,
filled_datasets,
masked_datasets,
land_mask,
) = _common_get_data(*args, **kwargs)
# We need to subset exog_data, filled_datasets, and masked_datasets.
exog_data = exog_data[list(selected_features)]
filled_datasets = filled_datasets.select_variables(selected_features)
masked_datasets = masked_datasets.select_variables(selected_features)
return (
endog_data,
exog_data,
master_mask,
filled_datasets,
masked_datasets,
land_mask,
)
@wraps(_common_get_offset_data)
def get_offset_data(*args, **kwargs):
(
endog_data,
exog_data,
master_mask,
filled_datasets,
masked_datasets,
land_mask,
) = _common_get_offset_data(*args, **kwargs)
# We need to subset exog_data, filled_datasets, and masked_datasets.
exog_data = exog_data[list(selected_features)]
filled_datasets = filled_datasets.select_variables(selected_features)
masked_datasets = masked_datasets.select_variables(selected_features)
return (
endog_data,
exog_data,
master_mask,
filled_datasets,
masked_datasets,
land_mask,
)
def get_model(X_train=None, y_train=None):
return common_get_model(cache_dir=CACHE_DIR, X_train=X_train, y_train=y_train)
model_score_cache = SimpleCache("model_scores", cache_dir=CACHE_DIR)
@model_score_cache
def get_model_scores(rf=None, X_test=None, X_train=None, y_test=None, y_train=None):
return common_get_model_scores(rf, X_test, X_train, y_test, y_train)
|
py | 1a5181bdfbee93f6a15a9d727b1e87000a82f35c | import sys
import pytest
from dagster import file_relative_path, lambda_solid, pipeline, repository
from dagster.core.definitions.repository_definition import RepositoryData
from dagster.core.test_utils import instance_for_test
from dagster.core.types.loadable_target_origin import LoadableTargetOrigin
from dagster.core.workspace import WorkspaceProcessContext
from dagster.core.workspace.load_target import GrpcServerTarget
from dagster.grpc.server import GrpcServerProcess
def define_do_something(num_calls):
@lambda_solid(name="do_something_" + str(num_calls))
def do_something():
return num_calls
return do_something
@lambda_solid
def do_input(x):
return x
def define_foo_pipeline(num_calls):
do_something = define_do_something(num_calls)
@pipeline(name="foo_" + str(num_calls))
def foo_pipeline():
do_input(do_something())
return foo_pipeline
class TestDynamicRepositoryData(RepositoryData):
def __init__(self):
self._num_calls = 0
    # The list of pipelines changes every time get_all_pipelines is called
def get_all_pipelines(self):
self._num_calls = self._num_calls + 1
return [define_foo_pipeline(self._num_calls)]
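# Sketch of the behaviour the class above produces (call counts are illustrative): the first
# materialisation of the repository exposes 'foo_1', the next call 'foo_2', and so on, which
# is what the reload test further down relies on.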
@repository
def bar_repo():
return TestDynamicRepositoryData()
@pytest.fixture(name="instance")
def instance_fixture():
with instance_for_test() as instance:
yield instance
@pytest.fixture(name="workspace_process_context")
def workspace_process_context_fixture(instance):
loadable_target_origin = LoadableTargetOrigin(
executable_path=sys.executable,
python_file=file_relative_path(__file__, "test_custom_repository_data.py"),
)
server_process = GrpcServerProcess(loadable_target_origin=loadable_target_origin)
try:
with server_process.create_ephemeral_client(): # shuts down when leaves this context
with WorkspaceProcessContext(
instance,
GrpcServerTarget(
host="localhost",
socket=server_process.socket,
port=server_process.port,
location_name="test",
),
) as workspace_process_context:
yield workspace_process_context
finally:
server_process.wait()
def test_repository_data_can_reload_without_restarting(workspace_process_context):
request_context = workspace_process_context.create_request_context()
repo_location = request_context.get_repository_location("test")
repo = repo_location.get_repository("bar_repo")
# get_all_pipelines called on server init twice, then on repository load, so starts at 3
# this is a janky test
assert repo.has_pipeline("foo_3")
assert not repo.has_pipeline("foo_1")
assert not repo.has_pipeline("foo_2")
external_pipeline = repo.get_full_external_pipeline("foo_3")
assert external_pipeline.has_solid_invocation("do_something_3")
# Reloading the location changes the pipeline without needing
# to restart the server process
workspace_process_context.reload_repository_location("test")
request_context = workspace_process_context.create_request_context()
repo_location = request_context.get_repository_location("test")
repo = repo_location.get_repository("bar_repo")
assert repo.has_pipeline("foo_4")
assert not repo.has_pipeline("foo_3")
external_pipeline = repo.get_full_external_pipeline("foo_4")
assert external_pipeline.has_solid_invocation("do_something_4")
def test_custom_repo_select_only_job():
assert not bar_repo.get_all_jobs()
|
py | 1a51829be0fff2137fcb928495840c9204681507 | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.networks.q_rnn_network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.networks import expand_dims_layer
from tf_agents.networks import q_rnn_network
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step
class QRnnNetworkTest(tf.test.TestCase):
def test_network_builds(self):
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
rnn_network = q_rnn_network.QRnnNetwork(tf_env.observation_spec(),
tf_env.action_spec())
first_time_step = tf_env.current_time_step()
q_values, state = rnn_network(
first_time_step.observation, first_time_step.step_type,
network_state=rnn_network.get_initial_state(batch_size=1)
)
self.assertEqual((1, 2), q_values.shape)
self.assertEqual((1, 40), state[0].shape)
self.assertEqual((1, 40), state[1].shape)
def test_network_can_preprocess_and_combine(self):
batch_size = 3
frames = 5
num_actions = 2
lstm_size = 6
states = (tf.random.uniform([batch_size, frames, 1]),
tf.random.uniform([batch_size, frames]))
preprocessing_layers = (
tf.keras.layers.Dense(4),
tf.keras.Sequential([
expand_dims_layer.ExpandDims(-1), # Convert to vec size (1,).
tf.keras.layers.Dense(4)]))
network = q_rnn_network.QRnnNetwork(
input_tensor_spec=(
tensor_spec.TensorSpec([1], tf.float32),
tensor_spec.TensorSpec([], tf.float32)),
preprocessing_layers=preprocessing_layers,
preprocessing_combiner=tf.keras.layers.Add(),
lstm_size=(lstm_size,),
action_spec=tensor_spec.BoundedTensorSpec(
[1], tf.int32, 0, num_actions - 1))
empty_step_type = tf.constant(
[[time_step.StepType.FIRST] * frames] * batch_size)
q_values, _ = network(states, empty_step_type,
network_state=network.get_initial_state(batch_size))
self.assertAllEqual(
q_values.shape.as_list(), [batch_size, frames, num_actions])
# At least 2 variables each for the preprocessing layers.
self.assertGreater(len(network.trainable_variables), 4)
def test_network_can_preprocess_and_combine_no_time_dim(self):
batch_size = 3
num_actions = 2
lstm_size = 5
states = (tf.random.uniform([batch_size, 1]),
tf.random.uniform([batch_size]))
preprocessing_layers = (
tf.keras.layers.Dense(4),
tf.keras.Sequential([
expand_dims_layer.ExpandDims(-1), # Convert to vec size (1,).
tf.keras.layers.Dense(4)]))
network = q_rnn_network.QRnnNetwork(
input_tensor_spec=(
tensor_spec.TensorSpec([1], tf.float32),
tensor_spec.TensorSpec([], tf.float32)),
preprocessing_layers=preprocessing_layers,
preprocessing_combiner=tf.keras.layers.Add(),
lstm_size=(lstm_size,),
action_spec=tensor_spec.BoundedTensorSpec(
[1], tf.int32, 0, num_actions - 1))
empty_step_type = tf.constant([time_step.StepType.FIRST] * batch_size)
q_values, _ = network(
states, empty_step_type,
network_state=network.get_initial_state(batch_size=batch_size))
# Processed 1 time step and the time axis was squeezed back.
self.assertAllEqual(
q_values.shape.as_list(), [batch_size, num_actions])
# At least 2 variables each for the preprocessing layers.
self.assertGreater(len(network.trainable_variables), 4)
def test_network_builds_stacked_cells(self):
env = suite_gym.load('CartPole-v0')
tf_env = tf_py_environment.TFPyEnvironment(env)
rnn_network = q_rnn_network.QRnnNetwork(
tf_env.observation_spec(), tf_env.action_spec(), lstm_size=(10, 5))
first_time_step = tf_env.current_time_step()
q_values, state = rnn_network(
first_time_step.observation, first_time_step.step_type,
network_state=rnn_network.get_initial_state(batch_size=1)
)
tf.nest.assert_same_structure(rnn_network.state_spec, state)
self.assertEqual(2, len(state))
self.assertEqual((1, 2), q_values.shape)
self.assertEqual((1, 10), state[0][0].shape)
self.assertEqual((1, 10), state[0][1].shape)
self.assertEqual((1, 5), state[1][0].shape)
self.assertEqual((1, 5), state[1][1].shape)
if __name__ == '__main__':
tf.test.main()
|
py | 1a5183489d6f93df542746b49c9da388f0d64b0f | # Copyright (c) 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import warnings
import fixtures
import six
from neutron.api.v2 import attributes
class AttributeMapMemento(fixtures.Fixture):
"""Create a copy of the resource attribute map so it can be restored during
test cleanup.
There are a few reasons why this is not included in a class derived
from BaseTestCase:
- Test cases may need more control about when the backup is
made, especially if they are not direct descendants of
BaseTestCase.
- Inheritance is a bit of overkill for this facility and it's a
stretch to rationalize the "is a" criteria.
"""
def _setUp(self):
# Shallow copy is not a proper choice for keeping a backup copy as
# the RESOURCE_ATTRIBUTE_MAP map is modified in place through the
# 0th level keys. Ideally deepcopy() would be used but this seems
# to result in test failures. A compromise is to copy one level
# deeper than a shallow copy.
self.contents_backup = {}
for res, attrs in six.iteritems(attributes.RESOURCE_ATTRIBUTE_MAP):
self.contents_backup[res] = attrs.copy()
self.addCleanup(self.restore)
def restore(self):
attributes.RESOURCE_ATTRIBUTE_MAP = self.contents_backup
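# A typical way to use the fixture above from a test might look like the sketch below
# (assuming a fixtures-aware TestCase; the 'networks' key is illustrative):
#
#     self.useFixture(AttributeMapMemento())
#     attributes.RESOURCE_ATTRIBUTE_MAP['networks']['foo'] = {}   # safe to mutate now
#
# The cleanup registered in _setUp() then restores the one-level-deep copy on teardown.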
class WarningsFixture(fixtures.Fixture):
"""Filters out warnings during test runs."""
warning_types = (
DeprecationWarning, PendingDeprecationWarning, ImportWarning
)
def _setUp(self):
self.addCleanup(warnings.resetwarnings)
for wtype in self.warning_types:
warnings.filterwarnings(
"always", category=wtype, module='^neutron\\.')
"""setup_mock_calls and verify_mock_calls are convenient methods
to setup a sequence of mock calls.
expected_calls_and_values is a list of (expected_call, return_value):
expected_calls_and_values = [
(mock.call(["ovs-vsctl", self.TO, '--', "--may-exist", "add-port",
self.BR_NAME, pname]),
None),
(mock.call(["ovs-vsctl", self.TO, "set", "Interface",
pname, "type=gre"]),
None),
....
]
* expected_call should be mock.call(expected_arg, ....)
* return_value is passed to side_effect of a mocked call.
A return value or an exception can be specified.
"""
import unittest
def setup_mock_calls(mocked_call, expected_calls_and_values):
return_values = [call[1] for call in expected_calls_and_values]
mocked_call.side_effect = return_values
def verify_mock_calls(mocked_call, expected_calls_and_values,
any_order=False):
expected_calls = [call[0] for call in expected_calls_and_values]
mocked_call.assert_has_calls(expected_calls, any_order=any_order)
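# Hypothetical end-to-end usage of the two helpers above (command names and return values
# are illustrative only):
#
#     expected_calls_and_values = [
#         (mock.call(["ovs-vsctl", "br-exists", "br-int"]), 0),
#         (mock.call(["ovs-vsctl", "list-ports", "br-int"]), "tap0\ntap1"),
#     ]
#     setup_mock_calls(mocked_execute, expected_calls_and_values)
#     # ... exercise the code that shells out through mocked_execute ...
#     verify_mock_calls(mocked_execute, expected_calls_and_values)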
def fail(msg=None):
"""Fail immediately, with the given message.
This method is equivalent to TestCase.fail without requiring a
    testcase instance (useful for reducing coupling).
"""
raise unittest.TestCase.failureException(msg)
class UnorderedList(list):
"""A list that is equals to any permutation of itself."""
def __eq__(self, other):
if not isinstance(other, list):
return False
return sorted(self) == sorted(other)
    def __ne__(self, other):
return not self == other
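# Example: UnorderedList([1, 2, 3]) == [3, 1, 2] evaluates to True, which is convenient when
# the code under test returns items in nondeterministic order.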
|
py | 1a51838968431dad6240e235e49e6026f14e9fa4 | """
Simple iOS tests, showing accessing elements and getting/setting text from them.
"""
import unittest
import os
from random import randint
from appium import webdriver
from time import sleep
class SimpleIOSTests(unittest.TestCase):
def setUp(self):
# set up appium
app = os.path.abspath('../../apps/TestApp/build/release-iphonesimulator/TestApp-iphonesimulator.app')
self.driver = webdriver.Remote(
command_executor='http://127.0.0.1:4723/wd/hub',
desired_capabilities={
'app': app,
'platformName': 'iOS',
'platformVersion': '11.1',
'deviceName': 'iPhone 6'
})
def tearDown(self):
self.driver.quit()
def _populate(self):
# populate text fields with two random numbers
els = [self.driver.find_element_by_accessibility_id('TextField1'),
self.driver.find_element_by_accessibility_id('TextField2')]
self._sum = 0
for i in range(2):
rnd = randint(0, 10)
els[i].send_keys(rnd)
self._sum += rnd
def test_ui_computation(self):
# populate text fields with values
self._populate()
# trigger computation by using the button
self.driver.find_element_by_accessibility_id('ComputeSumButton').click()
# is sum equal ?
# sauce does not handle class name, so get fourth element
sum = self.driver.find_element_by_accessibility_id('Answer').text
self.assertEqual(int(sum), self._sum)
def test_scroll(self):
els = self.driver.find_elements_by_class_name('XCUIElementTypeButton')
els[5].click()
sleep(1)
try:
el = self.driver.find_element_by_accessibility_id('Allow')
el.click()
sleep(1)
except:
pass
el = self.driver.find_element_by_xpath('//XCUIElementTypeMap[1]')
location = el.location
self.driver.swipe(start_x=location['x'], start_y=location['y'], end_x=0.5, end_y=location['y'], duration=800)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(SimpleIOSTests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
py | 1a5183ddc9260dbb95ccc8c3854cce9f1c2afb7d | # Copyright (c) 2015 Infoblox Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.common import constants
from neutron.db import ipam_backend_mixin
from neutron.tests import base
class TestIpamBackendMixin(base.BaseTestCase):
def setUp(self):
super(TestIpamBackendMixin, self).setUp()
self.mixin = ipam_backend_mixin.IpamBackendMixin()
self.ctx = mock.Mock()
self.default_new_ips = (('id-1', '192.168.1.1'),
('id-2', '192.168.1.2'))
self.default_original_ips = (('id-1', '192.168.1.1'),
('id-5', '172.20.16.5'))
self.owner_non_router = constants.DEVICE_OWNER_DHCP
self.owner_router = constants.DEVICE_OWNER_ROUTER_INTF
def _prepare_ips(self, ips):
return [{'ip_address': ip[1],
'subnet_id': ip[0]} for ip in ips]
def _mock_slaac_subnet_on(self):
slaac_subnet = {'ipv6_address_mode': constants.IPV6_SLAAC,
'ipv6_ra_mode': constants.IPV6_SLAAC}
self.mixin._get_subnet = mock.Mock(return_value=slaac_subnet)
def _mock_slaac_subnet_off(self):
non_slaac_subnet = {'ipv6_address_mode': None,
'ipv6_ra_mode': None}
self.mixin._get_subnet = mock.Mock(return_value=non_slaac_subnet)
def _test_get_changed_ips_for_port(self, expected_change, original_ips,
new_ips, owner):
change = self.mixin._get_changed_ips_for_port(self.ctx,
original_ips,
new_ips,
owner)
self.assertEqual(expected_change, change)
def test__get_changed_ips_for_port(self):
new_ips = self._prepare_ips(self.default_new_ips)
original_ips = self._prepare_ips(self.default_original_ips)
expected_change = self.mixin.Changes(add=[new_ips[1]],
original=[original_ips[0]],
remove=[original_ips[1]])
self._test_get_changed_ips_for_port(expected_change, original_ips,
new_ips, self.owner_router)
def test__get_changed_ips_for_port_autoaddress(self):
new_ips = self._prepare_ips(self.default_new_ips)
original = (('id-1', '192.168.1.1'),
('id-5', '2000:1234:5678::12FF:FE34:5678'))
original_ips = self._prepare_ips(original)
self._mock_slaac_subnet_on()
expected_change = self.mixin.Changes(add=[new_ips[1]],
original=original_ips,
remove=[])
self._test_get_changed_ips_for_port(expected_change, original_ips,
new_ips, self.owner_non_router)
def _test_get_changed_ips_for_port_no_ip_address(self):
# IP address should be added if only subnet_id is provided,
# independently from auto_address status for subnet
new_ips = [{'subnet_id': 'id-3'}]
original_ips = []
expected_change = self.mixin.Changes(add=[new_ips[0]],
original=[],
remove=[])
self._test_get_changed_ips_for_port(expected_change, original_ips,
new_ips, self.owner_non_router)
def test__get_changed_ips_for_port_no_ip_address_no_slaac(self):
self._mock_slaac_subnet_off()
self._test_get_changed_ips_for_port_no_ip_address()
def test__get_changed_ips_for_port_no_ip_address_slaac(self):
self._mock_slaac_subnet_on()
self._test_get_changed_ips_for_port_no_ip_address()
def test__is_ip_required_by_subnet_for_router_port(self):
# Owner -> router:
# _get_subnet should not be called,
# expected True
self._mock_slaac_subnet_off()
result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id',
self.owner_router)
self.assertTrue(result)
self.assertFalse(self.mixin._get_subnet.called)
def test__is_ip_required_by_subnet_for_non_router_port(self):
# Owner -> not router:
# _get_subnet should be called,
# expected True, because subnet is not slaac
self._mock_slaac_subnet_off()
result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id',
self.owner_non_router)
self.assertTrue(result)
self.assertTrue(self.mixin._get_subnet.called)
def test__is_ip_required_by_subnet_for_non_router_port_and_slaac(self):
# Owner -> not router:
# _get_subnet should be called,
# expected False, because subnet is slaac
self._mock_slaac_subnet_on()
result = self.mixin._is_ip_required_by_subnet(self.ctx, 'id',
self.owner_non_router)
self.assertFalse(result)
self.assertTrue(self.mixin._get_subnet.called)
|
py | 1a518513e56bf268757aa76eccb911d66d3d4251 | """ Defines a few simple comps that are used in tests."""
import numpy as np
import openmdao.api as om
class DoubleArrayComp(om.ExplicitComponent):
"""
A fairly simple array component.
"""
def __init__(self):
super(DoubleArrayComp, self).__init__()
self.JJ = np.array([[1.0, 3.0, -2.0, 7.0],
[6.0, 2.5, 2.0, 4.0],
[-1.0, 0.0, 8.0, 1.0],
[1.0, 4.0, -5.0, 6.0]])
self.declare_partials('*', '*')
def setup(self):
# Params
self.add_input('x1', np.zeros([2]))
self.add_input('x2', np.zeros([2]))
# Unknowns
self.add_output('y1', np.zeros([2]))
self.add_output('y2', np.zeros([2]))
# Derivs
self.declare_partials(of='*', wrt='*')
def compute(self, inputs, outputs):
"""
Execution.
"""
outputs['y1'] = self.JJ[0:2, 0:2].dot(inputs['x1']) + \
self.JJ[0:2, 2:4].dot(inputs['x2'])
outputs['y2'] = self.JJ[2:4, 0:2].dot(inputs['x1']) + \
self.JJ[2:4, 2:4].dot(inputs['x2'])
def compute_partials(self, inputs, partials):
"""
Analytical derivatives.
"""
partials[('y1', 'x1')] = self.JJ[0:2, 0:2]
partials[('y1', 'x2')] = self.JJ[0:2, 2:4]
partials[('y2', 'x1')] = self.JJ[2:4, 0:2]
partials[('y2', 'x2')] = self.JJ[2:4, 2:4]
class NonSquareArrayComp(om.ExplicitComponent):
"""
A fairly simple array component.
"""
def __init__(self):
super(NonSquareArrayComp, self).__init__()
self.JJ = np.array([[1.0, 3.0, -2.0, 7.0],
[6.0, 2.5, 2.0, 4.0],
[-1.0, 0.0, 8.0, 1.0],
[1.0, 4.0, -5.0, 6.0]])
self.declare_partials('*', '*')
def setup(self):
# Params
self.add_input('x1', np.zeros([2]))
self.add_input('x2', np.zeros([2]))
# Unknowns
self.add_output('y1', np.zeros([3]))
self.add_output('y2', np.zeros([1]))
# Derivs
self.declare_partials(of='*', wrt='*')
def compute(self, inputs, outputs):
"""
Execution.
"""
outputs['y1'] = self.JJ[0:3, 0:2].dot(inputs['x1']) + \
self.JJ[0:3, 2:4].dot(inputs['x2'])
outputs['y2'] = self.JJ[3:4, 0:2].dot(inputs['x1']) + \
self.JJ[3:4, 2:4].dot(inputs['x2'])
def compute_partials(self, inputs, partials):
"""
Analytical derivatives.
"""
partials[('y1', 'x1')] = self.JJ[0:3, 0:2]
partials[('y1', 'x2')] = self.JJ[0:3, 2:4]
partials[('y2', 'x1')] = self.JJ[3:4, 0:2]
partials[('y2', 'x2')] = self.JJ[3:4, 2:4]
class TestExplCompDeprecated(om.ExplicitComponent):
"""
A component that adds variables in the __init__ function.
"""
def __init__(self):
super(TestExplCompDeprecated, self).__init__()
self.add_input('x1', np.zeros([2]))
self.add_output('y1', np.zeros([2]))
# Derivs
self.declare_partials(of='*', wrt='*')
def compute(self, inputs, outputs):
"""
Execution.
"""
outputs['y1'] = 2.*inputs['x1']
|
py | 1a51855d79e4bfe00d93ac71309327a37fc43997 | from django.db import models
from django.utils import timezone
from account.models import User
from django_mysql.models import JSONField, Model
class Post(models.Model):
author = models.ForeignKey(User, on_delete=models.CASCADE)
title = models.CharField(max_length=100)
content = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
published_at = models.DateTimeField(blank = True, null = True)
def publish(self):
self.published_at = timezone.now()
self.save()
def __str__(self):
return self.title
class Project(models.Model):
project_name = models.CharField(max_length=255, unique=True)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.project_name
class Blueprint(models.Model):
flowdata = JSONField()
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Template(models.Model):
template_name = models.CharField(max_length=255, unique=True)
blueprint = models.ForeignKey(Blueprint, on_delete=models.PROTECT)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Experiment(models.Model):
title = models.CharField(max_length=255, unique=True)
blueprint = models.ForeignKey(Blueprint, on_delete=models.PROTECT)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
class Library(models.Model):
experiment = models.ForeignKey(Experiment, on_delete=models.CASCADE)
project = models.ForeignKey(Project, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Tag(models.Model):
tag_name = models.CharField(max_length=255, unique=True)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.tag_name
class Pin(models.Model):
experiment = models.ForeignKey(Experiment, on_delete=models.CASCADE)
tag = models.ForeignKey(Tag, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Type(models.Model):
type_name = models.CharField(max_length=255, unique=True)
concept = models.IntegerField(default=2)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.type_name
class Node(models.Model):
node_name = models.CharField(max_length=255)
typeid = models.ForeignKey(Type, on_delete=models.PROTECT)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
node_image = models.ImageField(upload_to='images/',default='images/node_default.png')
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.node_name
class Entity(models.Model):
node = models.ForeignKey(Node, on_delete=models.PROTECT,null=True,blank=True)
boxid = models.CharField(max_length=255)
blueprint = models.ForeignKey(Blueprint, on_delete=models.PROTECT)
is_finished = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
finished_at = models.DateTimeField(blank = True, null = True)
def finished(self):
self.finished_at = timezone.now()
self.save()
class Property(models.Model):
property_name = models.CharField(max_length=255, default="")
official = models.BooleanField(default=False)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.property_name
class Unit(models.Model):
symbol = models.CharField(max_length=255, default="")
base = models.BooleanField(default=False)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.symbol
class Quantity(models.Model):
unit = models.ForeignKey(Unit, on_delete=models.CASCADE)
property = models.ForeignKey(Property, on_delete=models.PROTECT)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Figure(models.Model):
node = models.ForeignKey(Node, on_delete=models.CASCADE)
figure_name = models.CharField(max_length=255)
property_x = models.ForeignKey(Property, on_delete=models.PROTECT, related_name="property_x", blank = True, null = True)
property_y = models.ForeignKey(Property, on_delete=models.PROTECT, related_name="property_y", blank = True, null = True)
property_z = models.ForeignKey(Property, on_delete=models.PROTECT, related_name="property_z", blank = True, null = True)
datatype = models.IntegerField(default=0)
    is_condition = models.BooleanField(default=False)  # scheduled to be deprecated
cluster = models.IntegerField(default=2)
editor = models.ForeignKey(User, on_delete=models.PROTECT,blank = True, null = True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.figure_name
class Datum(models.Model):
entity = models.ForeignKey(Entity, on_delete=models.PROTECT)
unit_x = models.ForeignKey(Unit, on_delete=models.PROTECT, related_name="unit_x", blank = True, null = True)
unit_y = models.ForeignKey(Unit, on_delete=models.PROTECT, related_name="unit_y", blank = True, null = True)
unit_z = models.ForeignKey(Unit, on_delete=models.PROTECT, related_name="unit_z", blank = True, null = True)
figure = models.ForeignKey(Figure, on_delete=models.PROTECT)
data = JSONField()
editor = models.ForeignKey(User, on_delete=models.PROTECT, related_name="editor")
is_deleted = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Metakey(models.Model):
key_name = models.CharField(max_length=255)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.key_name
class Product(models.Model):
product_name = models.CharField(max_length=255)
experiment = models.ForeignKey(Experiment, on_delete=models.PROTECT)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.product_name
class Definition(models.Model):
product = models.ForeignKey(Product, on_delete=models.PROTECT)
entity = models.ForeignKey(Entity, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Image(models.Model):
image_name = models.CharField(max_length=255)
image = models.ImageField(upload_to='images/')
cluster = models.IntegerField(default=2)
entity = models.ForeignKey(Entity, on_delete=models.PROTECT)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.image_name
class Video(models.Model):
video_name = models.CharField(max_length=255)
video_url = models.TextField()
cluster = models.IntegerField(default=2)
entity = models.ForeignKey(Entity, on_delete=models.PROTECT)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.video_name
class Item(models.Model):
item_name = models.CharField(max_length=255, unique=True)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.item_name
class Metadata(models.Model):
figure = models.ForeignKey(Node, on_delete=models.PROTECT)
item = models.ForeignKey(Item, on_delete=models.PROTECT)
values = JSONField()
editor = models.ForeignKey(User, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.value
class Detail(models.Model):
detail_name = models.CharField(max_length=255, unique=True)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
item = models.ForeignKey(Item, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.detail_name
class Description(models.Model):
values = JSONField()
    is_condition = models.BooleanField(default=False)  # scheduled to be deprecated
cluster = models.IntegerField(default=2)
entity = models.ForeignKey(Entity, on_delete=models.PROTECT)
item = models.ForeignKey(Item, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Default(models.Model):
node = models.ForeignKey(Node, on_delete=models.PROTECT)
item = models.ForeignKey(Item, on_delete=models.CASCADE)
    is_condition = models.BooleanField(default=False)  # scheduled to be deprecated
cluster = models.IntegerField(default=2)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Headline(models.Model):
headline_name = models.CharField(max_length=255, unique=True)
editor = models.ForeignKey(User, on_delete=models.PROTECT)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.headline_name
class Sentence(models.Model):
value = models.TextField()
headline = models.ForeignKey(Headline, on_delete=models.PROTECT)
entity = models.ForeignKey(Entity, on_delete=models.CASCADE)
cluster = models.IntegerField(default=2)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
class Explanation(models.Model):
value = models.TextField()
headline = models.ForeignKey(Headline, on_delete=models.PROTECT)
figure = models.ForeignKey(Figure, on_delete=models.PROTECT)
cluster = models.IntegerField(default=2)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
|
py | 1a518581c23a5dde47fab9c321accec60725e029 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates a line item creative association for a creative
set.
To create creative sets, run create_creative_set.py. To create creatives, run
create_creatives.py. To determine which LICAs exist, run get_all_licas.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: LineItemCreativeAssociationService.createLineItemCreativeAssociations
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
CREATIVE_SET_ID = 'INSERT_CREATIVE_SET_ID_HERE'
LINE_ITEM_ID = 'INSERT_LINE_ITEM_ID_HERE'
def main(client, creative_set_id, line_item_id):
# Initialize appropriate service.
lica_service = client.GetService(
'LineItemCreativeAssociationService', version='v201408')
# Create LICA for a creative set.
lica = {'creativeSetId': creative_set_id, 'lineItemId': line_item_id}
# Add LICA.
lica = lica_service.createLineItemCreativeAssociations([lica])
# Display results.
print (('LICA with line item ID \'%s\' and creative set ID \'%s\' was '
'created.') % (lica['lineItemId'], lica['creativeSetId']))
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, CREATIVE_SET_ID, LINE_ITEM_ID)
|
py | 1a51858aea86a584eeac192dc46b3e543fb59149 | from flask import flash, url_for
from conekt.extensions import admin_required
from werkzeug.utils import redirect
from conekt import cache
from conekt.controllers.admin.controls import admin_controls
@admin_controls.route('/clear/cache')
@admin_required
def clear_cache():
"""
Touching this endpoint will clear the servers cache (all of it!).
:return: Redirect to admin controls
"""
try:
cache.clear()
except Exception as e:
flash('An error occurred while clearing the cache', 'danger')
else:
flash('Cache cleared', 'success')
return redirect(url_for('admin.controls.index')) |
py | 1a5187b7de9db7cbb89668da7c78849dff8f239d | import os
import sys
import argparse
import logging
from tqdm.notebook import tqdm
import time
import numpy as np
import matplotlib.pyplot as plt
import torch
import shutil
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import transformers
from config.configs import set_random_fixed, get_path_info
from data.dataloader import get_Finetune_dataloader_Atype, get_Finetune_dataloader_Btype
from data.tokenizer import Tokenizer
from util.utils import (load_metricfn, load_optimizer, load_scheduler, load_lossfn,
save_checkpoint, load_checkpoint,
time_measurement, count_parameters, initialize_weights)
from util.optim_scheduler import ScheduledOptim
from models.model import build_classification_model, build_regression_model
import wandb
class Finetune_Trainer():
def __init__(self, parser, task):
# set parser
self.args = parser.parse_args()
#initialize wandb
#wandb.init(name=task)
# save loss history to plot later on
self.training_history = []
self.validation_history = []
# set variables needed for training
self.n_epoch = self.args.epoch
self.train_batch_size = self.args.train_batch_size
self.display_step = self.args.display_step # training
self.val_batch_size = self.args.val_batch_size
self.test_batch_size = self.args.test_batch_size
self.display_examples = self.args.display_examples # testing
self.lr = self.args.init_lr
#self.eps = self.args.adam_eps
self.weight_decay = self.args.weight_decay
self.beta1 = self.args.adam_beta1
self.beta2 = self.args.adam_beta2
self.warmup_steps = self.args.warm_up
#self.factor = self.args.factor
#self.patience = self.args.patience
#self.clip = self.args.clip
self.language = self.args.language
self.max_len = self.args.max_len
self.vocab_size = self.args.vocab_size
self.device = self.args.device
self.pretrain_weightpath = os.path.join(os.getcwd(),'weights')
if os.path.isdir('finetune_weights'):
shutil.rmtree("finetune_weights")
self.weightpath = os.path.join(os.getcwd(),'finetune_weights')
self.final_weightpath = os.path.join(os.getcwd(),'final_finetune_weights')
self.best_pretrain_epoch = self.args.best_pretrain_epoch
# build dataloader
self.task = task
task_Atype = ['cola','sst2']
task_Btype = ['stsb','rte','mrpc','qqp','mnli']
self.task_Btype = ['stsb','rte','mrpc','qqp','mnli']
task_Btype_sentence = ['stsb','rte','mrpc']
task_Btype_question = ['qqp']
task_Btype_hypothesis = ['mnli']
if task in task_Atype:
self.train_dataloader, self.val_dataloader, self.test_dataloader = get_Finetune_dataloader_Atype(
self.train_batch_size, self.val_batch_size, self.test_batch_size,
self.language, self.max_len,
'glue', task, 'sentence', 'label',
None
)
elif task in task_Btype_sentence:
self.train_dataloader, self.val_dataloader, self.test_dataloader = get_Finetune_dataloader_Btype(
self.train_batch_size, self.val_batch_size, self.test_batch_size,
self.language, self.max_len,
self.args.dataset_name, self.args.dataset_type, 'sentence1', 'sentence2', 'label',
None
)
elif task in task_Btype_question:
self.train_dataloader, self.val_dataloader, self.test_dataloader = get_Finetune_dataloader_Btype(
self.train_batch_size, self.val_batch_size, self.test_batch_size,
self.language, self.max_len,
self.args.dataset_name, self.args.dataset_type, 'question1', 'question2', 'label',
None
)
elif task in task_Btype_hypothesis:
self.train_dataloader, self.val_dataloader, self.test_dataloader = get_Finetune_dataloader_Btype(
self.train_batch_size, self.val_batch_size, self.test_batch_size,
self.language, self.max_len,
self.args.dataset_name, self.args.dataset_type, 'premise', 'hypothesis', 'label',
None
)
else:
assert "The task you typed in is not supported!"
self.train_batch_num = len(self.train_dataloader)
self.val_batch_num = len(self.val_dataloader)
self.test_batch_num = len(self.test_dataloader)
self.num_training_steps = (self.train_batch_num) * (self.n_epoch)
self.t_total = self.train_batch_num * self.n_epoch
# load metric
if task == 'mnli':
self.metric = load_metricfn('matthews_corrcoef')
elif task == 'stsb':
self.metric = load_metricfn('pearson')
else:
self.metric = load_metricfn('accuracy_score')
# build model
if task in task_Atype:
self.model= build_classification_model(self.vocab_size, self.args.model_dim, self.args.hidden_dim,
self.max_len, self.args.num_layers, self.device, 'one')
elif task == 'stsb':
self.model = build_regression_model(self.vocab_size, self.args.model_dim, self.args.hidden_dim,
self.max_len, self.args.num_layers, self.device)
else:
self.model = build_classification_model(self.vocab_size, self.args.model_dim, self.args.hidden_dim,
self.max_len, self.args.num_layers, self.device, 'two')
load_checkpoint(self.model, os.path.join(self.pretrain_weightpath,str(self.best_pretrain_epoch)+".pth"))
# build optimizer
self.optimizer = load_optimizer(self.model, self.lr, self.weight_decay,
self.beta1, self.beta2)
# build scheduler
self.optim_scheduler = ScheduledOptim(self.optimizer, self.args.model_dim, self.warmup_steps)
# build lossfn
if task=='stsb':
self.lossfn = load_lossfn('MSELoss',self.args.pad_idx) # Regression
else:
self.lossfn = load_lossfn('CrossEntropyLoss',self.args.pad_idx) # Classification
def train_test(self):
best_model_epoch, training_history, validation_history = self.finetune()
self.test(best_model_epoch)
self.plot(training_history, validation_history)
def finetune(self):
# set logging
logging.basicConfig(level=logging.WARNING)
# logging message
sys.stdout.write('#################################################\n')
sys.stdout.write('You have started training the model.\n')
print('Your model size is : ')
count_parameters(self.model)
sys.stdout.write('#################################################\n')
# set randomness of training procedure fixed
self.set_random(516)
# build directory to save to model's weights
self.build_directory()
# set initial variables for training, validation
train_batch_num = len(self.train_dataloader)
validation_batch_num = len(self.val_dataloader)
# set initial variables for model selection
best_model_epoch=0
best_model_score=0
best_model_loss =float('inf')
# save information of the procedure of training
training_history=[]
validation_history=[]
# predict when training will end based on average time
total_time_spent = 0
# start of looping through training data
for epoch_idx in range(self.n_epoch):
# measure time when epoch start
start_time = time.time()
sys.stdout.write('#################################################\n')
sys.stdout.write(f"Epoch : {epoch_idx+1} / {self.n_epoch}")
sys.stdout.write('\n')
sys.stdout.write('#################################################\n')
########################
#### Training Phase ####
########################
# switch model to train mode
self.model.train()
# set initial variables for training (inside epoch)
training_loss_per_epoch=0.0
training_acc_per_epoch = 0
# train model using batch gradient descent with Adam Optimizer
for batch_idx, batch in tqdm(enumerate(self.train_dataloader)):
# move batch of data to gpu
input_ids = batch['input_ids'] #[bs, 1, sl]
token_type_ids = batch['token_type_ids'] #[bs, 1, sl]
labels = batch['label'].to(torch.float) #[bs, 1]
# reshape input_ids and token_type_ids
if self.task in self.task_Btype:
reshaped_input_ids = input_ids.to(self.device)
reshaped_token_type_ids = token_type_ids.contiguous().cuda(reshaped_input_ids.device)
else:
reshaped_input_ids = input_ids.contiguous().permute(0,2,1).squeeze(2).to(self.device)
reshaped_token_type_ids = token_type_ids.contiguous().permute(0,2,1).squeeze(2).cuda(reshaped_input_ids.device)
# reshape input_ids and token_type_ids
reshaped_labels = labels.contiguous().squeeze(1).cuda(reshaped_input_ids.device)
# compute model output
# 1 sentence classification : Cola, SST2
# 2 sentence classification : RTE, MRPC, QQP, MNLI
# 2 sentence regression : STSB
model_output = self.model(reshaped_input_ids, reshaped_token_type_ids).squeeze() # [bs, 2] in classification, [bs, 1] in regression
train_pred = torch.tensor([1 if n >0 else 0 for n in model_output]).to(self.device)
training_acc_per_epoch += self.metric(train_pred.cpu().detach().numpy(), reshaped_labels.cpu().detach().numpy())
# print(model_output.float().type())
# print(model_output)
# print(reshaped_labels.type())
# print(reshaped_labels)
if batch_idx == 0:
print("##### train pred #####")
print(model_output)
print(reshaped_labels)
print("#"*len("##### train pred #####"))
# compute loss using model output and labels(reshaped ver)
loss = self.lossfn(model_output, reshaped_labels)
# clear gradients, and compute gradient with current batch
self.optimizer.zero_grad()
loss.backward()
# clip gradients
#torch.nn.utils.clip_grad_norm_(self.model.parameters(),self.clip)
# update gradients
self.optim_scheduler.step_and_update_lr()
# add loss to training_loss
training_loss_per_iteration = loss.item()
training_loss_per_epoch += training_loss_per_iteration
# Display summaries of training procedure with period of display_step
if ((batch_idx+1) % self.display_step==0) and (batch_idx>0):
sys.stdout.write(f"Training Phase | Epoch: {epoch_idx+1} | Step: {batch_idx+1} / {train_batch_num} | loss : {training_loss_per_iteration}")
sys.stdout.write('\n')
# save training loss of each epoch, in other words, the average of every batch in the current epoch
training_mean_loss_per_epoch = training_loss_per_epoch / train_batch_num
training_history.append(training_mean_loss_per_epoch)
training_acc_per_epoch = (training_acc_per_epoch/train_batch_num)*100
##########################
#### Validation Phase ####
##########################
# switch model to eval mode
self.model.eval()
# set initial variables for validation (inside epoch)
validation_loss_per_epoch=0.0
validation_score_per_epoch=0.0
# validate model using batch gradient descent with Adam Optimizer
for batch_idx, batch in tqdm(enumerate(self.val_dataloader)):
# move batch of data to gpu
input_ids = batch['input_ids'] #[bs, 1, sl]
token_type_ids = batch['token_type_ids'] #[bs, 1, sl]
labels = batch['label'].to(torch.float) #[bs, 1]
# reshape input_ids and token_type_ids
if self.task in self.task_Btype:
reshaped_input_ids = input_ids.to(self.device)
reshaped_token_type_ids = token_type_ids.contiguous().cuda(reshaped_input_ids.device)
else:
reshaped_input_ids = input_ids.contiguous().permute(0,2,1).squeeze(2).to(self.device)
reshaped_token_type_ids = token_type_ids.contiguous().permute(0,2,1).squeeze(2).cuda(reshaped_input_ids.device)
reshaped_labels = labels.contiguous().squeeze(1).cuda(reshaped_input_ids.device)
# compute model output
# 1 sentence classification : Cola, SST2
# 2 sentence classification : RTE, MRPC, QQP, MNLI
# 2 sentence regression : STSB
with torch.no_grad():
model_output = self.model(reshaped_input_ids, reshaped_token_type_ids).squeeze() # [bs, 2] in classification, [bs, 1] in regression
if batch_idx == 0:
print(model_output)
print(reshaped_labels)
# compute loss using model output and labels(reshaped ver)
loss = self.lossfn(model_output, reshaped_labels)
# add loss to training_loss
validation_loss_per_iteration = loss.item()
validation_loss_per_epoch += validation_loss_per_iteration
# reshape model output
reshaped_model_output = torch.tensor([1 if n >0 else 0 for n in model_output.squeeze()]).to(self.device)
# compute bleu score using model output and labels(reshaped ver)
validation_score_per_iteration = self.metric(reshaped_model_output.cpu().detach().numpy(), reshaped_labels.cpu().detach().numpy())*100
validation_score_per_epoch += validation_score_per_iteration
# save validation loss of each epoch, in other words, the average of every batch in the current epoch
validation_mean_loss_per_epoch = validation_loss_per_epoch / validation_batch_num
validation_history.append(validation_mean_loss_per_epoch)
# save validation score of each epoch, in other words, the average of every batch in the current epoch
validation_mean_score_per_epoch = validation_score_per_epoch / validation_batch_num
# Display summaries of validation result after all validation is done
sys.stdout.write(f"Validation Phase | Epoch: {epoch_idx+1} | loss : {validation_mean_loss_per_epoch} | score : {validation_mean_score_per_epoch}")
sys.stdout.write('\n')
# Model Selection Process using validation_mean_score_per_epoch
if (validation_mean_loss_per_epoch < best_model_loss):
best_model_epoch = epoch_idx+1
best_model_loss = validation_mean_loss_per_epoch
best_model_score = validation_mean_score_per_epoch
save_checkpoint(self.model, self.optimizer, epoch_idx,
os.path.join(self.weightpath,str(epoch_idx+1)+".pth"))
#wandb log
train_log_dict = {
"train/step": epoch_idx, # grows exponentially with internal wandb step
"train/loss": training_mean_loss_per_epoch, # x-axis is train/step
"train/accuracy": training_acc_per_epoch} # x-axis is train/step
val_log_dict ={
"val/loss": validation_mean_loss_per_epoch, # x-axis is internal wandb step
"val/accuracy":validation_mean_score_per_epoch
}
# wandb.log(train_log_dict)
# wandb.log(val_log_dict)
# measure time when epoch end
end_time = time.time()
# measure the amount of time spent in this epoch
epoch_mins, epoch_secs = time_measurement(start_time, end_time)
sys.stdout.write(f"Time spent in epoch {epoch_idx+1} is {epoch_mins} minuites and {epoch_secs} seconds\n")
# measure the total amount of time spent until now
total_time_spent += (end_time - start_time)
total_time_spent_mins = int(total_time_spent/60)
total_time_spent_secs = int(total_time_spent - (total_time_spent_mins*60))
sys.stdout.write(f"Total amount of time spent until epoch {epoch_idx+1} is {total_time_spent_mins} minuites and {total_time_spent_secs} seconds\n")
# calculate how more time is estimated to be used for training
#avg_time_spent_secs = total_time_spent_secs / (epoch_idx+1)
#left_epochs = self.n_epoch - (epoch_idx+1)
#estimated_left_time = avg_time_spent_secs * left_epochs
#estimated_left_time_mins = int(estimated_left_time/60)
#estimated_left_time_secs = int(estimated_left_time - (estimated_left_time_mins*60))
#sys.stdout.write(f"Estimated amount of time until epoch {self.n_epoch} is {estimated_left_time_mins} minuites and {estimated_left_time_secs} seconds\n")
# summary of whole procedure
sys.stdout.write('#################################################\n')
sys.stdout.write(f"Training and Validation has ended.\n")
sys.stdout.write(f"Your best model was the model from epoch {best_model_epoch+1} and scored {self.args.metric} score : {best_model_score} | loss : {best_model_loss}\n")
sys.stdout.write('#################################################\n')
return best_model_epoch, training_history, validation_history
def test(self, best_model_epoch):
# logging message
sys.stdout.write('#################################################\n')
sys.stdout.write('You have started testing the model.\n')
sys.stdout.write('#################################################\n')
# set randomness of training procedure fixed
self.set_random(516)
# build directory to save to model's weights
self.build_final_directory()
# loading the best_model from checkpoint
task_Atype = ['cola','sst2']
if self.task in task_Atype:
best_model= build_classification_model(self.vocab_size, self.args.model_dim, self.args.hidden_dim,
self.max_len, self.args.num_layers, self.device, 'one')
elif self.task == 'stsb':
best_model = build_regression_model(self.vocab_size, self.args.model_dim, self.args.hidden_dim,
self.max_len, self.args.num_layers, self.device)
else:
best_model = build_classification_model(self.vocab_size, self.args.model_dim, self.args.hidden_dim,
self.max_len, self.args.num_layers, self.device, 'two')
load_checkpoint(best_model,
os.path.join(self.weightpath,str(best_model_epoch)+".pth"))
# set initial variables for test
test_batch_num = len(self.test_dataloader)
##########################
###### Test Phase ######
##########################
# switch model to eval mode
best_model.eval()
# set initial variables for validation (inside epoch)
test_score_per_epoch=0.0
test_score_tmp_list=[]
# validate model using batch gradient descent with Adam Optimizer
for batch_idx, batch in tqdm(enumerate(self.test_dataloader)):
# move batch of data to gpu
input_ids = batch['input_ids'] #[bs, 1, sl]
token_type_ids = batch['token_type_ids'] #[bs, 1, sl]
labels = batch['label'] #[bs, 1]
# reshape input_ids and token_type_ids
reshaped_input_ids = input_ids.contiguous().permute(0,2,1).squeeze(2).to(self.device)
reshaped_token_type_ids = token_type_ids.contiguous().permute(0,2,1).squeeze(2).cuda(reshaped_input_ids.device)
reshaped_labels = labels.contiguous().squeeze(1).cuda(reshaped_input_ids.device)
# compute model output
# 1 sentence classification : Cola, SST2
# 2 sentence classification : RTE, MRPC, QQP, MNLI
# 2 sentence regression : STSB
with torch.no_grad():
model_output = self.model(reshaped_input_ids, reshaped_token_type_ids) # [bs, 2] in classification, [bs, 1] in regression
# reshape model output
reshaped_model_output = model_output.argmax(dim=1)
# compute bleu score using model output and labels(reshaped ver)
test_score_per_iteration = self.metric(reshaped_model_output.cpu().detach().numpy(), reshaped_labels.cpu().detach().numpy())*100
test_score_tmp_list.append(test_score_per_iteration)
test_score_per_epoch += test_score_per_iteration
# calculate test score
test_score_per_epoch = test_score_per_epoch / test_batch_num
# Evaluate summaries with period of display_steps
sys.stdout.write(f"Test Phase | Best Epoch: {best_model_epoch} | score : {test_score_per_epoch}\n")
save_checkpoint(self.model, self.optimizer, 1,
os.path.join(self.final_weightpath,"final_"+self.task+".pth"))
def plot(self, training_history, validation_history):
step = np.linspace(0,self.n_epoch,self.n_epoch)
plt.plot(step,np.array(training_history),label='Training')
plt.plot(step,np.array(validation_history),label='Validation')
plt.xlabel('number of epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
cur_path = os.getcwd()
plt.savefig(cur_path)
sys.stdout.write('Image of train, validation history saved as plot png!\n')
def build_directory(self):
# Making directory to store model pth
curpath = os.getcwd()
weightpath = os.path.join(curpath,'finetune_weights')
os.mkdir(weightpath)
def build_final_directory(self):
curpath = os.getcwd()
final_weightpath = os.path.join(curpath,'final_finetune_weights')
os.mkdir(final_weightpath)
def set_random(self, seed_num):
set_random_fixed(seed_num)
|
py | 1a51880820a9358e268861bf45bf5e37852d96e4 | # Copyright 2019 Lorenzo Cabrini
#
# Use of this source code is governed by an MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
from .service import Service, NotInstalled
from pigrizia.command.handler import NoSuchCommand
class Python(Service):
"""
This service represents Python and related tools (such as pip).
"""
def __init__(self, host, **kwargs):
super().__init__(host, **kwargs)
try:
self.version()
except NoSuchCommand:
raise NotInstalled()
def version(self, **kwargs):
"""
Gets the python version.
"""
return "{}.{}".format(self._major_version(), self._minor_version())
def _major_version(self, **kwargs):
cmd = "python3 -c 'import sys; print(sys.version_info.major)'"
ret, out, err = self._call(cmd, **kwargs)
return out[0]
def _minor_version(self, **kwargs):
cmd = "python3 -c 'import sys; print(sys.version_info.minor)'"
ret, out, err = self._call(cmd, **kwargs)
return out[0]
|
py | 1a5188b8ea6cdebc4144091330ad0cfef8b2b0dd | """
Django settings for snap_black_32465 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'snap_black_32465.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'snap_black_32465.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
py | 1a518963eef49f782e3b70a2cbe6946224a0c383 | from purbeurre.models import Product
from django.db.models import Q
from collections import Counter
class DatabaseSearch:
"""This class's job is to find categories concerned by the user research
and return the best products (in terms of nutri-score) of each category."""
def get_substitutes_per_category(self, search):
"""This method gets the list of concerned categories from the
get_categories_from_search() method, and if it's not empty,
        builds a list of dictionaries where each one has the category's name
as key and a list of the 6 best products as value."""
cat_subs_list = []
categories = self.get_categories_from_search(search)
if categories is None:
return None
else:
for i, cat in enumerate(categories.keys()):
cat_subs_list.append({cat: []})
rq = Product.objects.filter(
category__name=cat
).order_by('nutriscore')[:6]
for r in rq:
if r.nutriscore < 'd':
cat_subs_list[i][cat].append(r)
return cat_subs_list
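    # Illustrative shape of the return value (hypothetical category names and
    # products, not taken from a real database):
    #   [{'Pasta': [<Product nutriscore=a>, <Product nutriscore=b>]},
    #    {'Sauces': [<Product nutriscore=a>]}]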
def get_categories_from_search(self, search):
products = Product.objects.filter(Q(name__startswith=search.lower())
| Q(name__contains=search.lower()))
return self.keep_only_real_categories(products)
def keep_only_real_categories(self, products):
"""It can happen that a user's search contains a word that is in
a product that has nothing to do with the intended research, so this is
why this method is here : it deletes categories where the product was
found too few times compared to the category where it was the most
present. Actually, the decision ratio if a category is deleted or not
is 10%."""
categories_list = []
# For each product, adds its category name to a list
for product in products:
categories_list.append(product.category.name)
if len(categories_list) == 0:
return None
# Gets the category the most present
greatest = max(Counter(categories_list).values())
keys_to_del = []
# Builds and sorts a dict from the category the most present
# to the least
dict = Counter(categories_list)
the_dict = {k: v for k, v in sorted(dict.items(),
key=lambda item: item[1],
reverse=True)}
# Checks which categories are too few
for k, v in the_dict.items():
if v < greatest*0.1:
keys_to_del.append(k)
# Removes them
for key in keys_to_del:
del(the_dict[key])
return the_dict
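# Worked example of the 10% rule above (hypothetical counts): if
# Counter(categories_list) == {'Pasta': 50, 'Sauces': 3}, the greatest count is
# 50, so 'Sauces' is removed because 3 < 50 * 0.1 and {'Pasta': 50} is returned.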
|
py | 1a518a5a8deb2fcbd3a2332b30d46cb9850c026c | import datetime
from typing import Union, Optional
import discord
from discord.ext import commands
async def trigger_role(member: discord.Member, role: Union[discord.Role, int, str], guild: Optional[discord.Guild] = None) -> bool:
"""
Triggers a role on a member.
    If the member already has `role`, it is removed; if the member does not yet have `role`, it is applied.
    If role is a discord.Role, nothing is pulled from cache.
    If role is an integer, a discord.Role object is looked up by id in the `guild.roles` cache.
    If role is a string, a discord.Role object is looked up by name in the `guild.roles` cache.
If `guild` is None, and `role` is int or str, then TypeError is raised
Throws:
TypeError, see above
ValueError if the `role` cannot be retrieved from cache
Whatever discord.Member.add_roles can throw
returns False if role was removed, True if it was added
"""
    if guild is None and isinstance(role, (str, int)):
        raise TypeError(
            "Expected a guild since role was str or int, but got None")
    if type(role) == int:
        role = discord.utils.get(guild.roles, id=role)
    elif type(role) == str:
        role = discord.utils.get(guild.roles, name=role)
    elif not isinstance(role, discord.Role):
        raise TypeError(f"Expected discord.Role, got {type(role)}")
    if role is None:
        raise ValueError("Role could not be retrieved from cache")
def has_role(member: discord.Member, role: discord.Role) -> bool:
"""Returns True if the member has the role, false if not"""
return role in member.roles
if has_role(member, role):
await member.remove_roles(role)
return False
await member.add_roles(role)
return True
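# Hedged usage sketch for trigger_role (the command and the "Member" role name
# below are illustrative assumptions, not part of this module):
#
#     @commands.command()
#     async def member(ctx):
#         added = await trigger_role(ctx.author, "Member", guild=ctx.guild)
#         await simple_embed(ctx, "Role added" if added else "Role removed")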
async def simple_embed(ctx, text):
await ctx.send(
embed=discord.Embed(title=text,
colour=discord.Colour(0x00FF00), timestamp=datetime.datetime.utcnow())
.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)
.set_footer(text="Success!"))
async def send_error(ctx, error):
await ctx.send(
embed=discord.Embed(title=str(error),
colour=discord.Colour(0xFF0000), timestamp=datetime.datetime.utcnow())
.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)
.set_footer(text="Guild only!"))
|
py | 1a518ad40c99892c2585057fefc285bfbe7ba7ff | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""Server of Rock Paper Scissor game (2 players)."""
from socketserver import BaseRequestHandler, TCPServer
__author__ = 'fyabc'
ADDRESS = 'localhost', 20000
MSG_SIZE = 8192
class RpsHandler(BaseRequestHandler):
def handle(self):
print('Get connection from', self.client_address)
while True:
msg = self.request.recv(MSG_SIZE)
if not msg:
break
print('Message from {}: {}'.format(self.client_address, msg))
self.request.sendall(msg)
def main():
server = TCPServer(ADDRESS, RpsHandler)
server.serve_forever()
if __name__ == '__main__':
main()
|
py | 1a518bd244cb56d0352a032bf5da727eebbbb635 | #!/usr/bin/env python3
# coding=utf-8
# Import required system packages
import requests
import base64
import zipfile
import configparser
import socket
import ping3
import re
import os
from prettytable import PrettyTable
from colorama import init, Fore, Back, Style
class DrawTable(object):
    '''Utility class for printing results as a formatted table'''
def __init__(self):
self.table = []
header = [
"id",
"name",
"ping(ms)",
"port_status",
"server",
"port",
"method"
]
self.x = PrettyTable(header)
self.x.reversesort = True
def append(self,*args,**kwargs):
if(kwargs):
content=[
kwargs['id'],
kwargs['name'],
kwargs['ping'],
kwargs['port_status'],
kwargs['server'],
kwargs['port'],
kwargs['method'],
]
self.x.add_row(content)
def str(self):
return str(self.x)
init (autoreset=False)
class colored(object):
    '''Utility class for printing text in different colors'''
def red(self,s):
return Fore.LIGHTRED_EX + s + Fore.RESET
def green(self,s):
return Fore.LIGHTGREEN_EX + s + Fore.RESET
def yellow(self,s):
return Fore.LIGHTYELLOW_EX + s + Fore.RESET
def white(self,s):
return Fore.LIGHTWHITE_EX + s + Fore.RESET
def blue(self,s):
return Fore.LIGHTBLUE_EX + s + Fore.RESET
# Decode url-safe base64 text, fixing up missing padding first
def base64decode(text):
i = len(text) % 4
if i == 1:
text = text + '==='
elif i == 2:
text = text + '=='
elif i == 3:
text = text + '='
text = re.sub(r'_', '/', text)
text = re.sub(r'-', '+', text)
return base64.urlsafe_b64decode(text).decode()
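# Example of the padding fix-up above (plain base64 fact, not tied to any
# subscription): the 7-char input 'aGVsbG8' gets one '=' appended and decodes
# to 'hello'.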
# Get the list of ssr urls from a subscribe url
def get_ssr_list(url):
color = colored()
url_colored = color.blue(url)
    print('Parsing the ssr subscribe url:', url_colored)
    print('It will take a moment, please be patient~~')
result = requests.get(url, headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3742.400 QQBrowser/10.5.3864.400'})
try:
ssr_result = base64decode(result.text)
except:
print(color.red("ssr subscribe url parsed failed,please check the ssr subscribe url~~"))
return None
else:
ssr_list = ssr_result.split('\n')
ssr_real_list = list()
for ssr in ssr_list:
if ssr:
ssr_real_list.append(ssr[6:])
return ssr_real_list
# Parse an ssr url into its fields
def analysis_ssr_url(ssr_url):
try:
ssr_url = base64decode(ssr_url)
except:
pass
else:
ssr_dict = dict()
param_list = ssr_url.split(':')
if len(param_list) == 6:
server = param_list[0]
port = param_list[1]
protocol = param_list[2]
method = param_list[3]
obfs = param_list[4]
second_encryption_param_list = param_list[-1].split('/?')
password = base64decode(second_encryption_param_list[0])
encryption_param_list = second_encryption_param_list[-1].split('&')
for params in encryption_param_list:
key = params.split('=')[0]
value = params.split('=')[1]
if key == 'obfsparam':
key = 'obfs_param'
if key == 'protoparam':
key = 'protocol_param'
ssr_dict[key] = base64decode(value)
ssr_dict['server'] = server
ssr_dict['server_port'] = int(port)
ssr_dict['method'] = method
ssr_dict['obfs'] = obfs
ssr_dict['password'] = password
ssr_dict['ping'] = get_ping_speed(server, ssr_dict['remarks'])
ssr_dict['port_status'] = get_port_status(server, int(port))
ssr_dict['protocol'] = protocol
return ssr_dict
else:
color = colored()
print(color.yellow("Sorry, Not support ipv6 node~~"))
return None
# Build the list of ssr info dicts
def generate_ssr_info_dict_list(ssr_url_list):
ssr_info_dict_list = list()
for ssr_url in ssr_url_list:
ssr_info_dict = analysis_ssr_url(ssr_url)
if ssr_info_dict:
ssr_info_dict_list.append(ssr_info_dict)
return ssr_info_dict_list
# Build the display table
def generate_ssr_display_table(ssr_info_dict_list):
table = DrawTable()
id = 1
for ssr_info_dict in ssr_info_dict_list:
color = colored()
if ssr_info_dict['ping'] == '∞':
ping = color.red(ssr_info_dict['ping'])
else:
ping = color.green(str(ssr_info_dict['ping']))
if ssr_info_dict['port_status'] == "×":
port_status = color.red(ssr_info_dict['port_status'])
else:
port_status = color.green(ssr_info_dict['port_status'])
table.append(
id = id,
name=ssr_info_dict['remarks'],
ping=ping,
port_status=port_status,
server=ssr_info_dict['server'],
port=ssr_info_dict['server_port'],
method=ssr_info_dict['method']
)
id = id + 1
return table.str()
# Get the ping latency of an ssr node
def get_ping_speed(server, remarks):
color = colored()
if check_ip_addr(server):
ping_speed = ping3.ping(server, timeout=5, unit='ms')
if ping_speed:
flag = color.green('√')
ping_speed = format(ping_speed, '.3f')
else:
flag = color.red('×')
ping_speed = '∞'
else:
flag = color.red('×')
ping_speed = '∞'
print("Testing ping:", remarks, server, flag)
return ping_speed
def check_ip_addr(server):
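    # Note: the pattern below accepts hostname/domain-style strings
    # (alphanumeric labels separated by dots); dotted IPv4 addresses match too.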
ipRe = "^(?=^.{3,255}$)[a-zA-Z0-9][-a-zA-Z0-9]{0,62}(\.[a-zA-Z0-9][-a-zA-Z0-9]{0,62})+$"
if re.search(ipRe, server):
return True
else:
return False
# Get the user's home directory
def get_home_dir():
cmd = 'echo ${HOME}'
home_dir = os.popen(cmd).read().strip()
return home_dir
# Get the config directory paths
def get_config_dir():
home_dir = get_home_dir()
config_dir = os.path.join(home_dir, '.ssr-command-client')
config_file_dir = os.path.join(config_dir, 'config.ini')
lock_file_dir = os.path.join(config_dir, '.config.lock')
return config_dir, config_file_dir, lock_file_dir
# Create the config directory and config file if they do not exist
def create_config_dir():
config_dir, config_file_dir, lock_file_dir = get_config_dir()
if os.path.exists(config_dir):
pass
else:
os.mkdir(config_dir)
if os.path.exists(config_file_dir):
pass
else:
with open(config_file_dir, 'w') as file:
file.write('')
# Download the shadowsocksr source code
def download_ssr_source():
url = 'https://github.com/TyrantLucifer/shadowsocksr/archive/3.2.2.zip'
result = requests.get(url, headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3742.400 QQBrowser/10.5.3864.400'})
config_dir, config_file_dir, lock_file_dir = get_config_dir()
shadowsocksr_zip_file_path = os.path.join(config_dir, 'shadowsocksr.zip')
with open(shadowsocksr_zip_file_path, "wb") as file:
file.write(result.content)
zipFile = zipfile.ZipFile(shadowsocksr_zip_file_path)
zipFile.extractall(config_dir)
os.chdir(config_dir)
os.rename(zipFile.namelist()[0], 'shadowsocksr')
# Initialize the config file
def init_config_file():
config_dir, config_file_dir, lock_file_dir = get_config_dir()
server_json_file_path = os.path.join(config_dir, 'ssr-list.json')
config_json_file_path = os.path.join(config_dir, 'config.json')
shadowsocksr_client_path = os.path.join(config_dir, 'shadowsocksr/shadowsocks/local.py')
shadowsocksr_pid_file_path = os.path.join(config_dir, 'shadowsocksr.pid')
shadowsocksr_log_file_path = os.path.join(config_dir, 'shadowsocksr.log')
cf = configparser.ConfigParser()
cf.add_section('default')
cf.set('default', 'subscribe_url', 'https://raw.githubusercontent.com/satrom/V2SSR/master/SSR/Day.txt')
cf.set('default', 'server_json_file_path', server_json_file_path)
cf.set('default', 'config_json_file_path', config_json_file_path)
cf.set('default', 'local_address', '127.0.0.1')
cf.set('default', 'timeout', '300')
cf.set('default', 'workers', '1')
cf.set('default', 'shadowsocksr_client_path', shadowsocksr_client_path)
cf.set('default', 'shadowsocksr_pid_file_path', shadowsocksr_pid_file_path)
cf.set('default', 'shadowsocksr_log_file_path', shadowsocksr_log_file_path)
with open(config_file_dir, 'w+') as file:
cf.write(file)
with open(lock_file_dir, 'w') as lock_file:
lock_file.write('')
# Get a config value
def get_config_value(key):
config_dir, config_file_dir, lock_file_dir = get_config_dir()
cf = configparser.ConfigParser()
cf.read(config_file_dir)
return cf.get('default', key)
# Set a config value
def set_config_value(key, value):
config_dir, config_file_dir, lock_file_dir = get_config_dir()
cf = configparser.ConfigParser()
cf.read(config_file_dir)
cf.set('default', key, str(value))
with open(config_file_dir, 'w+') as file:
cf.write(file)
# Test whether the server port is reachable
def get_port_status(server, port):
server_addr = (server, port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(5)
try:
s.connect(server_addr)
except:
flag = "×"
else:
flag = "√"
s.close()
return flag
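# Minimal end-to-end sketch using the helpers above (the subscribe url is a
# placeholder assumption, not a real endpoint):
#
#     ssr_urls = get_ssr_list('https://example.com/subscribe')
#     if ssr_urls:
#         ssr_infos = generate_ssr_info_dict_list(ssr_urls)
#         print(generate_ssr_display_table(ssr_infos))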
|
py | 1a518c3517eb1ad3b834e937e00ae53f8f34cbf2 | #!/usr/bin/env python3
#
# aiohttp documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 5 12:35:35 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import re
from pathlib import Path
PROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()
_docs_path = os.path.dirname(__file__)
_version_path = os.path.abspath(
os.path.join(_docs_path, "..", "aiohttp", "__init__.py")
)
with open(_version_path, encoding="latin1") as fp:
try:
_version_info = re.search(
r'^__version__ = "'
r"(?P<major>\d+)"
r"\.(?P<minor>\d+)"
r"\.(?P<patch>\d+)"
r'(?P<tag>.*)?"$',
fp.read(),
re.M,
).groupdict()
except IndexError:
raise RuntimeError("Unable to determine version.")
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
# stdlib-party extensions:
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
# Third-party extensions:
"sphinxcontrib.asyncio",
"sphinxcontrib.blockdiag",
"sphinxcontrib.towncrier", # provides `towncrier-draft-entries` directive
]
try:
import sphinxcontrib.spelling # noqa
extensions.append("sphinxcontrib.spelling")
except ImportError:
pass
intersphinx_mapping = {
"python": ("http://docs.python.org/3", None),
"multidict": ("https://multidict.readthedocs.io/en/stable/", None),
"yarl": ("https://yarl.readthedocs.io/en/stable/", None),
"aiosignal": ("https://aiosignal.readthedocs.io/en/stable/", None),
"aiohttpjinja2": ("https://aiohttp-jinja2.readthedocs.io/en/stable/", None),
"aiohttpremotes": ("https://aiohttp-remotes.readthedocs.io/en/stable/", None),
"aiohttpsession": ("https://aiohttp-session.readthedocs.io/en/stable/", None),
"aiohttpdemos": ("https://aiohttp-demos.readthedocs.io/en/latest/", None),
"asynctest": ("https://asynctest.readthedocs.io/en/latest/", None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# -- Project information -----------------------------------------------------
github_url = "https://github.com"
github_repo_org = "aio-libs"
github_repo_name = "aiohttp"
github_repo_slug = f"{github_repo_org}/{github_repo_name}"
github_repo_url = f"{github_url}/{github_repo_slug}"
github_sponsors_url = f"{github_url}/sponsors"
project = github_repo_name
copyright = f"{project} contributors"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "{major}.{minor}".format(**_version_info)
# The full version, including alpha/beta/rc tags.
release = "{major}.{minor}.{patch}{tag}".format(**_version_info)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# pygments_style = 'sphinx'
# The default language to highlight source code in.
highlight_language = "python3"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Extension configuration -------------------------------------------------
# -- Options for extlinks extension ---------------------------------------
extlinks = {
"issue": (f"{github_repo_url}/issues/%s", "#"),
"pr": (f"{github_repo_url}/pull/%s", "PR #"),
"commit": (f"{github_repo_url}/commit/%s", ""),
"gh": (f"{github_url}/%s", "GitHub: "),
"user": (f"{github_sponsors_url}/%s", "@"),
}
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "aiohttp_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Async HTTP client/server for asyncio and Python",
"canonical_url": "http://docs.aiohttp.org/en/stable/",
"github_user": github_repo_org,
"github_repo": github_repo_name,
"github_button": True,
"github_type": "star",
"github_banner": True,
"badges": [
{
"image": f"{github_repo_url}/workflows/CI/badge.svg",
"target": f"{github_repo_url}/actions?query=workflow%3ACI",
"height": "20",
"alt": "Azure Pipelines CI status",
},
{
"image": f"https://codecov.io/github/{github_repo_slug}/coverage.svg?branch=master",
"target": f"https://codecov.io/github/{github_repo_slug}",
"height": "20",
"alt": "Code coverage status",
},
{
"image": f"https://badge.fury.io/py/{project}.svg",
"target": f"https://badge.fury.io/py/{project}",
"height": "20",
"alt": "Latest PyPI package version",
},
{
"image": f"https://img.shields.io/discourse/status?server=https%3A%2F%2F{github_repo_org}.discourse.group",
"target": f"https://{github_repo_org}.discourse.group",
"height": "20",
"alt": "Discourse status",
},
{
"image": "https://badges.gitter.im/Join%20Chat.svg",
"target": f"https://gitter.im/{github_repo_org}/Lobby",
"height": "20",
"alt": "Chat on Gitter",
},
],
}
html_css_files = [
"css/logo-adjustments.css",
]
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = [alabaster.get_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "aiohttp-plain.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
"**": [
"about.html",
"navigation.html",
"searchbox.html",
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = f"{project}doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
"index",
f"{project}.tex",
f"{project} Documentation",
f"{project} contributors",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", project, f"{project} Documentation", [project], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
project,
f"{project} Documentation",
"Aiohttp contributors",
project,
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -------------------------------------------------------------------------
nitpicky = True
nitpick_ignore = [
("py:mod", "aiohttp"), # undocumented, no `.. currentmodule:: aiohttp` in docs
("py:class", "aiohttp.SimpleCookie"), # undocumented
("py:class", "aiohttp.web.RequestHandler"), # undocumented
("py:class", "aiohttp.NamedPipeConnector"), # undocumented
("py:meth", "aiohttp.ClientSession.request"), # undocumented
("py:class", "aiohttp.protocol.HttpVersion"), # undocumented
("py:class", "aiohttp.ClientRequest"), # undocumented
("py:class", "aiohttp.payload.Payload"), # undocumented
("py:class", "aiohttp.abc.AbstractResolver"), # undocumented
("py:func", "aiohttp.ws_connect"), # undocumented
("py:meth", "start"), # undocumented
("py:exc", "aiohttp.ClientHttpProxyError"), # undocumented
("py:class", "asyncio.AbstractServer"), # undocumented
("py:mod", "aiohttp.test_tools"), # undocumented
("py:class", "list of pairs"), # undocumented
("py:class", "aiohttp.protocol.HttpVersion"), # undocumented
("py:meth", "aiohttp.ClientSession.request"), # undocumented
("py:class", "aiohttp.StreamWriter"), # undocumented
("py:attr", "aiohttp.StreamResponse.body"), # undocumented
("py:class", "aiohttp.payload.StringPayload"), # undocumented
("py:meth", "aiohttp.web.Application.copy"), # undocumented
("py:meth", "asyncio.AbstractEventLoop.create_server"), # undocumented
("py:data", "aiohttp.log.server_logger"), # undocumented
("py:data", "aiohttp.log.access_logger"), # undocumented
("py:data", "aiohttp.helpers.AccessLogger"), # undocumented
("py:attr", "helpers.AccessLogger.LOG_FORMAT"), # undocumented
("py:meth", "aiohttp.web.AbstractRoute.url"), # undocumented
("py:class", "aiohttp.web.MatchedSubAppResource"), # undocumented
("py:attr", "body"), # undocumented
("py:class", "socket.socket"), # undocumented
("py:class", "socket.AddressFamily"), # undocumented
("py:obj", "logging.DEBUG"), # undocumented
("py:class", "aiohttp.abc.AbstractAsyncAccessLogger"), # undocumented
("py:meth", "aiohttp.web.Response.write_eof"), # undocumented
("py:meth", "aiohttp.payload.Payload.set_content_disposition"), # undocumented
("py:class", "cgi.FieldStorage"), # undocumented
("py:meth", "aiohttp.web.UrlDispatcher.register_resource"), # undocumented
("py:func", "aiohttp_debugtoolbar.setup"), # undocumented
]
# -- Options for towncrier_draft extension -----------------------------------
towncrier_draft_autoversion_mode = "draft" # or: 'sphinx-version', 'sphinx-release'
towncrier_draft_include_empty = True
towncrier_draft_working_directory = PROJECT_ROOT_DIR
# Not yet supported: towncrier_draft_config_path = 'pyproject.toml' # relative to cwd
|
py | 1a518eb56ebcd7f6846f6191077390e5891c93d4 | # Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector, show_result_pyplot)
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--palette',
default='coco',
choices=['coco', 'voc', 'citys', 'random'],
help='Color palette used for visualization')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
parser.add_argument(
'--async-test',
action='store_true',
help='whether to set async options for async inference.')
args = parser.parse_args()
return args
def main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
show_result_pyplot(
model,
args.img,
result,
palette=args.palette,
score_thr=args.score_thr)
async def async_main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
tasks = asyncio.create_task(async_inference_detector(model, args.img))
result = await asyncio.gather(tasks)
# show the results
show_result_pyplot(
model,
args.img,
result[0],
palette=args.palette,
score_thr=args.score_thr)
if __name__ == '__main__':
args = parse_args()
if args.async_test:
asyncio.run(async_main(args))
else:
main(args)
|
py | 1a518eed8022fd37dcb4a40211e1b56780aa9719 | import configparser
from pathlib import Path
DEFAULT_SSH_OPTIONS = '-At -o StrictHostKeyChecking=no -o ServerAliveInterval=10'
CONFIG_FILE_PATH = f'{str(Path.home())}/.aws-cli-utils'
def load_user_configuration_file() -> configparser.ConfigParser:
"""
Loads the user configuration file and returns it as a ConfigParser object
:return: ConfigParser object with the file information
"""
config = configparser.ConfigParser()
config.read(CONFIG_FILE_PATH)
return config
def divide_chunks(big_list: list, chunk_size: int) -> list:
"""
Divides a list into equal chunks of size 'chunk_size'
:param big_list: List to be divided
:param chunk_size: Number of items per chunk
:return: List of lists
"""
for i in range(0, len(big_list), chunk_size):
yield big_list[i:i + chunk_size]
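# Example: list(divide_chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]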
def print_row(first, second, third, fourth):
"""
Prints a row of EC2 instance data in a tabular fashion
:param first: First column
:param second: Second column
:param third: Third column
:param fourth: Fourth column
:return: None
"""
print('{:<20} {:<20} {:<20} {:<20}'.format(first, second, third, fourth))
|
py | 1a518f0b35d01f5082a521afa0c857c62433a61d | # -*- coding: utf-8 -*-
from model.group import Group
import allure
# def test_add_group(app, db, json_groups, check_ui):
# group = json_groups
# old_groups = db.get_group_list()
# app.group_helper.creation(group)
# new_groups = db.get_group_list()
# old_groups.append(group)
# if check_ui:
# assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
def test_add_group(app, db, json_groups, check_ui):
group = json_groups
with allure.step('Given a group list'):
old_groups = db.get_group_list()
with allure.step("When I add a group %s to the list" % group):
app.group_helper.creation(group)
with allure.step('Then the new group list equal to the old list with the added group'):
new_groups = db.get_group_list()
old_groups.append(group)
if check_ui:
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
#testdata= [Group(name=name, header=header, footer=footer)
# for name in ["", random_string("name", 10)]
# for header in ["", random_string("header", 20)]
# for footer in ["", random_string("footer", 20)]]
#
# def test_add_empty_group(app, db, check_ui):
# old_groups = db.get_group_list()
# group = Group(name="", header="", footer="")
# app.group_helper.creation(group)
# new_groups = db.get_group_list()
# assert len(old_groups) + 1 == len(new_groups)
# old_groups.append(group)
# if check_ui:
# assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
|
py | 1a51901aa37589f01bf88c1f94f6508b069a83a4 | class TreeNode(object):
def __init__(self, val):
self.val = val
self.left = None
self.right = None
self.height = 1
class AVL_Tree(object):
def insert(self, root, key):
if not root:
return TreeNode(key)
elif key < root.val:
root.left = self.insert(root.left, key)
else:
root.right = self.insert(root.right, key)
root.height = 1 + max(self.getHeight(root.left), self.getHeight(root.right))
balance = self.getBalance(root)
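        # Rebalance using the four classic AVL cases:
        # left-left -> right rotate; right-right -> left rotate;
        # left-right -> left rotate the left child, then right rotate;
        # right-left -> right rotate the right child, then left rotate.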
if balance > 1 and key < root.left.val:
return self.rightRotate(root)
if balance < -1 and key > root.right.val:
return self.leftRotate(root)
if balance > 1 and key > root.left.val:
root.left = self.leftRotate(root.left)
return self.rightRotate(root)
if balance < -1 and key < root.right.val:
root.right = self.rightRotate(root.right)
return self.leftRotate(root)
return root
def leftRotate(self, z):
y = z.right
T2 = y.left
y.left = z
z.right = T2
z.height = 1 + max(self.getHeight(z.left), self.getHeight(z.right))
y.height = 1 + max(self.getHeight(y.left), self.getHeight(y.right))
return y
def rightRotate(self, z):
y = z.left
T3 = y.right
y.right = z
z.left = T3
z.height = 1 + max(self.getHeight(z.left), self.getHeight(z.right))
y.height = 1 + max(self.getHeight(y.left), self.getHeight(y.right))
return y
def getHeight(self, root):
if not root:
return 0
return root.height
def getBalance(self, root):
if not root:
return 0
return self.getHeight(root.left) - self.getHeight(root.right)
def preOrder(self, root):
if not root:
return
print("{0} ".format(root.val), end="")
self.preOrder(root.left)
self.preOrder(root.right)
if __name__ == "__main__":
"""
from timeit import timeit
myTree = AVL_Tree()
root = None
root = myTree.insert(root, 10)
root = myTree.insert(root, 20)
root = myTree.insert(root, 30)
root = myTree.insert(root, 40)
root = myTree.insert(root, 50)
root = myTree.insert(root, 25)
print(timeit(lambda: myTree.preOrder(root), number=10000)) # 0.1360708509964752
"""
|
py | 1a51902850bd3286cd033ed799c098f994981c3b |
import sys
sys.path.append('../../..')
from fastNLP import cache_results
from reproduction.sequence_labelling.cws.data.cws_shift_pipe import CWSShiftRelayPipe
from reproduction.sequence_labelling.cws.model.bilstm_shift_relay import ShiftRelayCWSModel
from fastNLP import Trainer
from torch.optim import Adam
from fastNLP import BucketSampler
from fastNLP import GradientClipCallback
from reproduction.sequence_labelling.cws.model.metric import RelayMetric
from fastNLP.embeddings import StaticEmbedding
from fastNLP import EvaluateCallback
#########hyper
L = 4
hidden_size = 200
num_layers = 1
drop_p = 0.2
lr = 0.008
data_name = 'pku'
#########hyper
device = 0
cache_fp = 'caches/{}.pkl'.format(data_name)
@cache_results(_cache_fp=cache_fp, _refresh=True)  # cache the result to cache_fp so the next run can read it directly instead of recomputing
def prepare_data():
data_bundle = CWSShiftRelayPipe(dataset_name=data_name, L=L).process_from_file()
    # pre-trained character embedding and bigram embedding
char_embed = StaticEmbedding(data_bundle.get_vocab('chars'), dropout=0.5, word_dropout=0.01,
model_dir_or_name='~/exps/CWS/pretrain/vectors/1grams_t3_m50_corpus.txt')
bigram_embed = StaticEmbedding(data_bundle.get_vocab('bigrams'), dropout=0.5, min_freq=3, word_dropout=0.01,
model_dir_or_name='~/exps/CWS/pretrain/vectors/2grams_t3_m50_corpus.txt')
return data_bundle, char_embed, bigram_embed
data, char_embed, bigram_embed = prepare_data()
model = ShiftRelayCWSModel(char_embed=char_embed, bigram_embed=bigram_embed,
hidden_size=hidden_size, num_layers=num_layers, drop_p=drop_p, L=L)
sampler = BucketSampler()
optimizer = Adam(model.parameters(), lr=lr)
clipper = GradientClipCallback(clip_value=5, clip_type='value')  # clip gradients that are too large
evaluator = EvaluateCallback(data.get_dataset('test'))  # additionally evaluate on the test set
callbacks = [clipper, evaluator]
trainer = Trainer(data.get_dataset('train'), model, optimizer=optimizer, loss=None, batch_size=128, sampler=sampler,
update_every=1, n_epochs=10, print_every=5, dev_data=data.get_dataset('dev'), metrics=RelayMetric(),
metric_key='f', validate_every=-1, save_path=None, use_tqdm=True, device=device, callbacks=callbacks,
check_code_level=0, num_workers=1)
trainer.train() |
py | 1a5190510046daa40a7a0ed51ddfcb88b3e5e676 | from openpyxl import load_workbook
from openpyxl.utils import get_column_letter
from itertools import islice
from datetime import datetime
import pandas as pd
import streamlit as st
import logging
import os
files = os.listdir('./data')
workbooks = [item for item in files if '.xlsx' in item]
logging.basicConfig(filename='log.log', filemode='w', format='%(asctime)s - %(levelname)s %(message)s', datefmt='%H:%M:%S', encoding='utf-8', level=logging.DEBUG, force=True)
months = {'January': '01', 'February': '02', 'March': '03', 'April': '04', 'May': '05', 'June': '06', 'July': '07',
          'August': '08', 'September': '09', 'October': '10', 'November': '11', 'December': '12',
          'Jan': '01', 'Feb': '02', 'Mar': '03', 'Apr': '04', 'Jun': '06', 'Jul': '07',
          'Aug': '08', 'Sep': '09', 'Oct': '10', 'Nov': '11', 'Dec': '12'}
years = ['2010','2011','2012','2013','2014','2015','2016','2017','2018','2019','2020','2021']
def check_file(file):
    """
    Checks if the file exists under ./data and logs the result.
    """
    file = f'data/{file}'
    if os.path.exists(file):
        logging.info(f"{file} exists")
    else:
        print(f"FileNotFound: {file} is not a valid file.")
        logging.warning(f"FileNotFound: {file} is not a valid file.")
def get_summary(ws, month_year_format):
"""
Grabs relevant data from Summary MoM sheet.
"""
row = None
for item in ws['A']:
if month_year_format in str(item.value):
row = item.row
st.write(f'(Row: {row})')
values = [ro for ro in ws.iter_rows(min_row=row, max_row=row, values_only=True)]
new_values = [item for item in values[0][1:] if item != None]
# create dictionary from row data
row_data = {}
row_data['30s_abandonment'] = f'Abandon after 30s: {round(new_values[1]*100,2)}%'
row_data['fcr'] = f'FCR : {new_values[2]*100}0%'
row_data['dsat'] = f'DSAT : {new_values[3]*100}0%'
row_data['csat'] = f'CSAT : {new_values[4]*100}0%'
logging.info('get_summary succesful')
return row_data
def nps_check(type, number):
"""
Check size of group and return 'GOOD' or 'BAD'.
"""
if type == 'promoters':
if number >= 200:
return 'GOOD'
else:
return 'BAD'
if type == 'passives':
if number >= 100:
return 'GOOD'
else:
return 'BAD'
if type == 'detractors':
if number < 100:
return 'GOOD'
else:
return 'BAD'
def get_voc(ws, month_year_format):
"""
Grabs relevant data from VOC MoM sheet.
"""
col = None
for item in ws[1]:
if month_year_format in str(item.value):
col = item.column
st.write(f'(Column: {col})')
values = [co for co in ws.iter_cols(min_col=col, max_col=col, values_only=True)]
new_values = [item for item in values[0][1:] if item != None and isinstance(item, int)]
# create dictionary from column data
col_data = {}
col_data['base'] = f'Base Size: {new_values[0]}'
col_data['promoters'] = [f'Promoters: {new_values[1]}', nps_check('promoters', new_values[1])]
col_data['passives'] = [f'Passives: {new_values[2]}', nps_check('passives', new_values[2])]
col_data['detractors'] = [f'Detractors: {new_values[3]}', nps_check('detractors', new_values[3])]
logging.info('get_voc succesful')
return col_data
def get_current():
"""
Grabs the current month in integer / string formats and year.
"""
# format month year for datetime comparison
month = datetime.now().strftime('%m')
month_word = datetime.now().strftime('%B')
year = datetime.now().year
logging.info(f'Current: {month_word}, {month}-{year}')
return month, month_word, year
def log_summary(row_data):
"""
Log Summary data.
"""
print(row_data)
for item in row_data:
logging.info(row_data[item])
def log_voc(col_data):
"""
Log VOC data.
"""
for item in col_data:
if 'base' in item:
logging.info(col_data[item])
else:
logging.info(f'{col_data[item][0]} - {col_data[item][1]}')
def show_summary(row_data):
"""
Display Summary data in streamlit app.
"""
for item in row_data:
st.write(row_data[item])
logging.info(f'Displayed summary in app')
def show_voc(col_data):
"""
Display VOC data in streamlit app.
"""
for item in col_data:
if 'base' in item:
st.write(col_data[item])
else:
st.write(f'{col_data[item][0]} - {col_data[item][1]}')
logging.info(f'Displayed voc in app')
def show_logs():
with open('log.log') as log_file:
for line in log_file:
st.write(line)
logging.info('Viewed logs') |
py | 1a5190829e7051b58999ec9d2af561bdd2e924bb | try:
from setuptools import setup
from setuptools import find_packages
packages = find_packages()
except ImportError:
from distutils.core import setup
import os
packages = [x.strip('./').replace('/','.') for x in os.popen('find -name "__init__.py" | xargs -n1 dirname').read().strip().split('\n')]
setup(
name='cle',
description='CLE Loads Everything (at least, many binary formats!) and provides a pythonic interface to analyze what they are and what they would look like in memory.',
version='7.8.2.21',
packages=packages,
install_requires=[
'pyelftools>=0.24',
'cffi',
'idalink',
'archinfo>=7.8.2.21',
'pyvex>=7.8.2.21',
'pefile',
]
)
|
py | 1a519226b6d65c368983d7671dcf3a5ee2650fde | import os
from yt_concate.settings import DOWNLOADS_DIR
from yt_concate.settings import VIDEOS_DIR
from yt_concate.settings import CAPTIONS_DIR
class Utils:
    def __init__(self):
pass
def create_dirs(self):
os.makedirs(DOWNLOADS_DIR, exist_ok=True)
os.makedirs(VIDEOS_DIR, exist_ok=True)
os.makedirs(CAPTIONS_DIR, exist_ok=True)
@staticmethod
def get_video_id_from_url(url):
        return url.split('watch?v=')[-1]  # split yields parts 0 and 1; -1 takes the last part, so index 1 would also work here
def get_caption_filepath(self, url):
        return os.path.join(CAPTIONS_DIR, self.get_video_id_from_url(url) + '.txt')
def caption_file_exists(self, url):
path = self.get_caption_filepath(url)
return os.path.exists(path) and os.path.getsize(path) > 0
def get_video_list_filepath(self, channel_id):
        return os.path.join(DOWNLOADS_DIR, channel_id + '.txt')
def video_file_exists(self, channel_id):
        path = self.get_video_list_filepath(channel_id)
return os.path.exists(path) and os.path.getsize(path) > 0
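# A minimal usage sketch, not part of the original module (the URL below is a
# hypothetical example; it assumes the settings paths are writable):
#
#   utils = Utils()
#   utils.create_dirs()
#   url = 'https://www.youtube.com/watch?v=dQw4w9WgXcQ'
#   print(Utils.get_video_id_from_url(url))   # -> 'dQw4w9WgXcQ'
#   print(utils.caption_file_exists(url))     # False until a caption file is written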
|
py | 1a519242c37e17b56f2fe1472f3ef73d85de6301 | import os
import csv
from typing import Tuple, Union
from pathlib import Path
import torchaudio
from torchaudio.datasets.utils import download_url, extract_archive
from torch import Tensor
from torch.utils.data import Dataset
_RELEASE_CONFIGS = {
"release1": {
"folder_in_archive": "wavs",
"url": "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2",
"checksum": "be1a30453f28eb8dd26af4101ae40cbf2c50413b1bb21936cbcdc6fae3de8aa5",
}
}
class LJSPEECH(Dataset):
"""Create a Dataset for LJSpeech-1.1.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"wavs"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
def __init__(self,
root: Union[str, Path],
url: str = _RELEASE_CONFIGS["release1"]["url"],
folder_in_archive: str = _RELEASE_CONFIGS["release1"]["folder_in_archive"],
download: bool = False) -> None:
self._parse_filesystem(root, url, folder_in_archive, download)
def _parse_filesystem(self, root: str, url: str, folder_in_archive: str, download: bool) -> None:
root = Path(root)
basename = os.path.basename(url)
archive = root / basename
basename = Path(basename.split(".tar.bz2")[0])
folder_in_archive = basename / folder_in_archive
self._path = root / folder_in_archive
self._metadata_path = root / basename / 'metadata.csv'
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _RELEASE_CONFIGS["release1"]["checksum"]
download_url(url, root, hash_value=checksum)
extract_archive(archive)
with open(self._metadata_path, "r", newline='') as metadata:
flist = csv.reader(metadata, delimiter="|", quoting=csv.QUOTE_NONE)
self._flist = list(flist)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
tuple: ``(waveform, sample_rate, transcript, normalized_transcript)``
"""
line = self._flist[n]
fileid, transcript, normalized_transcript = line
fileid_audio = self._path / (fileid + ".wav")
# Load audio
waveform, sample_rate = torchaudio.load(fileid_audio)
return (
waveform,
sample_rate,
transcript,
normalized_transcript,
)
def __len__(self) -> int:
return len(self._flist)
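# A minimal usage sketch (the root path "./data" is an illustrative assumption);
# it shows the tuple layout returned by __getitem__ above:
#
#   dataset = LJSPEECH(root="./data", download=True)
#   waveform, sample_rate, transcript, normalized_transcript = dataset[0]
#   print(waveform.shape, sample_rate, normalized_transcript)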
|
py | 1a5192567097fb34ba0f5e1e9aad3ecf6c4249c7 | import random
import string
from dpaster import core
from tests.fixtures import python_code
def test_get_syntax_stdin(python_code):
assert "python" in core.get_syntax("<stdin>", python_code)
def test_get_syntax_java_file():
assert core.get_syntax("HelloWorld.java", "") == "java"
def test_get_syntax_weird_filename():
assert core.get_syntax("main.cthulhu", "") == "text"
def test_get_syntax_weird_content():
random.seed(123)
content = "".join(
ch
for ch in [
random.choice(
string.ascii_letters
+ string.digits
+ r"!@#$%^&*()_+-=[]{}\/"
)
for _ in range(100)
]
)
assert core.get_syntax("<stdin>", content) == "text"
|
py | 1a519329eb4ae5d7f7b589948e2782d816f96ba8 | # -*- coding:utf-8 -*-
from .model import ModelSerializer, ModelInfoSerializer
from .field import FieldModelSerializer
from .instance import InstanceModelSerializer
from .value import ValueModelSerializer
from .permission import PermissionModelSerializer
|
py | 1a5193824b4d8f87ad9c3f9e28a52fc5b141608d | from django.template.defaultfilters import join
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class JoinTests(SimpleTestCase):
@setup({'join01': '{{ a|join:", " }}'})
def test_join01(self):
output = self.engine.render_to_string('join01', {'a': ['alpha', 'beta & me']})
self.assertEqual(output, 'alpha, beta & me')
@setup({'join02': '{% autoescape off %}{{ a|join:", " }}{% endautoescape %}'})
def test_join02(self):
output = self.engine.render_to_string('join02', {'a': ['alpha', 'beta & me']})
self.assertEqual(output, 'alpha, beta & me')
@setup({'join03': '{{ a|join:" & " }}'})
def test_join03(self):
output = self.engine.render_to_string('join03', {'a': ['alpha', 'beta & me']})
self.assertEqual(output, 'alpha & beta & me')
@setup({'join04': '{% autoescape off %}{{ a|join:" & " }}{% endautoescape %}'})
def test_join04(self):
output = self.engine.render_to_string('join04', {'a': ['alpha', 'beta & me']})
self.assertEqual(output, 'alpha & beta & me')
# #11377 Test that joining with unsafe joiners doesn't result in
# unsafe strings
@setup({'join05': '{{ a|join:var }}'})
def test_join05(self):
output = self.engine.render_to_string('join05', {'a': ['alpha', 'beta & me'], 'var': ' & '})
self.assertEqual(output, 'alpha & beta & me')
@setup({'join06': '{{ a|join:var }}'})
def test_join06(self):
output = self.engine.render_to_string('join06', {'a': ['alpha', 'beta & me'], 'var': mark_safe(' & ')})
self.assertEqual(output, 'alpha & beta & me')
@setup({'join07': '{{ a|join:var|lower }}'})
def test_join07(self):
output = self.engine.render_to_string('join07', {'a': ['Alpha', 'Beta & me'], 'var': ' & '})
self.assertEqual(output, 'alpha & beta & me')
@setup({'join08': '{{ a|join:var|lower }}'})
def test_join08(self):
output = self.engine.render_to_string('join08', {'a': ['Alpha', 'Beta & me'], 'var': mark_safe(' & ')})
self.assertEqual(output, 'alpha & beta & me')
class FunctionTests(SimpleTestCase):
def test_list(self):
self.assertEqual(join([0, 1, 2], 'glue'), '0glue1glue2')
|
py | 1a519547c799f71b4989baac65c822be3b6bc9a6 | # coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.ingest_mapping_schema import IngestMappingSchema # noqa: E501
from swagger_client.rest import ApiException
class TestIngestMappingSchema(unittest.TestCase):
"""IngestMappingSchema unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testIngestMappingSchema(self):
"""Test IngestMappingSchema"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.ingest_mapping_schema.IngestMappingSchema() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a51961845f7cc84ad32e391fc9274bfee2b7ce8 | """
Some utilities and things for testing various bits of SMPP.
"""
from twisted.internet.defer import DeferredQueue
from smpp.pdu_inspector import unpack_pdu
from vumi.transports.smpp.clientserver.server import SmscServer
class SmscTestServer(SmscServer):
"""
SMSC subclass that records inbound and outbound PDUs for later assertion.
"""
def __init__(self, delivery_report_string=None):
self.pdu_queue = DeferredQueue()
SmscServer.__init__(self, delivery_report_string)
def handle_data(self, data):
self.pdu_queue.put({
'direction': 'inbound',
'pdu': unpack_pdu(data),
})
return SmscServer.handle_data(self, data)
def send_pdu(self, pdu):
self.pdu_queue.put({
'direction': 'outbound',
'pdu': pdu.get_obj(),
})
return SmscServer.send_pdu(self, pdu)
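# A minimal sketch of how a test might consume the recorded PDUs; the
# inlineCallbacks wrapper (from twisted.internet.defer) and the helper name are
# assumptions, only the 'direction'/'pdu' keys come from the class above:
#
#   @inlineCallbacks
#   def assert_first_pdu(server):
#       entry = yield server.pdu_queue.get()
#       assert entry['direction'] in ('inbound', 'outbound')
#       print(entry['pdu'])  # unpacked PDU dict produced by unpack_pdu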
|
py | 1a519671e0acf99417f0f67c93866a266e3b93c5 | # Generated by Django 2.2.19 on 2021-09-24 16:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('posts', '0002_auto_20210924_1606'),
]
operations = [
migrations.AlterField(
model_name='post',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to=settings.AUTH_USER_MODEL),
),
]
|
py | 1a5196bfc4cf19f325b2e42a5a6e977025c178ca | from __future__ import absolute_import
from datetime import datetime
import pytz
from django.views.generic import View
from sentry.models import (
Commit, CommitAuthor, GroupSubscriptionReason, Organization, Project,
Release, Team
)
from sentry.utils.http import absolute_uri
from .mail import MailPreview
class DebugNewReleaseEmailView(View):
def get(self, request):
org = Organization(
id=1,
slug='organization',
name='My Company',
)
team = Team(
id=1,
slug='team',
name='My Team',
organization=org,
)
project = Project(
id=1,
organization=org,
team=team,
slug='project',
name='My Project',
)
release = Release(
project=project,
organization_id=project.organization_id,
version='6c998f755f304593a4713abd123eaf8833a2de5e',
date_added=datetime(2016, 10, 12, 15, 39, tzinfo=pytz.utc)
)
release_link = absolute_uri('/{}/{}/releases/{}/'.format(
org.slug,
project.slug,
release.version,
))
project_link = absolute_uri('/{}/{}/'.format(
org.slug,
project.slug,
))
commit_list = [
Commit(key='48b86fcd677da3dba5679d7a738240ce6fb74b20'),
Commit(
key='a53a2756bb8d111b43196210b34df90b87ed336b',
message='Update README.rst',
author=CommitAuthor(
name='David Cramer',
email='[email protected]',
)
),
]
return MailPreview(
html_template='sentry/emails/activity/release.html',
text_template='sentry/emails/activity/release.txt',
context={
'release': release,
'project': project,
'release_link': release_link,
'project_link': project_link,
'commit_list': commit_list,
'reason': GroupSubscriptionReason.descriptions[
GroupSubscriptionReason.committed
],
},
).render(request)
|
py | 1a519716193d55780b49c73c319a4e9cda4ee54f | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for creating input_fns."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
# A return type allowing input_fns to return multiple values in a well-
# defined way (analogous to ModelFnOps).
# The expected return values are:
# features: a dict of string to `Tensor` or `SparseTensor`, giving the features
# to be passed to the model.
# labels: a dict of string to `Tensor` or `SparseTensor`, giving labels (aka
# targets) for training.
# default_inputs: a dict of string to `Tensor` or `SparseTensor`, giving the
# input placeholders (if any) that this input_fn expects to be fed.
InputFnOps = collections.namedtuple('InputFnOps',
['features',
'labels',
'default_inputs'])
def build_parsing_serving_input_fn(feature_spec, default_batch_size=None):
"""Build an input_fn appropriate for serving, expecting fed tf.Examples.
Creates an input_fn that expects a serialized tf.Example fed into a string
placeholder. The function parses the tf.Example according to the provided
feature_spec, and returns all parsed Tensors as features. This input_fn is
for use at serving time, so the labels return value is always None.
Args:
feature_spec: a dict of string to `VarLenFeature`/`FixedLenFeature`.
default_batch_size: the number of query examples expected per batch.
Leave unset for variable batch size (recommended).
Returns:
An input_fn suitable for use in serving.
"""
def input_fn():
"""An input_fn that expects a serialized tf.Example."""
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[default_batch_size],
name='input_example_tensor')
inputs = {'examples': serialized_tf_example}
features = parsing_ops.parse_example(serialized_tf_example, feature_spec)
labels = None # these are not known in serving!
return InputFnOps(features, labels, inputs)
return input_fn
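# A minimal sketch of wiring up the parsing serving input_fn above; the feature
# names and shapes are illustrative assumptions, not part of this module:
#
#   feature_spec = {
#       "age": parsing_ops.FixedLenFeature([1], dtype=dtypes.float32),
#       "query": parsing_ops.VarLenFeature(dtype=dtypes.string),
#   }
#   serving_input_fn = build_parsing_serving_input_fn(feature_spec)
#   # serving_input_fn() returns InputFnOps(features, labels=None, default_inputs)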
def build_default_serving_input_fn(features, default_batch_size=None):
"""Build an input_fn appropriate for serving, expecting feature Tensors.
Creates an input_fn that expects all features to be fed directly.
This input_fn is for use at serving time, so the labels return value is always
None.
Args:
features: a dict of string to `Tensor`.
default_batch_size: the number of query examples expected per batch.
Leave unset for variable batch size (recommended).
Returns:
An input_fn suitable for use in serving.
"""
def input_fn():
"""an input_fn that expects all features to be fed directly."""
features_placeholders = {}
for name, t in features.items():
shape_list = t.get_shape().as_list()
shape_list[0] = default_batch_size
shape = tensor_shape.TensorShape(shape_list)
features_placeholders[name] = array_ops.placeholder(dtype=t.dtype,
shape=shape,
name=t.name)
labels = None # these are not known in serving!
return InputFnOps(features_placeholders, labels, features_placeholders)
return input_fn
|
py | 1a51994447ee3eb998cb2cad08a684f931ef77af | # Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from six.moves import queue
from vitrage.common.constants import DatasourceAction
from vitrage.common.constants import DatasourceProperties as DSProp
from vitrage.common.constants import EdgeLabel
from vitrage.common.constants import EntityCategory
from vitrage.common.constants import TemplateTopologyFields as TTFields
from vitrage.common.constants import VertexProperties as VProps
from vitrage.datasources.alarm_properties import AlarmProperties as AlarmProps
from vitrage.datasources.nagios import NAGIOS_DATASOURCE
from vitrage.datasources.nagios.properties import NagiosProperties as NProps
from vitrage.datasources.nagios.properties import NagiosTestStatus
from vitrage.datasources.nova.host import NOVA_HOST_DATASOURCE
from vitrage.datasources.nova.instance import NOVA_INSTANCE_DATASOURCE
from vitrage.entity_graph.mappings.operational_alarm_severity import \
OperationalAlarmSeverity
from vitrage.entity_graph.mappings.operational_resource_state import \
OperationalResourceState
from vitrage.evaluator.actions.action_executor import ActionExecutor
from vitrage.evaluator.actions.base import ActionMode
from vitrage.evaluator.actions.base import ActionType
from vitrage.evaluator.actions.evaluator_event_transformer \
import VITRAGE_DATASOURCE
from vitrage.evaluator.actions.recipes.action_steps import ADD_VERTEX
from vitrage.evaluator.actions.recipes.base import EVALUATOR_EVENT_TYPE
from vitrage.evaluator.template_data import ActionSpecs
from vitrage.evaluator.template_fields import TemplateFields as TFields
from vitrage.opts import register_opts
from vitrage.tests.functional.base import TestFunctionalBase
from vitrage.tests.functional.test_configuration import TestConfiguration
class TestActionExecutor(TestFunctionalBase, TestConfiguration):
# noinspection PyPep8Naming
@classmethod
def setUpClass(cls):
super(TestActionExecutor, cls).setUpClass()
cls.conf = cfg.ConfigOpts()
cls.conf.register_opts(cls.PROCESSOR_OPTS, group='entity_graph')
cls.conf.register_opts(cls.DATASOURCES_OPTS, group='datasources')
cls.add_db(cls.conf)
for vitrage_type in cls.conf.datasources.types:
register_opts(cls.conf, vitrage_type, cls.conf.datasources.path)
def _init_executer(self):
event_queue = queue.Queue()
def actions_callback(event_type, data):
event_queue.put(data)
return event_queue, ActionExecutor(self.conf, actions_callback)
def test_execute_set_state(self):
# Test Setup
processor = self._create_processor_with_graph(self.conf)
vertex_attrs = {VProps.VITRAGE_TYPE: NOVA_HOST_DATASOURCE}
host_vertices = processor.entity_graph.get_vertices(
vertex_attr_filter=vertex_attrs)
host_vertex_before = host_vertices[0]
targets = {TFields.TARGET: host_vertex_before}
props = {TFields.STATE: OperationalResourceState.SUBOPTIMAL}
action_spec = ActionSpecs(0, ActionType.SET_STATE, targets, props)
event_queue, action_executor = self._init_executer()
# Test Action - do
action_executor.execute(action_spec, ActionMode.DO)
processor.process_event(event_queue.get())
host_vertex_after = processor.entity_graph.get_vertex(
host_vertex_before.vertex_id)
# Test Assertions
agg_state_before = \
host_vertex_before.get(VProps.VITRAGE_AGGREGATED_STATE)
self.assertNotEqual(agg_state_before,
OperationalResourceState.SUBOPTIMAL)
self.assertNotIn(VProps.VITRAGE_STATE, host_vertex_before.properties)
agg_state_after = \
host_vertex_after.get(VProps.VITRAGE_AGGREGATED_STATE)
self.assertEqual(agg_state_after, OperationalResourceState.SUBOPTIMAL)
v_state_after = host_vertex_after.get(VProps.VITRAGE_STATE)
self.assertEqual(v_state_after, OperationalResourceState.SUBOPTIMAL)
# Test Action - undo
action_executor.execute(action_spec, ActionMode.UNDO)
processor.process_event(event_queue.get())
host_vertex_after_undo = processor.entity_graph.get_vertex(
host_vertex_before.vertex_id)
# Test Assertions
agg_state_after_undo = \
host_vertex_before.get(VProps.VITRAGE_AGGREGATED_STATE)
self.assertEqual(agg_state_after_undo, agg_state_before)
self.assertNotIn(
VProps.VITRAGE_STATE, host_vertex_after_undo.properties)
def test_execute_mark_instance_down(self):
# Test Setup
processor = self._create_processor_with_graph(self.conf)
vertex_attrs = {VProps.VITRAGE_TYPE: NOVA_INSTANCE_DATASOURCE}
instance_vertices = processor.entity_graph.get_vertices(
vertex_attr_filter=vertex_attrs)
instance_vertex_before = instance_vertices[0]
targets = {TFields.TARGET: instance_vertex_before}
props = {}
action_spec = ActionSpecs(0, ActionType.MARK_DOWN, targets, props)
event_queue, action_executor = self._init_executer()
# Test Action - do
action_executor.execute(action_spec, ActionMode.DO)
processor.process_event(event_queue.get())
instance_vertex_after = processor.entity_graph.get_vertex(
instance_vertex_before.vertex_id)
# Test Assertions
self.assertTrue(instance_vertex_after.get(VProps.IS_MARKED_DOWN))
# Test Action - undo
action_executor.execute(action_spec, ActionMode.UNDO)
processor.process_event(event_queue.get())
instance_vertex_after_undo = processor.entity_graph.get_vertex(
instance_vertex_before.vertex_id)
# Test Assertions
self.assertFalse(instance_vertex_after_undo.get(VProps.IS_MARKED_DOWN))
def test_execute_mark_down(self):
# Test Setup
processor = self._create_processor_with_graph(self.conf)
vertex_attrs = {VProps.VITRAGE_TYPE: NOVA_HOST_DATASOURCE}
host_vertices = processor.entity_graph.get_vertices(
vertex_attr_filter=vertex_attrs)
host_vertex_before = host_vertices[0]
targets = {TFields.TARGET: host_vertex_before}
props = {}
action_spec = ActionSpecs(0, ActionType.MARK_DOWN, targets, props)
event_queue, action_executor = self._init_executer()
# Test Action - do
action_executor.execute(action_spec, ActionMode.DO)
processor.process_event(event_queue.get())
host_vertex_after = processor.entity_graph.get_vertex(
host_vertex_before.vertex_id)
# Test Assertions
self.assertTrue(host_vertex_after.get(VProps.IS_MARKED_DOWN))
# Test Action - undo
action_executor.execute(action_spec, ActionMode.UNDO)
processor.process_event(event_queue.get())
host_vertex_after_undo = processor.entity_graph.get_vertex(
host_vertex_before.vertex_id)
# Test Assertions
self.assertFalse(host_vertex_after_undo.get(VProps.IS_MARKED_DOWN))
def test_execute_add_edge(self):
# Test Setup
processor = self._create_processor_with_graph(self.conf)
vertex_attrs = {VProps.VITRAGE_TYPE: NOVA_HOST_DATASOURCE}
host_vertices = processor.entity_graph.get_vertices(
vertex_attr_filter=vertex_attrs)
host_1 = host_vertices[0]
nagios_event1 = TestActionExecutor._get_nagios_event(
host_1.get(VProps.ID), NOVA_HOST_DATASOURCE)
processor.process_event(nagios_event1)
host_2 = host_vertices[1]
nagios_event2 = TestActionExecutor._get_nagios_event(
host_2.get(VProps.ID), NOVA_HOST_DATASOURCE)
processor.process_event(nagios_event2)
alarms_attrs = {VProps.VITRAGE_TYPE: NAGIOS_DATASOURCE}
alarms_vertices = processor.entity_graph.get_vertices(
vertex_attr_filter=alarms_attrs)
alarm1 = alarms_vertices[0]
alarm2 = alarms_vertices[1]
targets = {
TFields.TARGET: alarm1,
TFields.SOURCE: alarm2
}
action_spec = ActionSpecs(
0, ActionType.ADD_CAUSAL_RELATIONSHIP, targets, {})
event_queue, action_executor = self._init_executer()
before_edge = processor.entity_graph.get_edge(alarm2.vertex_id,
alarm1.vertex_id,
EdgeLabel.CAUSES)
# Test Action - do
action_executor.execute(action_spec, ActionMode.DO)
processor.process_event(event_queue.get())
new_edge = processor.entity_graph.get_edge(alarm2.vertex_id,
alarm1.vertex_id,
EdgeLabel.CAUSES)
# Test Assertions
self.assertIsNone(before_edge)
self.assertIsNotNone(new_edge)
def test_execute_add_vertex(self):
# Test Setup
processor = self._create_processor_with_graph(self.conf)
vertex_attrs = {VProps.VITRAGE_TYPE: NOVA_HOST_DATASOURCE}
host_vertices = processor.entity_graph.get_vertices(
vertex_attr_filter=vertex_attrs)
host = host_vertices[0]
targets = {TFields.TARGET: host}
props = {
TFields.ALARM_NAME: 'VM_CPU_SUBOPTIMAL_PERFORMANCE',
TFields.SEVERITY: OperationalAlarmSeverity.CRITICAL,
VProps.STATE: AlarmProps.ACTIVE_STATE,
VProps.RESOURCE_ID: host[VProps.ID],
VProps.VITRAGE_ID: 'DUMMY_ID'
}
# Raise alarm action adds new vertex with type vitrage to the graph
action_spec = ActionSpecs(0, ActionType.RAISE_ALARM, targets, props)
alarm_vertex_attrs = {VProps.VITRAGE_TYPE: VITRAGE_DATASOURCE}
before_alarms = processor.entity_graph.get_vertices(
vertex_attr_filter=alarm_vertex_attrs)
event_queue, action_executor = self._init_executer()
# Test Action
action_executor.execute(action_spec, ActionMode.DO)
processor.process_event(event_queue.get())
after_alarms = processor.entity_graph.get_vertices(
vertex_attr_filter=alarm_vertex_attrs)
# Assertions
self.assertEqual(len(before_alarms) + 1, len(after_alarms))
self.assert_is_not_empty(after_alarms)
alarm = after_alarms[0]
self.assertEqual(alarm.properties[VProps.VITRAGE_CATEGORY],
EntityCategory.ALARM)
self.assertEqual(alarm.properties[VProps.VITRAGE_TYPE],
VITRAGE_DATASOURCE)
self.assertEqual(alarm.properties[VProps.SEVERITY],
props[TFields.SEVERITY])
self.assertEqual(alarm.properties[VProps.VITRAGE_OPERATIONAL_SEVERITY],
props[TFields.SEVERITY])
self.assertEqual(alarm.properties[VProps.STATE],
AlarmProps.ACTIVE_STATE)
self.assertEqual(alarm.properties[VProps.VITRAGE_RESOURCE_ID],
                         action_spec.targets
                         [TTFields.TARGET][VProps.VITRAGE_ID])
self.assertEqual(alarm.properties[VProps.VITRAGE_RESOURCE_TYPE],
NOVA_HOST_DATASOURCE)
def test_execute_add_and_remove_vertex(self):
# Test Setup
processor = self._create_processor_with_graph(self.conf)
vertex_attrs = {VProps.VITRAGE_TYPE: NOVA_HOST_DATASOURCE}
host_vertices = processor.entity_graph.get_vertices(
vertex_attr_filter=vertex_attrs)
host = host_vertices[0]
targets = {TFields.TARGET: host}
props = {
TFields.ALARM_NAME: 'VM_CPU_SUBOPTIMAL_PERFORMANCE',
TFields.SEVERITY: OperationalAlarmSeverity.CRITICAL,
VProps.STATE: AlarmProps.ACTIVE_STATE,
VProps.RESOURCE_ID: host[VProps.ID]
}
action_spec = ActionSpecs(0, ActionType.RAISE_ALARM, targets, props)
add_vertex_event = TestActionExecutor._get_vitrage_add_vertex_event(
host,
props[TFields.ALARM_NAME],
props[TFields.SEVERITY])
processor.process_event(add_vertex_event)
alarm_vertex_attrs = {VProps.VITRAGE_TYPE: VITRAGE_DATASOURCE,
VProps.VITRAGE_IS_DELETED: False}
before_alarms = processor.entity_graph.get_vertices(
vertex_attr_filter=alarm_vertex_attrs)
event_queue, action_executor = self._init_executer()
# Test Action - undo
action_executor.execute(action_spec, ActionMode.UNDO)
event = event_queue.get()
processor.process_event(event)
after_alarms = processor.entity_graph.get_vertices(
vertex_attr_filter=alarm_vertex_attrs)
# Test Assertions
self.assertEqual(len(before_alarms) - 1, len(after_alarms))
@staticmethod
def _get_nagios_event(resource_name, resource_type):
return {NProps.LAST_CHECK: '2016-02-07 15:26:04',
NProps.RESOURCE_NAME: resource_name,
NProps.RESOURCE_TYPE: resource_type,
NProps.SERVICE: 'Check_MK',
NProps.STATUS: NagiosTestStatus.CRITICAL,
NProps.STATUS_INFO: 'test test test',
DSProp.DATASOURCE_ACTION: DatasourceAction.SNAPSHOT,
DSProp.ENTITY_TYPE: NAGIOS_DATASOURCE,
DSProp.SAMPLE_DATE: '2016-02-07 15:26:04'}
@staticmethod
def _get_vitrage_add_vertex_event(target_vertex, alarm_name, severity):
return {TTFields.TARGET: target_vertex.vertex_id,
VProps.UPDATE_TIMESTAMP: '2016-03-17 11:33:32.443002',
DSProp.DATASOURCE_ACTION: DatasourceAction.UPDATE,
TFields.ALARM_NAME: alarm_name,
VProps.STATE: 'Active',
EVALUATOR_EVENT_TYPE: ADD_VERTEX,
DSProp.ENTITY_TYPE: VITRAGE_DATASOURCE,
VProps.SEVERITY: OperationalAlarmSeverity.CRITICAL,
VProps.VITRAGE_ID: 'mock_vitrage_id',
VProps.VITRAGE_RESOURCE_TYPE: NOVA_HOST_DATASOURCE,
VProps.VITRAGE_CATEGORY: EntityCategory.ALARM,
VProps.VITRAGE_SAMPLE_TIMESTAMP:
'2016-03-17 11:33:32.443002+00:00'}
|
py | 1a519a47a1890fc13e23708de6dbf5babbf3e640 | from rest_framework.permissions import SAFE_METHODS, BasePermission
def is_vip(user, vip_group_name='VIP'):
is_vip = (vip_group_name in [group.name for group in user.groups.all()])
return is_vip
class ReadOnly(BasePermission):
def has_permission(self, request, view):
return request.method in SAFE_METHODS
|
py | 1a519b6b45edc84990b7ae34af6acfdfada1f925 | import argparse
import os
def parse_args(args) -> argparse.Namespace:
parser = argparse.ArgumentParser(description='Make submission')
parser.add_argument(
'-i', '--input',
help='path to input file',
type=str,
required=True
)
parser.add_argument(
'-o', '--output',
help='path to output file',
type=str,
required=True
)
return parser.parse_args(args)
def get_score(motif_list):
    """Motif score: total count of non-consensus characters over all columns."""
    col_list = [''.join(seq) for seq in zip(*motif_list)]
    max_c = sum([max([c.count(x) for x in 'ACGT']) for c in col_list])
    return len(motif_list[0])*len(motif_list) - max_c
def get_profile(motif_list):
    """Column-wise A/C/G/T frequencies with pseudocounts (Laplace smoothing)."""
    col_list = [''.join(seq) for seq in zip(*motif_list)]
    return [[(c.count(nuc) + 1) / (len(c) + 4) for nuc in 'ACGT'] for c in col_list]
def get_kmer(dna, k, profile):
    """Return the profile-most-probable k-mer in the given DNA string."""
nuc_loc = {
nucleotide: index for index, nucleotide in enumerate('ACGT')
}
max_prob = -1
for i in range(len(dna)-k+1):
current_prob = 1
for j, nuc in enumerate(dna[i:i+k]):
current_prob *= profile[j][nuc_loc[nuc]]
if current_prob > max_prob:
max_prob = current_prob
result = dna[i:i+k]
return result
def calculate(input_path: str) -> str:
    """Greedy motif search: read k, t and t DNA strings, return the best motifs."""
with open(input_path, 'r') as file:
k, t = map(int, file.readline().split())
dna_list = [line.strip() for line in file]
best_s = t*k
for i in range(len(dna_list[0])-k+1):
motifs = [dna_list[0][i:i+k]]
for j in range(1, t):
current_profile = get_profile(motifs)
motifs.append(get_kmer(dna_list[j], k, current_profile))
current_s = get_score(motifs)
if current_s < best_s:
best_s = current_s
best_motifs = motifs
return '\n'.join(best_motifs)
def main(args=None) -> None:
args=parse_args(args)
assert os.path.exists(args.input), 'no input file'
result = calculate(args.input)
open(args.output, 'w').write(result)
if __name__ == '__main__':
main()
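# A sketch of the expected input format, inferred from calculate() above; the
# numbers, sequences and script name are placeholders:
#
#   $ cat input.txt
#   3 5
#   GGCGTTCAGGCA
#   AAGAATCAGTCA
#   CAAGGAGTTCGC
#   CACGTCAATCAC
#   CAATAATATTCG
#   $ python greedy_motif_search.py -i input.txt -o output.txt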
|
py | 1a519e088ca6554c922fcaa03797b8f4590be657 | import asyncio
import traceback
import os
import logging
import sys
from pathlib import Path
from typing import Any, Dict, Optional
from chives.plotting.create_plots import resolve_plot_keys
from chives.plotters.plotters_util import run_plotter, run_command
log = logging.getLogger(__name__)
MADMAX_PLOTTER_DIR = "madmax-plotter"
def is_madmax_supported() -> bool:
return sys.platform.startswith("linux") or sys.platform in ["darwin", "win32", "cygwin"]
def get_madmax_install_path(plotters_root_path: Path) -> Path:
return plotters_root_path / MADMAX_PLOTTER_DIR
def get_madmax_package_path() -> Path:
return Path(os.path.dirname(sys.executable)) / "madmax"
def get_madmax_executable_path_for_ksize(plotters_root_path: Path, ksize: int = 29) -> Path:
madmax_dir: Path = get_madmax_package_path()
madmax_exec: str = "chia_plot"
# if ksize > 32:
# madmax_exec += "_k34" # Use the chia_plot_k34 executable for k-sizes > 32
if sys.platform in ["win32", "cygwin"]:
madmax_exec += ".exe"
if not madmax_dir.exists():
madmax_dir = get_madmax_install_path(plotters_root_path) / "build"
return madmax_dir / madmax_exec
def get_madmax_install_info(plotters_root_path: Path) -> Optional[Dict[str, Any]]:
info: Dict[str, Any] = {"display_name": "madMAx Plotter"}
installed: bool = False
supported: bool = is_madmax_supported()
    if get_madmax_executable_path_for_ksize(plotters_root_path).exists():
        version: Optional[str] = None
        try:
proc = run_command(
[os.fspath(get_madmax_executable_path_for_ksize(plotters_root_path)), "--version"],
"Failed to call madmax with --version option",
capture_output=True,
text=True,
)
version = proc.stdout.strip()
except Exception as e:
print(f"Failed to determine madmax version: {e}")
if version is not None:
installed = True
info["version"] = version
else:
installed = False
info["installed"] = installed
if installed is False:
info["can_install"] = supported
return info
def install_madmax(plotters_root_path: Path):
if is_madmax_supported():
print("Installing dependencies.")
if sys.platform.startswith("linux"):
run_command(
[
"sudo",
"apt",
"install",
"-y",
"libsodium-dev",
"cmake",
"g++",
"git",
"build-essential",
],
"Could not install dependencies",
)
if sys.platform.startswith("darwin"):
run_command(
[
"brew",
"install",
"libsodium",
"cmake",
"git",
"autoconf",
"automake",
"libtool",
"wget",
],
"Could not install dependencies",
)
run_command(["git", "--version"], "Error checking Git version.")
print("Cloning git repository.")
run_command(
[
"git",
"clone",
"https://github.com/Chia-Network/chia-plotter-madmax.git",
MADMAX_PLOTTER_DIR,
],
"Could not clone madmax git repository",
cwd=os.fspath(plotters_root_path),
)
print("Installing git submodules.")
madmax_path: str = os.fspath(get_madmax_install_path(plotters_root_path))
run_command(
[
"git",
"submodule",
"update",
"--init",
"--recursive",
],
"Could not initialize git submodules",
cwd=madmax_path,
)
print("Running install script.")
run_command(["./make_devel.sh"], "Error while running install script", cwd=madmax_path)
else:
raise RuntimeError("Platform not supported yet for madmax plotter.")
progress = {
"[P1] Table 1 took": 0.01,
"[P1] Table 2 took": 0.06,
"[P1] Table 3 took": 0.12,
"[P1] Table 4 took": 0.2,
"[P1] Table 5 took": 0.28,
"[P1] Table 6 took": 0.36,
"[P1] Table 7 took": 0.42,
"[P2] Table 7 rewrite took": 0.43,
"[P2] Table 6 rewrite took": 0.48,
"[P2] Table 5 rewrite took": 0.51,
"[P2] Table 4 rewrite took": 0.55,
"[P2] Table 3 rewrite took": 0.58,
"[P2] Table 2 rewrite took": 0.61,
"[P3-2] Table 2 took": 0.66,
"[P3-2] Table 3 took": 0.73,
"[P3-2] Table 4 took": 0.79,
"[P3-2] Table 5 took": 0.85,
"[P3-2] Table 6 took": 0.92,
"[P3-2] Table 7 took": 0.98,
}
def dir_with_trailing_slash(dir: str) -> str:
return dir if dir[-1] == os.path.sep else dir + os.path.sep
def plot_madmax(args, chives_root_path: Path, plotters_root_path: Path):
if sys.platform not in ["win32", "cygwin"]:
import resource
# madMAx has a ulimit -n requirement > 296:
# "Cannot open at least 296 files, please raise maximum open file limit in OS."
resource.setrlimit(resource.RLIMIT_NOFILE, (512, 512))
if not os.path.exists(get_madmax_executable_path_for_ksize(plotters_root_path, args.size)):
print("Installing madmax plotter.")
try:
install_madmax(plotters_root_path)
except Exception as e:
print(f"Exception while installing madmax plotter: {e}")
return
plot_keys = asyncio.get_event_loop().run_until_complete(
resolve_plot_keys(
None if args.farmerkey == b"" else args.farmerkey.hex(),
None,
None if args.pool_key == b"" else args.pool_key.hex(),
None if args.contract == "" else args.contract,
chives_root_path,
log,
args.connect_to_daemon,
)
)
call_args = []
call_args.append(os.fspath(get_madmax_executable_path_for_ksize(plotters_root_path, args.size)))
call_args.append("-f")
call_args.append(bytes(plot_keys.farmer_public_key).hex())
if plot_keys.pool_public_key is not None:
call_args.append("-p")
call_args.append(bytes(plot_keys.pool_public_key).hex())
call_args.append("-t")
# s if s[-1] == os.path.sep else s + os.path.sep
call_args.append(dir_with_trailing_slash(args.tmpdir))
if len(args.tmpdir2) > 0:
call_args.append("-2")
call_args.append(dir_with_trailing_slash(args.tmpdir2))
call_args.append("-d")
call_args.append(dir_with_trailing_slash(args.finaldir))
if plot_keys.pool_contract_address is not None:
call_args.append("-c")
call_args.append(plot_keys.pool_contract_address)
call_args.append("-n")
call_args.append(str(args.count))
call_args.append("-r")
call_args.append(str(args.threads))
call_args.append("-u")
call_args.append(str(args.buckets))
call_args.append("-v")
call_args.append(str(args.buckets3))
if args.waitforcopy:
call_args.append("-w")
if args.tmptoggle:
call_args.append("-G")
call_args.append("-K")
call_args.append(str(args.rmulti2))
call_args.append("-k")
call_args.append(str(args.size))
call_args.append("-x")
call_args.append("9699")
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(run_plotter(call_args, progress))
except Exception as e:
print(f"Exception while plotting: {type(e)} {e}")
print(f"Traceback: {traceback.format_exc()}")
|
py | 1a519e36308a3b68abf6cc3d825570a8896a8927 | import json
from ..constants.path import get_cache_path
from ..Utils.decorators import change_working_directory, cache_data
from ..Utils.utils import search
from .text import TextEntityAnnotation
TASK_TYPE = {
'TextEntityAnnotation':TextEntityAnnotation
}
@change_working_directory
@cache_data
def list_datasets(*args, **kwargs):
'''
Lists all the datasets in the user profile
'''
data = kwargs['data']
    if data is None:
print("Using Cached data...")
with open('./dumps.json', 'r') as f:
data = json.load(f)
dataset_names = list()
for task in data['annotation_data']:
dataset_names.append(task["task_name"])
return dataset_names
@change_working_directory
@cache_data
def show_dataset(dataset_name:str, samples:int=1,*args, **kwargs):
with open('./dumps.json', 'r') as f:
user_data = json.load(f)
user_dataset = search(user_data['annotation_data'], dataset_name)
if user_dataset == -1:
raise ValueError("Dataset not found. Check dataset name or recache the environment with `show_datasets(refresh=True)`")
task = TASK_TYPE[user_dataset['task_type']](user_dataset)
sents, ets = task.get_dataset(samples)
for i, (tokens, labels) in enumerate(zip(sents, ets)):
print(f"Sample {i}")
print(*tokens)
print(*labels)
print()
@change_working_directory
@cache_data
def get_dataset(dataset_name:str, *args, **kwargs):
with open('./dumps.json', 'r') as f:
user_data = json.load(f)
user_dataset = search(user_data['annotation_data'], dataset_name)
if user_dataset == -1:
raise ValueError("Dataset not found. Check dataset name or recache the environment with `show_datasets(refresh=True)`")
task = TASK_TYPE[user_dataset['task_type']](user_dataset)
return task
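# A minimal usage sketch; the change_working_directory/cache_data decorators are
# assumed to supply the cached `data` keyword (or a refresh flag) at call time:
#
#   names = list_datasets()
#   show_dataset(names[0], samples=2)
#   task = get_dataset(names[0])
#   sents, ets = task.get_dataset(2)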
|
py | 1a519ec4c6e76ea290bd8ec7f02b84326ca0be19 | from setuptools import setup
import os
from codecs import open
with open('README.rst', 'r', 'utf-8') as f:
readme = f.read()
here = os.path.abspath(os.path.dirname(__file__))
about = {}
with open(os.path.join(here, 'inputimeout', '__version__.py'),
'r', 'utf-8') as f:
exec(f.read(), about)
tests_requirements = [
'pytest-cov', 'pytest', 'flake8',
]
setup(
name=about['__title__'],
version=about['__version__'],
author=about['__author__'],
author_email=about['__author_email__'],
description=about['__description__'],
long_description=readme,
packages=['inputimeout'],
python_requires='>=3.4',
license=about['__license__'],
url=about['__url__'],
py_modules=['inputimeout'],
    keywords=['input', 'timeout', 'stdin'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
],
tests_require=tests_requirements,
)
|
py | 1a519f16ba841b1282e406dce8b01dfda0366755 | import atexit
import glob
import logging
import numpy as np
import os
import subprocess
from typing import Dict, List, Optional, Any
from mlagents_envs.side_channel.side_channel import SideChannel
from mlagents_envs.base_env import (
BaseEnv,
BatchedStepResult,
AgentGroupSpec,
AgentGroup,
AgentId,
)
from mlagents_envs.timers import timed, hierarchical_timer
from mlagents_envs.exception import (
UnityEnvironmentException,
UnityCommunicationException,
UnityActionException,
UnityTimeOutException,
)
from mlagents_envs.communicator_objects.command_pb2 import STEP, RESET
from mlagents_envs.rpc_utils import (
agent_group_spec_from_proto,
batched_step_result_from_proto,
)
from mlagents_envs.communicator_objects.unity_rl_input_pb2 import UnityRLInputProto
from mlagents_envs.communicator_objects.unity_rl_output_pb2 import UnityRLOutputProto
from mlagents_envs.communicator_objects.agent_action_pb2 import AgentActionProto
from mlagents_envs.communicator_objects.unity_output_pb2 import UnityOutputProto
from mlagents_envs.communicator_objects.unity_rl_initialization_input_pb2 import (
UnityRLInitializationInputProto,
)
from mlagents_envs.communicator_objects.unity_input_pb2 import UnityInputProto
from .rpc_communicator import RpcCommunicator
from sys import platform
import signal
import struct
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("mlagents_envs")
class UnityEnvironment(BaseEnv):
SCALAR_ACTION_TYPES = (int, np.int32, np.int64, float, np.float32, np.float64)
SINGLE_BRAIN_ACTION_TYPES = SCALAR_ACTION_TYPES + (list, np.ndarray)
API_VERSION = "API-12"
def __init__(
self,
file_name: Optional[str] = None,
worker_id: int = 0,
base_port: int = 5005,
seed: int = 0,
docker_training: bool = False,
no_graphics: bool = False,
timeout_wait: int = 60,
args: Optional[List[str]] = None,
side_channels: Optional[List[SideChannel]] = None,
):
"""
Starts a new unity environment and establishes a connection with the environment.
Notice: Currently communication between Unity and Python takes place over an open socket without authentication.
Ensure that the network where training takes place is secure.
:string file_name: Name of Unity environment binary.
:int base_port: Baseline port number to connect to Unity environment over. worker_id increments over this.
:int worker_id: Number to add to communication port (5005) [0]. Used for asynchronous agent scenarios.
:bool docker_training: Informs this class whether the process is being run within a container.
:bool no_graphics: Whether to run the Unity simulator in no-graphics mode
:int timeout_wait: Time (in seconds) to wait for connection from environment.
:bool train_mode: Whether to run in training mode, speeding up the simulation, by default.
:list args: Addition Unity command line arguments
:list side_channels: Additional side channel for no-rl communication with Unity
"""
args = args or []
atexit.register(self._close)
self.port = base_port + worker_id
self._buffer_size = 12000
self._version_ = UnityEnvironment.API_VERSION
# If true, this means the environment was successfully loaded
self._loaded = False
# The process that is started. If None, no process was started
self.proc1 = None
self.timeout_wait: int = timeout_wait
self.communicator = self.get_communicator(worker_id, base_port, timeout_wait)
self.worker_id = worker_id
self.side_channels: Dict[int, SideChannel] = {}
if side_channels is not None:
for _sc in side_channels:
if _sc.channel_type in self.side_channels:
raise UnityEnvironmentException(
"There cannot be two side channels with the same channel type {0}.".format(
_sc.channel_type
)
)
self.side_channels[_sc.channel_type] = _sc
# If the environment name is None, a new environment will not be launched
# and the communicator will directly try to connect to an existing unity environment.
# If the worker-id is not 0 and the environment name is None, an error is thrown
if file_name is None and worker_id != 0:
raise UnityEnvironmentException(
"If the environment name is None, "
"the worker-id must be 0 in order to connect with the Editor."
)
if file_name is not None:
self.executable_launcher(file_name, docker_training, no_graphics, args)
else:
logger.info(
f"Listening on port {self.port}. "
f"Start training by pressing the Play button in the Unity Editor."
)
self._loaded = True
rl_init_parameters_in = UnityRLInitializationInputProto(seed=seed)
try:
aca_output = self.send_academy_parameters(rl_init_parameters_in)
aca_params = aca_output.rl_initialization_output
except UnityTimeOutException:
self._close()
raise
# TODO : think of a better way to expose the academyParameters
self._unity_version = aca_params.version
if self._unity_version != self._version_:
self._close()
raise UnityEnvironmentException(
f"The API number is not compatible between Unity and python. "
f"Python API: {self._version_}, Unity API: {self._unity_version}.\n"
f"Please go to https://github.com/Unity-Technologies/ml-agents/releases/tag/latest_release"
f"to download the latest version of ML-Agents."
)
self._env_state: Dict[str, BatchedStepResult] = {}
self._env_specs: Dict[str, AgentGroupSpec] = {}
self._env_actions: Dict[str, np.ndarray] = {}
self._is_first_message = True
self._update_group_specs(aca_output)
@staticmethod
def get_communicator(worker_id, base_port, timeout_wait):
return RpcCommunicator(worker_id, base_port, timeout_wait)
def executable_launcher(self, file_name, docker_training, no_graphics, args):
cwd = os.getcwd()
file_name = (
file_name.strip()
.replace(".app", "")
.replace(".exe", "")
.replace(".x86_64", "")
.replace(".x86", "")
)
true_filename = os.path.basename(os.path.normpath(file_name))
logger.debug("The true file name is {}".format(true_filename))
launch_string = None
if platform == "linux" or platform == "linux2":
candidates = glob.glob(os.path.join(cwd, file_name) + ".x86_64")
if len(candidates) == 0:
candidates = glob.glob(os.path.join(cwd, file_name) + ".x86")
if len(candidates) == 0:
candidates = glob.glob(file_name + ".x86_64")
if len(candidates) == 0:
candidates = glob.glob(file_name + ".x86")
if len(candidates) > 0:
launch_string = candidates[0]
elif platform == "darwin":
candidates = glob.glob(
os.path.join(
cwd, file_name + ".app", "Contents", "MacOS", true_filename
)
)
if len(candidates) == 0:
candidates = glob.glob(
os.path.join(file_name + ".app", "Contents", "MacOS", true_filename)
)
if len(candidates) == 0:
candidates = glob.glob(
os.path.join(cwd, file_name + ".app", "Contents", "MacOS", "*")
)
if len(candidates) == 0:
candidates = glob.glob(
os.path.join(file_name + ".app", "Contents", "MacOS", "*")
)
if len(candidates) > 0:
launch_string = candidates[0]
elif platform == "win32":
candidates = glob.glob(os.path.join(cwd, file_name + ".exe"))
if len(candidates) == 0:
candidates = glob.glob(file_name + ".exe")
if len(candidates) > 0:
launch_string = candidates[0]
if launch_string is None:
self._close()
raise UnityEnvironmentException(
"Couldn't launch the {0} environment. "
"Provided filename does not match any environments.".format(
true_filename
)
)
else:
logger.debug("This is the launch string {}".format(launch_string))
# Launch Unity environment
if not docker_training:
subprocess_args = [launch_string]
if no_graphics:
subprocess_args += ["-nographics", "-batchmode"]
subprocess_args += ["--port", str(self.port)]
subprocess_args += args
try:
self.proc1 = subprocess.Popen(
subprocess_args,
# start_new_session=True means that signals to the parent python process
# (e.g. SIGINT from keyboard interrupt) will not be sent to the new process on POSIX platforms.
# This is generally good since we want the environment to have a chance to shutdown,
# but may be undesirable in come cases; if so, we'll add a command-line toggle.
# Note that on Windows, the CTRL_C signal will still be sent.
start_new_session=True,
)
except PermissionError as perm:
# This is likely due to missing read or execute permissions on file.
raise UnityEnvironmentException(
f"Error when trying to launch environment - make sure "
f"permissions are set correctly. For example "
f'"chmod -R 755 {launch_string}"'
) from perm
else:
# Comments for future maintenance:
# xvfb-run is a wrapper around Xvfb, a virtual xserver where all
# rendering is done to virtual memory. It automatically creates a
# new virtual server automatically picking a server number `auto-servernum`.
# The server is passed the arguments using `server-args`, we are telling
# Xvfb to create Screen number 0 with width 640, height 480 and depth 24 bits.
# Note that 640 X 480 are the default width and height. The main reason for
# us to add this is because we'd like to change the depth from the default
# of 8 bits to 24.
# Unfortunately, this means that we will need to pass the arguments through
# a shell which is why we set `shell=True`. Now, this adds its own
# complications. E.g SIGINT can bounce off the shell and not get propagated
# to the child processes. This is why we add `exec`, so that the shell gets
# launched, the arguments are passed to `xvfb-run`. `exec` replaces the shell
# we created with `xvfb`.
#
docker_ls = (
"exec xvfb-run --auto-servernum"
" --server-args='-screen 0 640x480x24'"
" {0} --port {1}"
).format(launch_string, str(self.port))
self.proc1 = subprocess.Popen(
docker_ls,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
)
def _update_group_specs(self, output: UnityOutputProto) -> None:
init_output = output.rl_initialization_output
for brain_param in init_output.brain_parameters:
# Each BrainParameter in the rl_initialization_output should have at least one AgentInfo
# Get that agent, because we need some of its observations.
agent_infos = output.rl_output.agentInfos[brain_param.brain_name]
if agent_infos.value:
agent = agent_infos.value[0]
new_spec = agent_group_spec_from_proto(brain_param, agent)
self._env_specs[brain_param.brain_name] = new_spec
logger.info(f"Connected new brain:\n{brain_param.brain_name}")
def _update_state(self, output: UnityRLOutputProto) -> None:
"""
Collects experience information from all external brains in environment at current step.
"""
for brain_name in self._env_specs.keys():
if brain_name in output.agentInfos:
agent_info_list = output.agentInfos[brain_name].value
self._env_state[brain_name] = batched_step_result_from_proto(
agent_info_list, self._env_specs[brain_name]
)
else:
self._env_state[brain_name] = BatchedStepResult.empty(
self._env_specs[brain_name]
)
self._parse_side_channel_message(self.side_channels, output.side_channel)
def reset(self) -> None:
if self._loaded:
outputs = self.communicator.exchange(self._generate_reset_input())
if outputs is None:
raise UnityCommunicationException("Communicator has stopped.")
self._update_group_specs(outputs)
rl_output = outputs.rl_output
self._update_state(rl_output)
self._is_first_message = False
self._env_actions.clear()
else:
raise UnityEnvironmentException("No Unity environment is loaded.")
@timed
def step(self) -> None:
if self._is_first_message:
return self.reset()
if not self._loaded:
raise UnityEnvironmentException("No Unity environment is loaded.")
# fill the blanks for missing actions
for group_name in self._env_specs:
if group_name not in self._env_actions:
n_agents = 0
if group_name in self._env_state:
n_agents = self._env_state[group_name].n_agents()
self._env_actions[group_name] = self._env_specs[
group_name
].create_empty_action(n_agents)
step_input = self._generate_step_input(self._env_actions)
with hierarchical_timer("communicator.exchange"):
outputs = self.communicator.exchange(step_input)
if outputs is None:
raise UnityCommunicationException("Communicator has stopped.")
self._update_group_specs(outputs)
rl_output = outputs.rl_output
self._update_state(rl_output)
self._env_actions.clear()
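    # A minimal interaction sketch for the API above; the build name "MyEnv" and
    # single-group handling are illustrative assumptions:
    #
    #   env = UnityEnvironment(file_name="MyEnv", worker_id=0)
    #   env.reset()
    #   group = env.get_agent_groups()[0]
    #   spec = env.get_agent_group_spec(group)
    #   step_result = env.get_step_result(group)
    #   env.set_actions(group, spec.create_empty_action(step_result.n_agents()))
    #   env.step()
    #   env.close()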
def get_agent_groups(self) -> List[AgentGroup]:
return list(self._env_specs.keys())
def _assert_group_exists(self, agent_group: str) -> None:
if agent_group not in self._env_specs:
raise UnityActionException(
"The group {0} does not correspond to an existing agent group "
"in the environment".format(agent_group)
)
def set_actions(self, agent_group: AgentGroup, action: np.ndarray) -> None:
self._assert_group_exists(agent_group)
if agent_group not in self._env_state:
return
spec = self._env_specs[agent_group]
expected_type = np.float32 if spec.is_action_continuous() else np.int32
expected_shape = (self._env_state[agent_group].n_agents(), spec.action_size)
if action.shape != expected_shape:
raise UnityActionException(
"The group {0} needs an input of dimension {1} but received input of dimension {2}".format(
agent_group, expected_shape, action.shape
)
)
if action.dtype != expected_type:
action = action.astype(expected_type)
self._env_actions[agent_group] = action
def set_action_for_agent(
self, agent_group: AgentGroup, agent_id: AgentId, action: np.ndarray
) -> None:
self._assert_group_exists(agent_group)
if agent_group not in self._env_state:
return
spec = self._env_specs[agent_group]
expected_shape = (spec.action_size,)
if action.shape != expected_shape:
raise UnityActionException(
"The Agent {0} in group {1} needs an input of dimension {2} but received input of dimension {3}".format(
agent_id, agent_group, expected_shape, action.shape
)
)
expected_type = np.float32 if spec.is_action_continuous() else np.int32
if action.dtype != expected_type:
action = action.astype(expected_type)
if agent_group not in self._env_actions:
self._env_actions[agent_group] = spec.create_empty_action(
self._env_state[agent_group].n_agents()
)
try:
index = np.where(self._env_state[agent_group].agent_id == agent_id)[0][0]
except IndexError as ie:
raise IndexError(
"agent_id {} is did not request a decision at the previous step".format(
agent_id
)
) from ie
self._env_actions[agent_group][index] = action
def get_step_result(self, agent_group: AgentGroup) -> BatchedStepResult:
self._assert_group_exists(agent_group)
return self._env_state[agent_group]
def get_agent_group_spec(self, agent_group: AgentGroup) -> AgentGroupSpec:
self._assert_group_exists(agent_group)
return self._env_specs[agent_group]
def close(self):
"""
Sends a shutdown signal to the unity environment, and closes the socket connection.
"""
if self._loaded:
self._close()
else:
raise UnityEnvironmentException("No Unity environment is loaded.")
def _close(self):
self._loaded = False
self.communicator.close()
if self.proc1 is not None:
# Wait a bit for the process to shutdown, but kill it if it takes too long
try:
self.proc1.wait(timeout=self.timeout_wait)
signal_name = self.returncode_to_signal_name(self.proc1.returncode)
signal_name = f" ({signal_name})" if signal_name else ""
return_info = f"Environment shut down with return code {self.proc1.returncode}{signal_name}."
logger.info(return_info)
except subprocess.TimeoutExpired:
logger.info("Environment timed out shutting down. Killing...")
self.proc1.kill()
# Set to None so we don't try to close multiple times.
self.proc1 = None
@classmethod
def _flatten(cls, arr: Any) -> List[float]:
"""
Converts arrays to list.
:param arr: numpy vector.
:return: flattened list.
"""
if isinstance(arr, cls.SCALAR_ACTION_TYPES):
arr = [float(arr)]
if isinstance(arr, np.ndarray):
arr = arr.tolist()
if len(arr) == 0:
return arr
if isinstance(arr[0], np.ndarray):
# pylint: disable=no-member
arr = [item for sublist in arr for item in sublist.tolist()]
if isinstance(arr[0], list):
# pylint: disable=not-an-iterable
arr = [item for sublist in arr for item in sublist]
arr = [float(x) for x in arr]
return arr
@staticmethod
def _parse_side_channel_message(
side_channels: Dict[int, SideChannel], data: bytes
) -> None:
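# Wire layout assumed by the unpacking below: the payload is a sequence of
# [int32 channel_type][int32 message_len][message_len bytes] records, with the
# two integers packed little-endian ("<ii").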
offset = 0
while offset < len(data):
try:
channel_type, message_len = struct.unpack_from("<ii", data, offset)
offset = offset + 8
message_data = data[offset : offset + message_len]
offset = offset + message_len
except Exception:
raise UnityEnvironmentException(
"There was a problem reading a message in a SideChannel. "
"Please make sure the version of MLAgents in Unity is "
"compatible with the Python version."
)
if len(message_data) != message_len:
raise UnityEnvironmentException(
"The message received by the side channel {0} was "
"unexpectedly short. Make sure your Unity Environment "
"sending side channel data properly.".format(channel_type)
)
if channel_type in side_channels:
side_channels[channel_type].on_message_received(message_data)
else:
logger.warning(
"Unknown side channel data received. Channel type "
": {0}.".format(channel_type)
)
@staticmethod
def _generate_side_channel_data(side_channels: Dict[int, SideChannel]) -> bytearray:
result = bytearray()
for channel_type, channel in side_channels.items():
for message in channel.message_queue:
result += struct.pack("<ii", channel_type, len(message))
result += message
channel.message_queue = []
return result
@timed
def _generate_step_input(
self, vector_action: Dict[str, np.ndarray]
) -> UnityInputProto:
rl_in = UnityRLInputProto()
for b in vector_action:
n_agents = self._env_state[b].n_agents()
if n_agents == 0:
continue
for i in range(n_agents):
action = AgentActionProto(vector_actions=vector_action[b][i])
rl_in.agent_actions[b].value.extend([action])
rl_in.command = STEP
rl_in.side_channel = bytes(self._generate_side_channel_data(self.side_channels))
return self.wrap_unity_input(rl_in)
def _generate_reset_input(self) -> UnityInputProto:
rl_in = UnityRLInputProto()
rl_in.command = RESET
rl_in.side_channel = bytes(self._generate_side_channel_data(self.side_channels))
return self.wrap_unity_input(rl_in)
def send_academy_parameters(
self, init_parameters: UnityRLInitializationInputProto
) -> UnityOutputProto:
inputs = UnityInputProto()
inputs.rl_initialization_input.CopyFrom(init_parameters)
return self.communicator.initialize(inputs)
@staticmethod
def wrap_unity_input(rl_input: UnityRLInputProto) -> UnityInputProto:
result = UnityInputProto()
result.rl_input.CopyFrom(rl_input)
return result
@staticmethod
def returncode_to_signal_name(returncode: int) -> Optional[str]:
"""
Try to convert return codes into their corresponding signal name.
E.g. returncode_to_signal_name(-2) -> "SIGINT"
"""
try:
# A negative value -N indicates that the child was terminated by signal N (POSIX only).
s = signal.Signals(-returncode) # pylint: disable=no-member
return s.name
except Exception:
# Should generally be a ValueError, but catch everything just in case.
return None
|
py | 1a51a000351c44c27b4fc3765e1515c6e5467cc7 | #! /usr/bin/env python2
#
# This file is part of khmer, http://github.com/ged-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2015. It is licensed under
# the three-clause BSD license; see doc/LICENSE.txt.
# Contact: [email protected]
#
# pylint: disable=invalid-name,missing-docstring
"""
Take two files containing left & right reads from a paired-end sequencing run,
and interleave them.
% scripts/interleave-reads.py <R1> <R2> [ -o <outputfile> ]
By default, output is sent to stdout; or use -o. Use '-h' for parameter help.
"""
# TODO: take fa as well?
# support gzip option?
import screed
import sys
import itertools
import os
import textwrap
import argparse
import khmer
from khmer.kfile import check_file_status, check_space
from khmer.khmer_args import info
from khmer.utils import (write_record_pair, check_is_left, check_is_right,
check_is_pair)
def get_parser():
epilog = """
The output is an interleaved set of reads, with each read in <R1> paired
with a read in <R2>. By default, the output goes to stdout unless
:option:`-o`/:option:`--output` is specified.
As a "bonus", this file ensures that if read names are not already
formatted properly, they are reformatted consistently, such that
they look like the pre-1.8 Casava format (@name/1, @name/2).
Example::
""" " interleave-reads.py tests/test-data/paired.fq.1 tests/test-data/paired.fq.2 -o paired.fq" # noqa
parser = argparse.ArgumentParser(
description='Produce interleaved files from R1/R2 paired files',
epilog=textwrap.dedent(epilog),
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('infiles', nargs='+')
parser.add_argument('-o', '--output', metavar="filename",
type=argparse.FileType('w'),
default=sys.stdout)
parser.add_argument('--version', action='version', version='%(prog)s '
+ khmer.__version__)
parser.add_argument('-f', '--force', default=False, action='store_true',
help='Overwrite output file if it exists')
return parser
def main():
info('interleave-reads.py')
args = get_parser().parse_args()
for _ in args.infiles:
check_file_status(_, args.force)
check_space(args.infiles, args.force)
s1_file = args.infiles[0]
if len(args.infiles) == 2:
s2_file = args.infiles[1]
else:
s2_file = s1_file.replace('_R1_', '_R2_')
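# e.g. a hypothetical 'sample_R1_001.fastq' would be paired with 'sample_R2_001.fastq'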
if s1_file == s2_file:
print >>sys.stderr, ("ERROR: given only one filename, that "
"doesn't contain _R1_. Exiting.")
sys.exit(1)
print >> sys.stderr, ("given only one file; "
"guessing that R2 file is %s" % s2_file)
fail = False
if not os.path.exists(s1_file):
print >> sys.stderr, "Error! R1 file %s does not exist" % s1_file
fail = True
if not os.path.exists(s2_file):
print >> sys.stderr, "Error! R2 file %s does not exist" % s2_file
fail = True
if fail and not args.force:
sys.exit(1)
print >> sys.stderr, "Interleaving:\n\t%s\n\t%s" % (s1_file, s2_file)
counter = 0
screed_iter_1 = screed.open(s1_file, parse_description=False)
screed_iter_2 = screed.open(s2_file, parse_description=False)
for read1, read2 in itertools.izip_longest(screed_iter_1, screed_iter_2):
if read1 is None or read2 is None:
print >>sys.stderr, ("ERROR: Input files contain different number"
" of records.")
sys.exit(1)
if counter % 100000 == 0:
print >> sys.stderr, '...', counter, 'pairs'
counter += 1
name1 = read1.name
if not check_is_left(name1):
name1 += '/1'
name2 = read2.name
if not check_is_right(name2):
name2 += '/2'
read1.name = name1
read2.name = name2
if not check_is_pair(read1, read2):
print >>sys.stderr, "ERROR: This doesn't look like paired data! " \
"%s %s" % (read1.name, read2.name)
sys.exit(1)
write_record_pair(read1, read2, args.output)
print >> sys.stderr, 'final: interleaved %d pairs' % counter
print >> sys.stderr, 'output written to', args.output.name
if __name__ == '__main__':
main()
|
py | 1a51a0c2773fdb31140c0d508300a2f759b83b46 | # -*- coding: utf-8 -*-
"""
Project: Appraise evaluation system
Author: Christian Federmann <[email protected]>
"""
import logging
import uuid
from datetime import datetime
from xml.etree.ElementTree import fromstring, ParseError, tostring
from django.dispatch import receiver
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
from django.core.validators import RegexValidator
from django.db import models
from django.template import Context
from django.template.loader import get_template
from appraise.wmt16.validators import validate_hit_xml, validate_segment_xml
from appraise.settings import LOG_LEVEL, LOG_HANDLER
from appraise.utils import datetime_to_seconds, AnnotationTask
# Setup logging support.
logging.basicConfig(level=LOG_LEVEL)
LOGGER = logging.getLogger('appraise.wmt16.models')
LOGGER.addHandler(LOG_HANDLER)
# How many users can annotate a given HIT
MAX_USERS_PER_HIT = 1
LANGUAGE_PAIR_CHOICES = (
# News task languages
('eng2ces', 'English → Czech'),
('eng2deu', 'English → German'),
('eng2fin', 'English → Finnish'),
('eng2rom', 'English → Romanian'),
('eng2rus', 'English → Russian'),
('eng2trk', 'English → Turkish'),
('ces2eng', 'Czech → English'),
('deu2eng', 'German → English'),
('fin2eng', 'Finnish → English'),
('rom2eng', 'Romanian → English'),
('rus2eng', 'Russian → English'),
('trk2eng', 'Turkish → English'),
# IT task languages
('eng2bul', 'English → Bulgarian'),
('eng2esn', 'English → Spanish'),
('eng2baq', 'English → Basque'),
('eng2nld', 'English → Dutch'),
('eng2ptb', 'English → Portuguese'),
)
ISO639_3_TO_NAME_MAPPING = {
'ces': 'Czech', 'cze': 'Czech', 'deu': 'German', 'ger': 'German',
'eng': 'English', 'esn': 'Spanish', 'spa': 'Spanish', 'fra': 'French',
'fre': 'French', 'rus': 'Russian', 'fin': 'Finnish', 'rom': 'Romanian',
'ron': 'Romanian', 'trk': 'Turkish', 'tur': 'Turkish', 'eus': 'Basque',
'baq': 'Basque', 'bul': 'Bulgarian', 'nld': 'Dutch', 'ptb': 'Portuguese',
}
GROUP_HIT_REQUIREMENTS = {
# volunteers
'MSR': 0,
'MTMA': 0,
# NewsTask participants
'Aalto': 100,
'Abu-Matran': 300,
'AFRL-MITLL': 400,
'AMU-UEDIN': 200,
'CMU': 100,
'CUNI': 500,
'JHU': 1600,
'KIT': 300,
'KIT-LIMSI': 100,
'LIMSI': 300,
'LMU-CUNI': 100,
'METAMIND': 100,
'TBTK': 200,
'Cambridge': 100,
'NRC': 100,
'NYU-UMontreal': 400,
'PJATK': 200,
'PROMT': 800, # 500 NewsTask, 300 ITTask
'QT21/HimL': 100,
'RWTH': 100,
'UEdin': 1900,
'UH': 400,
'USFD': 100,
'UUT': 100,
'YSDA': 200,
'JXNU-IIP': 100,
'UPF': 100,
'ParFDA': 200,
# ITTask participants
'Berlin': 600,
'Hamburg': 200,
'Prague': 400,
'Amsterdam': 200,
'Saarbrücken': 100,
'Groningen': 300,
'Sofia': 200,
'Donostia': 500,
'Lisbon': 300,
}
# pylint: disable-msg=E1101
class HIT(models.Model):
"""
HIT object model for wmt16 ranking evaluation.
Each HIT contains 3 RankingTask instances for 3 consecutive sentences.
"""
hit_id = models.CharField(
max_length=8,
db_index=True,
unique=True,
editable=False,
help_text="Unique identifier for this HIT instance.",
verbose_name="HIT identifier"
)
block_id = models.IntegerField(
db_index=True,
help_text="Block ID for this HIT instance.",
verbose_name="HIT block identifier"
)
hit_xml = models.TextField(
help_text="XML source for this HIT instance.",
validators=[validate_hit_xml],
verbose_name="HIT source XML"
)
language_pair = models.CharField(
max_length=7,
choices=LANGUAGE_PAIR_CHOICES,
db_index=True,
help_text="Language pair choice for this HIT instance.",
verbose_name="Language pair"
)
# This is derived from hit_xml and NOT stored in the database.
hit_attributes = {}
users = models.ManyToManyField(
User,
blank=True,
db_index=True,
null=True,
help_text="Users who work on this HIT instance."
)
active = models.BooleanField(
db_index=True,
default=True,
help_text="Indicates that this HIT instance is still in use.",
verbose_name="Active?"
)
mturk_only = models.BooleanField(
db_index=True,
default=False,
help_text="Indicates that this HIT instance is ONLY usable via MTurk.",
verbose_name="MTurk only?"
)
completed = models.BooleanField(
db_index=True,
default=False,
help_text="Indicates that this HIT instance is completed.",
verbose_name="Completed?"
)
assigned = models.DateTimeField(blank=True, null=True, editable=False)
finished = models.DateTimeField(blank=True, null=True, editable=False)
class Meta:
"""
Metadata options for the HIT object model.
"""
ordering = ('id', 'hit_id', 'language_pair', 'block_id')
verbose_name = "HIT instance"
verbose_name_plural = "HIT instances"
# pylint: disable-msg=E1002
def __init__(self, *args, **kwargs):
"""
Makes sure that self.hit_attributes are available.
"""
super(HIT, self).__init__(*args, **kwargs)
if not self.hit_id:
self.hit_id = self.__class__._create_hit_id()
# If a hit_xml file is available, populate self.hit_attributes.
self.reload_dynamic_fields()
def __unicode__(self):
"""
Returns a Unicode String for this HIT object.
"""
return u'<HIT id="{0}" hit="{1}" block="{2}" language-pair="{3}">' \
.format(self.id, self.hit_id, self.block_id, self.language_pair)
@classmethod
def _create_hit_id(cls):
"""Creates a random UUID-4 8-digit hex number for use as HIT id."""
new_id = uuid.uuid4().hex[:8]
while cls.objects.filter(hit_id=new_id):
new_id = uuid.uuid4().hex[:8]
return new_id
@classmethod
def compute_remaining_hits(cls, language_pair=None):
"""
Computes the number of remaining HITs in the database.
If language_pair is given, it constraints on the HITs' language pair.
"""
hits_qs = cls.objects.filter(active=True, mturk_only=False, completed=False)
if language_pair:
hits_qs = hits_qs.filter(language_pair=language_pair)
available = 0
for hit in hits_qs:
# Before we checked if `hit.users.count() < 3`.
if hit.users.count() < MAX_USERS_PER_HIT:
available = available + 1
# Set active HITs to completed if there exists at least one result.
else:
hit.completed = True
hit.save()
return available
@classmethod
def compute_status_for_user(cls, user, project=None, language_pair=None):
"""
Computes the HIT completion status for the given user.
If project is given, it constraints on the HITs' project.
If language_pair is given, it constraints on the HITs' language pair.
Returns a list containing:
- number of completed HITs;
- average duration per HIT in seconds;
- total duration in seconds.
"""
hits_qs = cls.objects.filter(users=user)
if project:
project_instance = Project.objects.filter(id=project.id)
if project_instance.exists():
hits_qs = hits_qs.filter(project=project_instance[0])
else:
return [0, 0, 0]
if language_pair:
hits_qs = hits_qs.filter(language_pair=language_pair)
_completed_hits = hits_qs.count()
_durations = []
for hit in hits_qs:
_results = RankingResult.objects.filter(user=user, item__hit=hit)
_durations.extend(_results.values_list('duration', flat=True))
_durations = [datetime_to_seconds(d) for d in _durations if d]
_total_duration = sum(_durations)
_average_duration = _total_duration / float(_completed_hits or 1)
current_status = []
current_status.append(_completed_hits)
current_status.append(_average_duration)
current_status.append(_total_duration)
return current_status
@classmethod
def compute_status_for_group(cls, group, project=None, language_pair=None):
"""
Computes the HIT completion status for users of the given group.
"""
combined = [0, 0, 0]
for user in group.user_set.all():
_user_status = cls.compute_status_for_user(user, project, language_pair)
combined[0] = combined[0] + _user_status[0]
combined[1] = combined[1] + _user_status[1]
combined[2] = combined[2] + _user_status[2]
combined[1] = combined[2] / float(combined[0] or 1)
return combined
# pylint: disable-msg=E1002
def save(self, *args, **kwargs):
"""
Makes sure that validation is run before saving an object instance.
"""
# Enforce validation before saving HIT objects.
if not self.id:
self.full_clean()
# We have to call save() here to get an id for this instance.
super(HIT, self).save(*args, **kwargs)
_tree = fromstring(self.hit_xml.encode("utf-8"))
for _child in _tree:
new_item = RankingTask(hit=self, item_xml=tostring(_child))
new_item.save()
# Check ranking tasks to update
try:
related_result = RankingResult.objects.filter(item__hit=self).latest('completion')
self.finished = related_result.completion
except RankingResult.DoesNotExist:
pass
super(HIT, self).save(*args, **kwargs)
def get_absolute_url(self):
"""
Returns the URL for this HIT object instance.
"""
hit_handler_view = 'appraise.wmt16.views.hit_handler'
kwargs = {'hit_id': self.hit_id}
return reverse(hit_handler_view, kwargs=kwargs)
def get_status_url(self):
"""
Returns the status URL for this HIT object instance.
"""
status_handler_view = 'appraise.wmt16.views.status_view'
kwargs = {'hit_id': self.hit_id}
return reverse(status_handler_view, kwargs=kwargs)
def reload_dynamic_fields(self):
"""
Reloads hit_attributes from self.hit_xml contents.
"""
# If a hit_xml file is available, populate self.hit_attributes.
if self.hit_xml:
try:
_hit_xml = fromstring(self.hit_xml.encode("utf-8"))
self.hit_attributes = {}
for key, value in _hit_xml.attrib.items():
self.hit_attributes[key] = value
# For parse errors, set self.hit_attributes s.t. it gives an
# error message to the user for debugging.
except (ParseError), msg:
self.hit_attributes = {'note': msg}
def export_to_xml(self):
"""
Renders this HIT as XML String.
"""
template = get_template('wmt16/task_result.xml')
# If a hit_xml file is available, populate self.hit_attributes.
self.reload_dynamic_fields()
_attr = self.hit_attributes.items()
attributes = ' '.join(['{}="{}"'.format(k, v) for k, v in _attr])
results = []
for item in RankingTask.objects.filter(hit=self):
item.reload_dynamic_fields()
try:
source_id = item.source[1]["id"]
except:
source_id = -1
_results = []
for _result in item.rankingresult_set.all():
_results.append(_result.export_to_xml())
results.append((source_id, _results))
context = {'hit_id': self.hit_id, 'attributes': attributes,
'results': results}
return template.render(Context(context))
def export_to_apf(self):
"""
Exports this HIT's results to Artstein and Poesio (2007) format.
"""
results = []
for item in RankingTask.objects.filter(hit=self):
for _result in item.rankingresult_set.all():
_apf_output = _result.export_to_apf()
if _apf_output:
results.append(_apf_output)
return u"\n".join(results)
def compute_agreement_scores(self):
"""
Computes alpha, kappa, pi and Bennett's S agreement scores using NLTK.
"""
_raw = self.export_to_apf()
if not _raw:
return None
else:
_raw = _raw.split('\n')
# Convert raw results data into data triples and create a new
# AnnotationTask object for computation of agreement scores.
_data = [_line.split(',') for _line in _raw]
try:
_data = [(x[0], x[1], x[2]) for x in _data]
except IndexError:
return None
# Compute alpha, kappa, pi, and S scores.
_task = AnnotationTask(data=_data)
try:
_alpha = _task.alpha()
_kappa = _task.kappa()
_pi = _task.pi()
# pylint: disable-msg=C0103
_S = _task.S()
except ZeroDivisionError, msg:
LOGGER.debug(msg)
return None
return (_alpha, _kappa, _pi, _S)
class Project(models.Model):
"""
Defines object model for an annotation project
"""
# Project names are string-based and should match regex [a-zA-Z0-9\-]{1,100}
name = models.CharField(
blank=False,
db_index=True,
max_length=100,
null=False,
unique=True,
validators=[RegexValidator(regex=r'[a-zA-Z0-9\-]{1,100}')],
)
# Users working on this project
users = models.ManyToManyField(
User,
blank=True,
db_index=True,
null=True,
)
# HITs belonging to this project
HITs = models.ManyToManyField(
HIT,
blank=True,
db_index=True,
null=True,
)
def __str__(self):
return '<project id="{0}" name="{1}" users="{2}" HITs="{3}" />'.format(self.id, self.name, self.users.count(), self.HITs.count())
class RankingTask(models.Model):
"""
RankingTask object model for wmt16 ranking evaluation.
"""
hit = models.ForeignKey(
HIT,
db_index=True
)
item_xml = models.TextField(
help_text="XML source for this RankingTask instance.",
validators=[validate_segment_xml],
verbose_name="RankingTask source XML"
)
# These fields are derived from item_xml and NOT stored in the database.
attributes = None
source = None
reference = None
translations = None
class Meta:
"""
Metadata options for the RankingTask object model.
"""
ordering = ('id',)
verbose_name = "RankingTask instance"
verbose_name_plural = "RankingTask instances"
# pylint: disable-msg=E1002
def __init__(self, *args, **kwargs):
"""
Makes sure that self.translations are available.
"""
super(RankingTask, self).__init__(*args, **kwargs)
# If item_xml is available, populate dynamic fields.
self.reload_dynamic_fields()
def __unicode__(self):
"""
Returns a Unicode String for this RankingTask object.
"""
return u'<ranking-task id="{0}">'.format(self.id)
# pylint: disable-msg=E1002
def save(self, *args, **kwargs):
"""
Makes sure that validation is run before saving an object instance.
"""
# Enforce validation before saving RankingTask objects.
self.full_clean()
super(RankingTask, self).save(*args, **kwargs)
def reload_dynamic_fields(self):
"""
Reloads source, reference, and translations from self.item_xml.
"""
if self.item_xml:
try:
_item_xml = fromstring(self.item_xml)
self.attributes = _item_xml.attrib
_source = _item_xml.find('source')
if _source is not None:
self.source = (_source.text, _source.attrib)
_reference = _item_xml.find('reference')
if _reference is not None:
self.reference = (_reference.text, _reference.attrib)
self.translations = []
for _translation in _item_xml.iterfind('translation'):
self.translations.append((_translation.text,
_translation.attrib))
except ParseError:
self.source = None
self.reference = None
self.translations = None
class RankingResult(models.Model):
"""
Evaluation Result object model.
"""
item = models.ForeignKey(
RankingTask,
db_index=True
)
user = models.ForeignKey(
User,
db_index=True
)
duration = models.TimeField(blank=True, null=True, editable=False)
completion = models.DateTimeField(auto_now_add=True, blank=True, null=True, editable=False)
def readable_duration(self):
"""
Returns a readable version of the this RankingResult's duration.
"""
return '{}'.format(self.duration)
raw_result = models.TextField(editable=False, blank=False)
results = None
systems = 0
class Meta:
"""
Metadata options for the RankingResult object model.
"""
ordering = ('id',)
verbose_name = "RankingResult object"
verbose_name_plural = "RankingResult objects"
# pylint: disable-msg=E1002
def __init__(self, *args, **kwargs):
"""
Makes sure that self.results are available.
"""
super(RankingResult, self).__init__(*args, **kwargs)
# If raw_result is available, populate dynamic field.
self.reload_dynamic_fields()
def __unicode__(self):
"""
Returns a Unicode String for this RankingResult object.
"""
return u'<ranking-result id="{0}">'.format(self.id)
def reload_dynamic_fields(self):
"""
Reloads source, reference, and translations from self.item_xml.
"""
if self.raw_result and self.raw_result != 'SKIPPED':
try:
self.results = self.raw_result.split(',')
self.results = [int(x) for x in self.results]
self.systems = sum([len(x[1]['system'].split(',')) for x in self.item.translations])
# pylint: disable-msg=W0703
except Exception, msg:
self.results = msg
def export_to_xml(self):
"""
Renders this RankingResult as XML String.
"""
return self.export_to_ranking_xml()
def export_to_ranking_xml(self):
"""
Renders this RankingResult as Ranking XML String.
"""
template = get_template('wmt16/ranking_result.xml')
_attr = self.item.attributes.items()
attributes = ' '.join(['{}="{}"'.format(k, v) for k, v in _attr])
skipped = self.results is None
translations = []
if not skipped:
for index, translation in enumerate(self.item.translations):
_items = translation[1].items()
_attr = ' '.join(['{}="{}"'.format(k, v) for k, v in _items])
_rank = self.results[index]
translations.append((_attr, _rank))
context = {
'attributes': attributes,
'duration': '{}'.format(self.duration),
'skipped': skipped,
'translations': translations,
'user': self.user,
}
return template.render(Context(context))
def export_to_pairwise_csv(self):
"""
Renders this RankingResult as pairwise CSV String.
Format:
srclang,trglang,srcIndex,segmentId,judgeID,system1Id,system1rank,system2Id,system2rank,rankingID
"""
skipped = self.results is None
if skipped:
return None
try:
srcIndex = self.item.source[1]["id"]
except:
srcIndex = -1
_src_lang = self.item.hit.hit_attributes['source-language']
_trg_lang = self.item.hit.hit_attributes['target-language']
csv_data = []
csv_data.append(ISO639_3_TO_NAME_MAPPING[_src_lang]) # srclang
csv_data.append(ISO639_3_TO_NAME_MAPPING[_trg_lang]) # trglang
csv_data.append(srcIndex) # srcIndex
csv_data.append(srcIndex) # segmentId
csv_data.append(self.user.username) # judgeID
base_values = csv_data
systems = set()
for index, translation in enumerate(self.item.translations):
name = translation[1]['system'].replace(',', '+')
rank = self.results[index]
systems.add((name, rank))
csv_output = []
from itertools import combinations
for (sysA, sysB) in combinations(systems, 2):
# Compute all systems in sysA, sysB which can be multi systems
expandedA = sysA[0].split('+')
expandedB = sysB[0].split('+')
# Pairwise comparisons without intra-multi-system pairs
for singleA in expandedA:
for singleB in expandedB:
csv_local = []
csv_local.extend(base_values)
csv_local.append(singleA) # system1Id
csv_local.append(str(sysA[1])) # system1rank
csv_local.append(singleB) # system2Id
csv_local.append(str(sysB[1])) # system2rank
csv_local.append(str(self.item.id)) # rankingID
csv_joint = u",".join(csv_local)
if not csv_joint in csv_output:
csv_output.append(csv_joint)
# Intra-multi-system pairs, sharing the same rank
# We'll only add these once to prevent duplicate entries
if len(expandedA) > 1:
for (singleA1, singleA2) in combinations(expandedA, 2):
csv_local = []
csv_local.extend(base_values)
csv_local.append(singleA1) # system1Id
csv_local.append(str(sysA[1])) # system1rank
csv_local.append(singleA2) # system2Id
csv_local.append(str(sysA[1])) # system2rank
csv_local.append(str(self.item.id)) # rankingID
csv_joint = u",".join(csv_local)
if not csv_joint in csv_output:
csv_output.append(csv_joint)
# Intra-multi-system pairs, sharing the same rank
# We'll only add these once to prevent duplicate entries
if len(expandedB) > 1:
for (singleB1, singleB2) in combinations(expandedB, 2):
csv_local = []
csv_local.extend(base_values)
csv_local.append(singleB1) # system1Id
csv_local.append(str(sysB[1])) # system1rank
csv_local.append(singleB2) # system2Id
csv_local.append(str(sysB[1])) # system2rank
csv_local.append(str(self.item.id)) # rankingID
csv_joint = u",".join(csv_local)
if not csv_joint in csv_output:
csv_output.append(csv_joint)
return u"\n".join(csv_output)
def export_to_ranking_csv(self):
"""
Renders this RankingResult as Ranking CSV String.
Format:
ID,srcLang,tgtLang,user,duration,rank_1,word_count_1,rank_2,word_count_2,rank_3,word_count_3,rank_4,word_count_4,rank_5,word_count_5
"""
# TODO: this needs to be cleaned up...
# We'd like to have a minimal version of the ranking CSV output.
# Not sure why this one generates ranks and word counts... :)
raise NotImplementedError("not ready yet")
ranking_csv_data = []
try:
ranking_csv_data.append(self.item.source[1]["id"])
except:
ranking_csv_data.append(-1)
_src_lang = self.item.hit.hit_attributes['source-language']
_trg_lang = self.item.hit.hit_attributes['target-language']
ranking_csv_data.append(ISO639_3_TO_NAME_MAPPING[_src_lang]) # srclang
ranking_csv_data.append(ISO639_3_TO_NAME_MAPPING[_trg_lang]) # trglang
ranking_csv_data.append(self.user.username)
ranking_csv_data.append(str(datetime_to_seconds(self.duration)))
skipped = self.results is None
translations = []
if not skipped:
for index, translation in enumerate(self.item.translations):
_word_count = len(translation[0].split())
_rank = self.results[index]
translations.append((_rank, _word_count))
for rank, word_count in translations:
ranking_csv_data.append(str(rank))
ranking_csv_data.append(str(word_count))
return u",".join(ranking_csv_data)
def export_to_csv(self, expand_multi_systems=False):
"""
Exports this RankingResult in CSV format.
"""
item = self.item
hit = self.item.hit
values = []
_src_lang = hit.hit_attributes['source-language']
_trg_lang = hit.hit_attributes['target-language']
# TODO: this relies on the fact that we have five systems per HIT.
# To resolve this, we might have to skip systems detection based
# on the HIT attribute and instead process the translations.
#
# System ids can be retrieved from HIT or segment level.
#
# We cannot do this anymore as we might have multi-systems.
#if 'systems' in hit.hit_attributes.keys():
# _systems = hit.hit_attributes['systems'].split(',')
# See below for a potential implementation to address multi-systems.
#
# On segment level, we have to extract the individual "system" values
# from the <translation> attributes which are stored in the second
# position of the translation tuple: (text, attrib).
_systems = []
for translation in item.translations:
_systems.append(translation[1]['system'])
# Note that srcIndex and segmentId are 1-indexed for compatibility
# with evaluation scripts from previous editions of the WMT.
values.append(ISO639_3_TO_NAME_MAPPING[_src_lang]) # srclang
values.append(ISO639_3_TO_NAME_MAPPING[_trg_lang]) # trglang
values.append(item.source[1]['id']) # srcIndex
values.append('-1') # documentId
values.append(item.source[1]['id']) # segmentId (= srcIndex)
values.append(self.user.username) # judgeId
# Save current data values as we might have to write them out
# several times when multi-systems trigger multiple results...
base_values = values
# Don't fail for skipped items
if not self.results:
self.results = [-1] * len(_systems)
_system_names = []
_system_ranks = []
for _result_index, _system in enumerate(_systems):
if expand_multi_systems:
_local_systems = _system.split(',')
_local_results = [str(self.results[_result_index])] * len(_local_systems)
_system_names.extend(_local_systems)
_system_ranks.extend(_local_results)
else:
_system_names.append(_system.replace(',', '+'))
_system_ranks.append(str(self.results[_result_index]))
# Check if we need to add placeholder systems to pad to 5*k systems.
# This is needed as our export format expects five systems per line.
if len(_system_names) % 5 > 0:
_missing_systems = 5 - len(_system_names) % 5
for x in range(_missing_systems):
_system_names.append('PLACEHOLDER')
_system_ranks.append('-1')
all_values = []
for _base_index in range(len(_system_names))[::5]:
current_values = list(base_values)
current_ranks = []
for _current_index in range(len(_system_names))[_base_index:_base_index+5]:
current_values.append('-1')
current_values.append(str(_system_names[_current_index]))
current_ranks.append(_system_ranks[_current_index])
current_values.extend(current_ranks)
all_values.append(u",".join(current_values))
# This does not work anymore as we face multi-systems.
#
#values.append('-1') # system1Number
#values.append(str(_systems[0])) # system1Id
#values.append('-1') # system2Number
#values.append(str(_systems[1])) # system2Id
#values.append('-1') # system3Number
#values.append(str(_systems[2])) # system3Id
#values.append('-1') # system4Number
#values.append(str(_systems[3])) # system4Id
#values.append('-1') # system5Number
#values.append(str(_systems[4])) # system5Id
#
# TODO: decide what happens in case of k>5 systems due to
# multi-systems. Can we simply add annother CSV line and
# add the extra system rankings? If so, we should define
# a "dummy" system to make sure we don't break CSV format.
#
# Specifying a value of -1 for system rank should work...
#
# system1rank,system2rank,system3rank,system4rank,system5rank
#if self.results:
# values.extend([str(x) for x in self.results])
#else:
# values.extend(['-1'] * 5)
return u"\n".join(all_values)
# pylint: disable-msg=C0103
def export_to_apf(self):
"""
Exports this RankingResult to Artstein and Poesio (2007) format.
"""
if not self.results:
return None
item = self.item
hit = self.item.hit
_systems = []
# System ids can be retrieved from HIT or segment level.
#
# We cannot do this anymore as we might have multi-systems.
# if 'systems' in hit.hit_attributes.keys():
# _systems = hit.hit_attributes['systems'].split(',')
# On segment level, we have to extract the individual "system" values
# from the <translation> attributes which are stored in the second
# position of the translation tuple: (text, attrib).
for translation in item.translations:
_systems.append(translation[1]['system'])
from itertools import combinations, product
results = []
# TODO: this relies on the fact that we have five systems per HIT.
# To resolve this, we might have to skip systems detection based
# on the HIT attribute and instead process the translations.
#
# An additional problem is that we might have multi-systems.
# These occur when two systems had the same translation output
# during batch creation. Such cases will spawn additional
# result items when multi-systems get expanded into individual
# units. This may happen for both sides, e.g., systems A, B.
#
# Note that srcIndex is 1-indexed for compatibility with evaluation
# scripts from previous editions of the WMT.
for a, b in combinations(range(5), 2):
_c = self.user.username
_i = '{0}.{1}.{2}'.format(item.source[1]['id'], a+1, b+1)
# Determine individual systems for multi-system entries.
_individualA = _systems[a].split(',')
_individualB = _systems[b].split(',')
for _systemA, _systemB in product(_individualA, _individualB):
_verdict = '?'
if self.results[a] > self.results[b]:
_verdict = '>'
elif self.results[a] < self.results[b]:
_verdict = '<'
elif self.results[a] == self.results[b]:
_verdict = '='
_v = '{0}{1}{2}'.format(str(_systemA), _verdict, str(_systemB))
results.append('{0},{1},{2}'.format(_c, _i, _v))
return u'\n'.join(results)
@receiver(models.signals.post_save, sender=RankingResult)
def update_user_hit_mappings(sender, instance, created, **kwargs):
"""
Updates the User/Project/HIT mappings.
"""
hit = instance.item.hit
user = instance.user
results = RankingResult.objects.filter(user=user, item__hit=hit)
if len(results) > 2:
from appraise.wmt16.views import _compute_next_task_for_user
LOGGER.debug('Deleting stale User/HIT mapping {0}->{1}'.format(
user, hit))
hit.users.add(user)
for project in hit.project_set.all():
UserHITMapping.objects.filter(user=user, project=project, hit=hit).delete()
_compute_next_task_for_user(user, project, hit.language_pair)
@receiver(models.signals.post_delete, sender=RankingResult)
def remove_user_from_hit(sender, instance, **kwargs):
"""
Removes user from list of users who have completed corresponding HIT.
"""
user = instance.user
try:
hit = instance.item.hit
LOGGER.debug('Removing user "{0}" from HIT {1}'.format(user, hit))
hit.users.remove(user)
from appraise.wmt16.views import _compute_next_task_for_user
_compute_next_task_for_user(user, hit.project, hit.language_pair)
except (HIT.DoesNotExist, RankingTask.DoesNotExist):
pass
# pylint: disable-msg=E1101
class UserHITMapping(models.Model):
"""
Object model mapping users to their current HIT instances.
"""
user = models.ForeignKey(
User,
db_index=True
)
project = models.ForeignKey(
Project,
db_index=True
)
hit = models.ForeignKey(
HIT,
db_index=True
)
class Meta:
"""
Metadata options for the UserHITMapping object model.
"""
verbose_name = "User/Project/HIT mapping instance"
verbose_name_plural = "User/Project/HIT mapping instances"
def __unicode__(self):
"""
Returns a Unicode String for this UserHITMapping object.
"""
return u'<hitmap id="{0}" user="{1}" project="{2}" hit="{3}">'.format(self.id,
self.user.username, self.project.name, self.hit.hit_id)
# pylint: disable-msg=E1002
def save(self, *args, **kwargs):
"""
Makes sure that HIT's assigned field is updated.
"""
self.hit.assigned = datetime.now()
self.hit.save()
super(UserHITMapping, self).save(*args, **kwargs)
# pylint: disable-msg=E1101
class UserInviteToken(models.Model):
"""
User invite tokens allowing to register an account.
"""
group = models.ForeignKey(
Group,
db_index=True
)
token = models.CharField(
max_length=8,
db_index=True,
default=lambda: UserInviteToken._create_token(),
unique=True,
help_text="Unique invite token",
verbose_name="Invite token"
)
active = models.BooleanField(
db_index=True,
default=True,
help_text="Indicates that this invite can still be used.",
verbose_name="Active?"
)
class Meta:
"""
Metadata options for the UserInviteToken object model.
"""
verbose_name = "User invite token"
verbose_name_plural = "User invite tokens"
# pylint: disable-msg=E1002
def __init__(self, *args, **kwargs):
"""
Makes sure that self.token is properly set up.
"""
super(UserInviteToken, self).__init__(*args, **kwargs)
if not self.token:
self.token = self.__class__._create_token()
def __unicode__(self):
"""
Returns a Unicode String for this UserInviteToken object.
"""
return u'<user-invite id="{0}" token="{1}" active="{2}">'.format(
self.id, self.token, self.active)
@classmethod
def _create_token(cls):
"""Creates a random UUID-4 8-digit hex number for use as a token."""
new_token = uuid.uuid4().hex[:8]
while cls.objects.filter(token=new_token):
new_token = uuid.uuid4().hex[:8]
return new_token
class TimedKeyValueData(models.Model):
"""
Stores a simple (key, value) pair.
"""
key = models.CharField(max_length=100, blank=False, null=False)
value = models.TextField(blank=False, null=False)
date_and_time = models.DateTimeField(blank=False, null=False, editable=False, auto_now_add=True)
@classmethod
def update_status_if_changed(cls, key, new_value):
"""
Stores a new TimedKeyValueData instance if value for key has changed
"""
_latest_values = cls.objects.filter(key=key).order_by('date_and_time').reverse().values_list('value', flat=True)
if not _latest_values or _latest_values[0] != new_value:
new_data = cls(key=key, value=new_value)
new_data.save()
def initialize_database():
"""
Initializes database with required language code and WMT16 groups
"""
researcher_group_names = set(GROUP_HIT_REQUIREMENTS.keys())
for researcher_group_name in researcher_group_names:
LOGGER.debug("Validating researcher group '{0}'".format(researcher_group_name))
_ = Group.objects.get_or_create(name=researcher_group_name)
language_pair_codes = set(x[0] for x in LANGUAGE_PAIR_CHOICES)
for language_pair_code in language_pair_codes:
LOGGER.debug("Validating group '{0}'".format(language_pair_code))
_ = Group.objects.get_or_create(name=language_pair_code)
LOGGER.debug("Validating group 'WMT16'")
_ = Group.objects.get_or_create(name='WMT16') |
py | 1a51a0d79678b13c8e33e5860401e85d29f85ce4 | import collections
from packaging.version import Version
import inspect
import logging
from numbers import Number
import numpy as np
import time
import warnings
from mlflow.tracking.client import MlflowClient
from mlflow.utils.file_utils import TempDir
from mlflow.utils.mlflow_tags import MLFLOW_PARENT_RUN_ID
from mlflow.utils.arguments_utils import _get_arg_names
_logger = logging.getLogger(__name__)
# The earliest version we're guaranteed to support. Autologging utilities may not work properly
# on scikit-learn older than this version.
_MIN_SKLEARN_VERSION = "0.20.3"
# The prefix to note that all calculated metrics and artifacts are solely based on training datasets
_TRAINING_PREFIX = "training_"
_SAMPLE_WEIGHT = "sample_weight"
# _SklearnArtifact represents an artifact (e.g. confusion matrix) that will be computed and
# logged during the autologging routine for a particular model type (eg, classifier, regressor).
_SklearnArtifact = collections.namedtuple(
"_SklearnArtifact", ["name", "function", "arguments", "title"]
)
# _SklearnMetric represents a metric (e.g, precision_score) that will be computed and
# logged during the autologging routine for a particular model type (eg, classifier, regressor).
_SklearnMetric = collections.namedtuple("_SklearnMetric", ["name", "function", "arguments"])
def _get_estimator_info_tags(estimator):
"""
:return: A dictionary of MLflow run tag keys and values
describing the specified estimator.
"""
return {
"estimator_name": estimator.__class__.__name__,
"estimator_class": (estimator.__class__.__module__ + "." + estimator.__class__.__name__),
}
def _get_args_for_metrics(fit_func, fit_args, fit_kwargs):
"""
Get arguments to pass to metric computations in the following steps.
1. Extract X and y from fit_args and fit_kwargs.
2. If the sample_weight argument exists in fit_func,
extract it from fit_args or fit_kwargs and return (X, y, sample_weight),
otherwise return (X, y, None).
:param fit_func: A fit function object.
:param fit_args: Positional arguments given to fit_func.
:param fit_kwargs: Keyword arguments given to fit_func.
:returns: A tuple of either (X, y, sample_weight), where `y` and `sample_weight` may be
`None` if the specified `fit_args` and `fit_kwargs` do not specify labels or
a sample weighting.
"""
def _get_Xy(args, kwargs, X_var_name, y_var_name):
# corresponds to: model.fit(X, y)
if len(args) >= 2:
return args[:2]
# corresponds to: model.fit(X, <y_var_name>=y)
if len(args) == 1:
return args[0], kwargs.get(y_var_name)
# corresponds to: model.fit(<X_var_name>=X, <y_var_name>=y)
return kwargs[X_var_name], kwargs.get(y_var_name)
def _get_sample_weight(arg_names, args, kwargs):
sample_weight_index = arg_names.index(_SAMPLE_WEIGHT)
# corresponds to: model.fit(X, y, ..., sample_weight)
if len(args) > sample_weight_index:
return args[sample_weight_index]
# corresponds to: model.fit(X, y, ..., sample_weight=sample_weight)
if _SAMPLE_WEIGHT in kwargs:
return kwargs[_SAMPLE_WEIGHT]
return None
fit_arg_names = _get_arg_names(fit_func)
# In most cases, X_var_name and y_var_name become "X" and "y", respectively.
# However, certain sklearn models use different variable names for X and y.
# E.g., see: https://scikit-learn.org/stable/modules/generated/sklearn.multioutput.MultiOutputClassifier.html#sklearn.multioutput.MultiOutputClassifier.fit # noqa: E501
X_var_name, y_var_name = fit_arg_names[:2]
Xy = _get_Xy(fit_args, fit_kwargs, X_var_name, y_var_name)
sample_weight = (
_get_sample_weight(fit_arg_names, fit_args, fit_kwargs)
if (_SAMPLE_WEIGHT in fit_arg_names)
else None
)
return (*Xy, sample_weight)
def _get_metrics_value_dict(metrics_list):
metric_value_dict = {}
for metric in metrics_list:
try:
metric_value = metric.function(**metric.arguments)
except Exception as e:
_log_warning_for_metrics(metric.name, metric.function, e)
else:
metric_value_dict[metric.name] = metric_value
return metric_value_dict
def _get_classifier_metrics(fitted_estimator, prefix, X, y_true, sample_weight):
"""
Compute and record various common metrics for classifiers
For (1) precision score:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_score.html
(2) recall score:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html
(3) f1_score:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html
By default, we choose the parameter `labels` to be `None`, `pos_label` to be `1`,
`average` to be `weighted` to compute the weighted precision score.
For (4) accuracy score:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html
we choose the parameter `normalize` to be `True` to output the percentage of accuracy,
as opposed to `False` that outputs the absolute correct number of sample prediction
We log additional metrics if certain classifier has method `predict_proba`
(5) log loss:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html
(6) roc_auc_score:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html
By default, for roc_auc_score, we pick `average` to be `weighted`, `multi_class` to be `ovo`,
to make the output more insensitive to dataset imbalance.
Steps:
1. Compute y_pred from the fitted estimator and X.
2. If a metric accepts sample_weight (accuracy_score does by default), call it as
   (y_true, y_pred, ..., sample_weight), otherwise as (y_true, y_pred, ...).
3. Return a dictionary of metric (name, value) pairs.
:param fitted_estimator: The already fitted classifier
:param prefix: Prefix for the logged metric names (e.g. "training_")
:param X: The data samples
:param y_true: The labels
:param sample_weight: Optional per-sample weights
:return: dictionary of (function name, computed value)
"""
import sklearn
y_pred = fitted_estimator.predict(X)
classifier_metrics = [
_SklearnMetric(
name=prefix + "precision_score",
function=sklearn.metrics.precision_score,
arguments=dict(
y_true=y_true, y_pred=y_pred, average="weighted", sample_weight=sample_weight
),
),
_SklearnMetric(
name=prefix + "recall_score",
function=sklearn.metrics.recall_score,
arguments=dict(
y_true=y_true, y_pred=y_pred, average="weighted", sample_weight=sample_weight
),
),
_SklearnMetric(
name=prefix + "f1_score",
function=sklearn.metrics.f1_score,
arguments=dict(
y_true=y_true, y_pred=y_pred, average="weighted", sample_weight=sample_weight
),
),
_SklearnMetric(
name=prefix + "accuracy_score",
function=sklearn.metrics.accuracy_score,
arguments=dict(
y_true=y_true, y_pred=y_pred, normalize=True, sample_weight=sample_weight
),
),
]
if hasattr(fitted_estimator, "predict_proba"):
y_pred_proba = fitted_estimator.predict_proba(X)
classifier_metrics.extend(
[
_SklearnMetric(
name=prefix + "log_loss",
function=sklearn.metrics.log_loss,
arguments=dict(y_true=y_true, y_pred=y_pred_proba, sample_weight=sample_weight),
),
]
)
if _is_metric_supported("roc_auc_score"):
# For binary case, the parameter `y_score` expect scores must be
# the scores of the class with the greater label.
if len(y_pred_proba[0]) == 2:
y_pred_proba = y_pred_proba[:, 1]
classifier_metrics.extend(
[
_SklearnMetric(
name=prefix + "roc_auc_score",
function=sklearn.metrics.roc_auc_score,
arguments=dict(
y_true=y_true,
y_score=y_pred_proba,
average="weighted",
sample_weight=sample_weight,
multi_class="ovo",
),
),
]
)
return _get_metrics_value_dict(classifier_metrics)
def _get_classifier_artifacts(fitted_estimator, prefix, X, y_true, sample_weight):
"""
Draw and record various common artifacts for classifier
For all classifiers, we always log:
(1) confusion matrix:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html
For only binary classifiers, we will log:
(2) precision recall curve:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_precision_recall_curve.html
(3) roc curve:
https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
Steps:
1. Build the plots from the fitted estimator, X and y_true.
2. Pass sample_weight through to the plotting helpers that accept it.
3. Return a list of artifacts to be logged.
:param fitted_estimator: The already fitted classifier
:param prefix: Prefix for the logged artifact names (e.g. "training_")
:param X: The data samples
:param y_true: The labels
:param sample_weight: Optional per-sample weights
:return: List of artifacts to be logged
"""
import sklearn
if not _is_plotting_supported():
return []
classifier_artifacts = [
_SklearnArtifact(
name=prefix + "confusion_matrix",
function=sklearn.metrics.plot_confusion_matrix,
arguments=dict(
estimator=fitted_estimator,
X=X,
y_true=y_true,
sample_weight=sample_weight,
normalize="true",
cmap="Blues",
),
title="Normalized confusion matrix",
),
]
# The plot_roc_curve and plot_precision_recall_curve can only be
# supported for binary classifier
if len(set(y_true)) == 2:
classifier_artifacts.extend(
[
_SklearnArtifact(
name=prefix + "roc_curve",
function=sklearn.metrics.plot_roc_curve,
arguments=dict(
estimator=fitted_estimator,
X=X,
y=y_true,
sample_weight=sample_weight,
),
title="ROC curve",
),
_SklearnArtifact(
name=prefix + "precision_recall_curve",
function=sklearn.metrics.plot_precision_recall_curve,
arguments=dict(
estimator=fitted_estimator,
X=X,
y=y_true,
sample_weight=sample_weight,
),
title="Precision recall curve",
),
]
)
return classifier_artifacts
def _get_regressor_metrics(fitted_estimator, prefix, X, y_true, sample_weight):
"""
Compute and record various common metrics for regressors
For (1) (root) mean squared error:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html
(2) mean absolute error:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_error.html
(3) r2 score:
https://scikit-learn.org/stable/modules/generated/sklearn.metrics.r2_score.html
By default, we choose the parameter `multioutput` to be `uniform_average`
to average outputs with uniform weight.
Steps:
1. Compute y_pred from the fitted estimator and X.
2. Pass sample_weight and multioutput through to each metric as
   (y_true, y_pred, sample_weight, multioutput).
3. Return a dictionary of metric (name, value) pairs.
:param fitted_estimator: The already fitted regressor
:param prefix: Prefix for the logged metric names (e.g. "training_")
:param X: The data samples
:param y_true: The labels
:param sample_weight: Optional per-sample weights
:return: dictionary of (function name, computed value)
"""
import sklearn
y_pred = fitted_estimator.predict(X)
regressor_metrics = [
_SklearnMetric(
name=prefix + "mse",
function=sklearn.metrics.mean_squared_error,
arguments=dict(
y_true=y_true,
y_pred=y_pred,
sample_weight=sample_weight,
multioutput="uniform_average",
),
),
_SklearnMetric(
name=prefix + "mae",
function=sklearn.metrics.mean_absolute_error,
arguments=dict(
y_true=y_true,
y_pred=y_pred,
sample_weight=sample_weight,
multioutput="uniform_average",
),
),
_SklearnMetric(
name=prefix + "r2_score",
function=sklearn.metrics.r2_score,
arguments=dict(
y_true=y_true,
y_pred=y_pred,
sample_weight=sample_weight,
multioutput="uniform_average",
),
),
]
# To be compatible with older versions of scikit-learn (below 0.22.2), where
# `sklearn.metrics.mean_squared_error` does not have "squared" parameter to calculate `rmse`,
# we compute it through np.sqrt(<value of mse>)
metrics_value_dict = _get_metrics_value_dict(regressor_metrics)
metrics_value_dict[prefix + "rmse"] = np.sqrt(metrics_value_dict[prefix + "mse"])
return metrics_value_dict
def _log_warning_for_metrics(func_name, func_call, err):
msg = (
func_call.__qualname__
+ " failed. The metric "
+ func_name
+ " will not be recorded."
+ " Metric error: "
+ str(err)
)
_logger.warning(msg)
def _log_warning_for_artifacts(func_name, func_call, err):
msg = (
func_call.__qualname__
+ " failed. The artifact "
+ func_name
+ " will not be recorded."
+ " Artifact error: "
+ str(err)
)
_logger.warning(msg)
def _log_specialized_estimator_content(
autologging_client, fitted_estimator, run_id, prefix, X, y_true=None, sample_weight=None
):
import sklearn
metrics = dict()
if y_true is not None:
try:
if sklearn.base.is_classifier(fitted_estimator):
metrics = _get_classifier_metrics(
fitted_estimator, prefix, X, y_true, sample_weight
)
elif sklearn.base.is_regressor(fitted_estimator):
metrics = _get_regressor_metrics(fitted_estimator, prefix, X, y_true, sample_weight)
except Exception as err:
msg = (
"Failed to autolog metrics for "
+ fitted_estimator.__class__.__name__
+ ". Logging error: "
+ str(err)
)
_logger.warning(msg)
else:
autologging_client.log_metrics(run_id=run_id, metrics=metrics)
if sklearn.base.is_classifier(fitted_estimator):
try:
artifacts = _get_classifier_artifacts(
fitted_estimator, prefix, X, y_true, sample_weight
)
except Exception as e:
msg = (
"Failed to autolog artifacts for "
+ fitted_estimator.__class__.__name__
+ ". Logging error: "
+ str(e)
)
_logger.warning(msg)
return
with TempDir() as tmp_dir:
for artifact in artifacts:
try:
display = artifact.function(**artifact.arguments)
display.ax_.set_title(artifact.title)
artifact_path = "{}.png".format(artifact.name)
filepath = tmp_dir.path(artifact_path)
display.figure_.savefig(filepath)
import matplotlib.pyplot as plt
plt.close(display.figure_)
except Exception as e:
_log_warning_for_artifacts(artifact.name, artifact.function, e)
MlflowClient().log_artifacts(run_id, tmp_dir.path())
return metrics
def _log_estimator_content(
autologging_client, estimator, run_id, prefix, X, y_true=None, sample_weight=None
):
"""
Logs content for the given estimator, which includes metrics and artifacts that might be
tailored to the estimator's type (e.g., regression vs classification). Training labels
are required for metric computation; metrics will be omitted if labels are not available.
:param autologging_client: An instance of `MlflowAutologgingQueueingClient` used for
efficiently logging run data to MLflow Tracking.
:param estimator: The estimator used to compute metrics and artifacts.
:param run_id: The run under which the content is logged.
:param prefix: A prefix used to name the logged content. Typically it's 'training_' for
training-time content and user-controlled for evaluation-time content.
:param X: The data samples.
:param y_true: Labels.
:param sample_weight: Per-sample weights used in the computation of metrics and artifacts.
:return: A dict of the computed metrics.
"""
metrics = _log_specialized_estimator_content(
autologging_client=autologging_client,
fitted_estimator=estimator,
run_id=run_id,
prefix=prefix,
X=X,
y_true=y_true,
sample_weight=sample_weight,
)
if hasattr(estimator, "score") and y_true is not None:
try:
# Use the sample weight only if it is present in the score args
score_arg_names = _get_arg_names(estimator.score)
score_args = (
(X, y_true, sample_weight) if _SAMPLE_WEIGHT in score_arg_names else (X, y_true)
)
score = estimator.score(*score_args)
except Exception as e:
msg = (
estimator.score.__qualname__
+ " failed. The 'training_score' metric will not be recorded. Scoring error: "
+ str(e)
)
_logger.warning(msg)
else:
score_key = prefix + "score"
autologging_client.log_metrics(run_id=run_id, metrics={score_key: score})
metrics[score_key] = score
return metrics
def _get_meta_estimators_for_autologging():
"""
:return: A list of meta estimator class definitions
(e.g., `sklearn.model_selection.GridSearchCV`) that should be included
when patching training functions for autologging
"""
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.pipeline import Pipeline
return [
GridSearchCV,
RandomizedSearchCV,
Pipeline,
]
def _is_parameter_search_estimator(estimator):
"""
:return: `True` if the specified scikit-learn estimator is a parameter search estimator,
such as `GridSearchCV`. `False` otherwise.
"""
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
parameter_search_estimators = [
GridSearchCV,
RandomizedSearchCV,
]
return any(
[
isinstance(estimator, param_search_estimator)
for param_search_estimator in parameter_search_estimators
]
)
def _log_parameter_search_results_as_artifact(cv_results_df, run_id):
"""
Records a collection of parameter search results as an MLflow artifact
for the specified run.
:param cv_results_df: A Pandas DataFrame containing the results of a parameter search
training session, which may be obtained by parsing the `cv_results_`
attribute of a trained parameter search estimator such as
`GridSearchCV`.
:param run_id: The ID of the MLflow Run to which the artifact should be recorded.
"""
with TempDir() as t:
results_path = t.path("cv_results.csv")
cv_results_df.to_csv(results_path, index=False)
MlflowClient().log_artifact(run_id, results_path)
# Log how many child runs will be created vs omitted based on `max_tuning_runs`.
def _log_child_runs_info(max_tuning_runs, total_runs):
rest = total_runs - max_tuning_runs
# Set logging statement for runs to be logged.
if max_tuning_runs == 0:
logging_phrase = "no runs"
elif max_tuning_runs == 1:
logging_phrase = "the best run"
else:
logging_phrase = "the {} best runs".format(max_tuning_runs)
# Set logging statement for runs to be omitted.
if rest <= 0:
omitting_phrase = "no runs"
elif rest == 1:
omitting_phrase = "one run"
else:
omitting_phrase = "{} runs".format(rest)
_logger.info("Logging %s, %s will be omitted.", logging_phrase, omitting_phrase)
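# Example of the message built above (hypothetical values): with max_tuning_runs=5 and
# total_runs=12 this logs "Logging the 5 best runs, 7 runs will be omitted."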
def _create_child_runs_for_parameter_search(
autologging_client, cv_estimator, parent_run, max_tuning_runs, child_tags=None
):
"""
Creates a collection of child runs for a parameter search training session.
Runs are reconstructed from the `cv_results_` attribute of the specified trained
parameter search estimator - `cv_estimator`, which provides relevant performance
metrics for each point in the parameter search space. One child run is created
for each point in the parameter search space. For additional information, see
`https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html`_. # noqa: E501
:param autologging_client: An instance of `MlflowAutologgingQueueingClient` used for
efficiently logging run data to MLflow Tracking.
:param cv_estimator: The trained parameter search estimator for which to create
child runs.
    :param parent_run: A :py:class:`mlflow.entities.Run` object referring to the parent
                       parameter search run for which child runs should be created.
    :param max_tuning_runs: The maximum number of child runs to create. If ``None``, a child
                            run is created for every point in the parameter search space.
    :param child_tags: An optional dictionary of MLflow tag keys and values to log
                       for each child run.
"""
import pandas as pd
def first_custom_rank_column(df):
column_names = df.columns.values
for col_name in column_names:
if "rank_test_" in col_name:
return col_name
# Use the start time of the parent parameter search run as a rough estimate for the
# start time of child runs, since we cannot precisely determine when each point
# in the parameter search space was explored
child_run_start_time = parent_run.info.start_time
child_run_end_time = int(time.time() * 1000)
seed_estimator = cv_estimator.estimator
    # In the unlikely case that a seed of a parameter search estimator is,
    # itself, a parameter search estimator, we should avoid logging the untuned
    # parameters of the seed's seed estimator
should_log_params_deeply = not _is_parameter_search_estimator(seed_estimator)
# Each row of `cv_results_` only provides parameters that vary across
# the user-specified parameter grid. In order to log the complete set
# of parameters for each child run, we fetch the parameters defined by
# the seed estimator and update them with parameter subset specified
# in the result row
base_params = seed_estimator.get_params(deep=should_log_params_deeply)
cv_results_df = pd.DataFrame.from_dict(cv_estimator.cv_results_)
if max_tuning_runs is None:
cv_results_best_n_df = cv_results_df
else:
rank_column_name = "rank_test_score"
if rank_column_name not in cv_results_df.columns.values:
rank_column_name = first_custom_rank_column(cv_results_df)
warnings.warn(
"Top {} child runs will be created based on ordering in {} column.".format(
max_tuning_runs,
rank_column_name,
)
+ " You can choose not to limit the number of child runs created by"
+ " setting `max_tuning_runs=None`."
)
cv_results_best_n_df = cv_results_df.nsmallest(max_tuning_runs, rank_column_name)
# Log how many child runs will be created vs omitted.
_log_child_runs_info(max_tuning_runs, len(cv_results_df))
for _, result_row in cv_results_best_n_df.iterrows():
tags_to_log = dict(child_tags) if child_tags else {}
tags_to_log.update({MLFLOW_PARENT_RUN_ID: parent_run.info.run_id})
tags_to_log.update(_get_estimator_info_tags(seed_estimator))
pending_child_run_id = autologging_client.create_run(
experiment_id=parent_run.info.experiment_id,
start_time=child_run_start_time,
tags=tags_to_log,
)
params_to_log = dict(base_params)
params_to_log.update(result_row.get("params", {}))
autologging_client.log_params(run_id=pending_child_run_id, params=params_to_log)
# Parameters values are recorded twice in the set of search `cv_results_`:
# once within a `params` column with dictionary values and once within
# a separate dataframe column that is created for each parameter. To prevent
# duplication of parameters, we log the consolidated values from the parameter
# dictionary column and filter out the other parameter-specific columns with
# names of the form `param_{param_name}`. Additionally, `cv_results_` produces
# metrics for each training split, which is fairly verbose; accordingly, we filter
# out per-split metrics in favor of aggregate metrics (mean, std, etc.)
excluded_metric_prefixes = ["param", "split"]
metrics_to_log = {
key: value
for key, value in result_row.iteritems()
if not any([key.startswith(prefix) for prefix in excluded_metric_prefixes])
and isinstance(value, Number)
}
autologging_client.log_metrics(
run_id=pending_child_run_id,
metrics=metrics_to_log,
)
autologging_client.set_terminated(run_id=pending_child_run_id, end_time=child_run_end_time)
def _is_supported_version():
import sklearn
return Version(sklearn.__version__) >= Version(_MIN_SKLEARN_VERSION)
# Utility function to check whether a metric can be computed with the installed sklearn version
def _is_metric_supported(metric_name):
import sklearn
# This dict can be extended to store special metrics' specific supported versions
_metric_supported_version = {"roc_auc_score": "0.22.2"}
return Version(sklearn.__version__) >= Version(_metric_supported_version[metric_name])
# Utility function to check whether the artifact plotting functions can be used with the
# installed sklearn version (requires >= 0.22.0)
def _is_plotting_supported():
import sklearn
return Version(sklearn.__version__) >= Version("0.22.0")
def _all_estimators():
try:
from sklearn.utils import all_estimators
return all_estimators()
except ImportError:
return _backported_all_estimators()
def _backported_all_estimators(type_filter=None):
"""
Backported from scikit-learn 0.23.2:
https://github.com/scikit-learn/scikit-learn/blob/0.23.2/sklearn/utils/__init__.py#L1146
Use this backported `all_estimators` in old versions of sklearn because:
1. An inferior version of `all_estimators` that old versions of sklearn use for testing,
might function differently from a newer version.
2. This backported `all_estimators` works on old versions of sklearn that don’t even define
the testing utility variant of `all_estimators`.
========== original docstring ==========
Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
# lazy import to avoid circular imports from sklearn.base
import pkgutil
import platform
import sklearn
from importlib import import_module
from operator import itemgetter
# pylint: disable=no-name-in-module, import-error
from sklearn.utils.testing import ignore_warnings
from sklearn.base import (
BaseEstimator,
ClassifierMixin,
RegressorMixin,
TransformerMixin,
ClusterMixin,
)
IS_PYPY = platform.python_implementation() == "PyPy"
def is_abstract(c):
if not (hasattr(c, "__abstractmethods__")):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
modules_to_ignore = {"tests", "externals", "setup", "conftest"}
root = sklearn.__path__[0] # sklearn package
# Ignore deprecation warnings triggered at import time and from walking
# packages
with ignore_warnings(category=FutureWarning):
for _, modname, _ in pkgutil.walk_packages(path=[root], prefix="sklearn."):
mod_parts = modname.split(".")
if any(part in modules_to_ignore for part in mod_parts) or "._" in modname:
continue
module = import_module(modname)
classes = inspect.getmembers(module, inspect.isclass)
classes = [(name, est_cls) for name, est_cls in classes if not name.startswith("_")]
# TODO: Remove when FeatureHasher is implemented in PYPY
# Skips FeatureHasher for PYPY
if IS_PYPY and "feature_extraction" in modname:
classes = [(name, est_cls) for name, est_cls in classes if name == "FeatureHasher"]
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [
c for c in all_classes if (issubclass(c[1], BaseEstimator) and c[0] != "BaseEstimator")
]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {
"classifier": ClassifierMixin,
"regressor": RegressorMixin,
"transformer": TransformerMixin,
"cluster": ClusterMixin,
}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError(
"Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or "
"None, got"
" %s." % repr(type_filter)
)
# drop duplicates, sort for reproducibility
# itemgetter is used to ensure the sort does not extend to the 2nd item of
# the tuple
return sorted(set(estimators), key=itemgetter(0))
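# Usage sketch (illustrative, not part of the original module): iterate over the
# (name, class) pairs returned above, e.g.
#   for name, estimator_cls in _all_estimators():
#       print(name, estimator_cls)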
|
py | 1a51a20062dde8a33d6cd5d3f78ab65de2200384 | import json
import threading
import time
import os
import stat
from copy import deepcopy
from .util import user_dir, print_error, print_stderr, PrintError
from .bitcoin import MAX_FEE_RATE, FEE_TARGETS
SYSTEM_CONFIG_PATH = "/etc/electrum.conf"
config = None
def get_config():
global config
return config
def set_config(c):
global config
config = c
class SimpleConfig(PrintError):
"""
The SimpleConfig class is responsible for handling operations involving
configuration files.
There are 3 different sources of possible configuration values:
1. Command line options.
2. User configuration (in the user's config directory)
3. System configuration (in /etc/)
They are taken in order (1. overrides config options set in 2., that
override config set in 3.)
"""
fee_rates = [5000, 10000, 20000, 30000, 50000, 70000, 100000, 150000, 200000, 300000]
def __init__(self, options={}, read_system_config_function=None,
read_user_config_function=None, read_user_dir_function=None):
# This lock needs to be acquired for updating and reading the config in
# a thread-safe way.
self.lock = threading.RLock()
self.fee_estimates = {}
self.fee_estimates_last_updated = {}
self.last_time_fee_estimates_requested = 0 # zero ensures immediate fees
# The following two functions are there for dependency injection when
# testing.
if read_system_config_function is None:
read_system_config_function = read_system_config
if read_user_config_function is None:
read_user_config_function = read_user_config
if read_user_dir_function is None:
self.user_dir = user_dir
else:
self.user_dir = read_user_dir_function
# The command line options
self.cmdline_options = deepcopy(options)
# Portable wallets don't use a system config
if self.cmdline_options.get('portable', False):
self.system_config = {}
else:
self.system_config = read_system_config_function()
# Set self.path and read the user config
self.user_config = {} # for self.get in electrum_path()
self.path = self.electrum_path()
self.user_config = read_user_config_function(self.path)
# Upgrade obsolete keys
self.fixup_keys({'auto_cycle': 'auto_connect'})
# Make a singleton instance of 'self'
set_config(self)
def electrum_path(self):
# Read electrum_path from command line / system configuration
# Otherwise use the user's default data directory.
path = self.get('electrum_path')
if path is None:
path = self.user_dir()
if self.get('testnet'):
path = os.path.join(path, 'testnet')
# Make directory if it does not yet exist.
if not os.path.exists(path):
if os.path.islink(path):
raise BaseException('Dangling link: ' + path)
#os.mkdir(path)
os.makedirs(path, exist_ok=True)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
self.print_error("electrum directory", path)
return path
def fixup_config_keys(self, config, keypairs):
updated = False
for old_key, new_key in keypairs.items():
if old_key in config:
                if new_key not in config:
config[new_key] = config[old_key]
del config[old_key]
updated = True
return updated
def fixup_keys(self, keypairs):
'''Migrate old key names to new ones'''
self.fixup_config_keys(self.cmdline_options, keypairs)
self.fixup_config_keys(self.system_config, keypairs)
if self.fixup_config_keys(self.user_config, keypairs):
self.save_user_config()
def set_key(self, key, value, save = True):
if not self.is_modifiable(key):
print_stderr("Warning: not changing config key '%s' set on the command line" % key)
return
with self.lock:
self.user_config[key] = value
if save:
self.save_user_config()
return
def get(self, key, default=None):
with self.lock:
out = self.cmdline_options.get(key)
if out is None:
out = self.user_config.get(key)
if out is None:
out = self.system_config.get(key, default)
return out
def is_modifiable(self, key):
        return key not in self.cmdline_options
def save_user_config(self):
if not self.path:
return
path = os.path.join(self.path, "config")
s = json.dumps(self.user_config, indent=4, sort_keys=True)
with open(path, "w") as f:
f.write(s)
os.chmod(path, stat.S_IREAD | stat.S_IWRITE)
def get_wallet_path(self):
"""Set the path of the wallet."""
# command line -w option
if self.get('wallet_path'):
return os.path.join(self.get('cwd'), self.get('wallet_path'))
# path in config file
path = self.get('default_wallet_path')
if path and os.path.exists(path):
return path
# default path
dirpath = os.path.join(self.path, "wallets")
if not os.path.exists(dirpath):
if os.path.islink(dirpath):
raise BaseException('Dangling link: ' + dirpath)
os.mkdir(dirpath)
os.chmod(dirpath, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
new_path = os.path.join(self.path, "wallets", "default_wallet")
# default path in pre 1.9 versions
old_path = os.path.join(self.path, "electrum.dat")
if os.path.exists(old_path) and not os.path.exists(new_path):
os.rename(old_path, new_path)
return new_path
def remove_from_recently_open(self, filename):
recent = self.get('recently_open', [])
if filename in recent:
recent.remove(filename)
self.set_key('recently_open', recent)
def set_session_timeout(self, seconds):
self.print_error("session timeout -> %d seconds" % seconds)
self.set_key('session_timeout', seconds)
def get_session_timeout(self):
return self.get('session_timeout', 300)
def open_last_wallet(self):
if self.get('wallet_path') is None:
last_wallet = self.get('gui_last_wallet')
if last_wallet is not None and os.path.exists(last_wallet):
self.cmdline_options['default_wallet_path'] = last_wallet
def save_last_wallet(self, wallet):
if self.get('wallet_path') is None:
path = wallet.storage.path
self.set_key('gui_last_wallet', path)
def max_fee_rate(self):
f = self.get('max_fee_rate', MAX_FEE_RATE)
if f==0:
f = MAX_FEE_RATE
return f
def dynfee(self, i):
if i < 4:
j = FEE_TARGETS[i]
fee = self.fee_estimates.get(j)
else:
assert i == 4
fee = self.fee_estimates.get(2)
if fee is not None:
fee += fee/2
if fee is not None:
fee = min(5*MAX_FEE_RATE, fee)
return fee
def reverse_dynfee(self, fee_per_kb):
import operator
l = list(self.fee_estimates.items()) + [(1, self.dynfee(4))]
dist = map(lambda x: (x[0], abs(x[1] - fee_per_kb)), l)
min_target, min_value = min(dist, key=operator.itemgetter(1))
if fee_per_kb < self.fee_estimates.get(25)/2:
min_target = -1
return min_target
def static_fee(self, i):
return self.fee_rates[i]
def static_fee_index(self, value):
dist = list(map(lambda x: abs(x - value), self.fee_rates))
return min(range(len(dist)), key=dist.__getitem__)
def has_fee_estimates(self):
return len(self.fee_estimates)==4
def is_dynfee(self):
#return self.get('dynamic_fees', True)
return self.get('dynamic_fees', False)
def fee_per_kb(self):
dyn = self.is_dynfee()
if dyn:
fee_rate = self.dynfee(self.get('fee_level', 2))
else:
fee_rate = self.get('fee_per_kb', self.max_fee_rate()/2)
return fee_rate
def estimate_fee(self, size):
return int(self.fee_per_kb() * size / 1000.)
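    # Illustrative example (hypothetical numbers): with fee_per_kb() == 10000 satoshis/kB,
    # estimate_fee(250) returns int(10000 * 250 / 1000.) == 2500 satoshis.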
def update_fee_estimates(self, key, value):
self.fee_estimates[key] = value
self.fee_estimates_last_updated[key] = time.time()
def is_fee_estimates_update_required(self):
"""Checks time since last requested and updated fee estimates.
Returns True if an update should be requested.
"""
now = time.time()
prev_updates = self.fee_estimates_last_updated.values()
oldest_fee_time = min(prev_updates) if prev_updates else 0
stale_fees = now - oldest_fee_time > 7200
old_request = now - self.last_time_fee_estimates_requested > 60
return stale_fees and old_request
def requested_fee_estimates(self):
self.last_time_fee_estimates_requested = time.time()
def get_video_device(self):
device = self.get("video_device", "default")
if device == 'default':
device = ''
return device
def read_system_config(path=SYSTEM_CONFIG_PATH):
"""Parse and return the system config settings in /etc/electrum.conf."""
result = {}
if os.path.exists(path):
import configparser
p = configparser.ConfigParser()
try:
p.read(path)
for k, v in p.items('client'):
result[k] = v
except (configparser.NoSectionError, configparser.MissingSectionHeaderError):
pass
return result
def read_user_config(path):
"""Parse and store the user config settings in electrum.conf into user_config[]."""
if not path:
return {}
config_path = os.path.join(path, "config")
if not os.path.exists(config_path):
return {}
try:
with open(config_path, "r") as f:
data = f.read()
result = json.loads(data)
except:
print_error("Warning: Cannot read config file.", config_path)
return {}
    if not isinstance(result, dict):
return {}
return result
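# Illustrative user config file (hypothetical content of <electrum_dir>/config, written
# by SimpleConfig.save_user_config as indented JSON):
# {
#     "auto_connect": true,
#     "fee_level": 2,
#     "recently_open": []
# }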
|
py | 1a51a2c1b7d8dbfef4be33758edcfb9c4022ecb0 | from django.forms import ModelForm
from .models import Photo
class PhotoForm(ModelForm):
class Meta:
model = Photo
fields = '__all__'
|
py | 1a51a334cb302f8e5ef5da721c38905172b45f02 | """
Pulls data from specified iLO and presents as Prometheus metrics
"""
from __future__ import print_function
from _socket import gaierror
import sys
import os
import hpilo
import time
import prometheus_metrics
from BaseHTTPServer import BaseHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from SocketServer import ForkingMixIn
from prometheus_client import generate_latest, Summary
from urlparse import parse_qs
from urlparse import urlparse
# Create a metric to track time spent and requests made.
REQUEST_TIME = Summary('request_processing_seconds',
'Time spent processing request')
def print_err(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
class ForkingHTTPServer(ForkingMixIn, HTTPServer):
max_children = 30
timeout = 30
class RequestHandler(BaseHTTPRequestHandler):
"""
Endpoint handler
"""
def return_error(self):
self.send_response(500)
self.end_headers()
def _health(self):
# get health at glance
health_at_glance = self.ilo.get_embedded_health()['health_at_a_glance']
if health_at_glance is not None:
for key, value in health_at_glance.items():
for status in value.items():
if status[0] == 'status':
gauge = 'hpilo_{}_gauge'.format(key)
if status[1].upper() == 'OK':
prometheus_metrics.gauges[gauge].labels(
product_name=self.product_name, server_name=self.server_name).set(0)
elif status[1].upper() == 'DEGRADED':
prometheus_metrics.gauges[gauge].labels(
product_name=self.product_name, server_name=self.server_name).set(1)
else:
prometheus_metrics.gauges[gauge].labels(
product_name=self.product_name, server_name=self.server_name).set(2)
def _host_power(self):
_power = self.ilo.get_host_power_status()
_gauge = 'hpilo_{}_gauge'.format('host_power')
if _power == 'ON':
prometheus_metrics.gauges[_gauge].labels(
product_name=self.product_name,
server_name=self.server_name).set(0)
else:
prometheus_metrics.gauges[_gauge].labels(
product_name=self.product_name,
server_name=self.server_name).set(1)
def _firmware(self):
_version = self.ilo.get_fw_version()["firmware_version"]
prometheus_metrics.hpilo_firmware_version.labels(
product_name=self.product_name,
server_name=self.server_name).set(_version)
def _power_readings(self):
(_present, _) = self.ilo.get_power_readings()['present_power_reading']
prometheus_metrics.hpilo_present_power_reading.labels(
product_name=self.product_name, server_name=self.server_name).set(_present)
def do_GET(self):
"""
Process GET request
:return: Response with Prometheus metrics
"""
# get parameters from the URL
_url = urlparse(self.path)
if _url.path == self.server.endpoint:
query_components = parse_qs(urlparse(self.path).query)
_host = None
_port = None
_user = None
_password = None
try:
_host = query_components['target'][0]
except KeyError as e:
print_err("** missing parameter 'target' in url **")
self.return_error()
return
try:
_port = os.environ['ilo_port']
_user = os.environ['ilo_user']
_password = os.environ['ilo_password']
except KeyError as e:
print_err("** missing environment parameter %s **" % e)
self.return_error()
return
self.server_name = _host
self.ilo = None
if _host and _user and _password and _port:
try:
self.ilo = hpilo.Ilo(hostname=_host,
login=_user,
password=_password,
port=int(_port), timeout=10)
except hpilo.IloLoginFailed:
print("ILO login failed")
self.return_error()
except gaierror:
print("ILO invalid address or port")
self.return_error()
except hpilo.IloCommunicationError as e:
print(e)
# this will be used to return the total amount of time the request
# took
start_time = time.time()
try:
self.product_name = self.ilo.get_product_name()
except BaseException:
self.product_name = "Unknown HP Server"
self._health()
self._host_power()
self._firmware()
self._power_readings()
# get the amount of time the request took
REQUEST_TIME.observe(time.time() - start_time)
# generate and publish metrics
metrics = generate_latest(prometheus_metrics.registry)
self.send_response(200)
self.send_header('Content-Type', 'text/plain')
self.end_headers()
self.wfile.write(metrics)
return
# tell users the /metrics endpoint
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write("""<html>
<head><title>HP iLO Exporter</title></head>
<body>
<h1>HP iLO Exporter</h1>
<p>Visit <a href="/metrics">Metrics</a> to use.</p>
</body>
</html>""")
class ILOExporterServer(object):
"""
Basic server implementation that exposes metrics to Prometheus
"""
def __init__(self, address='0.0.0.0', port=8080, endpoint="/metrics"):
self._address = address
self._port = port
self.endpoint = endpoint
def print_info(self):
print_err("Starting exporter on: http://{}:{}{}".format(self._address,
self._port,
self.endpoint))
print_err("Press Ctrl+C to quit")
def run(self):
self.print_info()
server = ForkingHTTPServer((self._address, self._port), RequestHandler)
server.endpoint = self.endpoint
try:
while True:
server.handle_request()
except KeyboardInterrupt:
print_err("Killing exporter")
server.server_close()
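# Minimal usage sketch (not part of the original module; assumes ilo_user, ilo_password
# and ilo_port are exported in the environment):
#   ILOExporterServer(address='0.0.0.0', port=8080, endpoint='/metrics').run()
# and scrape a target iLO with e.g.:
#   curl 'http://localhost:8080/metrics?target=my-ilo-host.example.com'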
|
py | 1a51a412b583aa76c90f3e7dfb27f2a3621cbe62 | from django.contrib import admin
from .models import MRIScan
# Register your models here.
admin.site.register(MRIScan) |
py | 1a51a5a5b0a7603412bc795717dac51c34de7a99 | """
Tests for the company model database migrations
"""
from django_test_migrations.contrib.unittest_case import MigratorTestCase
from InvenTree import helpers
class TestForwardMigrations(MigratorTestCase):
migrate_from = ('company', helpers.getOldestMigrationFile('company'))
migrate_to = ('company', helpers.getNewestMigrationFile('company'))
def prepare(self):
"""
Create some simple Company data, and ensure that it migrates OK
"""
Company = self.old_state.apps.get_model('company', 'company')
Company.objects.create(
name='MSPC',
description='Michael Scotts Paper Company',
is_supplier=True
)
def test_migrations(self):
Company = self.new_state.apps.get_model('company', 'company')
self.assertEqual(Company.objects.count(), 1)
class TestManufacturerField(MigratorTestCase):
"""
Tests for migration 0019 which migrates from old 'manufacturer_name' field to new 'manufacturer' field
"""
migrate_from = ('company', '0018_supplierpart_manufacturer')
migrate_to = ('company', '0019_auto_20200413_0642')
def prepare(self):
"""
Prepare the database by adding some test data 'before' the change:
- Part object
- Company object (supplier)
- SupplierPart object
"""
Part = self.old_state.apps.get_model('part', 'part')
Company = self.old_state.apps.get_model('company', 'company')
SupplierPart = self.old_state.apps.get_model('company', 'supplierpart')
# Create an initial part
part = Part.objects.create(
name='Screw',
description='A single screw'
)
# Create a company to act as the supplier
supplier = Company.objects.create(
name='Supplier',
description='A supplier of parts',
is_supplier=True,
is_customer=False,
)
# Add some SupplierPart objects
SupplierPart.objects.create(
part=part,
supplier=supplier,
SKU='SCREW.001',
manufacturer_name='ACME',
)
SupplierPart.objects.create(
part=part,
supplier=supplier,
SKU='SCREW.002',
manufacturer_name='Zero Corp'
)
self.assertEqual(Company.objects.count(), 1)
def test_company_objects(self):
"""
Test that the new companies have been created successfully
"""
# Two additional company objects should have been created
Company = self.new_state.apps.get_model('company', 'company')
self.assertEqual(Company.objects.count(), 3)
# The new company/ies must be marked as "manufacturers"
acme = Company.objects.get(name='ACME')
self.assertTrue(acme.is_manufacturer)
SupplierPart = self.new_state.apps.get_model('company', 'supplierpart')
parts = SupplierPart.objects.filter(manufacturer=acme)
self.assertEqual(parts.count(), 1)
part = parts.first()
# Checks on the SupplierPart object
self.assertEqual(part.manufacturer_name, 'ACME')
self.assertEqual(part.manufacturer.name, 'ACME')
class TestCurrencyMigration(MigratorTestCase):
"""
Tests for upgrade from basic currency support to django-money
"""
migrate_from = ('company', '0025_auto_20201110_1001')
migrate_to = ('company', '0026_auto_20201110_1011')
def prepare(self):
"""
Prepare some data:
- A part to buy
- A supplier to buy from
- A supplier part
- Multiple currency objects
- Multiple supplier price breaks
"""
Part = self.old_state.apps.get_model('part', 'part')
part = Part.objects.create(
name="PART", description="A purchaseable part",
purchaseable=True,
level=0,
tree_id=0,
lft=0,
rght=0
)
Company = self.old_state.apps.get_model('company', 'company')
supplier = Company.objects.create(name='Supplier', description='A supplier', is_supplier=True)
SupplierPart = self.old_state.apps.get_model('company', 'supplierpart')
sp = SupplierPart.objects.create(part=part, supplier=supplier, SKU='12345')
Currency = self.old_state.apps.get_model('common', 'currency')
aud = Currency.objects.create(symbol='$', suffix='AUD', description='Australian Dollars', value=1.0)
usd = Currency.objects.create(symbol='$', suffix='USD', description='US Dollars', value=1.0)
PB = self.old_state.apps.get_model('company', 'supplierpricebreak')
PB.objects.create(part=sp, quantity=10, cost=5, currency=aud)
PB.objects.create(part=sp, quantity=20, cost=3, currency=aud)
PB.objects.create(part=sp, quantity=30, cost=2, currency=aud)
PB.objects.create(part=sp, quantity=40, cost=2, currency=usd)
PB.objects.create(part=sp, quantity=50, cost=2, currency=usd)
for pb in PB.objects.all():
self.assertIsNone(pb.price)
def test_currency_migration(self):
PB = self.new_state.apps.get_model('company', 'supplierpricebreak')
for pb in PB.objects.all():
# Test that a price has been assigned
self.assertIsNotNone(pb.price)
|
py | 1a51a64da37bba9f0d0c21687022130eca4f2587 | # -*- coding: utf-8 -*-
from os import path
from setuptools import find_packages, setup
README_rst = path.join(path.abspath(path.dirname(__file__)), "README.rst")
with open(README_rst, "r") as f:
long_description = f.read()
setup(
name="pyee",
version="8.2.2",
packages=find_packages(),
include_package_data=True,
description="A port of node.js's EventEmitter to python.",
long_description=long_description,
author="Josh Holbrook",
author_email="[email protected]",
url="https://github.com/jfhbrook/pyee",
license="MIT",
keywords=["events", "emitter", "node.js", "node", "eventemitter", "event_emitter"],
classifiers=[
"Programming Language :: Python",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Other/Nonlisted Topic",
],
)
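# Installation sketch (standard setuptools usage, not part of the original file):
#   pip install .        # from a source checkout
#   pip install pyee     # from PyPI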
|
py | 1a51a6ce3248d390d369c6cb90509724629c80db | import unittest
from siobrultech_protocols.gem import packets
from tests.gem.packet_test_data import assert_packet, read_packet
class TestPacketFormats(unittest.TestCase):
def test_bin32_abs(self):
check_packet("BIN32-ABS.bin", packets.BIN32_ABS)
def test_bin32_net(self):
check_packet("BIN32-NET.bin", packets.BIN32_NET)
def test_bin48_abs(self):
check_packet("BIN48-ABS.bin", packets.BIN48_ABS)
def test_bin48_net(self):
check_packet("BIN48-NET.bin", packets.BIN48_NET)
def test_bin48_net_time(self):
check_packet("BIN48-NET-TIME.bin", packets.BIN48_NET_TIME)
def test_bin48_net_time_tricky(self):
"""BIN48_NET and BIN48_NET_TIME packets both have the same packet type
code, so in order to detect the difference you must try to parse as
BIN48_NET first, and if that fails try BIN48_NET_TIME. However, if
the parser just checks the checksum and not the footer, it's possible
for a BIN48_NET_TIME packet to be mistaken for a BIN48_NET. This is
one such packet."""
try:
parse_packet("BIN48-NET-TIME_tricky.bin", packets.BIN48_NET)
self.fail("Should have thrown")
except packets.MalformedPacketException:
pass
check_packet("BIN48-NET-TIME_tricky.bin", packets.BIN48_NET_TIME)
def test_short_packet(self):
packet = read_packet("BIN32-NET.bin")
with self.assertRaisesRegex(
packets.MalformedPacketException, "Packet too short."
):
packets.BIN32_NET.parse(packet[:-1])
def test_packet_with_extra_after(self):
data = bytearray()
data.extend(read_packet("BIN32-NET.bin"))
data.extend(read_packet("BIN32-ABS.bin"))
packet = packets.BIN32_NET.parse(data)
assert_packet("BIN32-NET.bin", packet)
class TestPacketDeltaComputation(unittest.TestCase):
def test_packet_delta_seconds(self):
packet = parse_packet("BIN32-ABS.bin", packets.BIN32_ABS)
self.assertEqual(997492, packet.seconds)
self.assertEqual(997493, packet.delta_seconds(2 ** 24 - 1))
self.assertEqual(1000000, packet.delta_seconds(2 ** 24 - (1000000 - 997492)))
def test_packet_delta_pulses(self):
packet = parse_packet("BIN48-NET-TIME_tricky.bin", packets.BIN48_NET_TIME)
# All the pulse counts in our packets are 0, so let's fake some out
packet.pulse_counts = [100, 200, 300, 400]
self.assertEqual(
[1100, 1200, 1300, 1400],
[
packet.delta_pulse_count(i, 2 ** 24 - 1000)
for i in range(0, len(packet.pulse_counts))
],
)
def test_packet_delta_absolute_watt_seconds(self):
packet = parse_packet("BIN32-ABS.bin", packets.BIN32_ABS)
self.assertEqual(
[
3123664,
9249700,
195388151,
100917236,
7139112,
1440,
4,
3,
14645520,
111396601,
33259670,
38296448,
1108415,
2184858,
5191049,
1,
71032651,
60190845,
47638292,
12017483,
36186563,
14681918,
69832947,
37693,
60941899,
1685614,
902,
799182,
302590,
3190972,
5,
647375119,
],
packet.absolute_watt_seconds,
)
self.assertEqual(
[
packet.absolute_watt_seconds[i] + 1000
for i in range(0, len(packet.absolute_watt_seconds))
],
[
packet.delta_absolute_watt_seconds(i, 2 ** 40 - 1000)
for i in range(0, len(packet.absolute_watt_seconds))
],
)
def test_packet_delta_polarized_watt_seconds(self):
packet = parse_packet("BIN32-NET.bin", packets.BIN32_NET)
# Packet didn't have any negative numbers, so let's do some manual ones
packet.polarized_watt_seconds = [
-1600 + 100 * i for i in range(0, packet.num_channels)
]
self.assertEqual(
[
packet.polarized_watt_seconds[i] + 1000 + 2 ** 39
for i in range(0, len(packet.polarized_watt_seconds))
],
[
packet.delta_polarized_watt_seconds(i, 2 ** 39 - 1000)
for i in range(0, len(packet.polarized_watt_seconds))
],
)
def check_packet(packet_file_name: str, packet_format: packets.PacketFormat):
packet = parse_packet(packet_file_name, packet_format)
assert_packet(packet_file_name, packet)
def parse_packet(packet_file_name: str, packet_format: packets.PacketFormat):
return packet_format.parse(read_packet(packet_file_name))
if __name__ == "__main__":
unittest.main()
|
py | 1a51a73d17a6cfb224c443f08a396d29b745962e | import sys, time, cv2
from matplotlib import pyplot as plt
sys.path.insert(0, sys.path[0].replace('examples', 'src'))
from robot import Robot
from utils import *
def display_image(image):
"""
    Displays an image with matplotlib.
Args:
image: The BGR image numpy array. See src/utils.py.
"""
plt.imshow(image, cmap = 'gray', interpolation = 'bicubic')
plt.xticks([]), plt.yticks([]) # to hide tick values on X and Y axis
plt.show()
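# Note (added, not in the original example): matplotlib expects RGB data, so if
# vrep2array really returns a BGR array as the docstring says, converting first
# avoids swapped colors, e.g.:
#   img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)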
robot = Robot()
#Reading ultrasonic sensors
ultrassonic = robot.read_ultrassonic_sensors()
print("Ultrassonic: ", ultrassonic)
#Reading laser sensor
laser = robot.read_laser()
print("Laser: ", laser)
#Reading camera
resolution, raw_img = robot.read_vision_sensor()
img = vrep2array(raw_img, resolution)
display_image(img)
|
py | 1a51a85b507f550db2d9ea789bcfad89cec628bd | from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from elements.base import BasePageElement
from locators.checkout import CheckoutPageLocators
class CheckoutOptions(BasePageElement):
def __init__(self, driver):
super().__init__(driver)
WebDriverWait(self.driver, 10).until(
EC.presence_of_element_located(CheckoutPageLocators.GUEST_ACCOUNT))
self.guest_account = self.driver.find_element(*CheckoutPageLocators.GUEST_ACCOUNT)
self.register_account = self.driver.find_element(*CheckoutPageLocators.REGISTER_ACCOUNT)
self.btn_account = self.driver.find_element(*CheckoutPageLocators.BTN_ACCOUNT)
self.btn_login = self.driver.find_element(*CheckoutPageLocators.BTN_LOGIN)
self.email_login = self.driver.find_element(*CheckoutPageLocators.EMAIL_LOGIN)
self.password_login = self.driver.find_element(*CheckoutPageLocators.PASSWORD_LOGIN) |
py | 1a51a9a6b8455b8efe7d45faf3a9a22e9230f127 | # this is chenqi's modification for custom datasets!
# version 2: based on v1, do the following updates:
# (1) in the func test(), instead of kNN on class, do kNN on img index! <-- then each image represents a class during implementation.
# (2) in data-aug for training, replace color jitter with Gaussian blur (+ Gaussian noise?).
# (3) During training simCLR, add fake (gan generated) images to the original dataset to train!
import argparse
import os
import pandas as pd
import torch
import torch.optim as optim
from thop import profile, clever_format
from torch.utils.data import DataLoader
from tqdm import tqdm
#import utils_chenqi # import utils
import utils_chenqi_v2
from model import Model
# newly added:
from PIL import Image
from torchvision import transforms, datasets
# train for one epoch to learn unique features
def train(net, data_loader, train_optimizer):
net.train()
total_loss, total_num, train_bar = 0.0, 0, tqdm(data_loader)
for pos_1, pos_2, target in train_bar: # target.shape: torch.Size([batch_size])
pos_1, pos_2 = pos_1.cuda(non_blocking=True), pos_2.cuda(non_blocking=True) # pos_1.shape: torch.Size([batch_size, img_ch, img_h, img_w])
# note: feature: h (the embedding we want to do NN query), of shape: torch.Size([batch_size, 2048])
# out: z (the projection used to maximize agreement) of shape: torch.Size([batch_size, feature_dim]).
feature_1, out_1 = net(pos_1)
feature_2, out_2 = net(pos_2)
# [2*B, D]
out = torch.cat([out_1, out_2], dim=0) # shape: torch.Size([2*batch_size, feature_dim])
# [2*B, 2*B]
sim_matrix = torch.exp(torch.mm(out, out.t().contiguous()) / temperature)
mask = (torch.ones_like(sim_matrix) - torch.eye(2 * batch_size, device=sim_matrix.device)).bool()
# [2*B, 2*B-1]
sim_matrix = sim_matrix.masked_select(mask).view(2 * batch_size, -1)
# compute loss
pos_sim = torch.exp(torch.sum(out_1 * out_2, dim=-1) / temperature)
# [2*B]
pos_sim = torch.cat([pos_sim, pos_sim], dim=0)
loss = (- torch.log(pos_sim / sim_matrix.sum(dim=-1))).mean()
train_optimizer.zero_grad()
loss.backward()
train_optimizer.step()
total_num += batch_size
total_loss += loss.item() * batch_size
train_bar.set_description('Train Epoch: [{}/{}] Loss: {:.4f}'.format(epoch, epochs, total_loss / total_num))
return total_loss / total_num
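# For reference (added comment): the loss computed above is the NT-Xent objective,
#   l(i, j) = -log( exp(sim(z_i, z_j)/T) / sum_{k != i} exp(sim(z_i, z_k)/T) )
# averaged over both augmented views of every image in the batch.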
# test for one epoch, use weighted knn to find the most similar images' label to assign the test image
def test(net, memory_data_loader, test_data_loader):
net.eval()
total_top1, total_top5, total_num, feature_bank = 0.0, 0.0, 0, []
with torch.no_grad():
# generate feature bank
for data, _, target in tqdm(memory_data_loader, desc='Feature extracting'):
feature, out = net(data.cuda(non_blocking=True))
feature_bank.append(feature)
# [D, N]
feature_bank = torch.cat(feature_bank, dim=0).t().contiguous()
# [N]
feature_labels = torch.tensor(memory_data_loader.dataset.targets, device=feature_bank.device)
# loop test data to predict the label by weighted knn search
test_bar = tqdm(test_data_loader)
for data, _, target in test_bar:
data, target = data.cuda(non_blocking=True), target.cuda(non_blocking=True)
feature, out = net(data)
total_num += data.size(0)
# compute cos similarity between each feature vector and feature bank ---> [B, N]
sim_matrix = torch.mm(feature, feature_bank)
# [B, K]
sim_weight, sim_indices = sim_matrix.topk(k=k, dim=-1)
# [B, K]
sim_labels = torch.gather(feature_labels.expand(data.size(0), -1), dim=-1, index=sim_indices)
sim_weight = (sim_weight / temperature).exp()
# counts for each class
one_hot_label = torch.zeros(data.size(0) * k, c, device=sim_labels.device)
# [B*K, C]
# to check error: for debug:
#torch.max(sim_labels.view(-1, 1)) # cls_num-1
#torch.min(sim_labels.view(-1, 1)) # 0
# error here!!!
one_hot_label = one_hot_label.scatter(dim=-1, index=sim_labels.view(-1, 1), value=1.0)
# weighted score ---> [B, C]
pred_scores = torch.sum(one_hot_label.view(data.size(0), -1, c) * sim_weight.unsqueeze(dim=-1), dim=1)
pred_labels = pred_scores.argsort(dim=-1, descending=True) # torch.Size([26, 102])
total_top1 += torch.sum((pred_labels[:, :1] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()
total_top5 += torch.sum((pred_labels[:, :5] == target.unsqueeze(dim=-1)).any(dim=-1).float()).item()
test_bar.set_description('Test Epoch: [{}/{}] Acc@1:{:.2f}% Acc@5:{:.2f}%'
.format(epoch, epochs, total_top1 / total_num * 100, total_top5 / total_num * 100))
return total_top1 / total_num * 100, total_top5 / total_num * 100
def get_mean_std_forDataset(data_dir,img_size,batch_size,isGray):
# newly added: compute the mean and std for transforms.Normalize using whole dataset:
tmp_data = datasets.ImageFolder(root=data_dir, transform=transforms.Compose([transforms.Resize(img_size),
transforms.CenterCrop(img_size),
transforms.ToTensor()]))
tmp_loader = DataLoader(tmp_data, batch_size=batch_size, shuffle=False, num_workers=16)
mean = 0.
std = 0.
nb_samples = 0.
if not isGray:
for data, _ in tmp_loader:
batch_samples = data.size(0)
data = data.view(batch_samples, data.size(1), -1)
mean += data.mean(2).sum(0)
std += data.std(2).sum(0)
nb_samples += batch_samples
    else:
        # for MNIST-style gray-scale data the per-channel computation is identical,
        # just over a single channel
        for data, _ in tmp_loader:
            batch_samples = data.size(0)
            data = data.view(batch_samples, data.size(1), -1)
            mean += data.mean(2).sum(0)
            std += data.std(2).sum(0)
            nb_samples += batch_samples
    mean /= nb_samples
    std /= nb_samples
    return (mean, std)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train SimCLR')
parser.add_argument('--feature_dim', default=128, type=int, help='Feature dim for latent vector')
parser.add_argument('--temperature', default=0.5, type=float, help='Temperature used in softmax')
parser.add_argument('--k', default=200, type=int, help='Top k most similar images used to predict the label')
parser.add_argument('--batch_size', default=26, type=int, help='Number of images in each mini-batch')
parser.add_argument('--epochs', default=2000, type=int, help='Number of sweeps over the dataset to train')
# newly added:
parser.add_argument('--dataset', default='FLOWER_128', type=str, help='Name of the training dataset, eg, FLOWER_128')
parser.add_argument('--data_dir', default='/eecf/cbcsl/data100b/Chenqi/new_metrics/SimCLR/data/FLOWER_gan/', type=str, help='Dir of the original & GAN generated fake training dataset')
#parser.add_argument('--label_file', default='/eecf/cbcsl/data100b/Chenqi/data/flower_labels.txt', type=str, help='Path to the txt file with class labels')
# maybe also add arg like: choices of data-aug...
# args parse
args = parser.parse_args()
feature_dim, temperature, k = args.feature_dim, args.temperature, args.k
batch_size, epochs = args.batch_size, args.epochs
# newly added:
dataset, data_dir = args.dataset, args.data_dir
img_size = int(dataset.split('_')[-1])
#label_file = args.label_file
# newly added: note: we should compute transforms.Normalize for our custom dataset each time! <-- will later modify it
# also note: for MNIST (gray-scale imgs), needs to modify color jitter & random gray & normalize!! <-- will later modify it
if 'MNIST' not in dataset:
# newly added: compute the mean and std for transforms.Normalize using whole dataset:
img_means, img_stds = get_mean_std_forDataset(data_dir,img_size,batch_size,isGray=False)
if 'FLOWER' in dataset:
train_transform = transforms.Compose([
transforms.Resize(img_size),transforms.CenterCrop(img_size), # NOT use random crop! use resize & center crop!!!
#transforms.RandomHorizontalFlip(p=0.5), # for FLOWER & MNIST: NOT do this!
transforms.GaussianBlur(51, sigma=(0.1, 1.0)), # NOT jitter that much for FLOWER!! Add Gaussian blurring.
#transforms.RandomGrayscale(p=0.2),
            transforms.RandomAffine(degrees=10, translate=None, scale=None, shear=10), # maybe also add affine warping?
transforms.ToTensor(),
transforms.Normalize(img_means, img_stds)]) # ([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]) cifar10
elif 'CelebA' in dataset:
train_transform = transforms.Compose([
transforms.Resize(img_size),transforms.CenterCrop(img_size), # NOT use random crop! use resize & center crop!!!
transforms.RandomHorizontalFlip(p=0.5), # for FLOWER & MNIST: NOT do this!
transforms.GaussianBlur(51, sigma=(0.1, 1.0)), # NOT jitter that much for FLOWER!! Add Gaussian blurring.
transforms.RandomGrayscale(p=0.2),
            #transforms.RandomAffine(degrees=5, translate=None, scale=None, shear=5), # maybe also add affine warping?
transforms.ToTensor(),
transforms.Normalize(img_means, img_stds)]) # ([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]) cifar10
test_transform = transforms.Compose([
transforms.Resize(img_size),
transforms.CenterCrop(img_size),
transforms.ToTensor(),
transforms.Normalize(img_means, img_stds)]) # ([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]) for cifar10
# else:... (for MNIST)
# data prepare
# newly modified: to adjust to custom dataset!
"""
# original old code:
train_data = utils.CIFAR10Pair(root='data', train=True, transform=utils.train_transform, download=True)
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=16, pin_memory=True,
drop_last=True)
"""
# new code for custom dataset:
#train_data = datasets.ImageFolder(root=data_dir, transform=train_transform)
train_data = utils_chenqi_v2.MyCustomDataset_v2(root=data_dir, transform=train_transform)
train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=16, pin_memory=True,
drop_last=True)
"""
# original old code:
memory_data = utils.CIFAR10Pair(root='data', train=True, transform=utils.test_transform, download=True)
memory_loader = DataLoader(memory_data, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)
"""
# new code for custom dataset:
#memory_data = datasets.ImageFolder(root=data_dir, transform=test_transform)
memory_data = utils_chenqi_v2.MyCustomDataset_v2(root=data_dir, transform=test_transform)
memory_loader = DataLoader(memory_data, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)
"""
# original old code:
test_data = utils.CIFAR10Pair(root='data', train=False, transform=utils.test_transform, download=True)
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False, num_workers=16, pin_memory=True)
"""
# new code for custom dataset:
#test_data = datasets.ImageFolder(root=data_dir, transform=test_transform)
test_data = utils_chenqi_v2.MyCustomDataset_v2(root=data_dir, transform=train_transform) # make the testing set to be the transformed original image!!!
test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=16, pin_memory=True)
# model setup and optimizer config
model = Model(feature_dim).cuda()
flops, params = profile(model, inputs=(torch.randn(1, 3, 32, 32).cuda(),))
flops, params = clever_format([flops, params])
print('# Model Params: {} FLOPs: {}'.format(params, flops))
optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-6)
c = len(memory_data.classes)
"""
# for debug:
print('***************** DEBUG *****************')
print('c = ' + str(c))
print('memory_data.classes = ')
print(memory_data.classes)
assert(False)
"""
# training loop
results = {'train_loss': [], 'test_acc@1': [], 'test_acc@5': []}
save_name_pre = '{}_{}_{}_{}_{}'.format(feature_dim, temperature, k, batch_size, epochs)
# newly modified:
if not os.path.exists('results_v2/' + dataset + '/'):
os.mkdir('results_v2/' + dataset + '/')
best_acc = 0.0
for epoch in range(1, epochs + 1):
train_loss = train(model, train_loader, optimizer)
results['train_loss'].append(train_loss)
test_acc_1, test_acc_5 = test(model, memory_loader, test_loader)
results['test_acc@1'].append(test_acc_1)
results['test_acc@5'].append(test_acc_5)
# save statistics
data_frame = pd.DataFrame(data=results, index=range(1, epoch + 1))
# newly modified:
data_frame.to_csv('results_v2/' + dataset + '/' + '{}_statistics.csv'.format(save_name_pre), index_label='epoch')
"""
# original code: only save the "best" model:
if test_acc_1 > best_acc:
best_acc = test_acc_1
# newly modified:
torch.save(model.state_dict(), 'results_v2/' + dataset + '/' + '{}_model.pth'.format(save_name_pre))
"""
# new code: save all the models!!! (while also keep track on the "best" model):
torch.save(model.state_dict(), 'results_v2/' + dataset + '/' + 'epoch{}'.format(epoch) + '_{}_model.pth'.format(save_name_pre))
if test_acc_1 > best_acc:
best_acc = test_acc_1
# newly modified:
torch.save(model.state_dict(), 'results_v2/' + dataset + '/' + 'best_{}_model.pth'.format(save_name_pre))
|
py | 1a51ab0bc241d551f4f33c3652cc2847ffc5fcd2 | import pytest
from mock import MagicMock
from mock import AsyncMock
from datetime import datetime
from robot_server.service.dependencies import get_session_manager
from robot_server.service.errors import RobotServerError
from robot_server.service.session.errors import (
SessionCreationException, UnsupportedCommandException,
CommandExecutionException)
from robot_server.service.session.manager import SessionManager
from robot_server.service.session.models.command import (
SimpleCommandRequest, SimpleCommandResponse, CommandStatus
)
from robot_server.service.session.models.common import EmptyModel
from robot_server.service.session.models.command_definitions import (
ProtocolCommand)
from robot_server.service.session import router
from robot_server.service.session.session_types import BaseSession
@pytest.fixture
def mock_session_manager():
return AsyncMock(spec=SessionManager)
@pytest.fixture
def mock_session():
session = AsyncMock(spec=BaseSession)
session.meta.identifier = "some id"
session.meta.created_at = datetime(2020, 1, 1)
session.meta.create_params = None
session.get_response_model.return_value = {
"createdAt": session.meta.created_at,
"details": EmptyModel(),
"id": session.meta.identifier,
"createParams": session.meta.create_params
}
return session
@pytest.fixture
def sessions_api_client(mock_session_manager, api_client):
"""Test api client that overrides get_session_manager dependency."""
async def get():
return mock_session_manager
api_client.app.dependency_overrides[get_session_manager] = get
return api_client
def test_get_session(mock_session_manager):
"""It gets the session from session manager"""
session_id = "sess"
mock_session = MagicMock()
mock_session_manager.get_by_id.return_value = mock_session
session = router.get_session(mock_session_manager, session_id)
mock_session_manager.get_by_id.called_once_with(session_id)
assert session is mock_session
def test_get_session_not_found(mock_session_manager):
"""It raises an exception if session is not found"""
session_id = "sess"
mock_session_manager.get_by_id.return_value = None
with pytest.raises(RobotServerError):
router.get_session(mock_session_manager, session_id)
def test_sessions_create_error(
sessions_api_client,
mock_session_manager):
"""It raises an error if session manager raises an exception."""
async def raiser(*args, **kwargs):
raise SessionCreationException(
"Please attach pipettes before proceeding"
)
mock_session_manager.add.side_effect = raiser
response = sessions_api_client.post("/sessions", json={
"data": {
"sessionType": "liveProtocol"
}
})
assert response.json() == {
'errors': [{
'id': 'UncategorizedError',
'detail': "Please attach pipettes before proceeding",
'title': 'Action Forbidden'}
]}
assert response.status_code == 403
def test_sessions_create(
sessions_api_client,
mock_session_manager,
mock_session):
"""It creates a session."""
mock_session_manager.add.return_value = mock_session
response = sessions_api_client.post("/sessions", json={
"data": {
"sessionType": "liveProtocol"
}
})
assert response.json() == {
'data': {
'details': {},
'sessionType': 'liveProtocol',
'createdAt': mock_session.meta.created_at.isoformat(),
'createParams': None,
'id': mock_session.meta.identifier,
},
'links': {
'commandExecute': {
'href': f'/sessions/{mock_session.meta.identifier}/commands/execute', # noqa: E501
'meta': None,
},
'self': {
'href': f'/sessions/{mock_session.meta.identifier}',
'meta': None,
},
'sessions': {
'href': '/sessions', 'meta': None,
},
'sessionById': {
'href': '/sessions/{sessionId}', 'meta': None,
}
}
}
assert response.status_code == 201
def test_sessions_delete_not_found(
sessions_api_client,
mock_session_manager):
"""It fails when session is not found"""
mock_session_manager.get_by_id.return_value = None
response = sessions_api_client.delete("/sessions/check")
assert response.json() == {
'errors': [{
'id': 'UncategorizedError',
'title': 'Resource Not Found',
'detail': "Resource type 'session' with id 'check' was not found",
}],
'links': {
'self': {'href': '/sessions'},
'sessionById': {'href': '/sessions/{sessionId}'}
},
}
assert response.status_code == 404
def test_sessions_delete(
sessions_api_client,
mock_session_manager,
mock_session):
"""It deletes a found session."""
mock_session_manager.get_by_id.return_value = mock_session
response = sessions_api_client.delete(
f"/sessions/{mock_session.meta.identifier}")
mock_session_manager.remove.assert_called_once_with(
mock_session.meta.identifier)
assert response.json() == {
'data': {
'details': {},
'sessionType': 'liveProtocol',
'createdAt': mock_session.meta.created_at.isoformat(),
'createParams': None,
'id': mock_session.meta.identifier
},
'links': {
'self': {
'href': '/sessions', 'meta': None,
},
'sessionById': {
'href': '/sessions/{sessionId}', 'meta': None,
},
}
}
assert response.status_code == 200
def test_sessions_get_not_found(
mock_session_manager,
sessions_api_client):
"""It returns an error when session is not found."""
mock_session_manager.get_by_id.return_value = None
response = sessions_api_client.get("/sessions/1234")
assert response.json() == {
'errors': [{
'id': 'UncategorizedError',
'detail': "Resource type 'session' with id '1234' was not found",
'title': 'Resource Not Found'
}],
'links': {
'self': {'href': '/sessions'},
'sessionById': {'href': '/sessions/{sessionId}'}
},
}
assert response.status_code == 404
def test_sessions_get(
sessions_api_client,
mock_session_manager,
mock_session):
"""It returns the found session."""
mock_session_manager.get_by_id.return_value = mock_session
response = sessions_api_client.get(
f"/sessions/{mock_session.meta.identifier}")
assert response.json() == {
'data': {
'details': {},
'sessionType': 'liveProtocol',
'createdAt': mock_session.meta.created_at.isoformat(),
'createParams': None,
'id': mock_session.meta.identifier
},
'links': {
'commandExecute': {
'href': f'/sessions/{mock_session.meta.identifier}/commands/execute', # noqa: E501
'meta': None,
},
'self': {
'href': f'/sessions/{mock_session.meta.identifier}',
'meta': None,
},
'sessions': {
'href': '/sessions',
'meta': None,
},
'sessionById': {
'href': '/sessions/{sessionId}',
'meta': None,
}
}
}
assert response.status_code == 200
def test_sessions_get_all_no_sessions(
sessions_api_client,
mock_session_manager):
"""It returns a response when there are no sessions."""
mock_session_manager.get.return_value = []
response = sessions_api_client.get("/sessions")
assert response.json() == {
'data': [], 'links': None
}
assert response.status_code == 200
def test_sessions_get_all(
sessions_api_client,
mock_session_manager,
mock_session):
"""It returns the sessions."""
mock_session_manager.get.return_value = [mock_session]
response = sessions_api_client.get("/sessions")
assert response.json() == {
'data': [{
'details': {},
'sessionType': 'liveProtocol',
'createdAt': mock_session.meta.created_at.isoformat(),
'createParams': None,
'id': mock_session.meta.identifier
}], 'links': None
}
assert response.status_code == 200
def test_sessions_execute_command_no_session(
sessions_api_client,
mock_session_manager):
"""It rejects command if there's no session"""
mock_session_manager.get_by_id.return_value = None
response = sessions_api_client.post(
"/sessions/1234/commands/execute",
json={
"data": {
"command": "protocol.pause",
"data": {}
}
}
)
mock_session_manager.get_by_id.assert_called_once_with("1234")
assert response.json() == {
'errors': [{
'id': 'UncategorizedError',
'title': 'Resource Not Found',
'detail': "Resource type 'session' with id '1234' was not found", # noqa: E501
}],
'links': {
'self': {'href': '/sessions'},
'sessionById': {'href': '/sessions/{sessionId}'}
},
}
assert response.status_code == 404
def test_sessions_execute_command(
sessions_api_client,
mock_session_manager,
mock_session):
"""It accepts the session command"""
mock_session_manager.get_by_id.return_value = mock_session
mock_session.execute_command.return_value = SimpleCommandResponse(
id="44",
command=ProtocolCommand.pause,
data=EmptyModel(),
createdAt=datetime(2020, 1, 2),
startedAt=datetime(2020, 1, 3),
completedAt=datetime(2020, 1, 4),
status=CommandStatus.executed
)
response = sessions_api_client.post(
f"/sessions/{mock_session.meta.identifier}/commands/execute",
json={
"data": {
"command": "protocol.pause",
"data": {}
}
}
)
mock_session.execute_command.assert_called_once_with(
SimpleCommandRequest(command=ProtocolCommand.pause,
data=EmptyModel())
)
assert response.json() == {
'data': {
'command': 'protocol.pause',
'data': {},
'status': 'executed',
'createdAt': '2020-01-02T00:00:00',
'startedAt': '2020-01-03T00:00:00',
'completedAt': '2020-01-04T00:00:00',
'result': None,
'id': "44",
},
'links': {
'commandExecute': {
'href': f'/sessions/{mock_session.meta.identifier}/commands/execute', # noqa: E501
'meta': None,
},
'self': {
'href': f'/sessions/{mock_session.meta.identifier}',
'meta': None,
},
'sessions': {
'href': '/sessions',
'meta': None,
},
'sessionById': {
'href': '/sessions/{sessionId}',
'meta': None,
},
},
}
assert response.status_code == 200
@pytest.mark.parametrize(argnames="exception,expected_status",
argvalues=[
[UnsupportedCommandException, 403],
[CommandExecutionException, 403],
])
def test_execute_command_error(sessions_api_client,
mock_session_manager,
mock_session,
exception,
expected_status):
"""Test that we handle executor errors correctly"""
mock_session_manager.get_by_id.return_value = mock_session
async def raiser(*args, **kwargs):
raise exception("Cannot do it")
mock_session.execute_command.side_effect = raiser
response = sessions_api_client.post(
f"/sessions/{mock_session.meta.identifier}/commands/execute",
json={
'data': {
'command': 'protocol.pause',
'data': {}
}
}
)
assert response.json() == {
'errors': [
{
'detail': 'Cannot do it',
'title': 'Action Forbidden',
'id': 'UncategorizedError',
}
]
}
assert response.status_code == expected_status
def test_execute_command_session_inactive(
sessions_api_client,
mock_session_manager,
mock_session,
):
"""Test that only the active session can execute commands"""
mock_session_manager.get_by_id.return_value = mock_session
mock_session_manager.is_active.return_value = False
response = sessions_api_client.post(
f"/sessions/{mock_session.meta.identifier}/commands/execute",
json={
'data': {
'command': 'protocol.pause',
'data': {}
}
}
)
mock_session_manager.is_active.assert_called_once_with(
mock_session.meta.identifier)
assert response.json() == {
'errors': [
{
'id': 'UncategorizedError',
'title': 'Action Forbidden',
'detail': f"Session '{mock_session.meta.identifier}'"
f" is not active. Only the active session can "
f"execute commands"
}
]
}
assert response.status_code == 403
|
py | 1a51ab2d529452729b3893becf356aa3cbd51e0f | # coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class InstanceConfigurationInstanceSourceDetails(object):
"""
InstanceConfigurationInstanceSourceDetails model.
"""
def __init__(self, **kwargs):
"""
Initializes a new InstanceConfigurationInstanceSourceDetails object with values from keyword arguments. This class has the following subclasses and if you are using this class as input
to a service operations then you should favor using a subclass over the base class:
* :class:`~oci.core.models.InstanceConfigurationInstanceSourceViaImageDetails`
* :class:`~oci.core.models.InstanceConfigurationInstanceSourceViaBootVolumeDetails`
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param source_type:
The value to assign to the source_type property of this InstanceConfigurationInstanceSourceDetails.
:type source_type: str
"""
self.swagger_types = {
'source_type': 'str'
}
self.attribute_map = {
'source_type': 'sourceType'
}
self._source_type = None
@staticmethod
def get_subtype(object_dictionary):
"""
Given the hash representation of a subtype of this class,
use the info in the hash to return the class of the subtype.
"""
type = object_dictionary['sourceType']
if type == 'image':
return 'InstanceConfigurationInstanceSourceViaImageDetails'
if type == 'bootVolume':
return 'InstanceConfigurationInstanceSourceViaBootVolumeDetails'
else:
return 'InstanceConfigurationInstanceSourceDetails'
@property
def source_type(self):
"""
**[Required]** Gets the source_type of this InstanceConfigurationInstanceSourceDetails.
The source type for the instance.
Use `image` when specifying the image OCID. Use `bootVolume` when specifying
the boot volume OCID.
:return: The source_type of this InstanceConfigurationInstanceSourceDetails.
:rtype: str
"""
return self._source_type
@source_type.setter
def source_type(self, source_type):
"""
Sets the source_type of this InstanceConfigurationInstanceSourceDetails.
The source type for the instance.
Use `image` when specifying the image OCID. Use `bootVolume` when specifying
the boot volume OCID.
:param source_type: The source_type of this InstanceConfigurationInstanceSourceDetails.
:type: str
"""
self._source_type = source_type
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
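# Illustrative sketch: get_subtype() reads the 'sourceType' discriminator from a
# wire-format dict and returns the *name* of the matching concrete subclass as a
# string.  The payload dict below is hypothetical and used only for demonstration.
def _example_resolve_subtype():
    payload = {'sourceType': 'bootVolume'}
    # -> 'InstanceConfigurationInstanceSourceViaBootVolumeDetails'
    return InstanceConfigurationInstanceSourceDetails.get_subtype(payload)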
|
py | 1a51ac4dc0959cbc3b384f9375b17f6199488ff0 | # Generated by Django 3.0.5 on 2020-05-03 13:17
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='SafariAccount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('email', models.EmailField(max_length=254)),
('password', models.CharField(max_length=255)),
('is_expired', models.BooleanField(default=False)),
('date_created', models.DateTimeField(auto_now_add=True)),
('date_expired', models.DateTimeField(blank=True, null=True)),
],
),
]
|
py | 1a51ad3b649d06191433498b12b4f496d09f17b5 | from django.utils.deprecation import MiddlewareMixin  # base class for middleware
from django.core.cache import cache
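# Minimal sketch of a get_online_count() helper, assuming one exists: the
# commented-out call inside process_request() below hints at such a helper but
# none is defined in this snippet.  This version derives the count from the
# same cache keys the middleware maintains.
def get_online_count():
    """Return the number of IPs whose per-IP cache entries are still alive."""
    online_ips = cache.get("online_ips", [])
    if not online_ips:
        return 0
    # cache.get_many() silently drops expired keys, so its length is the live count.
    return len(cache.get_many(online_ips))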
class CountMiddleware(MiddlewareMixin):
    # A middleware class must accept a response argument, i.e. it must define an __init__ and a __call__ method:
#def __init__(self, get_response):
#self.get_response = get_response
#def __call__(self, request):
#return self.get_response(request)
    def process_request(self, request):  # runs before the URL request is dispatched to a view
#self.online_ips = get_online_count()
        # Get the client IP and store it in the cache
if 'HTTP_X_FORWARDED_FOR' in request.META:
ip = request.META['HTTP_X_FORWARDED_FOR']
else:
ip = request.META['REMOTE_ADDR']
        # Use the IP as the cache key and reset its timer; the entry expires after 5 minutes
cache.set(ip, 0, 5 * 60)
        # online_ips holds every IP whose cache entry has not expired yet
online_ips = cache.get("online_ips", [])
if online_ips:
            # From the stored IP list, keep only the cache keys that are still alive, i.e. the live IPs (refreshes online_ips)
online_ips = cache.get_many(online_ips).keys()
            # online_ips is a dict_keys object at this point, so convert it to a list
            online_ips = list(online_ips)
        # Add the new IP to the list if it is not already there
if ip not in online_ips:
online_ips.append(ip)
        # Store the complete IP list under the "online_ips" key
cache.set("online_ips", online_ips) |
py | 1a51ae5b0cb646c1f74a982cf5ed51a25462b23f | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class PacketCapturesOperations(object):
"""PacketCapturesOperations operations.
You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2018-08-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-08-01"
self.config = config
def _create_initial(
self, resource_group_name, network_watcher_name, packet_capture_name, parameters, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.create.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(parameters, 'PacketCapture')
# Construct and send request
request = self._client.put(url, query_parameters, header_parameters, body_content)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [201]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 201:
deserialized = self._deserialize('PacketCaptureResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def create(
self, resource_group_name, network_watcher_name, packet_capture_name, parameters, custom_headers=None, raw=False, polling=True, **operation_config):
"""Create and start a packet capture on the specified VM.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:param parameters: Parameters that define the create packet capture
operation.
:type parameters: ~azure.mgmt.network.v2018_08_01.models.PacketCapture
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns PacketCaptureResult or
ClientRawResponse<PacketCaptureResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_08_01.models.PacketCaptureResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_08_01.models.PacketCaptureResult]]
:raises:
:class:`ErrorResponseException<azure.mgmt.network.v2018_08_01.models.ErrorResponseException>`
"""
raw_result = self._create_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
parameters=parameters,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('PacketCaptureResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}
def get(
self, resource_group_name, network_watcher_name, packet_capture_name, custom_headers=None, raw=False, **operation_config):
"""Gets a packet capture session by name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: PacketCaptureResult or ClientRawResponse if raw=true
:rtype: ~azure.mgmt.network.v2018_08_01.models.PacketCaptureResult or
~msrest.pipeline.ClientRawResponse
:raises:
:class:`ErrorResponseException<azure.mgmt.network.v2018_08_01.models.ErrorResponseException>`
"""
# Construct URL
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PacketCaptureResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}
def _delete_initial(
self, resource_group_name, network_watcher_name, packet_capture_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.delete.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.delete(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [202, 204]:
raise models.ErrorResponseException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def delete(
self, resource_group_name, network_watcher_name, packet_capture_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Deletes the specified packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
:class:`ErrorResponseException<azure.mgmt.network.v2018_08_01.models.ErrorResponseException>`
"""
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}
def _stop_initial(
self, resource_group_name, network_watcher_name, packet_capture_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.stop.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ErrorResponseException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def stop(
self, resource_group_name, network_watcher_name, packet_capture_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Stops a specified packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param packet_capture_name: The name of the packet capture session.
:type packet_capture_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns None or
ClientRawResponse<None> if raw==True
:rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
:raises:
:class:`ErrorResponseException<azure.mgmt.network.v2018_08_01.models.ErrorResponseException>`
"""
raw_result = self._stop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'}
def _get_status_initial(
self, resource_group_name, network_watcher_name, packet_capture_name, custom_headers=None, raw=False, **operation_config):
# Construct URL
url = self.get_status.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.post(url, query_parameters, header_parameters)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', response)
if response.status_code == 202:
deserialized = self._deserialize('PacketCaptureQueryStatusResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
def get_status(
self, resource_group_name, network_watcher_name, packet_capture_name, custom_headers=None, raw=False, polling=True, **operation_config):
"""Query the status of a running packet capture session.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param packet_capture_name: The name given to the packet capture
session.
:type packet_capture_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
PacketCaptureQueryStatusResult or
ClientRawResponse<PacketCaptureQueryStatusResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.network.v2018_08_01.models.PacketCaptureQueryStatusResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.network.v2018_08_01.models.PacketCaptureQueryStatusResult]]
:raises:
:class:`ErrorResponseException<azure.mgmt.network.v2018_08_01.models.ErrorResponseException>`
"""
raw_result = self._get_status_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
packet_capture_name=packet_capture_name,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
deserialized = self._deserialize('PacketCaptureQueryStatusResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
get_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'}
def list(
self, resource_group_name, network_watcher_name, custom_headers=None, raw=False, **operation_config):
"""Lists all packet capture sessions within the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of PacketCaptureResult
:rtype:
~azure.mgmt.network.v2018_08_01.models.PacketCaptureResultPaged[~azure.mgmt.network.v2018_08_01.models.PacketCaptureResult]
:raises:
:class:`ErrorResponseException<azure.mgmt.network.v2018_08_01.models.ErrorResponseException>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.PacketCaptureResultPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures'}
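# Hedged usage sketch: the long-running operations above return an LROPoller
# when polling is enabled, and a typical caller simply blocks on .result().
# 'network_client' is assumed to be an already-constructed management client
# exposing these operations through a 'packet_captures' attribute, and
# 'pc_params' is assumed to be a prepared PacketCapture model instance.
def _example_start_packet_capture(network_client, pc_params):
    poller = network_client.packet_captures.create(
        resource_group_name='my-rg',
        network_watcher_name='my-watcher',
        packet_capture_name='my-capture',
        parameters=pc_params,
    )
    return poller.result()  # waits until the ARM operation completes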
|
py | 1a51aea51075f0751b703de91a2ceb6997fcb9fa | ################################################################
### various add-ons to the SciPy morphology package
################################################################
from numpy import *
import pylab
from pylab import *
from scipy.ndimage import morphology,measurements,filters
from scipy.ndimage.morphology import *
from .toplevel import *
@checks(ABINARY2)
def label(image,**kw):
"""Redefine the scipy.ndimage.measurements.label function to
work with a wider range of data types. The default function
is inconsistent about the data types it accepts on different
platforms."""
try: return measurements.label(image,**kw)
except: pass
types = ["int32","uint32","int64","uint64","int16","uint16"]
for t in types:
try: return measurements.label(array(image,dtype=t),**kw)
except: pass
# let it raise the same exception as before
return measurements.label(image,**kw)
@checks(AINT2)
def find_objects(image,**kw):
"""Redefine the scipy.ndimage.measurements.find_objects function to
work with a wider range of data types. The default function
is inconsistent about the data types it accepts on different
platforms."""
try: return measurements.find_objects(image,**kw)
except: pass
types = ["int32","uint32","int64","uint64","int16","uint16"]
for t in types:
try: return measurements.find_objects(array(image,dtype=t),**kw)
except: pass
# let it raise the same exception as before
return measurements.find_objects(image,**kw)
def check_binary(image):
assert image.dtype=='B' or image.dtype=='i' or image.dtype==dtype('bool'),\
"array should be binary, is %s %s"%(image.dtype,image.shape)
assert amin(image)>=0 and amax(image)<=1,\
"array should be binary, has values %g to %g"%(amin(image),amax(image))
@checks(ABINARY2,uintpair)
def r_dilation(image,size,origin=0):
"""Dilation with rectangular structuring element using maximum_filter"""
return filters.maximum_filter(image,size,origin=origin)
@checks(ABINARY2,uintpair)
def r_erosion(image,size,origin=0):
"""Erosion with rectangular structuring element using maximum_filter"""
return filters.minimum_filter(image,size,origin=origin, mode='constant', cval=1)
@checks(ABINARY2,uintpair)
def r_opening(image,size,origin=0):
"""Opening with rectangular structuring element using maximum/minimum filter"""
check_binary(image)
image = r_erosion(image,size,origin=origin)
return r_dilation(image,size,origin=origin)
@checks(ABINARY2,uintpair)
def r_closing(image,size,origin=0):
"""Closing with rectangular structuring element using maximum/minimum filter"""
check_binary(image)
    image = r_dilation(image,size,origin=origin)
    return r_erosion(image,size,origin=origin)
@checks(ABINARY2,uintpair)
def rb_dilation(image,size,origin=0):
"""Binary dilation using linear filters."""
output = zeros(image.shape,'f')
filters.uniform_filter(image,size,output=output,origin=origin)
return array(output>0,'i')
@checks(ABINARY2,uintpair)
def rb_erosion(image,size,origin=0):
"""Binary erosion using linear filters."""
output = zeros(image.shape,'f')
filters.uniform_filter(image,size,output=output,origin=origin, mode='constant', cval=1)
return array(output==1,'i')
@checks(ABINARY2,uintpair)
def rb_opening(image,size,origin=0):
"""Binary opening using linear filters."""
image = rb_erosion(image,size,origin=origin)
return rb_dilation(image,size,origin=origin)
@checks(ABINARY2,uintpair)
def rb_closing(image,size,origin=0):
"""Binary closing using linear filters."""
image = rb_dilation(image,size,origin=origin)
return rb_erosion(image,size,origin=origin)
@checks(GRAYSCALE,uintpair)
def rg_dilation(image,size,origin=0):
"""Grayscale dilation with maximum/minimum filters."""
return filters.maximum_filter(image,size,origin=origin)
@checks(GRAYSCALE,uintpair)
def rg_erosion(image,size,origin=0):
"""Grayscale erosion with maximum/minimum filters."""
return filters.minimum_filter(image,size,origin=origin, mode='constant', cval=1)
@checks(GRAYSCALE,uintpair)
def rg_opening(image,size,origin=0):
"""Grayscale opening with maximum/minimum filters."""
image = r_erosion(image,size,origin=origin)
return r_dilation(image,size,origin=origin)
@checks(GRAYSCALE,uintpair)
def rg_closing(image,size,origin=0):
"""Grayscale closing with maximum/minimum filters."""
    image = r_dilation(image,size,origin=origin)
    return r_erosion(image,size,origin=origin)
@checks(SEGMENTATION)
def showlabels(x,n=7):
pylab.imshow(where(x>0,x%n+1,0),cmap=pylab.cm.gist_stern)
@checks(SEGMENTATION)
def spread_labels(labels,maxdist=9999999):
"""Spread the given labels to the background"""
distances,features = morphology.distance_transform_edt(labels==0,return_distances=1,return_indices=1)
indexes = features[0]*labels.shape[1]+features[1]
spread = labels.ravel()[indexes.ravel()].reshape(*labels.shape)
spread *= (distances<maxdist)
return spread
@checks(ABINARY2,ABINARY2)
def keep_marked(image,markers):
"""Given a marker image, keep only the connected components
that overlap the markers."""
labels,_ = label(image)
marked = unique(labels*(markers!=0))
kept = in1d(labels.ravel(),marked)
return (image!=0)*kept.reshape(*labels.shape)
@checks(ABINARY2,ABINARY2)
def remove_marked(image,markers):
"""Given a marker image, remove all the connected components
that overlap markers."""
marked = keep_marked(image,markers)
return image*(marked==0)
@checks(SEGMENTATION,SEGMENTATION)
def correspondences(labels1,labels2):
"""Given two labeled images, compute an array giving the correspondences
between labels in the two images (as tuples of label in `labels1`,
label in `labels2`, and pixel count)."""
q = 100000
assert amin(labels1)>=0 and amin(labels2)>=0
assert amax(labels2)<q
combo = labels1*q+labels2
result, counts = unique(combo, return_counts=True)
result = array([result//q,result%q,counts])
return result
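# Illustrative sketch: on two tiny label images, correspondences() returns a
# 3xN integer array; each column is (label in labels1, label in labels2,
# overlapping pixel count).  The arrays below are made up for demonstration.
def _correspondences_demo():
    a = array([[1, 1, 0], [2, 2, 0]], 'i')
    b = array([[1, 3, 0], [3, 3, 0]], 'i')
    return correspondences(a, b)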
@checks(ABINARY2,SEGMENTATION)
def propagate_labels_simple(regions,labels):
"""Given an image and a set of labels, apply the labels
to all the connected components in the image that overlap a label."""
rlabels,_ = label(regions)
cors = correspondences(rlabels,labels)
outputs = zeros(amax(rlabels)+1,'i')
for o,i,_ in cors.T: outputs[o] = i
outputs[0] = 0
return outputs[rlabels]
@checks(ABINARY2,SEGMENTATION)
def propagate_labels_majority(image,labels):
"""Given an image and a set of labels, apply the labels
to all the connected components in the image that overlap a label.
For each component that has a conflict, select the label
with the largest overlap."""
rlabels,_ = label(image)
cors = correspondences(rlabels,labels)
outputs = zeros(amax(rlabels)+1,'i')
counts = zeros(amax(rlabels)+1,'i')
for rlabel, label_, count in cors.T:
if not rlabel or not label_:
# ignore background correspondences
continue
if counts[rlabel] < count:
outputs[rlabel] = label_
counts[rlabel] = count
outputs[0] = 0
return outputs[rlabels]
@checks(ABINARY2,SEGMENTATION)
def propagate_labels(image,labels,conflict=0):
"""Given an image and a set of labels, apply the labels
to all the connected components in the image that overlap a label.
Assign the value `conflict` to any components that have a conflict."""
rlabels,_ = label(image)
cors = correspondences(rlabels,labels)
outputs = zeros(amax(rlabels)+1,'i')
oops = -(1<<30)
for o,i,_ in cors.T:
if outputs[o]!=0: outputs[o] = oops
else: outputs[o] = i
outputs[outputs==oops] = conflict
outputs[0] = 0
return outputs[rlabels]
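# Illustrative sketch (made-up arrays): the first component below overlaps only
# label 1 and inherits it, the middle component touches both labels 1 and 2 and
# therefore gets the 'conflict' value, and the last one overlaps only
# background and stays 0.
def _propagate_labels_demo():
    image = array([[1, 1, 0, 1, 1, 1, 0, 1]], 'i')
    labels = array([[1, 0, 0, 1, 0, 2, 0, 0]], 'i')
    return propagate_labels(image, labels, conflict=99)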
@checks(ABINARY2,True)
def select_regions(binary,f,min=0,nbest=100000):
"""Given a scoring function f over slice tuples (as returned by
find_objects), keeps at most nbest components whose scores is higher
than min."""
labels,n = label(binary)
objects = find_objects(labels)
scores = [f(o) for o in objects]
best = argsort(scores)
keep = zeros(len(objects)+1,'i')
if nbest > 0:
for i in best[-nbest:]:
if scores[i]<=min: continue
keep[i+1] = 1
# print scores,best[-nbest:],keep
# print sorted(list(set(labels.ravel())))
# print sorted(list(set(keep[labels].ravel())))
return keep[labels]
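# Illustrative sketch: keep at most the two largest connected components of a
# binary image, scoring every candidate by the area of its bounding box.
def _select_largest_components(binary):
    def bbox_area(o):
        return (o[0].stop - o[0].start) * (o[1].stop - o[1].start)
    return select_regions(binary, bbox_area, min=0, nbest=2)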
@checks(SEGMENTATION)
def all_neighbors(image):
"""Given an image with labels, find all pairs of labels
that are directly neighboring each other."""
q = 100000
assert amax(image)<q
assert amin(image)>=0
u = unique(q*image+roll(image,1,0))
d = unique(q*image+roll(image,-1,0))
l = unique(q*image+roll(image,1,1))
r = unique(q*image+roll(image,-1,1))
all = unique(r_[u,d,l,r])
all = c_[all//q,all%q]
all = unique(array([sorted(x) for x in all]))
return all
################################################################
### Iterate through the regions of a color image.
################################################################
@checks(SEGMENTATION)
def renumber_labels_ordered(a,correspondence=0):
"""Renumber the labels of the input array in numerical order so
that they are arranged from 1...N"""
assert amin(a)>=0
assert amax(a)<=2**25
labels = sorted(unique(ravel(a)))
renum = zeros(amax(labels)+1,dtype='i')
renum[labels] = arange(len(labels),dtype='i')
if correspondence:
return renum[a],labels
else:
return renum[a]
@checks(SEGMENTATION)
def renumber_labels(a):
"""Alias for renumber_labels_ordered"""
return renumber_labels_ordered(a)
def pyargsort(seq,cmp=None,key=lambda x:x):
    """Like numpy's argsort, but using the builtin Python sorting
    function.  Takes an optional cmp."""
    if cmp is not None:
        # Python 3's sorted() no longer accepts a cmp argument, so adapt it.
        from functools import cmp_to_key
        return sorted(list(range(len(seq))),key=cmp_to_key(lambda i,j:cmp(key(seq[i]),key(seq[j]))))
    return sorted(list(range(len(seq))),key=lambda x:key(seq.__getitem__(x)))
@checks(SEGMENTATION)
def renumber_by_xcenter(seg):
"""Given a segmentation (as a color image), change the labels
assigned to each region such that when the labels are considered
in ascending sequence, the x-centers of their bounding boxes
are non-decreasing. This is used for sorting the components
of a segmented text line into left-to-right reading order."""
objects = [(slice(0,0),slice(0,0))]+find_objects(seg)
def xc(o):
# if some labels of the segmentation are missing, we
# return a very large xcenter, which will move them all
# the way to the right (they don't show up in the final
# segmentation anyway)
if o is None: return 999999
return mean((o[1].start,o[1].stop))
xs = array([xc(o) for o in objects])
order = argsort(xs)
segmap = zeros(amax(seg)+1,'i')
for i,j in enumerate(order): segmap[j] = i
return segmap[seg]
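# Illustrative sketch: labels 2 and 1 appear right-to-left in the tiny
# segmentation below, so renumber_by_xcenter() relabels them into
# left-to-right reading order (the result is [[1, 1, 0, 2, 2]]).
def _renumber_by_xcenter_demo():
    seg = array([[2, 2, 0, 1, 1]], 'i')
    return renumber_by_xcenter(seg)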
@checks(SEGMENTATION)
def ordered_by_xcenter(seg):
"""Verify that the labels of a segmentation are ordered
spatially (as determined by the x-center of their bounding
boxes) in left-to-right reading order."""
objects = [(slice(0,0),slice(0,0))]+find_objects(seg)
def xc(o): return mean((o[1].start,o[1].stop))
xs = array([xc(o) for o in objects])
for i in range(1,len(xs)):
if xs[i-1]>xs[i]: return 0
return 1
|
py | 1a51af1f853a31846f8b8e31e822a7aa1f551f3c | import os, sys, tempfile
import datetime, time, re
from seiscomp import mseedlite as mseed
def _timeparse(t, format):
"""Parse a time string that might contain fractions of a second.
Fractional seconds are supported using a fragile, miserable hack.
Given a time string like '02:03:04.234234' and a format string of
'%H:%M:%S', time.strptime() will raise a ValueError with this
message: 'unconverted data remains: .234234'. If %S is in the
format string and the ValueError matches as above, a datetime
object will be created from the part that matches and the
microseconds in the time string.
"""
try:
return datetime.datetime(*time.strptime(t, format)[0:6]).time()
except ValueError as msg:
if "%S" in format:
msg = str(msg)
mat = re.match(r"unconverted data remains:"
" \.([0-9]{1,6})$", msg)
if mat is not None:
# fractional seconds are present - this is the style
# used by datetime's isoformat() method
frac = "." + mat.group(1)
t = t[:-len(frac)]
t = datetime.datetime(*time.strptime(t, format)[0:6])
microsecond = int(float(frac)*1e6)
return t.replace(microsecond=microsecond)
else:
mat = re.match(r"unconverted data remains:"
" \,([0-9]{3,3})$", msg)
if mat is not None:
# fractional seconds are present - this is the style
# used by the logging module
frac = "." + mat.group(1)
t = t[:-len(frac)]
t = datetime.datetime(*time.strptime(t, format)[0:6])
microsecond = int(float(frac)*1e6)
return t.replace(microsecond=microsecond)
raise
def timeparse(t):
return _timeparse(t, "%Y/%m/%d %H:%M:%S")
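# Illustrative sketch: _timeparse() accepts fractional seconds that a plain
# time.strptime() call would reject.  The timestamp below is an arbitrary
# example; note that the fractional branch returns a datetime object (with
# .microsecond set), while the non-fractional branch returns a time object.
def _timeparse_example():
    t = _timeparse("2020/01/02 03:04:05.250000", "%Y/%m/%d %H:%M:%S")
    return t.microsecond  # 250000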
class Input(mseed.Input):
def __init__(self, server, streams,
stime=None, etime=None, timeout=None, verbose=0):
# XXX Add the possibility for supplying stime and etime as
# individual times for each stream.
"""
'streams' must be a list containing tuples of (net,sta,loc,cha)
"""
import subprocess
streams = [ "%-3s %5s %s%3s.D" % s for s in streams ]
streams.sort()
self.tmp = tempfile.NamedTemporaryFile(mode="w", prefix="slinktool.")
self.tmp.write("\n".join(streams)+"\n")
self.tmp.flush()
if verbose:
sys.stderr.write("\n".join(streams)+"\n")
slinktool = os.getenv("SLINKTOOL")
if not slinktool:
slinktool = "slinktool"
args = [slinktool, "-l", self.tmp.name, "-o", "-"]
if stime:
args.append("-tw")
tw = "%d,%d,%d,%d,%d,%d:" % (stime.year,stime.month,stime.day,stime.hour,stime.minute,stime.second)
if etime:
rw += "%d,%d,%d,%d,%d,%d" % (etime.year,etime.month,etime.day,etime.hour,etime.minute,etime.second)
args.append(tw)
if verbose: args.append("-v")
if timeout:
try: assert int(timeout) > 0
except: raise TypeError("illegal timeout parameter")
args += ["-nt", "%d" % int(timeout)]
args.append(server)
# start 'slinktool' as sub-process
self.popen = subprocess.Popen(args, stdout=subprocess.PIPE, shell=False)
infile = self.popen.stdout
mseed.Input.__init__(self, infile)
def __del__(self):
"""
Shut down SeedLink connections and close input.
"""
sys.stderr.write("shutting down slinktool\n")
sys.stderr.flush()
slinktool_pid = self.popen.pid
# It would of course be much better to send SIGTERM,
# but somehow slinktool often appears to ignore it.
# XXX Need to figure out why, and perhaps fix it (not critical).
self.popen.kill()
self.popen.communicate()
# mseed.Input.__del__(self) # closes the input file
class Input2(mseed.Input):
def __init__(self, server, streams, stime=None, etime=None, verbose=0):
"""
XXX information not uptodate!!! XXX
'streams' must be a dict containing tuples of (stime, etime),
with the key being the stream_id and stime and etime being
the starting and end time of the time window, respectively.
The times must be seis.Time objects. For instance
stime = seis.Time(...)
etime = seis.Time(...)
streams["GE.KBS.00.BHZ.D"] = (stime, etime)
It is more efficient to request the same time interval for
all streams. Wildcards for the channels are allowed. If
stime is None, only new data are retrieved as they come in.
"""
streams = [ "%-3s %5s %s%3s.D" % tuple(s.split(".")[:4])
for s in streams ]
streams.sort()
self.tmp = tempfile.NamedTemporaryFile(mode="w", prefix="slinktool.")
self.tmp.write("\n".join(streams)+"\n")
sys.stderr.write("\n".join(streams)+"\n")
self.tmp.flush()
cmd = "slinktool -l %s -o -" % self.tmp.name
if stime:
assert isinstance(stime, seis.Time)
cmd += " -tw %d,%d,%d,%d,%d,%d:" % stime.asDate
if etime:
assert isinstance(etime, seis.Time)
cmd += "%d,%d,%d,%d,%d,%d" % etime.asDate
cmd = cmd + "%s '%s'" % (verbose*" -v", server)
infile = os.popen(cmd)
mseed.Input.__init__(self, infile)
def available(server="localhost:18000",
time_window=None, stream_ids=None, verbose=0):
"""
Connects to server and returns a dictionary of lists of available
time windows as tuples of (start_time, end_time) for each available
stream. The stream set can be limited by specifying a list of
stream_ids in the format usual format, i.e. net.sta.loc.cha.type,
e.g. "GE.KBS.00.BHZ.D".
Note that often the returned lists contain only one time tuple,
corresponding to one contiguous time window available.
NEW:
The search for available data can be limited to a time window by
specifying the "time_window" parameter, which must be a tuple
containing the starting and end time as seis.Time objects.
"""
import re
if time_window:
stime, etime = time_window
assert stime <= etime
else:
stime, etime = None, None
cmd = "slinktool -Q %s %s " % (verbose*"-v ", server)
infile = os.popen(cmd)
windows = {}
# parse the output of "slinktool -Q"
# It is assumed that the lines consist of the fields
# net,sta,[loc,], cha, type, date1, time1, "-", date2, time2
# Since the location code (loc) may or may not be present, we
# determine the position of the dash "-" to determine where the
# other fields are.
regex = re.compile("^[A-Z][A-Z]\ [A-Z].*[12][0-9]{3}(/[0-9]{2}){2}.*$")
for line in infile:
if regex.match(line): # line containing a time window, a bit crude
line = line.split()
try:
dash = line.index("-")
except ValueError:
continue
if dash==7: # location code is present
loc = line[2]
else: loc = ""
net, sta, cha, typ = line[0], line[1], line[dash-4], line[dash-3]
stream_id = "%s.%s.%s.%s.%s" % (net, sta, loc, cha, typ)
if stream_ids and stream_id not in stream_ids:
continue
t1 = seis.Time("%s %s" % (line[dash-2], line[dash-1]))
t2 = seis.Time("%s %s" % (line[dash+1], line[dash+2]))
if stime and t2<stime or etime and t1>etime:
continue # non-overlapping time windows
if stime and t1<stime:
t1 = stime
if etime and t2>etime:
t2 = etime
if not stream_id in windows:
windows[stream_id] = []
windows[stream_id].append((t1,t2))
elif verbose:
# probably some diagnostic output
sys.stdout.write("%s\n" % line.strip())
return windows
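# Hedged usage sketch: list the data windows a SeedLink server reports via
# 'slinktool -Q'.  The server address is only an example, the 'slinktool'
# binary must be on PATH, and note that available() also relies on a 'seis'
# module (for seis.Time) that this file never imports.
def _print_available_windows(server="geofon.gfz-potsdam.de:18000"):
    for stream_id, windows in available(server=server).items():
        for stime, etime in windows:
            print(stream_id, stime, etime)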
def server_version(host, port=18000):
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((host, port))
except:
return None
s.send("HELLO\n")
data = s.recv(1024)
s.close()
if data[:8] != "SeedLink":
return None
return data[10:13]
def server_running(host, port=18000):
if server_version(host, port):
return True
return False
|
py | 1a51afb90f03441656c2ebbd0cae1ed8f9cca371 | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm1vf#tv!vs2mg7i7rlv)+djzru(c-w8wq4bxui9b+k33d)bg$3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'profiles_api',
'rest_framework',
'rest_framework.authtoken',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile' |
py | 1a51b0a38a6a10727cf8139b4a535cf055550d5f | from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import TensorBoard, CSVLogger, ModelCheckpoint
from lipnet.lipreading.generators import BasicGenerator
from lipnet.lipreading.callbacks import Statistics, Visualize
from lipnet.lipreading.curriculums import Curriculum
from lipnet.core.decoders import Decoder
from lipnet.lipreading.helpers import labels_to_text
from lipnet.utils.spell import Spell
from lipnet.model2 import LipNet
import numpy as np
import datetime
import os
np.random.seed(55)
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
DATASET_DIR = os.path.join(CURRENT_PATH, 'datasets')
OUTPUT_DIR = os.path.join(CURRENT_PATH, 'results')
LOG_DIR = os.path.join(CURRENT_PATH, 'logs')
PREDICT_GREEDY = False
PREDICT_BEAM_WIDTH = 200
PREDICT_DICTIONARY = os.path.join(CURRENT_PATH,'..','..','common','dictionaries','grid.txt')
def curriculum_rules(epoch):
return { 'sentence_length': -1, 'flip_probability': 0.5, 'jitter_probability': 0.05 }
def train(run_name, start_epoch, stop_epoch, img_c, img_w, img_h, frames_n, absolute_max_string_len, minibatch_size):
curriculum = Curriculum(curriculum_rules)
lip_gen = BasicGenerator(dataset_path=DATASET_DIR,
minibatch_size=minibatch_size,
img_c=img_c, img_w=img_w, img_h=img_h, frames_n=frames_n,
absolute_max_string_len=absolute_max_string_len,
curriculum=curriculum, start_epoch=start_epoch).build()
lipnet = LipNet(img_c=img_c, img_w=img_w, img_h=img_h, frames_n=frames_n,
absolute_max_string_len=absolute_max_string_len, output_size=lip_gen.get_output_size())
lipnet.summary()
adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
# the loss calc occurs elsewhere, so use a dummy lambda func for the loss
lipnet.model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=adam)
# load weight if necessary
if start_epoch > 0:
weight_file = os.path.join(OUTPUT_DIR, os.path.join(run_name, 'weights%02d.h5' % (start_epoch - 1)))
lipnet.model.load_weights(weight_file)
spell = Spell(path=PREDICT_DICTIONARY)
decoder = Decoder(greedy=PREDICT_GREEDY, beam_width=PREDICT_BEAM_WIDTH,
postprocessors=[labels_to_text, spell.sentence])
# define callbacks
statistics = Statistics(lipnet, lip_gen.next_val(), decoder, 256, output_dir=os.path.join(OUTPUT_DIR, run_name))
visualize = Visualize(os.path.join(OUTPUT_DIR, run_name), lipnet, lip_gen.next_val(), decoder, num_display_sentences=minibatch_size)
tensorboard = TensorBoard(log_dir=os.path.join(LOG_DIR, run_name))
csv_logger= CSVLogger(os.path.join(LOG_DIR, "{}-{}.csv".format('training',run_name)), separator=',', append=True)
checkpoint = ModelCheckpoint(os.path.join(OUTPUT_DIR, run_name, "weights{epoch:02d}.h5"), monitor='val_loss', save_weights_only=True, mode='auto', period=1)
lipnet.model.fit_generator(generator=lip_gen.next_train(),
steps_per_epoch=lip_gen.default_training_steps, epochs=stop_epoch,
validation_data=lip_gen.next_val(), validation_steps=lip_gen.default_validation_steps,
callbacks=[checkpoint, statistics, visualize, lip_gen, tensorboard, csv_logger],
initial_epoch=start_epoch,
verbose=1,
max_q_size=5,
workers=2,
pickle_safe=True)
if __name__ == '__main__':
run_name = datetime.datetime.now().strftime('%Y:%m:%d:%H:%M:%S')
train(run_name, 0, 5000, 3, 100, 50, 75, 32, 50)
|
py | 1a51b0c55f830cc216210d00f64db78b6639af2c | # -*- coding: utf-8 -*-
import logging
from . import config, models
from .models.util import _fetch_data
class Nvdb(object):
""" The main class for interfacing with the API.
:param client: Name of client using the API
:type client: str
:param contact: Contact information of user of the API
:type contact: str
    :param autoupdate: Indicates whether constants should be kept up to date with the latest API version. Default value = True
:type autoupdate: Bool
:returns: Nvdb Class
:usage:
>>> import pnvdb
>>> nvdb = pnvdb.Nvdb(client='Your-App-Name', contact='Your-contact-information')
"""
def __init__(self, client='pnvdb', contact='', autoupdate=True):
self.base_url = config.lesapi_base_url
self.headers = {'X-Client': client, 'X-Kontaktperson': contact}
self.srid = ''
self.antall = 1000
self.name2id = None
"""
status = _fetch_data(self, 'status')
if autoupdate and last_seen_version != float(status['datakatalog']['versjon']):
try:
update_CONST()
except:
print('Autoupdate of the CONST.py file failed.\nTry initializing with adminstrative privleleges, or set autoupdate = False')
logging.info('Updated name2id and kommune values from version: {} to version {}'.
format(last_seen_version, status['datakatalog']['versjon']))
"""
def _generator(self, url, _payload, objekt_type, data):
while True:
returnert = data['metadata']['returnert']
if returnert == 0:
break
_payload.update({'start': data['metadata']['neste']['start']})
for obj in enumerate(data['objekter']):
yield models.Objekt(self, objekt_type, obj[1]['id'], obj)
data = _fetch_data(self, url, _payload)
def status(self):
""" Method for getting information about the current status of the API
:returns: Dict
:keys: ['datakatalog', 'datagrunnlag']
:usage:
>>> status = nvdb.status()
>>> print(status['datakatalog']['versjon'])
2.13
"""
return _fetch_data(self, 'status')
def objekt(self, objekt_type, nvdb_id):
""" Method for creating a spesific nvdb python Objekt
:param objekt_type: nvdb objekttype id.
:type objekt_type: int
:param nvdb_id: the unique nvdb id
:type nvdb_id: int
:returns: :class:`.Objekt`
:usage:
>>> obj = nvdb.objekt(objekt_type=67, nvdb_id=89204552)
>>> print(obj.metadata)
            {'versjon': 3, 'type': {'navn': 'Tunnelløp', 'id': 67}, 'startdato': '2014-01-17',
'sist_modifisert': '2017-10-23 15:15:50'}
"""
return models.Objekt(self, objekt_type, nvdb_id)
def objekt_type(self, objekt_type):
""" Method for creating a spesific nvdb python
:param objekt_type: nvdb objekttype id.
:type objekt_type: int
:returns: :class:`.ObjektType`
:usage:
>>> obj = nvdb.objekt_type(objekt_type=67)
>>> print(obj.metadata['sosinvdbnavn'])
Tunnelløp_67
"""
return models.ObjektType(self, objekt_type)
def objekt_typer(self):
""" Returns objekt_type of every avaliable obj type in nvdb
:returns: List of :class:`.ObjektType`
:usage:
>>> obj_types = nvdb.objekt_typer()
>>> print(obj_types[0].metadata['sosinvdbnavn'])
Skjerm_3
"""
data = _fetch_data(self, 'vegobjekttyper')
objekt_typer = []
for objekt_type in data:
objekt_type_id = objekt_type['id']
objekt_typer.append(models.ObjektType(
self, objekt_type_id, meta=objekt_type))
return objekt_typer
def hent(self, objekt_type, kriterie=None):
""" Return a generator object that can be itterated over
to fetch the results of the query.
:param objekt_type: nvdb objekttype id.
:type objekt_type: int
        :param kriterie: filters for the query
        :type kriterie: dict
:returns: generator of :class:`.Objekt`
:usage:
>>> criteria = {'fylke':'2','egenskap':'1820>=20'}
>>> bomstasjoner = nvdb.hent(45, kriterie=criteria)
>>> for bomstasjon in bomstasjoner:
>>> print(bomstasjon)
"""
_payload = dict()
if kriterie:
_payload = kriterie.copy()
_payload.update(
{'antall': self.antall, 'segmentering': 'false', 'inkluder': 'alle'})
url = 'vegobjekter/{objekt_type}'.format(objekt_type=objekt_type)
data = _fetch_data(self, url, payload=_payload)
if data['metadata']['returnert'] == 0:
return None
else:
return self._generator(url, _payload, objekt_type, data)
def vegreferanse(self, vegreferanse):
""" Return vegreferanse object.
PS : Only support point refferences
        :param vegreferanse: The road reference(s) to objectify
:type vegreferanse: string
:returns: :class:`.Vegreferanse`
:usage:
>>> print(nvdb.vegreferanse('1600Ev6hp12m1000'))
"""
if isinstance(vegreferanse, list):
return [models.Vegreferanse(self, vegref)
for vegref in vegreferanse]
return models.Vegreferanse(self, vegreferanse)
def posisjon(self, x_coordinate=None, y_coordinate=None, lat=None, lon=None):
"""Returns a posisjon object for a given location
        :param x_coordinate: X-coordinate in EUREF89 UTM 33
        :type x_coordinate: float
        :param y_coordinate: Y-coordinate in EUREF89 UTM 33
        :type y_coordinate: float
        :param lat: Latitude in EUREF89
        :type lat: float
        :param lon: Longitude in EUREF89
        :type lon: float
:returns: :class:`.Posisjon`
:usage:
            >>> pos = nvdb.posisjon(x_coordinate=269815, y_coordinate=7038165)
>>> print(pos.vegreferanse)
"""
        if x_coordinate is not None and y_coordinate is not None:
            payload = {'nord': y_coordinate, 'ost': x_coordinate}
        elif lat is not None and lon is not None:
            payload = {'lat': lat, 'lon': lon}
        else:
            raise ValueError('Provide either (x_coordinate, y_coordinate) or (lat, lon)')
return models.Posisjon(self, payload)
    def regioner(self):
        """ Returns an Area object for all regions
        :returns: list of :class:`.Area`
        :usage:
            >>> for region in nvdb.regioner():
            >>>     print(region.metadata)
        """
        payload = {'inkluder': 'alle'}
        data = _fetch_data(self, 'omrader/regioner', payload)
        return [models.Area(self, area_data) for area_data in data]
    def fylker(self):
        """ Returns an Area object for all fylker
        :returns: list of :class:`.Area`
        :usage:
            >>> for fylke in nvdb.fylker():
            >>>     print(fylke.metadata)
        """
        payload = {'inkluder': 'alle'}
        data = _fetch_data(self, 'omrader/fylker', payload)
        return [models.Area(self, area_data) for area_data in data]
    def vegavdelinger(self):
        """ Returns an Area object for all vegavdelinger
        :returns: list of :class:`.Area`
        :usage:
            >>> for vegavdeling in nvdb.vegavdelinger():
            >>>     print(vegavdeling.metadata)
        """
        payload = {'inkluder': 'alle'}
        data = _fetch_data(self, 'omrader/vegavdelinger', payload)
        return [models.Area(self, area_data) for area_data in data]
    def kommuner(self):
        """ Returns an Area object for all kommuner
        :returns: list of :class:`.Area`
        :usage:
            >>> for kommune in nvdb.kommuner():
            >>>     print(kommune.metadata)
        """
        payload = {'inkluder': 'alle'}
        data = _fetch_data(self, 'omrader/kommuner', payload)
        return [models.Area(self, area_data) for area_data in data]
    def kontraktsomrader(self):
        """ Returns an Area object for all kontraktsomrader
        :returns: list of :class:`.Area`
        :usage:
            >>> for kontraktsomrade in nvdb.kontraktsomrader():
            >>>     print(kontraktsomrade.metadata)
        """
        payload = {'inkluder': 'alle'}
        data = _fetch_data(self, 'omrader/kontraktsomrader', payload)
        return [models.Area(self, area_data) for area_data in data]
    def riksvegruter(self):
        """ Returns an Area object for all riksvegruter
        :returns: list of :class:`.Area`
        :usage:
            >>> for riksvegrute in nvdb.riksvegruter():
            >>>     print(riksvegrute.metadata)
        """
        payload = {'inkluder': 'alle'}
        data = _fetch_data(self, 'omrader/riksvegruter', payload)
        return [models.Area(self, area_data) for area_data in data]
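# A minimal usage sketch (not part of the library API): it assumes network access to
# the public NVDB "les" API; the object type (45 = toll stations) and the filter
# values below are examples taken from the docstrings above.
def _example_usage():
    nvdb = Nvdb(client='Your-App-Name', contact='Your-contact-information')
    print(nvdb.status()['datakatalog']['versjon'])
    # hent() returns None when nothing matches, hence the `or []` guard.
    for bomstasjon in nvdb.hent(45, kriterie={'fylke': '2', 'egenskap': '1820>=20'}) or []:
        print(bomstasjon.metadata)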
|
py | 1a51b1f590abd78da4b737ea85cb8c1e078155e4 | import base64
import io
import os
import threading
import time
from typing import Optional, List
from platypush import Config
from platypush.context import get_bus
from platypush.message.event.qrcode import QrcodeScannedEvent
from platypush.message.response.qrcode import QrcodeGeneratedResponse, QrcodeDecodedResponse, ResultModel
from platypush.plugins import Plugin, action
from platypush.plugins.camera import CameraPlugin
from platypush.utils import get_plugin_class_by_name
class QrcodePlugin(Plugin):
"""
Plugin to generate and scan QR and bar codes.
Requires:
* **numpy** (``pip install numpy``).
* **qrcode** (``pip install 'qrcode[pil]'``) for QR generation.
* **pyzbar** (``pip install pyzbar``) for decoding code from images.
* **Pillow** (``pip install Pillow``) for image management.
"""
def __init__(self, camera_plugin: Optional[str] = None, **kwargs):
"""
:param camera_plugin: Name of the plugin that will be used as a camera to capture images (e.g.
``camera.cv`` or ``camera.pi``).
"""
super().__init__(**kwargs)
self.camera_plugin = camera_plugin
self._capturing = threading.Event()
def _get_camera(self, camera_plugin: Optional[str] = None, **config) -> CameraPlugin:
camera_plugin = camera_plugin or self.camera_plugin
if not config:
config = Config.get(camera_plugin) or {}
config['stream_raw_frames'] = True
cls = get_plugin_class_by_name(camera_plugin)
assert cls and issubclass(cls, CameraPlugin), '{} is not a valid camera plugin'.format(camera_plugin)
return cls(**config)
# noinspection PyShadowingBuiltins
@action
def generate(self, content: str, output_file: Optional[str] = None, show: bool = False,
format: str = 'png', camera_plugin: Optional[str] = None) -> QrcodeGeneratedResponse:
"""
Generate a QR code.
If you configured the :class:`platypush.backend.http.HttpBackend` then you can also generate
codes directly from the browser through ``http://<host>:<port>/qrcode?content=...``.
:param content: Text, URL or content of the QR code.
:param output_file: If set then the QR code will be exported in the specified image file.
Otherwise, a base64-encoded representation of its binary content will be returned in
the response as ``data``.
:param show: If True, and if the device where the application runs has an active display,
then the generated QR code will be shown on display.
:param format: Output image format (default: ``png``).
:param camera_plugin: If set then this plugin (e.g. ``camera`` or ``camera.pi``) will be used to capture
live images from the camera and search for bar codes or QR-codes.
:return: :class:`platypush.message.response.qrcode.QrcodeGeneratedResponse`.
"""
import qrcode
qr = qrcode.make(content)
img = qr.get_image()
ret = {
'content': content,
'format': format,
}
if show:
img.show()
if output_file:
output_file = os.path.abspath(os.path.expanduser(output_file))
img.save(output_file, format=format)
ret['image_file'] = output_file
else:
f = io.BytesIO()
img.save(f, format=format)
ret['data'] = base64.encodebytes(f.getvalue()).decode()
return QrcodeGeneratedResponse(**ret)
@action
def decode(self, image_file: str) -> QrcodeDecodedResponse:
"""
Decode a QR code from an image file.
:param image_file: Path of the image file.
"""
from pyzbar import pyzbar
from PIL import Image
image_file = os.path.abspath(os.path.expanduser(image_file))
img = Image.open(image_file)
results = pyzbar.decode(img)
return QrcodeDecodedResponse(results)
def _convert_frame(self, frame):
import numpy as np
from PIL import Image
assert isinstance(frame, np.ndarray), \
'Image conversion only works with numpy arrays for now (got {})'.format(type(frame))
mode = 'RGB'
if len(frame.shape) > 2 and frame.shape[2] == 4:
mode = 'RGBA'
return Image.frombuffer(mode, (frame.shape[1], frame.shape[0]), frame, 'raw', mode, 0, 1)
@action
def start_scanning(self, camera_plugin: Optional[str] = None, duration: Optional[float] = None,
n_codes: Optional[int] = None) -> Optional[List[ResultModel]]:
"""
Decode QR-codes and bar codes using a camera.
Triggers:
- :class:`platypush.message.event.qrcode.QrcodeScannedEvent` when a code is successfully scanned.
:param camera_plugin: Camera plugin (overrides default ``camera_plugin``).
:param duration: How long the capturing phase should run (default: until ``stop_scanning`` or app termination).
:param n_codes: Stop after decoding this number of codes (default: None).
        :return: When ``duration`` or ``n_codes`` is specified, or when ``stop_scanning`` is called, it will return a list of
            :class:`platypush.message.response.qrcode.ResultModel` instances with the scanned results.
"""
from pyzbar import pyzbar
assert not self._capturing.is_set(), 'A capturing process is already running'
camera = self._get_camera(camera_plugin)
codes = []
last_results = {}
last_results_timeout = 10.0
last_results_time = 0
self._capturing.set()
try:
with camera:
start_time = time.time()
while self._capturing.is_set() \
and (not duration or time.time() < start_time + duration) \
and (not n_codes or len(codes) < n_codes):
output = camera.get_stream()
with output.ready:
output.ready.wait()
img = self._convert_frame(output.raw_frame)
results = pyzbar.decode(img)
if results:
results = [
result for result in QrcodeDecodedResponse(results).output['results']
if result['data'] not in last_results
or time.time() >= last_results_time + last_results_timeout
]
if results:
codes.extend(results)
get_bus().post(QrcodeScannedEvent(results=results))
last_results = {result['data']: result for result in results}
last_results_time = time.time()
finally:
self._capturing.clear()
return codes
@action
def stop_scanning(self):
self._capturing.clear()
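# A minimal sketch of calling the plugin outside a running platypush instance; it
# assumes QrcodePlugin can be constructed with no arguments and the output path
# below is just an example.
def _example_generate_qrcode():
    plugin = QrcodePlugin()
    response = plugin.generate(content='https://example.org',
                               output_file='/tmp/qr-example.png', show=False)
    print(response)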
# vim:sw=4:ts=4:et:
|
py | 1a51b305b4428742b3228cf0753cf86f4bea7032 | from uuid import uuid4
from django.conf import settings
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError: # Django < 1.10
MiddlewareMixin = object
from .locals import set_cid, get_cid, log_output
class CidMiddleware(MiddlewareMixin):
"""
Middleware class to extract the correlation id from incoming headers
and add them to outgoing headers
"""
def __init__(self, *args, **kwargs):
super(CidMiddleware, self).__init__(*args, **kwargs)
self.cid_request_header = getattr(
settings, 'CID_HEADER', 'X_CORRELATION_ID'
)
self.cid_response_header = getattr(
settings, 'CID_RESPONSE_HEADER', self.cid_request_header
)
self.generate_cid = getattr(settings, 'CID_GENERATE', False)
def process_request(self, request):
cid = request.META.get(self.cid_request_header, None)
if cid is None and self.generate_cid:
cid = str(uuid4())
request.correlation_id = cid
set_cid(request.correlation_id)
def process_response(self, request, response):
cid = get_cid()
if cid and self.cid_response_header:
response[self.cid_response_header] = cid
# Intercept 5XX errors and log them
log_output(request, response)
return response
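# Example configuration (a sketch, not shipped defaults): these settings are read by
# CidMiddleware above. The dotted middleware path depends on where this module lives
# in your project, so treat it as a placeholder.
#
# MIDDLEWARE = [
#     '...',
#     'yourapp.middleware.CidMiddleware',  # placeholder path
# ]
# CID_HEADER = 'X_CORRELATION_ID'           # request.META key holding the incoming id
# CID_RESPONSE_HEADER = 'X-Correlation-Id'  # header name set on the response
# CID_GENERATE = True                       # generate a uuid4 when no id is supplied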
|
py | 1a51b4414075e5ca28ef0cd2dba59862ddecce5f | import numpy as np
from numpy.random import normal
from scipy.sparse import issparse
import scipy.sparse.linalg as slinalg
from scipy import linalg, stats
# Sparse (cholmod-backed) mass matrices are not implemented in this module; the flag
# below keeps quad_potential() on a well-defined error path for sparse input.
chol_available = False
__all__ = [
"quad_potential",
"QuadPotentialDiag",
"QuadPotentialDiagAdapt",
"isquadpotential",
"QuadPotentialLowRank",
]
def quad_potential(C, is_cov):
"""
Compute a QuadPotential object from a scaling matrix.
Parameters
----------
C : arraylike, 0 <= ndim <= 2
scaling matrix for the potential
vector treated as diagonal matrix.
is_cov : Boolean
whether C is provided as a covariance matrix or hessian
Returns
-------
q : Quadpotential
"""
if issparse(C):
if not chol_available:
raise ImportError("Sparse mass matrices require scikits.sparse")
elif is_cov:
return QuadPotentialSparse(C)
else:
raise ValueError("Sparse precision matrices are not supported")
partial_check_positive_definite(C)
if C.ndim == 1:
if is_cov:
return QuadPotentialDiag(C)
else:
return QuadPotentialDiag(1.0 / C)
else:
raise NotImplementedError("QuadPotentialFull and QuadPotentialFullInv not yet implemented")
def partial_check_positive_definite(C):
"""Make a simple but partial check for Positive Definiteness."""
if C.ndim == 1:
d = C
else:
d = np.diag(C)
(i,) = np.nonzero(np.logical_or(np.isnan(d), d <= 0))
if len(i):
raise PositiveDefiniteError("Simple check failed. Diagonal contains negatives", i)
class PositiveDefiniteError(ValueError):
def __init__(self, msg, idx):
super(PositiveDefiniteError, self).__init__(msg)
self.idx = idx
self.msg = msg
def __str__(self):
return "Scaling is not positive definite: %s. Check indexes %s." % (self.msg, self.idx)
class QuadPotential(object):
def velocity(self, x, out=None):
"""Compute the current velocity at a position in parameter space."""
raise NotImplementedError("Abstract method")
def energy(self, x, velocity=None):
raise NotImplementedError("Abstract method")
def random(self, x):
raise NotImplementedError("Abstract method")
def velocity_energy(self, x, v_out):
raise NotImplementedError("Abstract method")
def update(self, sample, grad, tune):
"""Inform the potential about a new sample during tuning.
This can be used by adaptive potentials to change the
mass matrix.
"""
pass
def raise_ok(self, vmap=None):
"""Check if the mass matrix is ok, and raise ValueError if not.
Parameters
----------
vmap : blocking.ArrayOrdering.vmap
List of `VarMap`s, which are namedtuples with var, slc, shp, dtyp
Raises
------
ValueError if any standard deviations are 0 or infinite
Returns
-------
None
"""
return None
def reset(self):
pass
def isquadpotential(value):
"""Check whether an object might be a QuadPotential object."""
return isinstance(value, QuadPotential)
class QuadPotentialDiagAdapt(QuadPotential):
"""Adapt a diagonal mass matrix from the sample variances."""
def __init__(
self,
n,
initial_mean,
initial_diag=None,
initial_weight=0,
adaptation_window=101,
dtype=None,
):
"""Set up a diagonal mass matrix."""
if initial_diag is not None and initial_diag.ndim != 1:
raise ValueError("Initial diagonal must be one-dimensional.")
if initial_mean.ndim != 1:
raise ValueError("Initial mean must be one-dimensional.")
if initial_diag is not None and len(initial_diag) != n:
raise ValueError(
"Wrong shape for initial_diag: expected %s got %s" % (n, len(initial_diag))
)
if len(initial_mean) != n:
raise ValueError(
"Wrong shape for initial_mean: expected %s got %s" % (n, len(initial_mean))
)
if dtype is None:
dtype = "float32"
if initial_diag is None:
initial_diag = np.ones(n, dtype=dtype)
initial_weight = 1
else:
initial_diag = initial_diag.astype(dtype)
self.dtype = dtype
self._n = n
self._var = np.array(initial_diag, dtype=self.dtype, copy=True)
# self._var_theano = theano.shared(self._var)
self._stds = np.sqrt(initial_diag)
self._inv_stds = 1.0 / self._stds
self._foreground_var = _WeightedVariance(
self._n, initial_mean, initial_diag, initial_weight, self.dtype
)
self._background_var = _WeightedVariance(self._n, dtype=self.dtype)
self._n_samples = 0
self.adaptation_window = adaptation_window
def velocity(self, x, out=None):
"""Compute the current velocity at a position in parameter space."""
return np.multiply(self._var, x, out=out)
def energy(self, x, velocity=None):
"""Compute kinetic energy at a position in parameter space."""
if velocity is not None:
return 0.5 * x.dot(velocity)
return 0.5 * x.dot(self._var * x)
def velocity_energy(self, x, v_out):
"""Compute velocity and return kinetic energy at a position in parameter space."""
self.velocity(x, out=v_out)
return 0.5 * np.dot(x, v_out)
def random(self):
"""Draw random value from QuadPotential."""
vals = normal(size=self._n).astype(self.dtype)
return self._inv_stds * vals
def _update_from_weightvar(self, weightvar):
weightvar.current_variance(out=self._var)
np.sqrt(self._var, out=self._stds)
np.divide(1, self._stds, out=self._inv_stds)
# self._var_theano.set_value(self._var)
def update(self, sample, grad, tune):
"""Inform the potential about a new sample during tuning."""
if not tune:
return
window = self.adaptation_window
self._foreground_var.add_sample(sample, weight=1)
self._background_var.add_sample(sample, weight=1)
self._update_from_weightvar(self._foreground_var)
if self._n_samples > 0 and self._n_samples % window == 0:
self._foreground_var = self._background_var
self._background_var = _WeightedVariance(self._n, dtype=self.dtype)
self._n_samples += 1
def raise_ok(self, vmap):
"""Check if the mass matrix is ok, and raise ValueError if not.
Parameters
----------
vmap : blocking.ArrayOrdering.vmap
List of `VarMap`s, which are namedtuples with var, slc, shp, dtyp
Raises
------
ValueError if any standard deviations are 0 or infinite
Returns
-------
None
"""
if np.any(self._stds == 0):
name_slc = []
tmp_hold = list(range(self._stds.size))
for vmap_ in vmap:
slclen = len(tmp_hold[vmap_.slc])
for i in range(slclen):
name_slc.append((vmap_.var, i))
index = np.where(self._stds == 0)[0]
errmsg = ["Mass matrix contains zeros on the diagonal. "]
for ii in index:
errmsg.append(
"The derivative of RV `{}`.ravel()[{}]" " is zero.".format(*name_slc[ii])
)
raise ValueError("\n".join(errmsg))
if np.any(~np.isfinite(self._stds)):
name_slc = []
tmp_hold = list(range(self._stds.size))
for vmap_ in vmap:
slclen = len(tmp_hold[vmap_.slc])
for i in range(slclen):
name_slc.append((vmap_.var, i))
index = np.where(~np.isfinite(self._stds))[0]
errmsg = ["Mass matrix contains non-finite values on the diagonal. "]
for ii in index:
errmsg.append(
"The derivative of RV `{}`.ravel()[{}]" " is non-finite.".format(*name_slc[ii])
)
raise ValueError("\n".join(errmsg))
class QuadPotentialDiagAdaptGrad(QuadPotentialDiagAdapt):
"""Adapt a diagonal mass matrix from the variances of the gradients.
    This is experimental, and may be removed without prior deprecation.
"""
def __init__(self, *args, **kwargs):
super(QuadPotentialDiagAdaptGrad, self).__init__(*args, **kwargs)
self._grads1 = np.zeros(self._n, dtype=self.dtype)
self._ngrads1 = 0
self._grads2 = np.zeros(self._n, dtype=self.dtype)
self._ngrads2 = 0
def _update(self, var):
self._var[:] = var
np.sqrt(self._var, out=self._stds)
np.divide(1, self._stds, out=self._inv_stds)
# self._var_theano.set_value(self._var)
def update(self, sample, grad, tune):
"""Inform the potential about a new sample during tuning."""
if not tune:
return
self._grads1[:] += np.abs(grad)
self._grads2[:] += np.abs(grad)
self._ngrads1 += 1
self._ngrads2 += 1
if self._n_samples <= 150:
            super().update(sample, grad, tune)
else:
self._update((self._ngrads1 / self._grads1) ** 2)
if self._n_samples > 100 and self._n_samples % 100 == 50:
self._ngrads1 = self._ngrads2
self._ngrads2 = 1
self._grads1[:] = self._grads2
self._grads2[:] = 1
class _WeightedVariance(object):
"""Online algorithm for computing mean of variance."""
def __init__(
self, nelem, initial_mean=None, initial_variance=None, initial_weight=0, dtype="d"
):
self._dtype = dtype
self.w_sum = float(initial_weight)
self.w_sum2 = float(initial_weight) ** 2
if initial_mean is None:
self.mean = np.zeros(nelem, dtype="d")
else:
self.mean = np.array(initial_mean, dtype="d", copy=True)
if initial_variance is None:
self.raw_var = np.zeros(nelem, dtype="d")
else:
self.raw_var = np.array(initial_variance, dtype="d", copy=True)
self.raw_var[:] *= self.w_sum
if self.raw_var.shape != (nelem,):
raise ValueError("Invalid shape for initial variance.")
if self.mean.shape != (nelem,):
raise ValueError("Invalid shape for initial mean.")
def add_sample(self, x, weight):
x = np.asarray(x)
self.w_sum += weight
self.w_sum2 += weight * weight
prop = weight / self.w_sum
old_diff = x - self.mean
self.mean[:] += prop * old_diff
new_diff = x - self.mean
self.raw_var[:] += weight * old_diff * new_diff
def current_variance(self, out=None):
if self.w_sum == 0:
raise ValueError("Can not compute variance without samples.")
if out is not None:
return np.divide(self.raw_var, self.w_sum, out=out)
else:
return (self.raw_var / self.w_sum).astype(self._dtype)
def current_mean(self):
        return self.mean.astype(self._dtype)
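# A quick consistency sketch for _WeightedVariance: with unit weights the running
# estimate should match numpy's population variance (ddof=0) of the same samples.
def _example_weighted_variance():
    rng = np.random.RandomState(0)
    samples = rng.randn(200, 3)
    wv = _WeightedVariance(3)
    for x in samples:
        wv.add_sample(x, weight=1)
    print(np.allclose(wv.current_variance(), samples.var(axis=0)))  # expected: True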
class QuadPotentialDiag(QuadPotential):
"""Quad potential using a diagonal covariance matrix."""
def __init__(self, v, dtype=None):
"""Use a vector to represent a diagonal matrix for a covariance matrix.
Parameters
----------
v : vector, 0 <= ndim <= 1
Diagonal of covariance matrix for the potential vector
"""
if dtype is None:
dtype = "float32"
self.dtype = dtype
v = v.astype(self.dtype)
s = v ** 0.5
self.s = s
self.inv_s = 1.0 / s
self.v = v
def velocity(self, x, out=None):
"""Compute the current velocity at a position in parameter space."""
if out is not None:
np.multiply(x, self.v, out=out)
return
return self.v * x
def random(self):
"""Draw random value from QuadPotential."""
return normal(size=self.s.shape) * self.inv_s
def energy(self, x, velocity=None):
"""Compute kinetic energy at a position in parameter space."""
if velocity is not None:
return 0.5 * np.dot(x, velocity)
return 0.5 * x.dot(self.v * x)
def velocity_energy(self, x, v_out):
"""Compute velocity and return kinetic energy at a position in parameter space."""
np.multiply(x, self.v, out=v_out)
return 0.5 * np.dot(x, v_out)
def add_ADATv(A, v, out, diag=None, beta=0.0, work=None):
"""Run out = beta * out + A @ np.diag(D) @ A.T @ v."""
if work is None:
work = np.empty(A.shape[1])
linalg.blas.dgemv(1.0, A, v, y=work, trans=1, beta=0.0, overwrite_y=True)
if diag is not None:
work *= diag
linalg.blas.dgemv(1.0, A, work, y=out, beta=beta, overwrite_y=True)
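# A dense sanity sketch for add_ADATv(): the BLAS-based routine should agree with the
# straightforward dense expression from its docstring. Inputs are float64, matching
# how the helper is used elsewhere in this module.
def _example_add_ADATv():
    rng = np.random.RandomState(1)
    A = rng.randn(6, 4)
    v = rng.randn(6)
    d = rng.rand(4)
    out = np.zeros(6)
    add_ADATv(A, v, out, diag=d, beta=0.0)
    print(np.allclose(out, A @ np.diag(d) @ A.T @ v))  # expected: True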
class Covariance:
def __init__(self, n_dim, n_svd, n_approx, values, grads, diag=None):
assert n_svd <= len(values)
assert values.shape == grads.shape
self.values = values - values.mean(0)
self.grads = grads - grads.mean(0)
val_variance = self.values.var(0)
grd_variance = self.grads.var(0)
self._val_var = val_variance
self._grd_var = grd_variance
if diag == "mean":
self.diag = np.sqrt(val_variance / grd_variance)
elif diag == "values":
self.diag = np.sqrt(val_variance)
elif isinstance(diag, np.ndarray):
self.diag = np.sqrt(diag)
else:
raise ValueError("Unknown diag approximation: %s" % diag)
self.invsqrtdiag = 1 / np.sqrt(self.diag)
self.values /= self.diag[None, :]
self.grads *= self.diag[None, :]
_, svdvals, vecs = linalg.svd(self.values, full_matrices=False)
self.vals_eigs = 2 * np.log(svdvals[:n_svd]) - np.log(len(values))
self.vals_vecs = vecs.T[:, :n_svd].copy()
_, svdvals, vecs = linalg.svd(self.grads, full_matrices=False)
self.grad_eigs = -2 * np.log(svdvals[:n_svd]) + np.log(len(grads))
self.grad_vecs = vecs.T[:, :n_svd].copy()
self.n_dim = n_dim
self.n_svd = n_svd
self.n_approx = n_approx
if n_svd < n_dim // 3:
center_slice = slice(n_svd // 3, None)
else:
center_slice = slice(2 * n_svd // 3, (2 * n_dim) // 3)
self.center = 0.5 * (
self.grad_eigs[center_slice].mean() + self.vals_eigs[center_slice].mean()
)
self.vals_eigs -= self.center
self.grad_eigs -= self.center
weight = stats.beta(0.5, 0.5).cdf(np.linspace(0, 1, n_dim))
self.weight = 1 - weight[:n_svd]
self._make_operators(n_approx)
def to_dense(self):
vecs, eigs = self.vals_vecs, self.vals_eigs
A = (vecs * eigs * self.weight) @ vecs.T
vecs, eigs = self.grad_vecs, self.grad_eigs
B = (vecs * eigs * self.weight) @ vecs.T
corr = np.exp(self.center) * linalg.expm(A + B)
corr *= self.diag[:, None]
corr *= self.diag[None, :]
return corr
def invsqrt_to_dense(self):
assert False # TODO This is wrong
vecs, eigs = self.vals_vecs, self.vals_eigs
A = (vecs * eigs * self.weight) @ vecs.T
vecs, eigs = self.grad_vecs, self.grad_eigs
B = (vecs * eigs * self.weight) @ vecs.T
corr = np.exp(-0.5 * self.center) * linalg.expm(-0.5 * (A + B))
corr *= self.invsqrtdiag[:, None]
corr *= self.invsqrtdiag[None, :]
return corr
def matmul(self, x, out=None):
if out is None:
out = np.empty_like(x)
self._matmul(x * self.diag, out)
out *= self.diag
return out
def invsqrtmul(self, x, out=None):
if out is None:
out = np.empty_like(x)
self._matmul_invsqrt(x, out)
return out / self.diag
def _make_operators(self, n_eigs, exponent=1):
vecs1, eigs1 = self.vals_vecs, self.vals_eigs
vecs2, eigs2 = self.grad_vecs, self.grad_eigs
vecs1 = np.ascontiguousarray(vecs1)
vecs2 = np.ascontiguousarray(vecs2)
def upper_matmul(x):
out = np.empty_like(x)
work = np.empty(len(eigs1))
add_ADATv(vecs1, x, out, diag=eigs1 * self.weight, beta=0.0, work=work)
add_ADATv(vecs2, x, out, diag=eigs2 * self.weight, beta=1.0, work=work)
return out
upper = slinalg.LinearOperator((self.n_dim, self.n_dim), upper_matmul)
eigs, vecs = slinalg.eigsh(upper, k=n_eigs, mode="buckling")
self._matrix_logeigs = eigs
eigs_exp = np.exp(eigs)
eigs_invsqrtexp = np.exp(-0.5 * eigs)
def matmul_exp(x, out):
work = np.empty(len(eigs))
add_ADATv(vecs, x, out, diag=None, beta=0.0, work=work)
add_ADATv(vecs, x, out, diag=eigs_exp, beta=-1.0, work=work)
out += x
out *= np.exp(self.center)
def matmul_invsqrtexp(x, out):
work = np.empty(len(eigs))
add_ADATv(vecs, x, out, diag=None, beta=0.0, work=work)
add_ADATv(vecs, x, out, diag=eigs_invsqrtexp, beta=-1.0, work=work)
out += x
out *= np.exp(-0.5 * self.center)
self._matmul = matmul_exp
self._matmul_invsqrt = matmul_invsqrtexp
class QuadPotentialLowRank(object):
def __init__(self, ndim, n_approx, diag):
self._cov = None
self._iter = 0
self._ndim = ndim
self._n_approx = n_approx
self._diag = diag
self._old_covs = []
self._grad_store = []
self._sample_store = []
self.dtype = "float64"
def velocity(self, x, out=None):
if self._cov is None:
if out is None:
out = np.empty_like(x)
out[:] = x
return out
return self._cov.matmul(x, out=out)
def energy(self, x, velocity=None):
if velocity is None:
velocity = self.velocity(x)
return 0.5 * x.dot(velocity)
def random(self):
rand = np.random.randn(self._ndim)
if self._cov is None:
return rand
return self._cov.invsqrtmul(rand)
def velocity_energy(self, x, v_out):
self.velocity(x, out=v_out)
return 0.5 * np.dot(x, v_out)
def raise_ok(self, *args, **kwargs):
pass
def update(self, sample, grad, tune):
self._iter += 1
if not tune:
return
if self._iter < 50:
return
renew_iters = [120, 240, 400, 850]
if self._iter not in renew_iters:
self._grad_store.append(grad.copy())
self._sample_store.append(sample.copy())
return
n_samples = len(self._grad_store)
samples = np.array(self._sample_store)
grads = np.array(self._grad_store)
self._sample_store.clear()
self._grad_store.clear()
if self._iter <= 160:
n_approx = 4
else:
n_approx = self._n_approx
if self._cov is not None:
self._old_covs.append(self._cov)
n_svd = min(self._ndim - 5, n_samples - 5)
self._cov = Covariance(self._ndim, n_svd, n_approx, samples, grads, diag=self._diag)
|
py | 1a51b4c66b5be384b61c9bc177e809629ddfb281 | import hopkins_data
import pandas as pd
class CovidData:
def __init__(self):
self.confirmed_df = hopkins_data.get_confirmed_by_country()
self.death_df = hopkins_data.get_death_by_country()
self.population_df = hopkins_data.get_population_by_country()
assert len(set(self.confirmed_df.index)-set(self.population_df.index)) == 0
self.seven_day_incidence = (self.confirmed_df.diff(7, axis=1).divide(self.population_df, axis=0)*100000).fillna(0.0)
self.seven_day_death_rate = (self.death_df.diff(7, axis=1).divide(self.population_df, axis=0)*100000).fillna(0.0)
self.death_per_confirmed = (100 * self.seven_day_death_rate / self.seven_day_incidence).fillna(0.0)
def create_overview(self): # todo
df = pd.concat([
self.confirmed_df.diff(1, axis=1).iloc[:, -1].to_frame('confirmed_yesterday'),
self.death_df.diff(1, axis=1).iloc[:, -1].to_frame('death_yesterday'),
self.population_df,
self.seven_day_incidence.iloc[:, -1].to_frame('weekly_incidence'),
self.confirmed_df.iloc[:, -1].to_frame('confirmed_overall'),
self.death_df.iloc[:, -1].to_frame('death_overall'),
self.confirmed_df.iloc[:, -1].divide(self.population_df).to_frame('confirmed_100k'),
self.death_df.iloc[:, -1].divide(self.population_df).to_frame('death_100k'),
(self.death_df.iloc[:, -1]/self.confirmed_df.iloc[:, -1]).to_frame('confirmed_to_kill'),
self.seven_day_death_rate.iloc[:, -1].to_frame('weekly_death_rate'),
], axis=1)
return df.clip(lower=0).reset_index().rename(columns={'index': 'country'})
@staticmethod
def create_time_series_data(df_in, countries, label):
# select countries
df = df_in.loc[countries]
# clip negative values
df = df.clip(lower=0)
# stack data
return df.stack().reset_index().rename(
columns={'level_0': 'Country', 'level_1': 'Date', 0: label}), label
def get_seven_day_incidences(self, countries, y_label):
return CovidData.create_time_series_data(
self.seven_day_incidence,
countries, y_label)
def get_seven_day_incidences_ranking(self, countries, y_label):
return CovidData.create_time_series_data(
self.seven_day_incidence.rank(ascending=False),
countries, y_label)
def get_confirmed_yesterday_100k(self, countries, y_label):
return CovidData.create_time_series_data(
self.confirmed_df.diff(1, axis=1).divide(self.population_df, axis=0)*100000, countries, y_label)
def get_death_yesterday_100k(self, countries, y_label):
return CovidData.create_time_series_data(
self.death_df.diff(1, axis=1).divide(self.population_df, axis=0)*100000, countries, y_label)
def confirmed_sum_100k(self, countries, y_label):
return CovidData.create_time_series_data(
self.confirmed_df.divide(self.population_df, axis=0)*100000, countries, y_label)
def death_sum_100k(self, countries, y_label):
return CovidData.create_time_series_data(
self.death_df.divide(self.population_df, axis=0)*100000, countries, y_label)
def death_rate(self, countries, y_label):
return CovidData.create_time_series_data(self.seven_day_death_rate, countries, y_label)
def death_rank(self, countries, y_label):
return CovidData.create_time_series_data(self.seven_day_death_rate.rank(ascending=False), countries, y_label)
def get_death_per_confirmed(self, countries, y_label):
return CovidData.create_time_series_data(self.death_per_confirmed, countries, y_label)
def get_death_per_confirmed_rank(self, countries, y_label):
return CovidData.create_time_series_data(self.death_per_confirmed.rank(ascending=False), countries, y_label)
def get_death_per_confirmed_shifted(self, countries, offset, y_label):
df = 100 * self.seven_day_death_rate / self.seven_day_incidence.shift(offset, axis='columns')
return CovidData.create_time_series_data(df.iloc[:,30+offset:], countries, y_label)
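# A minimal usage sketch: CovidData() downloads the Johns Hopkins tables through
# hopkins_data at construction time (network access required); the country names
# below are examples and must match the index used by that data source.
def _example_usage():
    data = CovidData()
    incidences, label = data.get_seven_day_incidences(['Germany', 'Norway'],
                                                      'Seven-day incidence per 100k')
    print(label)
    print(incidences.head())
    print(data.create_overview().sort_values('weekly_incidence', ascending=False).head())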
|
py | 1a51b6525b2dd8943ebf7c0674f29430314fb1b2 | # %% Load packages
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting1.mcmc.constants import num_chains
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting1.mcmc.dataloaders import test_dataloader
from bnn_mcmc_examples.examples.mlp.noisy_xor.setting1.mcmc.hmc.constants import sampler_output_run_paths
# %% Load test data and labels
_, test_labels = next(iter(test_dataloader))
test_labels = test_labels.squeeze().detach().cpu().numpy()
# %% Plot predictive posteriors
pred_colors = {'correct': '#bcbd22', 'wrong': '#d62728'}
# '#bcbd22': rio grande, similar to yellow green
# ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd", "#8c564b", "#e377c2", "#7f7f7f", "#bcbd22", "#17becf"]
legend_patches = [mpatches.Patch(color=pred_colors[key], label=key.capitalize()) for key in pred_colors]
for i in range(num_chains):
test_pred_df = pd.read_csv(
sampler_output_run_paths[i].joinpath('pred_posterior_on_test.csv'),
header=None,
names=['class0', 'class1']
)
test_pred_df['preds'] = np.loadtxt(
        sampler_output_run_paths[i].joinpath('preds_via_bm.txt'), dtype=int, delimiter=',', skiprows=0
)
test_pred_df['labels'] = test_labels
test_pred_df.sort_values(['labels'], ascending=True, inplace=True)
test_pred_df = pd.concat([
test_pred_df.loc[test_pred_df['labels'] == 0].sort_values(['class0'], ascending=True),
test_pred_df.loc[test_pred_df['labels'] == 1].sort_values(['class1'], ascending=True)
])
test_pred_df['color'] = [
pred_colors['correct'] if cmp else pred_colors['wrong'] for cmp in test_pred_df['preds'] == test_pred_df['labels']
]
test_pred_df.to_csv(sampler_output_run_paths[i].joinpath('pred_posterior_on_test_for_fig.csv'))
test_pred_label_counts = test_pred_df['labels'].value_counts()
test_pred_label_cumsum = [
test_pred_label_counts.loc[0],
test_pred_label_counts.loc[0] + test_pred_label_counts.loc[1]
]
plt.figure(figsize=[8, 4])
plt.ylim([0, 1])
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['axes.titlesize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
plt.rcParams['legend.fontsize'] = 12
plt.vlines(
x=range(len(test_labels)),
ymin=0,
ymax=pd.concat([
test_pred_df['class0'][:test_pred_label_cumsum[0]],
test_pred_df['class1'][test_pred_label_cumsum[0]:]
]),
color=test_pred_df['color'],
linewidth=2
)
#plt.bar(
# range(len(test_labels)),
# pd.concat([
# test_pred_df['class0'][:test_pred_label_cumsum[0]],
# test_pred_df['class1'][test_pred_label_cumsum[0]:]
# ]),
# width=0.7,
# color=test_pred_df['color'],
# align='edge'
#)
plt.legend(handles=legend_patches, loc='upper left', ncol=1)
plt.axhline(y=0.5, xmin=0, xmax=len(test_labels), color='black', linestyle='dashed', linewidth=1.5)
plt.axvline(x=0.5*len(test_labels), ymin=0, ymax=1, color='black', linestyle='dotted', linewidth=1.5)
plt.savefig(
sampler_output_run_paths[i].joinpath('pred_posterior_on_test.png'),
pil_kwargs={'quality': 100},
transparent=True,
bbox_inches='tight',
pad_inches=0.1
)
plt.close()
|
py | 1a51b7e0107bd48da2ec17b8cf0ac822af15c80f | from overwatch import app
import xmltodict
import asyncio
import aiohttp
loop = asyncio.get_event_loop()
semaphore = asyncio.Semaphore(5)
def fetch_urls(urls, parser):
async def fetch(url):
        async with semaphore:
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
content = await response.read()
await asyncio.sleep(1)
if app.debug:
print('Fetch: {url} <Status {status}>'.format(
url=url, status=response.status))
return parser(content)
urls_to_fetch = [fetch(url) for url in urls]
parsed_urls = loop.run_until_complete(asyncio.gather(*urls_to_fetch))
return parsed_urls
def parse_xml(url, params=None):
if not params:
urls = [url]
else:
urls = [url.format(**a) for a in params]
return fetch_urls(urls, xmltodict.parse)
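# A minimal usage sketch (the feed URL and its {page} parameter are made-up
# placeholders): parse_xml() formats the template once per params entry, fetches the
# URLs concurrently (at most 5 at a time via the semaphore), and returns parsed dicts.
def _example_parse_xml():
    docs = parse_xml('https://example.org/feed/page-{page}.xml',
                     params=[{'page': 1}, {'page': 2}])
    for doc in docs:
        print(list(doc.keys()))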
|
py | 1a51b88933dd7e9075dfbb23a2820c25ea1b310d | # -*- coding: utf-8 -*-
import json
from odoo import api, models, _
from odoo.tools import float_round
class ReportBomStructure(models.AbstractModel):
_name = 'report.mrp.report_bom_structure'
_description = 'BOM Structure Report'
@api.model
def _get_report_values(self, docids, data=None):
docs = []
for bom_id in docids:
bom = self.env['mrp.bom'].browse(bom_id)
candidates = bom.product_id or bom.product_tmpl_id.product_variant_ids
for product_variant_id in candidates:
if data and data.get('childs'):
doc = self._get_pdf_line(bom_id, product_id=product_variant_id, qty=float(data.get('quantity')), child_bom_ids=json.loads(data.get('childs')))
else:
doc = self._get_pdf_line(bom_id, product_id=product_variant_id, unfolded=True)
doc['report_type'] = 'pdf'
doc['report_structure'] = data and data.get('report_type') or 'all'
docs.append(doc)
if not candidates:
if data and data.get('childs'):
doc = self._get_pdf_line(bom_id, qty=float(data.get('quantity')), child_bom_ids=json.loads(data.get('childs')))
else:
doc = self._get_pdf_line(bom_id, unfolded=True)
doc['report_type'] = 'pdf'
doc['report_structure'] = data and data.get('report_type') or 'all'
docs.append(doc)
return {
'doc_ids': docids,
'doc_model': 'mrp.bom',
'docs': docs,
}
@api.model
def get_html(self, bom_id=False, searchQty=1, searchVariant=False):
res = self._get_report_data(bom_id=bom_id, searchQty=searchQty, searchVariant=searchVariant)
res['lines']['report_type'] = 'html'
res['lines']['report_structure'] = 'all'
res['lines']['has_attachments'] = res['lines']['attachments'] or any(component['attachments'] for component in res['lines']['components'])
res['lines'] = self.env.ref('mrp.report_mrp_bom').render({'data': res['lines']})
return res
@api.model
def get_bom(self, bom_id=False, product_id=False, line_qty=False, line_id=False, level=False):
lines = self._get_bom(bom_id=bom_id, product_id=product_id, line_qty=line_qty, line_id=line_id, level=level)
return self.env.ref('mrp.report_mrp_bom_line').render({'data': lines})
@api.model
def get_operations(self, bom_id=False, qty=0, level=0):
bom = self.env['mrp.bom'].browse(bom_id)
lines = self._get_operation_line(bom.routing_id, float_round(qty / bom.product_qty, precision_rounding=1, rounding_method='UP'), level)
values = {
'bom_id': bom_id,
'currency': self.env.user.company_id.currency_id,
'operations': lines,
}
return self.env.ref('mrp.report_mrp_operation_line').render({'data': values})
def _get_bom_reference(self, bom):
return bom.display_name
@api.model
def _get_report_data(self, bom_id, searchQty=0, searchVariant=False):
lines = {}
bom = self.env['mrp.bom'].browse(bom_id)
bom_quantity = searchQty or bom.product_qty
bom_product_variants = {}
bom_uom_name = ''
if bom:
bom_uom_name = bom.product_uom_id.name
# Get variants used for search
if not bom.product_id:
for variant in bom.product_tmpl_id.product_variant_ids:
bom_product_variants[variant.id] = variant.display_name
lines = self._get_bom(bom_id, product_id=searchVariant, line_qty=bom_quantity, level=1)
return {
'lines': lines,
'variants': bom_product_variants,
'bom_uom_name': bom_uom_name,
'bom_qty': bom_quantity,
'is_variant_applied': self.env.user.user_has_groups('product.group_product_variant') and len(bom_product_variants) > 1,
'is_uom_applied': self.env.user.user_has_groups('uom.group_uom')
}
def _get_bom(self, bom_id=False, product_id=False, line_qty=False, line_id=False, level=False):
bom = self.env['mrp.bom'].browse(bom_id)
bom_quantity = line_qty
if line_id:
current_line = self.env['mrp.bom.line'].browse(int(line_id))
bom_quantity = current_line.product_uom_id._compute_quantity(line_qty, bom.product_uom_id)
# Display bom components for current selected product variant
if product_id:
product = self.env['product.product'].browse(int(product_id))
else:
product = bom.product_id or bom.product_tmpl_id.product_variant_id
if product:
attachments = self.env['mrp.document'].search(['|', '&', ('res_model', '=', 'product.product'),
('res_id', '=', product.id), '&', ('res_model', '=', 'product.template'), ('res_id', '=', product.product_tmpl_id.id)])
else:
product = bom.product_tmpl_id
attachments = self.env['mrp.document'].search([('res_model', '=', 'product.template'), ('res_id', '=', product.id)])
operations = self._get_operation_line(bom.routing_id, float_round(bom_quantity / bom.product_qty, precision_rounding=1, rounding_method='UP'), 0)
lines = {
'bom': bom,
'bom_qty': bom_quantity,
'bom_prod_name': product.display_name,
'currency': self.env.user.company_id.currency_id,
'product': product,
'code': bom and self._get_bom_reference(bom) or '',
'price': product.uom_id._compute_price(product.standard_price, bom.product_uom_id) * bom_quantity,
'total': sum([op['total'] for op in operations]),
'level': level or 0,
'operations': operations,
'operations_cost': sum([op['total'] for op in operations]),
'attachments': attachments,
'operations_time': sum([op['duration_expected'] for op in operations])
}
components, total = self._get_bom_lines(bom, bom_quantity, product, line_id, level)
lines['components'] = components
lines['total'] += total
return lines
def _get_bom_lines(self, bom, bom_quantity, product, line_id, level):
components = []
total = 0
for line in bom.bom_line_ids:
line_quantity = (bom_quantity / (bom.product_qty or 1.0)) * line.product_qty
if line._skip_bom_line(product):
continue
price = line.product_id.uom_id._compute_price(line.product_id.standard_price, line.product_uom_id) * line_quantity
if line.child_bom_id:
factor = line.product_uom_id._compute_quantity(line_quantity, line.child_bom_id.product_uom_id) / line.child_bom_id.product_qty
sub_total = self._get_price(line.child_bom_id, factor, line.product_id)
else:
sub_total = price
sub_total = self.env.user.company_id.currency_id.round(sub_total)
components.append({
'prod_id': line.product_id.id,
'prod_name': line.product_id.display_name,
'code': line.child_bom_id and self._get_bom_reference(line.child_bom_id) or '',
'prod_qty': line_quantity,
'prod_uom': line.product_uom_id.name,
'prod_cost': self.env.user.company_id.currency_id.round(price),
'parent_id': bom.id,
'line_id': line.id,
'level': level or 0,
'total': sub_total,
'child_bom': line.child_bom_id.id,
'phantom_bom': line.child_bom_id and line.child_bom_id.type == 'phantom' or False,
'attachments': self.env['mrp.document'].search(['|', '&',
('res_model', '=', 'product.product'), ('res_id', '=', line.product_id.id), '&', ('res_model', '=', 'product.template'), ('res_id', '=', line.product_id.product_tmpl_id.id)]),
})
total += sub_total
return components, total
def _get_operation_line(self, routing, qty, level):
operations = []
total = 0.0
for operation in routing.operation_ids:
operation_cycle = float_round(qty / operation.workcenter_id.capacity, precision_rounding=1, rounding_method='UP')
duration_expected = operation_cycle * operation.time_cycle + operation.workcenter_id.time_stop + operation.workcenter_id.time_start
total = ((duration_expected / 60.0) * operation.workcenter_id.costs_hour)
operations.append({
'level': level or 0,
'operation': operation,
'name': operation.name + ' - ' + operation.workcenter_id.name,
'duration_expected': duration_expected,
'total': self.env.user.company_id.currency_id.round(total),
})
return operations
def _get_price(self, bom, factor, product):
price = 0
if bom.routing_id:
            # Routings are defined on a BoM and don't have a concept of quantity.
            # This means the operation times are defined for the quantity on the
            # BoM (the user produces a batch of products). E.g. if the user
            # produces a batch of 10 units with a 5-minute operation, the time
            # will be 5 minutes for a quantity between 1-10, then doubled for
            # 11-20, ...
operation_cycle = float_round(factor, precision_rounding=1, rounding_method='UP')
operations = self._get_operation_line(bom.routing_id, operation_cycle, 0)
price += sum([op['total'] for op in operations])
for line in bom.bom_line_ids:
if line._skip_bom_line(product):
continue
if line.child_bom_id:
qty = line.product_uom_id._compute_quantity(line.product_qty * factor, line.child_bom_id.product_uom_id) / line.child_bom_id.product_qty
sub_price = self._get_price(line.child_bom_id, qty, line.product_id)
price += sub_price
else:
prod_qty = line.product_qty * factor
not_rounded_price = line.product_id.uom_id._compute_price(line.product_id.standard_price, line.product_uom_id) * prod_qty
price += self.env.user.company_id.currency_id.round(not_rounded_price)
return price
def _get_pdf_line(self, bom_id, product_id=False, qty=1, child_bom_ids=[], unfolded=False):
data = self._get_bom(bom_id=bom_id, product_id=product_id.id, line_qty=qty)
def get_sub_lines(bom, product_id, line_qty, line_id, level):
data = self._get_bom(bom_id=bom.id, product_id=product_id.id, line_qty=line_qty, line_id=line_id, level=level)
bom_lines = data['components']
lines = []
for bom_line in bom_lines:
lines.append({
'name': bom_line['prod_name'],
'type': 'bom',
'quantity': bom_line['prod_qty'],
'uom': bom_line['prod_uom'],
'prod_cost': bom_line['prod_cost'],
'bom_cost': bom_line['total'],
'level': bom_line['level'],
'code': bom_line['code']
})
if bom_line['child_bom'] and (unfolded or bom_line['child_bom'] in child_bom_ids):
line = self.env['mrp.bom.line'].browse(bom_line['line_id'])
lines += (get_sub_lines(line.child_bom_id, line.product_id, bom_line['prod_qty'], line, level + 1))
if data['operations']:
lines.append({
'name': _('Operations'),
'type': 'operation',
'quantity': data['operations_time'],
'uom': _('minutes'),
'bom_cost': data['operations_cost'],
'level': level,
})
for operation in data['operations']:
if unfolded or 'operation-' + str(bom.id) in child_bom_ids:
lines.append({
'name': operation['name'],
'type': 'operation',
'quantity': operation['duration_expected'],
'uom': _('minutes'),
'bom_cost': operation['total'],
'level': level + 1,
})
return lines
bom = self.env['mrp.bom'].browse(bom_id)
product = product_id or bom.product_id or bom.product_tmpl_id.product_variant_id
pdf_lines = get_sub_lines(bom, product, qty, False, 1)
data['components'] = []
data['lines'] = pdf_lines
return data
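# A small sketch of the cycle rounding used in _get_operation_line() and _get_price():
# float_round(..., rounding_method='UP') rounds partial batches up to whole cycles,
# so e.g. 10 units on a workcenter with capacity 4 costs 3 cycles.
def _example_operation_cycle_rounding():
    for qty, capacity in [(10, 4), (4, 4), (1, 4)]:
        cycles = float_round(qty / capacity, precision_rounding=1, rounding_method='UP')
        print(qty, capacity, cycles)  # -> 3.0, 1.0 and 1.0 cycles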
|
py | 1a51b930fa5b5b15c114067687678d191e6e5ea5 | # Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import os
import time
import math
import json
import random
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from torch.nn.utils import clip_grad_norm_
from torch.utils.data.distributed import DistributedSampler
from pysot.utils.lr_scheduler import build_lr_scheduler
from pysot.utils.log_helper import init_log, print_speed, add_file_handler
from pysot.utils.distributed import dist_init, DistModule, reduce_gradients,\
average_reduce, get_rank, get_world_size
from pysot.utils.model_load import load_pretrain, restore_from
from pysot.utils.average_meter import AverageMeter
from pysot.utils.misc import describe, commit
from pysot.models.model_builder import ModelBuilder
from pysot.datasets.dataset import TrkDataset
from pysot.datasets.dataflow import get_train_dataflow
from pysot.config import cfg
logger = logging.getLogger('global')
parser = argparse.ArgumentParser(description='siamrpn tracking')
parser.add_argument('--cfg', type=str, default='config.yaml',
help='configuration of tracking')
parser.add_argument('--seed', type=int, default=123456,
help='random seed')
parser.add_argument('--local_rank', type=int, default=0,
                    help='compulsory for pytorch launcher')
args = parser.parse_args()
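# Typical launch (a sketch; the script name, config path and GPU count are examples):
# the training script is meant to be started through the PyTorch distributed
# launcher, which supplies the --local_rank argument parsed above, e.g.
#   python -m torch.distributed.launch --nproc_per_node=8 train.py --cfg config.yaml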
torch.autograd.set_detect_anomaly(True)
def seed_torch(seed=0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def build_data_loader():
logger.info("build train dataset")
# train_dataset
train_dataset = get_train_dataflow() #TrkDataset()
logger.info("build dataset done")
# let tensorpack handle all the distributed data loading
train_loader = DataLoader(train_dataset,
batch_size=None,
batch_sampler=None,
sampler=None)
# train_sampler = None
# if get_world_size() > 1:
# train_sampler = DistributedSampler(train_dataset)
# train_loader = DataLoader(train_dataset,
# batch_size=cfg.TRAIN.BATCH_SIZE,
# num_workers=cfg.TRAIN.NUM_WORKERS,
# pin_memory=True,
# sampler=train_sampler)
return train_loader
def build_opt_lr(model, current_epoch=0):
if current_epoch >= cfg.BACKBONE.TRAIN_EPOCH:
for layer in cfg.BACKBONE.TRAIN_LAYERS:
for param in getattr(model.backbone, layer).parameters():
param.requires_grad = True
for m in getattr(model.backbone, layer).modules():
if isinstance(m, nn.BatchNorm2d):
m.train()
else:
for param in model.backbone.parameters():
param.requires_grad = False
for m in model.backbone.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
trainable_params = []
trainable_params += [{'params': filter(lambda x: x.requires_grad,
model.backbone.parameters()),
'lr': cfg.BACKBONE.LAYERS_LR * cfg.TRAIN.BASE_LR}]
if cfg.ADJUST.ADJUST:
trainable_params += [{'params': model.neck.parameters(),
'lr': cfg.TRAIN.BASE_LR}]
trainable_params += [{'params': model.rpn_head.parameters(),
'lr': cfg.TRAIN.BASE_LR}]
optimizer = torch.optim.SGD(trainable_params,
momentum=cfg.TRAIN.MOMENTUM,
weight_decay=cfg.TRAIN.WEIGHT_DECAY)
lr_scheduler = build_lr_scheduler(optimizer, epochs=cfg.TRAIN.EPOCH)
lr_scheduler.step(cfg.TRAIN.START_EPOCH)
return optimizer, lr_scheduler
def log_grads(model, tb_writer, tb_index):
def weights_grads(model):
grad = {}
weights = {}
for name, param in model.named_parameters():
if param.grad is not None:
grad[name] = param.grad
weights[name] = param.data
return grad, weights
grad, weights = weights_grads(model)
feature_norm, rpn_norm = 0, 0
for k, g in grad.items():
_norm = g.data.norm(2)
weight = weights[k]
w_norm = weight.norm(2)
if 'feature' in k:
feature_norm += _norm ** 2
else:
rpn_norm += _norm ** 2
tb_writer.add_scalar('grad_all/'+k.replace('.', '/'),
_norm, tb_index)
tb_writer.add_scalar('weight_all/'+k.replace('.', '/'),
w_norm, tb_index)
tb_writer.add_scalar('w-g/'+k.replace('.', '/'),
w_norm/(1e-20 + _norm), tb_index)
tot_norm = feature_norm + rpn_norm
tot_norm = tot_norm ** 0.5
feature_norm = feature_norm ** 0.5
rpn_norm = rpn_norm ** 0.5
tb_writer.add_scalar('grad/tot', tot_norm, tb_index)
tb_writer.add_scalar('grad/feature', feature_norm, tb_index)
tb_writer.add_scalar('grad/rpn', rpn_norm, tb_index)
def train(train_loader, model, optimizer, lr_scheduler, tb_writer):
cur_lr = lr_scheduler.get_cur_lr()
rank = get_rank()
average_meter = AverageMeter()
def is_valid_number(x):
return not(math.isnan(x) or math.isinf(x) or x > 1e4)
world_size = get_world_size()
num_per_epoch = len(train_loader.dataset) // world_size
# num_per_epoch = len(train_loader.dataset) // \
# cfg.TRAIN.EPOCH // (cfg.TRAIN.BATCH_SIZE * world_size)
start_epoch = cfg.TRAIN.START_EPOCH
epoch = start_epoch
if not os.path.exists(cfg.TRAIN.SNAPSHOT_DIR) and \
get_rank() == 0:
os.makedirs(cfg.TRAIN.SNAPSHOT_DIR)
logger.info("model\n{}".format(describe(model.module)))
end = time.time()
for idx, data in enumerate(train_loader):
if epoch != idx // num_per_epoch + start_epoch:
epoch = idx // num_per_epoch + start_epoch
if get_rank() == 0:
torch.save(
{'epoch': epoch,
'state_dict': model.module.state_dict(),
'optimizer': optimizer.state_dict()},
cfg.TRAIN.SNAPSHOT_DIR+'/checkpoint_e%d.pth' % (epoch))
if epoch == cfg.TRAIN.EPOCH:
return
if cfg.BACKBONE.TRAIN_EPOCH == epoch:
logger.info('start training backbone.')
optimizer, lr_scheduler = build_opt_lr(model.module, epoch)
logger.info("model\n{}".format(describe(model.module)))
lr_scheduler.step(epoch)
cur_lr = lr_scheduler.get_cur_lr()
logger.info('epoch: {}'.format(epoch+1))
tb_idx = idx
if idx % num_per_epoch == 0 and idx != 0:
            for pg_idx, pg in enumerate(optimizer.param_groups):
                logger.info('epoch {} lr {}'.format(epoch+1, pg['lr']))
                if rank == 0:
                    tb_writer.add_scalar('lr/group{}'.format(pg_idx+1),
                                         pg['lr'], tb_idx)
data_time = average_reduce(time.time() - end)
if rank == 0:
tb_writer.add_scalar('time/data', data_time, tb_idx)
outputs = model(data)
loss = outputs['total_loss']
if is_valid_number(loss.data.item()):
optimizer.zero_grad()
loss.backward()
reduce_gradients(model)
if rank == 0 and cfg.TRAIN.LOG_GRADS:
log_grads(model.module, tb_writer, tb_idx)
# clip gradient
clip_grad_norm_(model.parameters(), cfg.TRAIN.GRAD_CLIP)
optimizer.step()
batch_time = time.time() - end
batch_info = {}
batch_info['batch_time'] = average_reduce(batch_time)
batch_info['data_time'] = average_reduce(data_time)
for k, v in sorted(outputs.items()):
batch_info[k] = average_reduce(v.data.item())
average_meter.update(**batch_info)
if rank == 0:
for k, v in batch_info.items():
tb_writer.add_scalar(k, v, tb_idx)
if (idx+1) % cfg.TRAIN.PRINT_FREQ == 0:
info = "Epoch: [{}][{}/{}] lr: {:.6f}\n".format(
epoch+1, (idx+1) % num_per_epoch,
num_per_epoch, cur_lr)
for cc, (k, v) in enumerate(batch_info.items()):
if cc % 2 == 0:
info += ("\t{:s}\t").format(
getattr(average_meter, k))
else:
info += ("{:s}\n").format(
getattr(average_meter, k))
logger.info(info)
print_speed(idx+1+start_epoch*num_per_epoch,
average_meter.batch_time.avg,
cfg.TRAIN.EPOCH * num_per_epoch)
end = time.time()
def main():
rank, world_size = dist_init()
logger.info("init done")
# load cfg
cfg.merge_from_file(args.cfg)
if rank == 0:
if not os.path.exists(cfg.TRAIN.LOG_DIR):
os.makedirs(cfg.TRAIN.LOG_DIR)
init_log('global', logging.INFO)
if cfg.TRAIN.LOG_DIR:
add_file_handler('global',
os.path.join(cfg.TRAIN.LOG_DIR, 'logs.txt'),
logging.INFO)
logger.info("Version Information: \n{}\n".format(commit()))
logger.info("config \n{}".format(json.dumps(cfg, indent=4)))
# create model
model = ModelBuilder().cuda().train()
dist_model = DistModule(model)
# load pretrained backbone weights
if cfg.BACKBONE.PRETRAINED:
cur_path = os.path.dirname(os.path.realpath(__file__))
backbone_path = os.path.join(cur_path, '../', cfg.BACKBONE.PRETRAINED)
load_pretrain(model.backbone, backbone_path)
# create tensorboard writer
if rank == 0 and cfg.TRAIN.LOG_DIR:
tb_writer = SummaryWriter(cfg.TRAIN.LOG_DIR)
else:
tb_writer = None
# build dataset loader
train_loader = build_data_loader()
# build optimizer and lr_scheduler
optimizer, lr_scheduler = build_opt_lr(dist_model.module,
cfg.TRAIN.START_EPOCH)
# resume training
if cfg.TRAIN.RESUME:
logger.info("resume from {}".format(cfg.TRAIN.RESUME))
assert os.path.isfile(cfg.TRAIN.RESUME), \
'{} is not a valid file.'.format(cfg.TRAIN.RESUME)
model, optimizer, cfg.TRAIN.START_EPOCH = \
restore_from(model, optimizer, cfg.TRAIN.RESUME)
# load pretrain
elif cfg.TRAIN.PRETRAINED:
load_pretrain(model, cfg.TRAIN.PRETRAINED)
dist_model = DistModule(model)
logger.info(lr_scheduler)
logger.info("model prepare done")
# start training
train(train_loader, dist_model, optimizer, lr_scheduler, tb_writer)
if __name__ == '__main__':
seed_torch(args.seed)
main()
|
py | 1a51b9ca2a194971cdb97ed421fbdfd046289920 | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.nn.functional as F
import torch.nn as nn
import torch.optim as optim
import torch
import unittest
from monai.losses import FocalLoss
class TestFocalLoss(unittest.TestCase):
def test_consistency_with_cross_entropy_2d(self):
# For gamma=0 the focal loss reduces to the cross entropy loss
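        # Focal loss: FL(p_t) = -(1 - p_t)**gamma * log(p_t). With gamma = 0 the
        # modulating factor (1 - p_t)**gamma equals 1, leaving plain cross entropy.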
focal_loss = FocalLoss(gamma=0.0, reduction="mean")
ce = nn.CrossEntropyLoss(reduction="mean")
max_error = 0
class_num = 10
batch_size = 128
for _ in range(100):
# Create a random tensor of shape (batch_size, class_num, 8, 4)
x = torch.rand(batch_size, class_num, 8, 4, requires_grad=True)
# Create a random batch of classes
l = torch.randint(low=0, high=class_num, size=(batch_size, 8, 4))
l = l.long()
if torch.cuda.is_available():
x = x.cuda()
l = l.cuda()
output0 = focal_loss.forward(x, l)
output1 = ce.forward(x, l)
a = float(output0.cpu().detach())
b = float(output1.cpu().detach())
if abs(a - b) > max_error:
max_error = abs(a - b)
self.assertAlmostEqual(max_error, 0.0, places=3)
def test_consistency_with_cross_entropy_classification(self):
# for gamma=0 the focal loss reduces to the cross entropy loss
focal_loss = FocalLoss(gamma=0.0, reduction="mean")
ce = nn.CrossEntropyLoss(reduction="mean")
max_error = 0
class_num = 10
batch_size = 128
for _ in range(100):
# Create a random scores tensor of shape (batch_size, class_num)
x = torch.rand(batch_size, class_num, requires_grad=True)
# Create a random batch of classes
l = torch.randint(low=0, high=class_num, size=(batch_size,))
l = l.long()
if torch.cuda.is_available():
x = x.cuda()
l = l.cuda()
output0 = focal_loss.forward(x, l)
output1 = ce.forward(x, l)
a = float(output0.cpu().detach())
b = float(output1.cpu().detach())
if abs(a - b) > max_error:
max_error = abs(a - b)
self.assertAlmostEqual(max_error, 0.0, places=3)
def test_bin_seg_2d(self):
# define 2d examples
target = torch.tensor([[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]])
# add another dimension corresponding to the batch (batch size = 1 here)
target = target.unsqueeze(0) # shape (1, H, W)
pred_very_good = 1000 * F.one_hot(target, num_classes=2).permute(0, 3, 1, 2).float()
# initialize the mean dice loss
loss = FocalLoss()
# focal loss for pred_very_good should be close to 0
focal_loss_good = float(loss.forward(pred_very_good, target).cpu())
self.assertAlmostEqual(focal_loss_good, 0.0, places=3)
# Same test, but for target with a class dimension
target = target.unsqueeze(1) # shape (1, 1, H, W)
focal_loss_good = float(loss.forward(pred_very_good, target).cpu())
self.assertAlmostEqual(focal_loss_good, 0.0, places=3)
def test_empty_class_2d(self):
num_classes = 2
# define 2d examples
target = torch.tensor([[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]])
# add another dimension corresponding to the batch (batch size = 1 here)
target = target.unsqueeze(0) # shape (1, H, W)
pred_very_good = 1000 * F.one_hot(target, num_classes=num_classes).permute(0, 3, 1, 2).float()
# initialize the mean dice loss
loss = FocalLoss()
# focal loss for pred_very_good should be close to 0
focal_loss_good = float(loss.forward(pred_very_good, target).cpu())
self.assertAlmostEqual(focal_loss_good, 0.0, places=3)
def test_multi_class_seg_2d(self):
num_classes = 6 # labels 0 to 5
# define 2d examples
target = torch.tensor([[0, 0, 0, 0], [0, 1, 2, 0], [0, 3, 4, 0], [0, 0, 0, 0]])
# add another dimension corresponding to the batch (batch size = 1 here)
target = target.unsqueeze(0) # shape (1, H, W)
pred_very_good = 1000 * F.one_hot(target, num_classes=num_classes).permute(0, 3, 1, 2).float()
# initialize the mean dice loss
loss = FocalLoss()
# focal loss for pred_very_good should be close to 0
focal_loss_good = float(loss.forward(pred_very_good, target).cpu())
self.assertAlmostEqual(focal_loss_good, 0.0, places=3)
def test_bin_seg_3d(self):
        # define a 3d example
target = torch.tensor(
[
                # row 0
[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
                # row 1
[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
                # row 2
[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
]
)
# add another dimension corresponding to the batch (batch size = 1 here)
target = target.unsqueeze(0) # shape (1, H, W, D)
pred_very_good = 1000 * F.one_hot(target, num_classes=2).permute(0, 4, 1, 2, 3).float()
# initialize the mean dice loss
loss = FocalLoss()
# focal loss for pred_very_good should be close to 0
focal_loss_good = float(loss.forward(pred_very_good, target).cpu())
self.assertAlmostEqual(focal_loss_good, 0.0, places=3)
def test_convergence(self):
"""
The goal of this test is to assess if the gradient of the loss function
is correct by testing if we can train a one layer neural network
to segment one image.
We verify that the loss is decreasing in almost all SGD steps.
"""
learning_rate = 0.001
max_iter = 20
# define a simple 3d example
target_seg = torch.tensor(
[
# row 0
[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
# row 1
[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
# row 2
[[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]],
]
)
target_seg = torch.unsqueeze(target_seg, dim=0)
image = 12 * target_seg + 27
image = image.float()
num_classes = 2
num_voxels = 3 * 4 * 4
# define a one layer model
class OnelayerNet(nn.Module):
def __init__(self):
super(OnelayerNet, self).__init__()
self.layer = nn.Linear(num_voxels, num_voxels * num_classes)
def forward(self, x):
x = x.view(-1, num_voxels)
x = self.layer(x)
x = x.view(-1, num_classes, 3, 4, 4)
return x
# initialise the network
net = OnelayerNet()
# initialize the loss
loss = FocalLoss()
# initialize an SGD
optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)
loss_history = []
# train the network
for _ in range(max_iter):
# set the gradient to zero
optimizer.zero_grad()
# forward pass
output = net(image)
loss_val = loss(output, target_seg)
# backward pass
loss_val.backward()
optimizer.step()
# stats
loss_history.append(loss_val.item())
# count the number of SGD steps in which the loss decreases
num_decreasing_steps = 0
for i in range(len(loss_history) - 1):
if loss_history[i] > loss_history[i + 1]:
num_decreasing_steps += 1
decreasing_steps_ratio = float(num_decreasing_steps) / (len(loss_history) - 1)
# verify that the loss is decreasing for sufficiently many SGD steps
self.assertTrue(decreasing_steps_ratio > 0.9)
if __name__ == "__main__":
unittest.main()
|
py | 1a51b9d656a2fac6038c024a49a40b55d783a557 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.sks import dashboard
class BarChart(horizon.Panel):
name = _("BarChart")
slug = 'bar_chart'
dashboard.SKS.register(BarChart)
|
py | 1a51bb84286665da2525afee6263a66901fc2d02 | # Copyright (c) 2011 Tencent Inc.
# All rights reserved.
#
# Author: Huan Yu <[email protected]>
# Feng Chen <[email protected]>
# Yi Wang <[email protected]>
# Chong Peng <[email protected]>
# Wenting Li <[email protected]>
# Date: October 20, 2011
# pylint: disable=too-many-lines
"""
This is the scons rules helper module which should be
imported by the SCons script
"""
from __future__ import absolute_import
import os
import py_compile
import re
import shutil
import signal
import socket
import string
import subprocess
import sys
import tarfile
import tempfile
import time
import zipfile
# pylint: disable=E0401
import SCons
import SCons.Action
import SCons.Builder
import SCons.Scanner
import SCons.Scanner.Prog
from blade import blade_util
from blade import console
from blade import toolchain
from blade.blade_util import iteritems
from blade.console import color
# option_verbose to indicate print verbosity level
_verbosity = 'normal'
# blade path
blade_path = os.path.dirname(__file__)
# build error log during scons execution
blade_error_log = None
# linking tmp dir
linking_tmp_dir = ''
# build time stamp
build_time = time.time()
proto_import_re = re.compile(r'^import\s+"(\S+)"\s*;\s*$', re.M)
proto_import_public_re = re.compile(r'^import\s+public\s+"(\S+)"\s*;\s*$', re.M)
def set_blade_error_log(path):
global blade_error_log
if blade_error_log:
console.warning('blade error log was already set to %s' %
blade_error_log.name)
else:
blade_error_log = open(path, 'w')
def _compile_python(src, build_dir):
if src.startswith(build_dir):
pyc = src + 'c'
else:
pyc = os.path.join(build_dir, src) + 'c'
dir = os.path.dirname(pyc)
if not os.path.exists(dir):
os.makedirs(dir)
py_compile.compile(src, pyc)
return pyc
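# For illustration (paths are hypothetical): a source outside the build dir
# is compiled into the build dir, while one already under it is compiled in
# place next to itself:
#   _compile_python('common/util.py', 'build64_release')
#       -> 'build64_release/common/util.pyc'
#   _compile_python('build64_release/gen/foo.py', 'build64_release')
#       -> 'build64_release/gen/foo.pyc'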
def generate_python_library(target, source, env):
data = dict()
data['base_dir'] = env.get('BASE_DIR', '')
srcs = []
for s in source:
src = str(s)
digest = blade_util.md5sum_file(src)
srcs.append((src, digest))
data['srcs'] = srcs
with open(str(target[0]), 'w') as f:
f.write(str(data))
return None
def generate_python_binary(target, source, env):
"""The action to generate python executable file. """
target_name = str(target[0])
base_dir, build_dir = env.get('BASE_DIR', ''), env['BUILD_DIR']
entry = env['ENTRY']
srcs = [str(s) for s in source]
toolchain.generate_python_binary(base_dir, entry, target_name, srcs)
return None
def generate_resource_index(target, source, env):
res_source_path = str(target[0])
res_header_path = str(target[1])
if not os.path.exists(os.path.dirname(res_header_path)):
os.mkdir(os.path.dirname(res_header_path))
with open(res_header_path, 'w') as h, open(res_source_path, 'w') as c:
source_path = env["SOURCE_PATH"]
full_name = blade_util.regular_variable_name("%s/%s" % (source_path, env["TARGET_NAME"]))
guard_name = 'BLADE_RESOURCE_%s_H' % full_name.upper()
h.write('#ifndef %s\n#define %s\n' % (guard_name, guard_name))
h.write('''
// This file was automatically generated by blade
#ifdef __cplusplus
extern "C" {
#endif
#ifndef BLADE_RESOURCE_TYPE_DEFINED
#define BLADE_RESOURCE_TYPE_DEFINED
struct BladeResourceEntry {
const char* name;
const char* data;
unsigned int size;
};
#endif
''')
res_index_name = 'RESOURCE_INDEX_%s' % full_name
c.write('// This file was automatically generated by blade\n')
c.write('#include "%s"\n' % res_header_path)
c.write('const struct BladeResourceEntry %s[] = {\n' % res_index_name)
for s in source:
src = str(s)
var_name = blade_util.regular_variable_name(src)
org_src = os.path.relpath(src, source_path)
h.write('// %s\n' % org_src)
h.write('extern const char RESOURCE_%s[%d];\n' % (var_name, s.get_size()))
h.write('extern const unsigned RESOURCE_%s_len;\n' % var_name)
c.write(' { "%s", RESOURCE_%s, %s },\n' % (org_src, var_name, s.get_size()))
c.write('};\n')
c.write('const unsigned %s_len = %s;\n' % (res_index_name, len(source)))
h.write('// Resource index\n')
h.write('extern const struct BladeResourceEntry %s[];\n' % res_index_name)
h.write('extern const unsigned %s_len;\n' % res_index_name)
h.write('\n#ifdef __cplusplus\n} // extern "C"\n#endif\n')
h.write('\n#endif // %s\n' % guard_name)
return None
def generate_resource_file(target, source, env):
"""Generate resource source file in resource_library"""
src_path = str(source[0])
new_src_path = str(target[0])
cmd = ('xxd -i %s | sed -e "s/^unsigned char /const char RESOURCE_/g" '
'-e "s/^unsigned int /const unsigned int RESOURCE_/g"> %s') % (
src_path, new_src_path)
p = subprocess.Popen(
cmd,
env={},
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
stdout, stderr = p.communicate()
if p.returncode or stderr:
error = 'failed to generate resource file'
if stderr:
error = error + ': ' + stderr
console.error_exit(error)
return p.returncode
def process_java_sources(target, source, env):
"""Copy source file into .sources dir. """
shutil.copy2(str(source[0]), str(target[0]))
return None
def process_java_resources(target, source, env):
"""Copy resource file into .resources dir. """
shutil.copy2(str(source[0]), str(target[0]))
return None
def _check_java_jar_classes(sources, classes_dir):
"""Check if all the classes are generated into classes_dir completely. """
# pylint: disable=too-many-nested-blocks
sources = sorted([os.path.basename(s) for s in sources])
sources = [s for s in sources if s[0].isupper()]
classes = ['%s.class' % s[:-5] for s in sources]
if not classes:
return
generated_classes = []
paths = set()
retry = 0
while retry < 3:
for dir, subdirs, files in os.walk(classes_dir):
for f in files:
if f.endswith('.class'):
f = os.path.relpath(os.path.join(dir, f), classes_dir)
if f not in paths:
paths.add(f)
name = os.path.basename(f)
if '$' not in name:
generated_classes.append(name)
generated_classes.sort()
i, j = 0, 0
while j != len(generated_classes):
if classes[i] == generated_classes[j]:
i += 1
if i == len(classes):
return
j += 1
time.sleep(0.5)
retry += 1
console.debug('Classes: %s Generated classes: %s' % (classes, generated_classes))
console.error_exit('Missing class files in %s' % classes_dir)
def _generate_java_jar(target, sources, resources, env):
"""
Compile the java sources and generate a jar containing the classes and resources.
"""
classes_dir = target.replace('.jar', '.classes')
resources_dir = target.replace('.jar', '.resources')
if os.path.exists(classes_dir):
shutil.rmtree(classes_dir)
os.makedirs(classes_dir)
java, javac, jar, options = env['JAVA'], env['JAVAC'], env['JAR'], env['JAVACFLAGS']
classpath = ':'.join(env['JAVACLASSPATH'])
if not classpath:
classpath = blade_util.get_cwd()
if sources:
cmd = '%s %s -d %s -classpath %s %s' % (
javac, options, classes_dir, classpath, ' '.join(sources))
if echospawn(args=[cmd], env=os.environ, sh=None, cmd=None, escape=None):
return 1
cmd = ['%s cf %s' % (jar, target)]
if sources:
_check_java_jar_classes(sources, classes_dir)
cmd.append('-C %s .' % classes_dir)
if os.path.exists(resources_dir):
for resource in resources:
cmd.append("-C '%s' '%s'" % (resources_dir,
os.path.relpath(resource, resources_dir)))
cmd_str = ' '.join(cmd)
return echospawn(args=[cmd_str], env=os.environ, sh=None, cmd=None, escape=None)
def generate_java_jar(target, source, env):
target = str(target[0])
sources = []
index = 0
for src in source:
if str(src).endswith('.java'):
sources.append(str(src))
index += 1
else:
break
resources = [str(src) for src in source[index:]]
return _generate_java_jar(target, sources, resources, env)
_one_jar_boot_path = None
def _generate_one_jar(target,
main_class,
main_jar,
deps_jar,
one_jar_boot_path):
target_dir = os.path.dirname(target)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
target_one_jar = zipfile.ZipFile(target, 'w')
jar_path_set = set()
# Copy files from one-jar-boot.jar to the target jar
zip_file = zipfile.ZipFile(one_jar_boot_path, 'r')
name_list = zip_file.namelist()
for name in name_list:
if not name.lower().endswith('manifest.mf'): # Exclude manifest
target_one_jar.writestr(name, zip_file.read(name))
jar_path_set.add(name)
zip_file.close()
# Main jar and dependencies
target_one_jar.write(main_jar, os.path.join('main',
os.path.basename(main_jar)))
for dep in deps_jar:
dep_name = os.path.basename(dep)
target_one_jar.write(dep, os.path.join('lib', dep_name))
# Copy resources to the root of target onejar
for jar in [main_jar] + deps_jar:
jar = zipfile.ZipFile(jar, 'r')
jar_name_list = jar.namelist()
for name in jar_name_list:
if name.endswith('.class') or name.upper().startswith('META-INF'):
continue
if name not in jar_path_set:
jar_path_set.add(name)
target_one_jar.writestr(name, jar.read(name))
jar.close()
# Manifest
# Note that the manifest file must end with a new line or carriage return
target_one_jar.writestr(os.path.join('META-INF', 'MANIFEST.MF'),
'''Manifest-Version: 1.0
Main-Class: com.simontuffs.onejar.Boot
One-Jar-Main-Class: %s
''' % main_class)
target_one_jar.close()
return None
def generate_one_jar(target, source, env):
if len(source) < 2:
console.error_exit('Failed to generate java binary from %s: '
'Source should at least contain main class '
'and main jar' % ','.join(str(s) for s in source))
main_class = str(source[0])
main_jar = str(source[1])
deps_jar = []
for dep in source[2:]:
deps_jar.append(str(dep))
target = str(target[0])
# print target, main_class, main_jar, deps_jar, _one_jar_boot_path
return _generate_one_jar(target, main_class, main_jar, deps_jar,
_one_jar_boot_path)
def _is_signature_file(name):
parts = name.upper().split('/')
if len(parts) == 2:
for suffix in ('.SF', '.DSA', '.RSA'):
if parts[1].endswith(suffix):
return True
if parts[1].startswith('SIG-'):
return True
return False
_JAR_MANIFEST = 'META-INF/MANIFEST.MF'
_FATJAR_EXCLUSIONS = frozenset(['LICENSE', 'README', 'NOTICE',
'META-INF/LICENSE', 'META-INF/README',
'META-INF/NOTICE', 'META-INF/INDEX.LIST'])
def _is_fat_jar_excluded(name):
name = name.upper()
for exclusion in _FATJAR_EXCLUSIONS:
if name.startswith(exclusion):
return True
return name == _JAR_MANIFEST or _is_signature_file(name)
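# How the exclusion rules above behave (examples derived from the code,
# entry names are illustrative):
#   _is_fat_jar_excluded('META-INF/MANIFEST.MF')  -> True  (jar manifest)
#   _is_fat_jar_excluded('META-INF/FOO.SF')       -> True  (signature file)
#   _is_fat_jar_excluded('META-INF/LICENSE.txt')  -> True  (license text)
#   _is_fat_jar_excluded('com/example/Foo.class') -> False (kept in fat jar)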
def _generate_fat_jar(target, deps_jar, env):
"""Generate a fat jar containing the contents of all the jar dependencies. """
target_dir = os.path.dirname(target)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
target_fat_jar = zipfile.ZipFile(target, 'w', zipfile.ZIP_DEFLATED)
# Record paths written in the fat jar to avoid duplicate writing
zip_path_dict = {}
zip_path_conflicts = 0
for dep_jar in deps_jar:
jar = zipfile.ZipFile(dep_jar, 'r')
name_list = jar.namelist()
for name in name_list:
if name.endswith('/') or not _is_fat_jar_excluded(name):
if name not in zip_path_dict:
target_fat_jar.writestr(name, jar.read(name))
zip_path_dict[name] = os.path.basename(dep_jar)
else:
if not name.endswith('/'): # Not a directory
zip_path_conflicts += 1
console.log('%s: duplicate path %s found in {%s, %s}' % (
target, name, zip_path_dict[name],
os.path.basename(dep_jar)))
jar.close()
if zip_path_conflicts:
console.warning('%s: Found %d conflicts when packaging. '
'See %s for details.' % (
target, zip_path_conflicts, console.get_log_file()))
# TODO(wentingli): Create manifest from dependency jars later if needed
contents = 'Manifest-Version: 1.0\nCreated-By: Python.Zipfile (Blade)\n'
main_class = env.Dictionary().get('JAVAMAINCLASS')
if main_class:
contents += 'Main-Class: %s\n' % main_class
contents += '\n'
target_fat_jar.writestr(_JAR_MANIFEST, contents)
target_fat_jar.close()
return None
def generate_fat_jar(target, source, env):
target = str(target[0])
dep_jars = [str(dep) for dep in source]
# Create a new process for fatjar packaging to avoid GIL
cmd = 'PYTHONPATH=%s:$PYTHONPATH python -m fatjar %s %s' % (
blade_path, target, ' '.join(dep_jars))
p = subprocess.Popen(cmd,
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
stdout, stderr = p.communicate()
if stdout:
console.warning('%s See %s for details.' % (
stdout.rstrip(), console.get_log_file()))
if stderr:
console.log(stderr)
return p.returncode
def _generate_java_binary(target_name, onejar_path, jvm_flags, run_args):
"""generate a wrapper shell script to run jar"""
onejar_name = os.path.basename(onejar_path)
full_path = os.path.abspath(onejar_path)
target_file = open(target_name, 'w')
target_file.write("""#!/bin/sh
# Auto generated wrapper shell script by blade
jar=`dirname "$0"`/"%s"
if [ ! -f "$jar" ]; then
jar="%s"
fi
exec java %s -jar "$jar" %s $@
""" % (onejar_name, full_path, jvm_flags, run_args))
os.chmod(target_name, 0o755)
target_file.close()
return None
def generate_java_binary(target, source, env):
"""build function to generate wrapper shell script for java binary"""
target_name = str(target[0])
onejar_path = str(source[0])
return _generate_java_binary(target_name, onejar_path, '', '')
def _get_all_test_class_names_in_jar(jar):
"""Returns a list of test class names in the jar file. """
test_class_names = []
zip_file = zipfile.ZipFile(jar, 'r')
name_list = zip_file.namelist()
for name in name_list:
basename = os.path.basename(name)
# Exclude inner class and Test.class
if (basename.endswith('Test.class') and
len(basename) > len('Test.class') and
not '$' in basename):
class_name = name.replace('/', '.')[:-6] # Remove .class suffix
test_class_names.append(class_name)
zip_file.close()
return test_class_names
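# For example (hypothetical jar entries): 'com/foo/BarTest.class' yields
# 'com.foo.BarTest', while a bare 'com/foo/Test.class' and inner classes
# such as 'com/foo/BarTest$1.class' are skipped by the checks above.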
def _generate_java_test_coverage_flag(env):
"""Returns java test coverage flags based on the environment passed in. """
env_dict = env.Dictionary()
jacoco_agent = env_dict.get('JACOCOAGENT')
if jacoco_agent:
jacoco_agent = os.path.abspath(jacoco_agent)
target_under_test_package = env_dict.get('JAVATARGETUNDERTESTPKG')
if target_under_test_package:
options = []
options.append('includes=%s' % ':'.join(
[p + '.*' for p in target_under_test_package if p]))
options.append('output=file')
return '-javaagent:%s=%s' % (jacoco_agent, ','.join(options))
return ''
def _generate_java_test(target, main_class, jars, jvm_flags, run_args, env):
target_file = open(target, 'w')
target_file.write("""#!/bin/sh
# Auto generated wrapper shell script by blade
if [ -n "$BLADE_COVERAGE" ]
then
coverage_options="%s"
fi
exec java $coverage_options -classpath %s %s %s %s $@
""" % (_generate_java_test_coverage_flag(env), ':'.join(jars),
jvm_flags, main_class, run_args))
os.chmod(target, 0o755)
target_file.close()
return None
def generate_java_test(target, source, env):
"""build function to generate wrapper shell script for java test"""
target_name = str(target[0])
main_class = str(source[0])
test_jar = str(source[1])
jars = []
for jar in source[1:]:
jars.append(os.path.abspath(str(jar)))
test_class_names = _get_all_test_class_names_in_jar(test_jar)
return _generate_java_test(target_name, main_class, jars, '',
' '.join(test_class_names), env)
def _generate_scala_jar(target, sources, resources, env):
"""
Compile scala sources and generate a jar containing
the classes and resources.
"""
scalac = env['SCALAC']
java = env['JAVA']
jar = env['JAR']
options = ' '.join(env['SCALACFLAGS'])
classpath = ':'.join(env['JAVACLASSPATH'])
if not classpath:
classpath = blade_util.get_cwd()
cmd = 'JAVACMD=%s %s -d %s -classpath %s %s %s' % (java, scalac, target,
classpath, options, ' '.join(sources))
if echospawn(args=[cmd], env=os.environ, sh=None, cmd=None, escape=None):
return 1
if resources:
resources_dir = target.replace('.jar', '.resources')
if os.path.exists(resources_dir):
cmd = ['%s uf %s' % (jar, target)]
for resource in resources:
cmd.append("-C '%s' '%s'" % (resources_dir,
os.path.relpath(resource, resources_dir)))
return echospawn(args=cmd, env=os.environ, sh=None, cmd=None, escape=None)
return None
def generate_scala_jar(target, source, env):
target = str(target[0])
sources = []
index = 0
for src in source:
if str(src).endswith('.scala'):
sources.append(str(src))
index += 1
else:
break
resources = [str(src) for src in source[index:]]
return _generate_scala_jar(target, sources, resources, env)
def _generate_scala_test(target, jars, test_class_names, env):
scala, java = env['SCALA'], env['JAVA']
scala, java = os.path.abspath(scala), os.path.abspath(java)
run_args = 'org.scalatest.run ' + ' '.join(test_class_names)
script = open(target, 'w')
script.write("""#!/bin/sh
# Auto generated wrapper shell script by blade
JAVACMD=%s exec %s -classpath %s %s $@
""" % (java, scala, ':'.join(jars), run_args))
script.close()
os.chmod(target, 0o755)
return None
def generate_scala_test(target, source, env):
"""Generate wrapper shell script for scala test. """
target = str(target[0])
test_jar = str(source[0])
jars = [os.path.abspath(str(jar)) for jar in source]
test_class_names = _get_all_test_class_names_in_jar(test_jar)
return _generate_scala_test(target, jars, test_class_names, env)
def process_package_source(target, source, env):
"""Copy source file into .sources dir. """
shutil.copy2(str(source[0]), str(target[0]))
return None
def _get_tar_mode_from_suffix(suffix):
return {
'tar': 'w',
'tar.gz': 'w:gz',
'tgz': 'w:gz',
'tar.bz2': 'w:bz2',
'tbz': 'w:bz2',
}[suffix]
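# e.g. _get_tar_mode_from_suffix('tar.gz') returns 'w:gz'; an unlisted
# suffix such as 'tar.xz' would raise a KeyError here.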
def _archive_package_sources(package, sources, sources_dir):
"""Archive sources into the package and return a list of source info. """
manifest = []
for s in sources:
f = str(s)
if f.startswith(sources_dir):
path = os.path.relpath(f, sources_dir)
else:
path = os.path.basename(f)
package(f, path)
manifest.append('%s %s' % (s.get_csig(), path))
return manifest
_PACKAGE_MANIFEST = 'MANIFEST.TXT'
def _generate_tar_package(target, sources, sources_dir, suffix):
"""Generate a tar ball containing all of the source files. """
mode = _get_tar_mode_from_suffix(suffix)
tar = tarfile.open(target, mode)
manifest = _archive_package_sources(tar.add, sources, sources_dir)
manifest_path = '%s.MANIFEST' % target
with open(manifest_path, 'w') as m:
m.write('\n'.join(manifest) + '\n')
tar.add(manifest_path, _PACKAGE_MANIFEST)
tar.close()
return None
def _generate_zip_package(target, sources, sources_dir):
"""Generate a zip archive containing all of the source files. """
zip = zipfile.ZipFile(target, 'w', zipfile.ZIP_DEFLATED)
manifest = _archive_package_sources(zip.write, sources, sources_dir)
zip.writestr(_PACKAGE_MANIFEST, '\n'.join(manifest) + '\n')
zip.close()
return None
def generate_package(target, source, env):
"""Generate a package containing all of the source files. """
target = str(target[0])
sources_dir = target + '.sources'
suffix = env['PACKAGESUFFIX']
if suffix == 'zip':
return _generate_zip_package(target, source, sources_dir)
else:
return _generate_tar_package(target, source, sources_dir, suffix)
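# For example, with PACKAGESUFFIX set to 'tar.gz' the sources plus a
# MANIFEST.TXT are archived via the tarfile branch in 'w:gz' mode, while
# 'zip' takes the zipfile branch above instead.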
def generate_shell_test_data(target, source, env):
"""Generate test data used by shell script for subsequent execution. """
target = str(target[0])
with open(target, 'w') as testdata:
for i in range(0, len(source), 2):
testdata.write(os.path.abspath(str(source[i])) + ' ' + source[i + 1] + '\n')
return None
def generate_shell_test(target, source, env):
"""Generate a shell wrapper to run shell scripts in source one by one. """
target = str(target[0])
with open(target, 'w') as script:
script.write('#!/bin/sh\n')
script.write('# Auto generated wrapper shell script by blade\n')
script.write('set -e\n')
for s in source:
script.write('. %s' % os.path.abspath(str(s)) + '\n')
script.write('\n')
os.chmod(target, 0o755)
return None
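# The generated wrapper simply sources each input script in order, e.g.
# (with hypothetical inputs a.sh and b.sh):
#   #!/bin/sh
#   # Auto generated wrapper shell script by blade
#   set -e
#   . /abs/path/a.sh
#   . /abs/path/b.sh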
def generate_proto_go_source(target, source, env):
"""Generate go source file by invoking protobuf compiler. """
source = source[0]
import_protos = proto_import_re.findall(source.get_text_contents())
parameters = 'import_prefix=%s/' % env['PROTOBUFGOPATH']
if import_protos:
proto_mappings = []
for proto in import_protos:
dir = os.path.dirname(proto)
name = os.path.basename(proto)
proto_mappings.append('M%s=%s' % (
proto, os.path.join(dir, name.replace('.', '_'))))
parameters += ',%s' % ','.join(proto_mappings)
cmd = '%s --proto_path=. --plugin=protoc-gen-go=%s -I. %s -I=%s --go_out=%s:%s %s' % (
env['PROTOC'], env['PROTOCGOPLUGIN'], env['PROTOBUFINCS'],
os.path.dirname(str(source)), parameters, env['BUILDDIR'], source)
return echospawn(args=[cmd], env=os.environ, sh=None, cmd=None, escape=None)
def copy_proto_go_source(target, source, env):
"""Copy go source file generated by protobuf into go standard directory. """
shutil.copy2(str(source[0]), str(target[0]))
return None
def _generate_go_package(target, source, env):
go, go_home = env['GOCMD'], env['GOHOME']
cmd = 'GOPATH=%s %s install %s' % (go_home, go, env['GOPACKAGE'])
return echospawn(args=[cmd], env=os.environ, sh=None, cmd=None, escape=None)
def generate_go_library(target, source, env):
"""
Generate go package object. Note that the sources should be
in the same directory and the go tool compiles them as a whole
by designating the package path.
"""
return _generate_go_package(target, source, env)
def generate_go_binary(target, source, env):
"""Generate go command executable. """
return _generate_go_package(target, source, env)
def generate_go_test(target, source, env):
"""Generate go test binary. """
go, go_home = env['GOCMD'], env['GOHOME']
cmd = 'GOPATH=%s %s test -c -o %s %s' % (
go_home, go, target[0], env['GOPACKAGE'])
return echospawn(args=[cmd], env=os.environ, sh=None, cmd=None, escape=None)
def MakeAction(cmd, cmdstr):
if console.verbosity_compare(_verbosity, 'verbose') >= 0:
return SCons.Action.Action(cmd)
else:
return SCons.Action.Action(cmd, cmdstr)
_ERRORS = [': error:', ': fatal error:', ': undefined reference to',
': cannot find ', ': ld returned 1 exit status',
' is not defined'
]
_WARNINGS = [': warning:', ': note: ', '] Warning: ']
def error_colorize(message):
colored_message = []
for line in message.splitlines(True): # keepends
color = 'cyan'
# For clang column indicator, such as '^~~~~~'
if line.strip().startswith('^'):
color = 'green'
else:
for w in _WARNINGS:
if w in line:
color = 'yellow'
break
for w in _ERRORS:
if w in line:
color = 'red'
break
colored_message.append(console.colored(line, color))
return console.inerasable(''.join(colored_message))
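# For example, a line containing ': error:' or ': undefined reference to'
# is colored red, one containing ': warning:' yellow, a clang caret line
# starting with '^' green, and everything else cyan.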
def _echo(stdout, stderr):
"""Echo messages to stdout and stderr. """
if stdout:
sys.stdout.write(stdout)
if stderr:
sys.stderr.write(stderr)
if blade_error_log:
blade_error_log.write(stderr)
def echospawn(sh, escape, cmd, args, env):
# convert env from unicode strings
asciienv = {}
for key, value in iteritems(env):
asciienv[key] = str(value)
cmdline = ' '.join(args)
console.debug(cmdline)
p = subprocess.Popen(cmdline,
env=asciienv,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
shell=True,
universal_newlines=True)
stdout, stderr = p.communicate()
if console.verbosity_compare(_verbosity, 'verbose') < 0:
if stdout:
stdout = error_colorize(stdout)
if stderr:
stderr = error_colorize(stderr)
if p.returncode:
if p.returncode != -signal.SIGINT:
# Error
_echo(stdout, stderr)
else:
# Only warnings
_echo(stdout, stderr)
return p.returncode
def _blade_action_postfunc(closing_message):
"""To do post jobs if blade's own actions failed to build. """
console.info(closing_message)
# Remember to write the sconsign dblite in case of re-linking after a
# failed build last time. We should work out a way to avoid rebuilding
# after failures of our own builders or actions.
SCons.SConsign.write()
def _fast_link_helper(target, source, env, link_com):
"""fast link helper function. """
target_file = str(target[0])
prefix_str = 'blade_%s' % target_file.replace('/', '_').replace('.', '_')
fd, temporary_file = tempfile.mkstemp(suffix='xianxian',
prefix=prefix_str,
dir=linking_tmp_dir)
os.close(fd)
sources = []
for s in source:
sources.append(str(s))
link_com_str = link_com.substitute(
FL_TARGET=temporary_file,
FL_SOURCE=' '.join(sources))
p = subprocess.Popen(
link_com_str,
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
std_out, std_err = p.communicate()
if std_out:
print(std_out)
if std_err:
print(std_err)
if p.returncode == 0:
shutil.move(temporary_file, target_file)
if not os.path.exists(target_file):
console.warning('failed to generate %s in link on tmpfs mode' % target_file)
else:
_blade_action_postfunc('failed while fast linking')
return p.returncode
def fast_link_sharelib_action(target, source, env):
# $SHLINK -o $TARGET $SHLINKFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS
link_com = string.Template('%s -o $FL_TARGET %s %s $FL_SOURCE %s %s' % (
env.subst('$SHLINK'),
env.subst('$SHLINKFLAGS'),
env.subst('$__RPATH'),
env.subst('$_LIBDIRFLAGS'),
env.subst('$_LIBFLAGS')))
return _fast_link_helper(target, source, env, link_com)
def fast_link_prog_action(target, source, env):
# $LINK -o $TARGET $LINKFLAGS $__RPATH $SOURCES $_LIBDIRFLAGS $_LIBFLAGS
link_com = string.Template('%s -o $FL_TARGET %s %s $FL_SOURCE %s %s' % (
env.subst('$LINK'),
env.subst('$LINKFLAGS'),
env.subst('$__RPATH'),
env.subst('$_LIBDIRFLAGS'),
env.subst('$_LIBFLAGS')))
return _fast_link_helper(target, source, env, link_com)
def setup_fast_link_prog_builder(top_env):
"""
This is the function to set up the blade fast link
program builder. It will overwrite the Program builder
of the top level env if the user specifies the fast link
option, which is useful when the blade output is placed
on a distributed file system where the linker's random
reads and writes largely degrade building performance.
"""
new_link_action = MakeAction(fast_link_prog_action, '$LINKCOMSTR')
program = SCons.Builder.Builder(action=new_link_action,
emitter='$PROGEMITTER',
prefix='$PROGPREFIX',
suffix='$PROGSUFFIX',
src_suffix='$OBJSUFFIX',
src_builder='Object',
target_scanner=SCons.Scanner.Prog.ProgramScanner())
top_env['BUILDERS']['Program'] = program
def setup_fast_link_sharelib_builder(top_env):
"""
This is the function to set up the blade fast link
sharelib builder. It will overwrite the SharedLibrary
builder of the top level env if the user specifies the
fast link option, which is useful when the blade output
is placed on a distributed file system where the linker's
random reads and writes largely degrade building performance.
"""
new_link_actions = []
new_link_actions.append(SCons.Defaults.SharedCheck)
new_link_actions.append(MakeAction(fast_link_sharelib_action, '$SHLINKCOMSTR'))
sharedlib = SCons.Builder.Builder(action=new_link_actions,
emitter='$SHLIBEMITTER',
prefix='$SHLIBPREFIX',
suffix='$SHLIBSUFFIX',
target_scanner=SCons.Scanner.Prog.ProgramScanner(),
src_suffix='$SHOBJSUFFIX',
src_builder='SharedObject')
top_env['BUILDERS']['SharedLibrary'] = sharedlib
def setup_fast_link_builders(top_env):
"""Creates fast link builders - Program and SharedLibrary. """
# Check requirement
acquire_temp_place = "df | grep tmpfs | awk '{print $5, $6}'"
p = subprocess.Popen(
acquire_temp_place,
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
universal_newlines=True)
stdout, stderr = p.communicate()
# Do not try to overwrite builder with error
if p.returncode:
console.warning('you have link on tmp enabled, but the tmpfs check failed, so it cannot be enabled.')
return
# No tmpfs to do fastlink, will not overwrite the builder
if not stdout:
console.warning('you have link on tmp enabled, but there is no tmpfs mounted to enable it.')
return
# Use the first one
global linking_tmp_dir
usage, linking_tmp_dir = tuple(stdout.splitlines(False)[0].split())
# Do not try to do that if there is no memory space left
usage = int(usage.replace('%', ''))
if usage > 90:
console.warning('you have link on tmp enabled, '
'but there is not enough space on %s to enable it.' %
linking_tmp_dir)
return
console.info('building in link on tmpfs mode')
setup_fast_link_sharelib_builder(top_env)
setup_fast_link_prog_builder(top_env)
def make_top_env(build_dir):
"""Make the top level scons envrionment object"""
os.environ['LC_ALL'] = 'C'
top_env = SCons.Environment.Environment(ENV=os.environ)
top_env.EnsureSConsVersion(2, 0)
# Optimization options, see http://www.scons.org/wiki/GoFastButton
top_env.Decider('MD5-timestamp')
top_env.SetOption('implicit_cache', 1)
top_env.SetOption('max_drift', 1)
top_env.VariantDir(build_dir, '.', duplicate=0)
return top_env
def get_compile_source_message():
return console.erasable('%sCompiling %s$SOURCE%s%s' % (
color('cyan'), color('purple'), color('cyan'), color('end')))
def get_link_program_message():
return console.erasable('%sLinking Program %s$TARGET%s%s' % (
color('green'), color('purple'), color('green'), color('end')))
def setup_compliation_verbosity(top_env, color_enabled, verbosity):
"""Generates color and verbose message. """
console.enable_color(color_enabled)
global _verbosity
_verbosity = verbosity
top_env["SPAWN"] = echospawn
compile_source_message = get_compile_source_message()
link_program_message = get_link_program_message()
assembling_source_message = console.erasable('%sAssembling %s$SOURCE%s%s' % (
color('cyan'), color('purple'), color('cyan'), color('end')))
link_library_message = console.erasable('%sCreating Static Library %s$TARGET%s%s' % (
color('green'), color('purple'), color('green'), color('end')))
ranlib_library_message = console.erasable('%sRanlib Library %s$TARGET%s%s' % (
color('green'), color('purple'), color('green'), color('end')))
link_shared_library_message = console.erasable('%sLinking Shared Library %s$TARGET%s%s' % (
color('green'), color('purple'), color('green'), color('end')))
jar_message = console.erasable('%sCreating Jar %s$TARGET%s%s' % (
color('green'), color('purple'), color('green'), color('end')))
if console.verbosity_compare(verbosity, 'verbose') < 0:
top_env.Append(
CXXCOMSTR=compile_source_message,
CCCOMSTR=compile_source_message,
ASCOMSTR=assembling_source_message,
ASPPCOMSTR=assembling_source_message,
SHCCCOMSTR=compile_source_message,
SHCXXCOMSTR=compile_source_message,
ARCOMSTR=link_library_message,
RANLIBCOMSTR=ranlib_library_message,
SHLINKCOMSTR=link_shared_library_message,
LINKCOMSTR=link_program_message,
JAVACCOMSTR=compile_source_message,
JARCOMSTR=jar_message,
LEXCOMSTR=compile_source_message,
YACCCOMSTR=compile_source_message)
def proto_scan_func(node, env, path, arg):
contents = node.get_text_contents()
protos = proto_import_re.findall(contents)
protos += proto_import_public_re.findall(contents)
if not protos:
return []
def _find_proto(proto, path):
for dir in path:
f = os.path.join(str(dir), proto)
if os.path.exists(f):
return f
return ''
results = []
for proto in protos:
f = _find_proto(proto, path)
if f:
results.append(f)
public_protos = proto_import_public_re.findall(open(f).read())
for public_proto in public_protos:
public_proto = _find_proto(public_proto, path)
if public_proto:
results.append(public_proto)
return env.File(results)
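# The import regexes defined near the top of this file match lines such as
#   import "common/base.proto";
#   import public "common/shared.proto";
# so a .proto target is rescanned when those dependencies change (the
# paths shown here are illustrative).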
def setup_proto_builders(top_env, build_dir, protoc_bin, protoc_java_bin,
protobuf_path, protobuf_incs_str, protobuf_java_incs,
protoc_php_plugin, protobuf_php_path, protoc_go_plugin):
# pylint: disable=too-many-locals
compile_proto_cc_message = console.erasable('%sCompiling %s$SOURCE%s to cc source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_proto_java_message = console.erasable('%sCompiling %s$SOURCE%s to java source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_proto_php_message = console.erasable('%sCompiling %s$SOURCE%s to php source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_proto_python_message = console.erasable('%sCompiling %s$SOURCE%s to python source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_proto_go_message = console.erasable('%sCompiling %s$SOURCE%s to go source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
copy_proto_go_source_message = console.erasable('%sCopying %s$SOURCE%s to go directory%s' %
(color('cyan'), color('purple'), color('cyan'), color('end')))
generate_proto_descriptor_message = console.erasable('%sGenerating proto descriptor set %s$TARGET%s%s' % (
color('green'), color('purple'), color('green'), color('end')))
proto_bld = SCons.Builder.Builder(action=MakeAction(
"%s --proto_path=. -I. %s -I=`dirname $SOURCE` --cpp_out=%s $PROTOCFLAGS $PROTOCCPPPLUGINFLAGS $SOURCE" % (
protoc_bin, protobuf_incs_str, build_dir),
compile_proto_cc_message))
top_env.Append(BUILDERS={"Proto": proto_bld})
proto_java_bld = SCons.Builder.Builder(action=MakeAction(
"%s --proto_path=. %s --java_out=%s/`dirname $SOURCE` $PROTOCJAVAPLUGINFLAGS $SOURCE" % (
protoc_java_bin, protobuf_java_incs, build_dir),
compile_proto_java_message))
top_env.Append(BUILDERS={"ProtoJava": proto_java_bld})
proto_php_bld = SCons.Builder.Builder(action=MakeAction(
"%s --proto_path=. --plugin=protoc-gen-php=%s -I. %s -I%s -I=`dirname $SOURCE` --php_out=%s/`dirname $SOURCE` $SOURCE" % (
protoc_bin, protoc_php_plugin, protobuf_incs_str, protobuf_php_path, build_dir),
compile_proto_php_message))
top_env.Append(BUILDERS={"ProtoPhp": proto_php_bld})
proto_python_bld = SCons.Builder.Builder(action=MakeAction(
"%s --proto_path=. -I. %s -I=`dirname $SOURCE` --python_out=%s $PROTOCPYTHONPLUGINFLAGS $SOURCE" % (
protoc_bin, protobuf_incs_str, build_dir),
compile_proto_python_message))
top_env.Append(BUILDERS={"ProtoPython": proto_python_bld})
proto_go_bld = SCons.Builder.Builder(action=MakeAction(
generate_proto_go_source, compile_proto_go_message),
PROTOC=protoc_bin, PROTOCGOPLUGIN=protoc_go_plugin,
PROTOBUFINCS=protobuf_incs_str, BUILDDIR=build_dir)
top_env.Append(BUILDERS={"ProtoGo": proto_go_bld})
proto_go_source_bld = SCons.Builder.Builder(
action=MakeAction(copy_proto_go_source, copy_proto_go_source_message))
top_env.Append(BUILDERS={"ProtoGoSource": proto_go_source_bld})
proto_descriptor_bld = SCons.Builder.Builder(action=MakeAction(
'%s --proto_path=. -I. %s -I=`dirname $SOURCE` '
'--descriptor_set_out=$TARGET --include_imports --include_source_info '
'$SOURCES' % (protoc_bin, protobuf_incs_str),
generate_proto_descriptor_message))
top_env.Append(BUILDERS={"ProtoDescriptors": proto_descriptor_bld})
top_env.Replace(PROTOCFLAGS="",
PROTOCCPPPLUGINFLAGS="",
PROTOCJAVAPLUGINFLAGS="",
PROTOCPYTHONPLUGINFLAGS="")
top_env.Append(PROTOPATH=['.', protobuf_path])
proto_scanner = top_env.Scanner(name='ProtoScanner',
function=proto_scan_func,
argument=None,
skeys=['.proto'],
path_function=SCons.Scanner.FindPathDirs('PROTOPATH'))
top_env.Append(SCANNERS=proto_scanner)
def setup_thrift_builders(top_env, build_dir, thrift_bin, thrift_incs_str, thrift_gen_params):
compile_thrift_cc_message = console.erasable('%sCompiling %s$SOURCE%s to cc source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_thrift_java_message = console.erasable('%sCompiling %s$SOURCE%s to java source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_thrift_python_message = console.erasable('%sCompiling %s$SOURCE%s to python source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
thrift_bld = SCons.Builder.Builder(action=MakeAction(
'%s --gen %s -I . %s -I `dirname $SOURCE`'
' -out %s/`dirname $SOURCE` $SOURCE' % (
thrift_bin, thrift_gen_params, thrift_incs_str, build_dir),
compile_thrift_cc_message))
top_env.Append(BUILDERS={"Thrift": thrift_bld})
thrift_java_bld = SCons.Builder.Builder(action=MakeAction(
"%s --gen java -I . %s -I `dirname $SOURCE` -out %s/`dirname $SOURCE` $SOURCE" % (
thrift_bin, thrift_incs_str, build_dir),
compile_thrift_java_message))
top_env.Append(BUILDERS={"ThriftJava": thrift_java_bld})
thrift_python_bld = SCons.Builder.Builder(action=MakeAction(
"%s --gen py -I . %s -I `dirname $SOURCE` -out %s/`dirname $SOURCE` $SOURCE" % (
thrift_bin, thrift_incs_str, build_dir),
compile_thrift_python_message))
top_env.Append(BUILDERS={"ThriftPython": thrift_python_bld})
def setup_fbthrift_builders(top_env, build_dir, fbthrift1_bin, fbthrift2_bin, fbthrift_incs_str):
compile_fbthrift_cpp_message = console.erasable('%sCompiling %s$SOURCE%s to cpp source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_fbthrift_cpp2_message = console.erasable('%sCompiling %s$SOURCE%s to cpp2 source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
fbthrift1_bld = SCons.Builder.Builder(action=MakeAction(
'%s --gen cpp:templates,cob_style,include_prefix,enum_strict -I . %s -I `dirname $SOURCE`'
' -o %s/`dirname $SOURCE` $SOURCE' % (
fbthrift1_bin, fbthrift_incs_str, build_dir),
compile_fbthrift_cpp_message))
top_env.Append(BUILDERS={"FBThrift1": fbthrift1_bld})
fbthrift2_bld = SCons.Builder.Builder(action=MakeAction(
'%s --gen=cpp2:cob_style,include_prefix,future -I . %s -I `dirname $SOURCE` '
'-o %s/`dirname $SOURCE` $SOURCE' % (
fbthrift2_bin, fbthrift_incs_str, build_dir),
compile_fbthrift_cpp2_message))
top_env.Append(BUILDERS={"FBThrift2": fbthrift2_bld})
def setup_cuda_builders(top_env, nvcc_str, cuda_incs_str):
nvcc_object_bld = SCons.Builder.Builder(action=MakeAction(
"%s -ccbin g++ %s $NVCCFLAGS -o $TARGET -c $SOURCE" % (nvcc_str, cuda_incs_str),
get_compile_source_message()))
top_env.Append(BUILDERS={"NvccObject": nvcc_object_bld})
nvcc_binary_bld = SCons.Builder.Builder(action=MakeAction(
"%s %s $NVCCFLAGS -o $TARGET" % (nvcc_str, cuda_incs_str),
get_link_program_message()))
top_env.Append(NVCC=nvcc_str)
top_env.Append(BUILDERS={"NvccBinary": nvcc_binary_bld})
def setup_java_builders(top_env, java_home, one_jar_boot_path):
# pylint: disable=too-many-locals
if java_home:
top_env.Replace(JAVA=os.path.join(java_home, 'bin/java'))
top_env.Replace(JAVAC=os.path.join(java_home, 'bin/javac'))
top_env.Replace(JAR=os.path.join(java_home, 'bin/jar'))
blade_jar_bld = SCons.Builder.Builder(action=MakeAction(
'jar cf $TARGET -C `dirname $SOURCE` .',
'$JARCOMSTR'))
top_env.Append(BUILDERS={"BladeJar": blade_jar_bld})
# Scons has many bugs with generated sources file,
# such as can't obtain class file path correctly.
# so just build all sources to jar directly
generated_jar_bld = SCons.Builder.Builder(action=MakeAction(
'rm -fr ${TARGET}.classes && mkdir -p ${TARGET}.classes && '
'$JAVAC $JAVACFLAGS $_JAVABOOTCLASSPATH $_JAVACLASSPATH -d ${TARGET}.classes $SOURCES && '
'$JAR $JARFLAGS ${TARGET} -C ${TARGET}.classes . && '
'rm -fr ${TARGET}.classes',
'$JARCOMSTR'))
top_env.Append(BUILDERS={"GeneratedJavaJar": generated_jar_bld})
# Scons Java builder has bugs on detecting generated .class files
# produced by javac: anonymous inner classes are missing in the results
# of Java builder no matter which JAVAVERSION(1.5, 1.6) is specified
# See: http://scons.tigris.org/issues/show_bug.cgi?id=1594
# http://scons.tigris.org/issues/show_bug.cgi?id=2742
blade_java_jar_bld = SCons.Builder.Builder(action=MakeAction(
generate_java_jar, '$JARCOMSTR'))
top_env.Append(BUILDERS={"BladeJavaJar": blade_java_jar_bld})
resource_message = console.erasable('%sProcess Jar Resource %s$SOURCES%s%s' % ( \
color('cyan'), color('purple'), color('cyan'), color('end')))
java_resource_bld = SCons.Builder.Builder(
action=MakeAction(process_java_resources, resource_message))
top_env.Append(BUILDERS={"JavaResource": java_resource_bld})
source_message = console.erasable('%sProcess Java Source %s$SOURCES%s%s' % ( \
color('cyan'), color('purple'), color('cyan'), color('end')))
java_source_bld = SCons.Builder.Builder(
action=MakeAction(process_java_sources, source_message))
top_env.Append(BUILDERS={"JavaSource": java_source_bld})
global _one_jar_boot_path
_one_jar_boot_path = one_jar_boot_path
one_java_message = console.erasable('%sGenerating One Jar %s$TARGET%s%s' % ( \
color('cyan'), color('purple'), color('cyan'), color('end')))
one_jar_bld = SCons.Builder.Builder(action=MakeAction(generate_one_jar,
one_java_message))
top_env.Append(BUILDERS={'OneJar': one_jar_bld})
fat_java_message = console.erasable('%sCreating Fat Jar %s$TARGET%s%s' % ( \
color('green'), color('purple'), color('green'), color('end')))
fat_jar_bld = SCons.Builder.Builder(action=MakeAction(generate_fat_jar,
fat_java_message))
top_env.Append(BUILDERS={'FatJar': fat_jar_bld})
java_binary_message = console.erasable('%sGenerating Java Binary %s$TARGET%s%s' % \
(color('green'), color('purple'), color('green'), color('end')))
java_binary_bld = SCons.Builder.Builder(action=MakeAction(
generate_java_binary, java_binary_message))
top_env.Append(BUILDERS={"JavaBinary": java_binary_bld})
java_test_message = console.erasable('%sGenerating Java Test %s$TARGET%s%s' % \
(color('green'), color('purple'), color('green'), color('end')))
java_test_bld = SCons.Builder.Builder(action=MakeAction(
generate_java_test, java_test_message))
top_env.Append(BUILDERS={"JavaTest": java_test_bld})
def setup_scala_builders(top_env, scala_home):
if scala_home:
top_env.Replace(SCALAC=os.path.join(scala_home, 'bin/scalac'))
top_env.Replace(SCALA=os.path.join(scala_home, 'bin/scala'))
scala_jar_bld = SCons.Builder.Builder(action=MakeAction(
generate_scala_jar, '$JARCOMSTR'))
top_env.Append(BUILDERS={"ScalaJar": scala_jar_bld})
scala_test_message = console.erasable('%sGenerating Scala Test %s$TARGET%s%s' % \
(color('green'), color('purple'), color('green'), color('end')))
scala_test_bld = SCons.Builder.Builder(action=MakeAction(
generate_scala_test, scala_test_message))
top_env.Append(BUILDERS={"ScalaTest": scala_test_bld})
def setup_go_builders(top_env, go_cmd, go_home):
if go_cmd:
top_env.Replace(GOCMD=go_cmd)
if go_home:
top_env.Replace(GOHOME=go_home)
go_library_message = console.erasable('%sGenerating Go Package %s$TARGET%s%s' %
(color('green'), color('purple'), color('green'), color('end')))
go_library_builder = SCons.Builder.Builder(action=MakeAction(
generate_go_library, go_library_message))
top_env.Append(BUILDERS={"GoLibrary": go_library_builder})
go_binary_message = console.erasable('%sGenerating Go Executable %s$TARGET%s%s' %
(color('green'), color('purple'), color('green'), color('end')))
go_binary_builder = SCons.Builder.Builder(action=MakeAction(
generate_go_binary, go_binary_message))
top_env.Append(BUILDERS={"GoBinary": go_binary_builder})
go_test_message = console.erasable('%sGenerating Go Test %s$TARGET%s%s' %
(color('green'), color('purple'), color('green'), color('end')))
go_test_builder = SCons.Builder.Builder(action=MakeAction(
generate_go_test, go_test_message))
top_env.Append(BUILDERS={"GoTest": go_test_builder})
def setup_lex_yacc_builders(top_env):
top_env.Replace(LEXCOM="$LEX $LEXFLAGS -o $TARGET $SOURCES")
def setup_resource_builders(top_env):
compile_resource_index_message = console.erasable(
'%sGenerating resource index for %s$SOURCE_PATH/$TARGET_NAME%s%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_resource_message = console.erasable('%sCompiling %s$SOURCE%s as resource file%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
resource_index_bld = SCons.Builder.Builder(action=MakeAction(generate_resource_index,
compile_resource_index_message))
resource_file_bld = SCons.Builder.Builder(action=MakeAction(generate_resource_file,
compile_resource_message))
top_env.Append(BUILDERS={"ResourceIndex": resource_index_bld})
top_env.Append(BUILDERS={"ResourceFile": resource_file_bld})
def setup_python_builders(top_env):
compile_python_library_message = console.erasable('%sGenerating Python Library %s$TARGET%s%s' % \
(color('green'), color('purple'), color('green'), color('end')))
compile_python_binary_message = console.erasable('%sGenerating Python Binary %s$TARGET%s%s' % \
(color('green'), color('purple'), color('green'), color('end')))
python_library_bld = SCons.Builder.Builder(action=MakeAction(generate_python_library,
compile_python_library_message))
python_binary_bld = SCons.Builder.Builder(action=MakeAction(generate_python_binary,
compile_python_binary_message))
top_env.Append(BUILDERS={"PythonLibrary": python_library_bld})
top_env.Append(BUILDERS={"PythonBinary": python_binary_bld})
def setup_package_builders(top_env):
source_message = console.erasable('%sProcess Package Source %s$SOURCES%s%s' % (
color('cyan'), color('purple'), color('cyan'), color('end')))
source_bld = SCons.Builder.Builder(
action=MakeAction(process_package_source, source_message))
top_env.Append(BUILDERS={"PackageSource": source_bld})
package_message = console.erasable('%sCreating Package %s$TARGET%s%s' % (
color('green'), color('purple'), color('green'), color('end')))
package_bld = SCons.Builder.Builder(
action=MakeAction(generate_package, package_message))
top_env.Append(BUILDERS={"Package": package_bld})
def setup_shell_builders(top_env):
shell_test_data_message = console.erasable('%sGenerating Shell Test Data %s$TARGET%s%s' %
(color('cyan'), color('purple'), color('cyan'), color('end')))
shell_test_data_bld = SCons.Builder.Builder(action=MakeAction(
generate_shell_test_data, shell_test_data_message))
top_env.Append(BUILDERS={"ShellTestData": shell_test_data_bld})
shell_test_message = console.erasable('%sGenerating Shell Test %s$TARGET%s%s' %
(color('green'), color('purple'), color('green'), color('end')))
shell_test_bld = SCons.Builder.Builder(action=MakeAction(
generate_shell_test, shell_test_message))
top_env.Append(BUILDERS={"ShellTest": shell_test_bld})
def setup_other_builders(top_env):
setup_lex_yacc_builders(top_env)
setup_resource_builders(top_env)
setup_python_builders(top_env)
setup_package_builders(top_env)
setup_shell_builders(top_env)
def setup_swig_builders(top_env, build_dir):
compile_swig_python_message = console.erasable('%sCompiling %s$SOURCE%s to python source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_swig_java_message = console.erasable('%sCompiling %s$SOURCE%s to java source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
compile_swig_php_message = console.erasable('%sCompiling %s$SOURCE%s to php source%s' % \
(color('cyan'), color('purple'), color('cyan'), color('end')))
# Python
swig_py_bld = SCons.Builder.Builder(action=MakeAction(
'swig -python -threads $SWIGPYTHONFLAGS -c++ -I%s -o $TARGET $SOURCE' % (build_dir),
compile_swig_python_message))
top_env.Append(BUILDERS={"SwigPython": swig_py_bld})
# Java
swig_java_bld = SCons.Builder.Builder(action=MakeAction(
'swig -java $SWIGJAVAFLAGS -c++ -I%s -o $TARGET $SOURCE' % (build_dir),
compile_swig_java_message))
top_env.Append(BUILDERS={'SwigJava': swig_java_bld})
swig_php_bld = SCons.Builder.Builder(action=MakeAction(
'swig -php $SWIGPHPFLAGS -c++ -I%s -o $TARGET $SOURCE' % (build_dir),
compile_swig_php_message))
top_env.Append(BUILDERS={"SwigPhp": swig_php_bld})
def _exec_get_version_info(cmd, cwd):
lc_all_env = os.environ
lc_all_env['LC_ALL'] = 'POSIX'
p = subprocess.Popen(cmd,
env=lc_all_env,
cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
stdout, stderr = p.communicate()
if p.returncode:
return None
else:
return stdout.replace('"', '\\"').replace('\n', '\\n"\n"')
def _get_version_info(blade_root_dir, svn_roots):
"""Gets svn root dir info. """
svn_info_map = {}
if os.path.exists("%s/.git" % blade_root_dir):
cmd = "git log -n 1"
dirname = os.path.dirname(blade_root_dir)
version_info = _exec_get_version_info(cmd, None)
if version_info:
svn_info_map[dirname] = version_info
return svn_info_map
for root_dir in svn_roots:
root_dir_realpath = os.path.realpath(root_dir)
svn_working_dir = os.path.dirname(root_dir_realpath)
svn_dir = os.path.basename(root_dir_realpath)
cmd = 'svn info %s' % svn_dir
version_info = _exec_get_version_info(cmd, svn_working_dir)
if not version_info:
cmd = 'git ls-remote --get-url && git branch | grep "*" && git log -n 1'
version_info = _exec_get_version_info(cmd, root_dir_realpath)
if not version_info:
console.warning('Failed to get version control info in %s' % root_dir)
if version_info:
svn_info_map[root_dir] = version_info
return svn_info_map
def generate_version_file(top_env, blade_root_dir, build_dir,
profile, gcc_version, svn_roots):
"""Generate version information files. """
svn_info_map = _get_version_info(blade_root_dir, svn_roots)
svn_info_len = len(svn_info_map)
if not os.path.exists(build_dir):
os.makedirs(build_dir)
filename = os.path.join(build_dir, 'version.c')
with open(filename, 'w') as version_c:
version_c.write('/* This file was generated by blade */\n')
version_c.write(('''
extern const int kSvnInfoCount;
extern const char* const kSvnInfo[];
extern const char kBuildType[];
extern const char kBuildTime[];
extern const char kBuilderName[];
extern const char kHostName[];
extern const char kCompiler[];
'''))
version_c.write('const int kSvnInfoCount = %d;\n' % svn_info_len)
version_c.write('const char* const kSvnInfo[%d] = {%s};\n' % (
svn_info_len, ', '.join(['"%s"' % v for v in svn_info_map.values()])))
version_c.write('const char kBuildType[] = "%s";\n' % profile)
version_c.write('const char kBuildTime[] = "%s";\n' % time.asctime())
version_c.write('const char kBuilderName[] = "%s";\n' % os.getenv('USER'))
version_c.write((
'const char kHostName[] = "%s";\n' % socket.gethostname()))
compiler = 'GCC %s' % gcc_version
version_c.write('const char kCompiler[] = "%s";\n' % compiler)
env_version = top_env.Clone()
env_version.Replace(SHCXXCOMSTR=console.erasable(
'%sUpdating version information%s' % (
color('cyan'), color('end'))))
return env_version.SharedObject(filename)
|