max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
dbrepl.py | righteousgambitresearch/SSRFTest | 213 | 12678034 | #!/usr/local/bin/python -i
from metamodel import createLocalSession
from main import app
app.test_request_context('/').__enter__()
createLocalSession()
from model import *
|
app/dashboard/controllers.py | bbhunter/shadow-workers | 114 | 12678050 | import time
import os
from datetime import datetime
from app import db, ConnectedAgents, ConnectedDomAgents, auth, extraModules, AutomaticModuleExecution
from flask import jsonify, send_from_directory, Blueprint, Response, render_template, request, escape, abort
from pywebpush import webpush, WebPushException
from database.models import Registration, Agent, Module, DomCommand, DashboardRegistration
from sqlalchemy.orm import joinedload
dashboard = Blueprint('dashboard', __name__)
AGENT_TIMEOUT = 8
@dashboard.after_request
def applySecurityHeaders(response):
#style-src 'self';
response.headers["Content-Security-Policy"] = "script-src 'self'; img-src 'self'; font-src 'self'; media-src 'self'; frame-src 'self'; frame-ancestors 'none'"
response.headers["X-Frame-Options"] = "deny"
response.headers["X-Xss-Protection"] = "1; mode=block"
response.headers["Referrer-Policy"] = "same-origin"
return response
@dashboard.before_request
def contentTypeCSRFProtection():
if request.method == 'POST':
        if request.content_type != 'application/json':
return Response("", 404)
@dashboard.route('/')
@auth.login_required
def servedashboard():
return render_template('index.html')
@dashboard.route('/sw.js')
@auth.login_required
def sw():
vapidPub = os.popen("vapid --applicationServerKey | cut -d' ' -f5").read().strip()
res = render_template('dashboard_notifications.js', vapidPub = vapidPub)
return res, {'Content-Type': 'application/javascript'}
@dashboard.route('/modules')
@auth.login_required
def getModules():
return jsonify({'modules': extraModules['modules'], 'autoLoadedModules': AutomaticModuleExecution})
@dashboard.route('/agents')
@auth.login_required
def getAgents():
activeAgents()
return jsonify({'active': ConnectedAgents, 'dormant': dormantAgents()})
@dashboard.route('/agent/<agentID>', methods=['GET'])
@auth.login_required
def getAgent(agentID):
    if agentID is not None:
agent = db.session().query(Agent).filter(Agent.id == agentID).first()
if agent is not None:
result = Agent.to_json(agent)
registration = db.session.query(Registration).filter(Registration.agentId == agent.id).order_by(Registration.id.desc()).first()
result['push'] = str(registration is not None).lower()
result['active'] = 'true' if agent.id in ConnectedAgents else 'false'
result['domActive'] = 'true' if agent.id in ConnectedDomAgents else 'false'
result['user_agent'] = escape(agent.user_agent)
modules = db.session().query(Module).filter(Module.agentId == agentID, Module.processed == 1).all()
if len(modules) != 0:
result['modules'] = {}
for module in modules:
result['modules'][module.name] = escape(module.results)
dom_commands = db.session().query(DomCommand).filter(DomCommand.agentId == agentID, DomCommand.processed == 1).order_by(DomCommand.id.desc()).limit(3).all()
if len(dom_commands) != 0:
result['dom_commands'] = {}
for dom_command in dom_commands:
result['dom_commands'][escape(dom_command.command)] = escape(dom_command.result)
return jsonify(result)
return Response("", 404)
@dashboard.route('/automodule/<moduleName>', methods=['POST'])
@auth.login_required
def autoLoadModule(moduleName):
checkModule(moduleName)
if moduleName in AutomaticModuleExecution:
return Response("", 404)
AutomaticModuleExecution.append(moduleName)
return ""
@dashboard.route('/automodule/<moduleName>', methods=['DELETE'])
@auth.login_required
def deleteAutoLoadModule(moduleName):
checkModule(moduleName)
if moduleName not in AutomaticModuleExecution:
return Response("", 404)
AutomaticModuleExecution.remove(moduleName)
return ""
@dashboard.route('/agent/<agentID>', methods=['DELETE'])
@auth.login_required
def deleteAgent(agentID):
if agentID is None:
return Response("", 404)
agent = db.session().query(Agent).filter(Agent.id == agentID).first()
if agent is None:
return Response("", 404)
db.session().delete(agent)
db.session().commit()
return ""
@dashboard.route('/module/<moduleName>/<agentID>', methods=['POST'])
@auth.login_required
def createModule(moduleName, agentID):
module = loadAgentModule(moduleName, agentID)
if module is not None: # already loaded
return Response("", 404)
module = Module(None, agentID, moduleName, '', 0, datetime.now())
db.session().add(module)
db.session().commit()
return ""
@dashboard.route('/module/<moduleName>/<agentID>', methods=['DELETE'])
@auth.login_required
def removeModule(moduleName, agentID):
module = loadAgentModule(moduleName, agentID)
if module is not None:
db.session().delete(module)
db.session().commit()
return ""
return Response("", 404)
# Send command to be executed to Dashboard
@dashboard.route('/dom/<agentID>', methods=['POST'])
@auth.login_required
def sendDomJS(agentID):
body = request.get_json(silent = True)
    if body and body.get('js'):
dom_command = DomCommand(None, agentID, body['js'], None, 0, datetime.now())
db.session().add(dom_command)
db.session().commit()
longpoll_counter=0
while True:
time.sleep(0.5)
longpoll_counter+=1
if(longpoll_counter>8): # wait up to 4 seconds for response
return Response("", 404)
dom_results = db.session().query(DomCommand).filter(DomCommand.agentId == agentID, DomCommand.processed == 1,DomCommand.id==dom_command.id).order_by(DomCommand.id.desc()).limit(3).all()
if len(dom_results) != 0:
result={}
for cmd_result in dom_results:
result['cmd']=cmd_result.command
result['result']=cmd_result.result
return jsonify(result)
else:
continue
return Response("", 404)
# API to get the results of any command. Not used at the moment
@dashboard.route('/dom/result/<agentID>/<cmdID>', methods=['GET'])
@auth.login_required
def sendDomCmdResult(agentID,cmdID):
dom_commands = db.session().query(DomCommand).filter(DomCommand.agentId == agentID, DomCommand.processed == 1,DomCommand.id==cmdID).order_by(DomCommand.id.desc()).limit(3).all()
if len(dom_commands) != 0:
result={}
for dom_command in dom_commands:
result['cmd']=dom_command.command
result['result']=dom_command.result
return jsonify(result)
return Response("", 404)
@dashboard.route('/push/<agentId>', methods=['POST'])
@auth.login_required
def push(agentId):
registration = db.session.query(Registration).filter(Registration.agentId == agentId).order_by(Registration.id.desc()).first()
if registration is None:
return Response("", 404)
else:
try:
webpush(
subscription_info={
"endpoint": registration.endpoint,
"keys": {
"p256dh": registration.authKey,
"auth": registration.authSecret
}
},
data="",
vapid_private_key="./private_key.pem",
vapid_claims={
"sub": "mailto:<EMAIL>",
}
)
except WebPushException as ex:
print(ex)
return Response("", 404)
return ""
@dashboard.route('/registration', methods = ['POST'])
@auth.login_required
def registration():
body = request.get_json(silent = True)
    if body and body.get('endpoint') and body.get('key') and body.get('authSecret'):
dashboard_registration = DashboardRegistration(None, body['endpoint'], body['key'], body['authSecret'])
db.session.add(dashboard_registration)
db.session.commit()
return ""
return Response("", 404)
def activeAgents():
now = time.time()
agentsToRemove = {}
# remove DOM agents that timed out
for agentID in ConnectedDomAgents:
if (now - ConnectedDomAgents[agentID]['last_seen']) > AGENT_TIMEOUT:
agentsToRemove[agentID] = ConnectedDomAgents[agentID]
for agentID in agentsToRemove:
del ConnectedDomAgents[agentID]
agentsToRemove = {}
# remove SW agents that timed out
for agentID in ConnectedAgents:
if (now - ConnectedAgents[agentID]['last_seen']) > AGENT_TIMEOUT:
agentsToRemove[agentID] = ConnectedAgents[agentID]
ConnectedAgents[agentID]['domActive'] = 'true' if agentID in ConnectedDomAgents else 'false'
for agentID in agentsToRemove:
del ConnectedAgents[agentID]
def dormantAgents():
agents = db.session().query(Agent).options(joinedload('registration')).filter(Agent.id.notin_(ConnectedAgents.keys())).all()
results = {}
for agent in agents:
results[agent.id] = Agent.to_json(agent)
results[agent.id]['push'] = str(agent.registration is not None).lower()
results[agent.id]['active'] = 'false'
results[agent.id]['domActive'] = 'true' if agent.id in ConnectedDomAgents else 'false'
return results
def loadAgentModule(moduleName, agentID):
checkModule(moduleName)
return db.session.query(Module).filter(Module.agentId == agentID, Module.name == moduleName).order_by(Module.id.desc()).first()
def checkModule(moduleName):
    if moduleName not in extraModules['modules']:
        # abort so the 404 actually reaches the client even though callers
        # ignore this function's return value
        abort(404)
|
wandb/vendor/graphql-core-1.1/wandb_graphql/validation/rules/overlapping_fields_can_be_merged.py | borisgrafx/client | 3,968 | 12678076 | import itertools
from collections import OrderedDict
from ...error import GraphQLError
from ...language import ast
from ...language.printer import print_ast
from ...pyutils.pair_set import PairSet
from ...type.definition import (GraphQLInterfaceType, GraphQLList,
GraphQLNonNull, GraphQLObjectType,
get_named_type, is_leaf_type)
from ...utils.type_comparators import is_equal_type
from ...utils.type_from_ast import type_from_ast
from .base import ValidationRule
class OverlappingFieldsCanBeMerged(ValidationRule):
__slots__ = ('_compared_fragments', '_cached_fields_and_fragment_names', )
def __init__(self, context):
super(OverlappingFieldsCanBeMerged, self).__init__(context)
# A memoization for when two fragments are compared "between" each other for
# conflicts. Two fragments may be compared many times, so memoizing this can
# dramatically improve the performance of this validator.
self._compared_fragments = PairSet()
# A cache for the "field map" and list of fragment names found in any given
# selection set. Selection sets may be asked for this information multiple
# times, so this improves the performance of this validator.
self._cached_fields_and_fragment_names = {}
def leave_SelectionSet(self, node, key, parent, path, ancestors):
# Note: we validate on the reverse traversal so deeper conflicts will be
# caught first, for correct calculation of mutual exclusivity and for
# clearer error messages.
# field_map = _collect_field_asts_and_defs(
# self.context,
# self.context.get_parent_type(),
# node
# )
# conflicts = _find_conflicts(self.context, False, field_map, self.compared_set)
conflicts = _find_conflicts_within_selection_set(self.context, self._cached_fields_and_fragment_names,
self._compared_fragments, self.context.get_parent_type(),
node)
for (reason_name, reason), fields1, fields2 in conflicts:
self.context.report_error(GraphQLError(
self.fields_conflict_message(reason_name, reason),
list(fields1) + list(fields2)
))
@staticmethod
def same_type(type1, type2):
return is_equal_type(type1, type2)
# return type1.is_same_type(type2)
@classmethod
def fields_conflict_message(cls, reason_name, reason):
return (
'Fields "{}" conflict because {}. '
'Use different aliases on the fields to fetch both if this was '
'intentional.'
).format(reason_name, cls.reason_message(reason))
@classmethod
def reason_message(cls, reason):
if isinstance(reason, list):
return ' and '.join('subfields "{}" conflict because {}'.format(reason_name, cls.reason_message(sub_reason))
for reason_name, sub_reason in reason)
return reason
# Algorithm:
#
# Conflicts occur when two fields exist in a query which will produce the same
# response name, but represent differing values, thus creating a conflict.
# The algorithm below finds all conflicts via making a series of comparisons
# between fields. In order to compare as few fields as possible, this makes
# a series of comparisons "within" sets of fields and "between" sets of fields.
#
# Given any selection set, collecting its fields produces both a set of fields
# (including those found in any inline fragments) and a list of fragments
# referenced by fragment spreads.
#
# A) Each selection set represented in the document first compares "within" its
# collected set of fields, finding any conflicts between every pair of
# overlapping fields.
# Note: This is the only time that the fields "within" a set are compared
# to each other. After this only fields "between" sets are compared.
#
# B) Also, if any fragment is referenced in a selection set, then a
# comparison is made "between" the original set of fields and the
# referenced fragment.
#
# C) Also, if multiple fragments are referenced, then comparisons
# are made "between" each referenced fragment.
#
# D) When comparing "between" a set of fields and a referenced fragment, first
# a comparison is made between each field in the original set of fields and
# each field in the referenced set of fields.
#
# E) Also, if any fragment is referenced in the referenced selection set,
# then a comparison is made "between" the original set of fields and the
# referenced fragment (recursively referring to step D).
#
# F) When comparing "between" two fragments, first a comparison is made between
# each field in the first referenced set of fields and each field in the
# second referenced set of fields.
#
# G) Also, any fragments referenced by the first must be compared to the
# second, and any fragments referenced by the second must be compared to the
# first (recursively referring to step F).
#
# H) When comparing two fields, if both have selection sets, then a comparison
# is made "between" both selection sets, first comparing the set of fields in
# the first selection set with the set of fields in the second.
#
# I) Also, if any fragment is referenced in either selection set, then a
# comparison is made "between" the other set of fields and the
# referenced fragment.
#
# J) Also, if two fragments are referenced in both selection sets, then a
# comparison is made "between" the two fragments.
def _find_conflicts_within_selection_set(context, cached_fields_and_fragment_names, compared_fragments, parent_type,
selection_set):
"""Find all conflicts found "within" a selection set, including those found via spreading in fragments.
Called when visiting each SelectionSet in the GraphQL Document.
"""
conflicts = []
field_map, fragment_names = _get_fields_and_fragments_names(context, cached_fields_and_fragment_names, parent_type,
selection_set)
# (A) Find all conflicts "within" the fields of this selection set.
# Note: this is the *only place* `collect_conflicts_within` is called.
_collect_conflicts_within(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
field_map
)
# (B) Then collect conflicts between these fields and those represented by
# each spread fragment name found.
for i, fragment_name in enumerate(fragment_names):
_collect_conflicts_between_fields_and_fragment(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
False,
field_map,
fragment_name,
)
# (C) Then compare this fragment with all other fragments found in this
# selection set to collect conflicts within fragments spread together.
# This compares each item in the list of fragment names to every other item
# in that same list (except for itself).
for other_fragment_name in fragment_names[i+1:]:
_collect_conflicts_between_fragments(
context,
conflicts,
cached_fields_and_fragment_names,
compared_fragments,
False,
fragment_name,
other_fragment_name,
)
return conflicts
def _collect_conflicts_between_fields_and_fragment(context, conflicts, cached_fields_and_fragment_names,
compared_fragments, are_mutually_exclusive, field_map,
fragment_name):
fragment = context.get_fragment(fragment_name)
if not fragment:
return None
field_map2, fragment_names2 = _get_referenced_fields_and_fragment_names(context, cached_fields_and_fragment_names,
fragment)
# (D) First collect any conflicts between the provided collection of fields
# and the collection of fields represented by the given fragment.
_collect_conflicts_between(context, conflicts, cached_fields_and_fragment_names, compared_fragments,
are_mutually_exclusive, field_map, field_map2)
# (E) Then collect any conflicts between the provided collection of fields
# and any fragment names found in the given fragment.
for fragment_name2 in fragment_names2:
_collect_conflicts_between_fields_and_fragment(context, conflicts, cached_fields_and_fragment_names,
compared_fragments, are_mutually_exclusive, field_map,
fragment_name2)
# Collect all conflicts found between two fragments, including via spreading in
# any nested fragments
def _collect_conflicts_between_fragments(context, conflicts, cached_fields_and_fragment_names, compared_fragments,
are_mutually_exclusive, fragment_name1, fragment_name2):
fragment1 = context.get_fragment(fragment_name1)
fragment2 = context.get_fragment(fragment_name2)
if not fragment1 or not fragment2:
return None
# No need to compare a fragment to itself.
if fragment1 == fragment2:
return None
# Memoize so two fragments are not compared for conflicts more than once.
if compared_fragments.has(fragment_name1, fragment_name2, are_mutually_exclusive):
return None
compared_fragments.add(fragment_name1, fragment_name2, are_mutually_exclusive)
field_map1, fragment_names1 = _get_referenced_fields_and_fragment_names(context, cached_fields_and_fragment_names,
fragment1)
field_map2, fragment_names2 = _get_referenced_fields_and_fragment_names(context, cached_fields_and_fragment_names,
fragment2)
# (F) First, collect all conflicts between these two collections of fields
# (not including any nested fragments)
_collect_conflicts_between(context, conflicts, cached_fields_and_fragment_names, compared_fragments,
are_mutually_exclusive, field_map1, field_map2)
# (G) Then collect conflicts between the first fragment and any nested
# fragments spread in the second fragment.
for _fragment_name2 in fragment_names2:
_collect_conflicts_between_fragments(context, conflicts, cached_fields_and_fragment_names, compared_fragments,
are_mutually_exclusive, fragment_name1, _fragment_name2)
# (G) Then collect conflicts between the second fragment and any nested
# fragments spread in the first fragment.
for _fragment_name1 in fragment_names1:
_collect_conflicts_between_fragments(context, conflicts, cached_fields_and_fragment_names, compared_fragments,
are_mutually_exclusive, _fragment_name1, fragment_name2)
def _find_conflicts_between_sub_selection_sets(context, cached_fields_and_fragment_names, compared_fragments,
are_mutually_exclusive, parent_type1, selection_set1,
parent_type2, selection_set2):
"""Find all conflicts found between two selection sets.
Includes those found via spreading in fragments. Called when determining if conflicts exist
between the sub-fields of two overlapping fields.
"""
conflicts = []
field_map1, fragment_names1 = _get_fields_and_fragments_names(context, cached_fields_and_fragment_names,
parent_type1, selection_set1)
field_map2, fragment_names2 = _get_fields_and_fragments_names(context, cached_fields_and_fragment_names,
parent_type2, selection_set2)
    # (H) First, collect all conflicts between these two collections of fields.
_collect_conflicts_between(context, conflicts, cached_fields_and_fragment_names, compared_fragments,
are_mutually_exclusive, field_map1, field_map2)
# (I) Then collect conflicts between the first collection of fields and
# those referenced by each fragment name associated with the second.
for fragment_name2 in fragment_names2:
_collect_conflicts_between_fields_and_fragment(context, conflicts, cached_fields_and_fragment_names,
compared_fragments, are_mutually_exclusive, field_map1,
fragment_name2)
# (I) Then collect conflicts between the second collection of fields and
# those referenced by each fragment name associated with the first.
for fragment_name1 in fragment_names1:
_collect_conflicts_between_fields_and_fragment(context, conflicts, cached_fields_and_fragment_names,
compared_fragments, are_mutually_exclusive, field_map2,
fragment_name1)
    # (J) Also collect conflicts between any fragment names referenced by the
    # first and any fragment names referenced by the second. This compares each
    # item in the first set of names to each item in the second set of names.
for fragment_name1 in fragment_names1:
for fragment_name2 in fragment_names2:
_collect_conflicts_between_fragments(context, conflicts, cached_fields_and_fragment_names,
compared_fragments, are_mutually_exclusive,
fragment_name1, fragment_name2)
return conflicts
def _collect_conflicts_within(context, conflicts, cached_fields_and_fragment_names, compared_fragments, field_map):
"""Collect all Conflicts "within" one collection of fields."""
    # A field map is a keyed collection, where each key represents a response
# name and the value at that key is a list of all fields which provide that
# response name. For every response name, if there are multiple fields, they
# must be compared to find a potential conflict.
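    # Illustrative shape (field names invented): for a selection such as
    # "{ name: nickname  name }" the field map is roughly
    #   {'name': [[parent_type, <Field nickname>, field_def],
    #             [parent_type, <Field name>, field_def]]}
    # and the two entries under 'name' are compared against each other below.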
for response_name, fields in list(field_map.items()):
# This compares every field in the list to every other field in this list
# (except to itself). If the list only has one item, nothing needs to
# be compared.
for i, field in enumerate(fields):
for other_field in fields[i+1:]:
# within one collection is never mutually exclusive
conflict = _find_conflict(context, cached_fields_and_fragment_names, compared_fragments, False,
response_name, field, other_field)
if conflict:
conflicts.append(conflict)
def _collect_conflicts_between(context, conflicts, cached_fields_and_fragment_names, compared_fragments,
parent_fields_are_mutually_exclusive, field_map1, field_map2):
"""Collect all Conflicts between two collections of fields.
This is similar to, but different from the `collect_conflicts_within` function above. This check assumes that
`collect_conflicts_within` has already been called on each provided collection of fields.
This is true because this validator traverses each individual selection set.
"""
# A field map is a keyed collection, where each key represents a response
# name and the value at that key is a list of all fields which provide that
# response name. For any response name which appears in both provided field
# maps, each field from the first field map must be compared to every field
# in the second field map to find potential conflicts.
for response_name, fields1 in list(field_map1.items()):
fields2 = field_map2.get(response_name)
if fields2:
for field1 in fields1:
for field2 in fields2:
conflict = _find_conflict(context, cached_fields_and_fragment_names, compared_fragments,
parent_fields_are_mutually_exclusive, response_name, field1, field2)
if conflict:
conflicts.append(conflict)
def _find_conflict(context, cached_fields_and_fragment_names, compared_fragments, parent_fields_are_mutually_exclusive,
response_name, field1, field2):
"""Determines if there is a conflict between two particular fields."""
parent_type1, ast1, def1 = field1
parent_type2, ast2, def2 = field2
# If it is known that two fields could not possibly apply at the same
# time, due to the parent types, then it is safe to permit them to diverge
# in aliased field or arguments used as they will not present any ambiguity
# by differing.
# It is known that two parent types could never overlap if they are
# different Object types. Interface or Union types might overlap - if not
# in the current state of the schema, then perhaps in some future version,
# thus may not safely diverge.
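    # Illustrative example (types invented, not from this module): in
    #   { pet { ... on Dog { name } ... on Cat { name: nickname } } }
    # the two "name" selections have different Object parent types (Dog and
    # Cat), can never apply to the same concrete object, and so are allowed
    # to diverge without being reported as a conflict.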
are_mutually_exclusive = (
parent_fields_are_mutually_exclusive or (
parent_type1 != parent_type2 and
isinstance(parent_type1, GraphQLObjectType) and
isinstance(parent_type2, GraphQLObjectType)
)
)
# The return type for each field.
type1 = def1 and def1.type
type2 = def2 and def2.type
if not are_mutually_exclusive:
# Two aliases must refer to the same field.
name1 = ast1.name.value
name2 = ast2.name.value
if name1 != name2:
return (
(response_name, '{} and {} are different fields'.format(name1, name2)),
[ast1],
[ast2]
)
# Two field calls must have the same arguments.
if not _same_arguments(ast1.arguments, ast2.arguments):
return (
(response_name, 'they have differing arguments'),
[ast1],
[ast2]
)
if type1 and type2 and do_types_conflict(type1, type2):
return (
(response_name, 'they return conflicting types {} and {}'.format(type1, type2)),
[ast1],
[ast2]
)
# Collect and compare sub-fields. Use the same "visited fragment names" list
# for both collections so fields in a fragment reference are never
# compared to themselves.
selection_set1 = ast1.selection_set
selection_set2 = ast2.selection_set
if selection_set1 and selection_set2:
conflicts = _find_conflicts_between_sub_selection_sets(context, cached_fields_and_fragment_names,
compared_fragments, are_mutually_exclusive,
get_named_type(type1), selection_set1,
get_named_type(type2), selection_set2)
return _subfield_conflicts(conflicts, response_name, ast1, ast2)
def _get_fields_and_fragments_names(context, cached_fields_and_fragment_names, parent_type, selection_set):
cached = cached_fields_and_fragment_names.get(selection_set)
if not cached:
ast_and_defs = OrderedDict()
fragment_names = OrderedDict()
_collect_fields_and_fragment_names(context, parent_type, selection_set, ast_and_defs, fragment_names)
cached = [ast_and_defs, list(fragment_names.keys())]
cached_fields_and_fragment_names[selection_set] = cached
return cached
def _get_referenced_fields_and_fragment_names(context, cached_fields_and_fragment_names, fragment):
"""Given a reference to a fragment, return the represented collection of fields as well as a list of
nested fragment names referenced via fragment spreads."""
# Short-circuit building a type from the AST if possible.
cached = cached_fields_and_fragment_names.get(fragment.selection_set)
if cached:
return cached
fragment_type = type_from_ast(context.get_schema(), fragment.type_condition)
return _get_fields_and_fragments_names(context, cached_fields_and_fragment_names,
fragment_type, fragment.selection_set)
def _collect_fields_and_fragment_names(context, parent_type, selection_set, ast_and_defs, fragment_names):
for selection in selection_set.selections:
if isinstance(selection, ast.Field):
field_name = selection.name.value
if isinstance(parent_type, (GraphQLObjectType, GraphQLInterfaceType)):
field_def = parent_type.fields.get(field_name)
else:
field_def = None
response_name = selection.alias.value if selection.alias else field_name
if not ast_and_defs.get(response_name):
ast_and_defs[response_name] = []
ast_and_defs[response_name].append([parent_type, selection, field_def])
elif isinstance(selection, ast.FragmentSpread):
fragment_names[selection.name.value] = True
elif isinstance(selection, ast.InlineFragment):
type_condition = selection.type_condition
if type_condition:
inline_fragment_type = type_from_ast(context.get_schema(), selection.type_condition)
else:
inline_fragment_type = parent_type
_collect_fields_and_fragment_names(context, inline_fragment_type, selection.selection_set, ast_and_defs,
fragment_names)
def _subfield_conflicts(conflicts, response_name, ast1, ast2):
"""Given a series of Conflicts which occurred between two sub-fields, generate a single Conflict."""
if conflicts:
return (
(response_name, [conflict[0] for conflict in conflicts]),
tuple(itertools.chain([ast1], *[conflict[1] for conflict in conflicts])),
tuple(itertools.chain([ast2], *[conflict[2] for conflict in conflicts]))
)
def do_types_conflict(type1, type2):
if isinstance(type1, GraphQLList):
if isinstance(type2, GraphQLList):
return do_types_conflict(type1.of_type, type2.of_type)
return True
if isinstance(type2, GraphQLList):
if isinstance(type1, GraphQLList):
return do_types_conflict(type1.of_type, type2.of_type)
return True
if isinstance(type1, GraphQLNonNull):
if isinstance(type2, GraphQLNonNull):
return do_types_conflict(type1.of_type, type2.of_type)
return True
if isinstance(type2, GraphQLNonNull):
if isinstance(type1, GraphQLNonNull):
return do_types_conflict(type1.of_type, type2.of_type)
return True
if is_leaf_type(type1) or is_leaf_type(type2):
return type1 != type2
return False
def _same_value(value1, value2):
return (not value1 and not value2) or print_ast(value1) == print_ast(value2)
def _same_arguments(arguments1, arguments2):
# Check to see if they are empty arguments or nones. If they are, we can
# bail out early.
if not (arguments1 or arguments2):
return True
if len(arguments1) != len(arguments2):
return False
arguments2_values_to_arg = {a.name.value: a for a in arguments2}
for argument1 in arguments1:
argument2 = arguments2_values_to_arg.get(argument1.name.value)
if not argument2:
return False
if not _same_value(argument1.value, argument2.value):
return False
return True
|
code/ReID_net/datasets/Util/Batch.py | MTonyM/PReMVOS | 140 | 12678095 | import tensorflow as tf
def create_batch_dict(batch_size, tensors_dict):
if batch_size == 1:
batch = {k: tf.expand_dims(t, axis=0) for k, t in list(tensors_dict.items())}
summary = None
else:
keys = list(tensors_dict.keys())
values = list(tensors_dict.values())
values = tf.train.batch(values, batch_size, num_threads=8, capacity=5 * batch_size)
batch = dict(list(zip(keys, values)))
summary = tf.get_collection(tf.GraphKeys.SUMMARIES)[-1]
assert "fraction_of_" in summary.name
for t in list(batch.values()):
t.set_shape([batch_size] + [None] * (t.get_shape().ndims - 1))
return batch, summary
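# Hypothetical usage sketch (TF1 graph mode assumed; tensor names invented):
#   tensors = {"img": tf.zeros([8, 8, 3]), "label": tf.constant(1)}
#   batch, summary = create_batch_dict(1, tensors)
#   # batch["img"] has static shape (1, 8, 8, 3) and summary is None
# For batch_size > 1 the tensors must come from a queue-based input pipeline:
# tf.train.batch registers queue runners that need to be started with
# tf.train.start_queue_runners before the batch can be evaluated.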
|
common/src/utils/oopatterns.py | krisshol/bach-kmno | 248 | 12678107 | #
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
"""Defines common Object Oriented Patterns
One should re-use these instead of defining their own.
"""
# ==========================
# Singleton Design Pattern
# ==========================
class SingletonMetaClass(type):
"""Metaclass for singleton design pattern.
.. warning::
This metaclass should not be used directly. To declare a class
using the singleton pattern, one should use the :class:`Singleton`
class instead.
"""
_instances = {}
def __call__(mcs, *args, **kwargs):
if mcs not in mcs._instances:
mcs._instances[mcs] = \
super(SingletonMetaClass, mcs).__call__(*args, **kwargs)
return mcs._instances[mcs]
# Metaclass compatible with python 2 and 3. Inherit from this for singletons
Singleton = SingletonMetaClass('Singleton', (object,), {})
"""Base class for singleton
This class implements the singleton design pattern. One can inherit from this
base class to make a class implement the singleton design pattern.
.. code-block:: python
# a class implementing a singleton
    class aSingleton(Singleton):
# do some stuff here
pass
# let us verify that it is really a singleton
    print(id(aSingleton()))
    print(id(aSingleton()))
"""
# =====================================
# Parametric Singleton Design Pattern
# =====================================
class ParametricSingletonMetaClass(type):
"""Metaclass for parametric singleton design pattern
.. warning::
This metaclass should not be used directly. To declare a class
using the singleton pattern, one should use the
:class:`ParametricSingleton` class instead and precise the
parameter used for the dict using a class method named
``depends_on``.
"""
_instances = {}
def __call__(mcs, *args, **kwargs):
# check for "depends_on" attribute
if "depends_on" not in kwargs and not hasattr(mcs, "depends_on"):
raise TypeError("argument or attribute 'depends_on' not defined")
# check for unbound methods
if "depends_on" in kwargs and \
(not kwargs["depends_on"] or not callable(kwargs["depends_on"])):
raise TypeError("function in parameter 'depends_on' is not bound")
elif hasattr(mcs, "depends_on") and \
(not getattr(mcs, "depends_on") or
not callable(getattr(mcs, "depends_on"))):
raise TypeError("function in attribute 'depends_on' is not bound")
# call depends_on to get the key
if "depends_on" in kwargs:
key = kwargs["depends_on"](mcs, args, kwargs)
del kwargs["depends_on"]
else:
key = getattr(mcs, "depends_on")(mcs, args, kwargs)
# check for instance
if mcs not in mcs._instances:
mcs._instances[mcs] = {}
if key not in mcs._instances[mcs]:
mcs._instances[mcs][key] = \
super(ParametricSingletonMetaClass, mcs).\
__call__(*args, **kwargs)
return mcs._instances[mcs][key]
def update_key(mcs, old_key, new_key):
mcs._instances[mcs][new_key] = mcs._instances[mcs].pop(old_key)
def remove_key(mcs, key):
if key in mcs._instances:
del mcs._instances[mcs][key]
# Metaclass compatible with python 2 and 3.
# Inherit from this for parametric singletons
ParametricSingleton = ParametricSingletonMetaClass('ParametricSingleton',
(object,), {})
"""Base class for parametric singletons
This class implements the parametric singleton design pattern. One can inherit
from this base class to make a class implement a parametric singleton pattern.
Pass either an argument ``depends_on`` in the constructor or define a class
method called ``depends_on`` that specifies how to compute the parameter value
used for the hash table storing the instances:
* example with a **static method**:
.. code-block:: python
class aParametricSingleton(ParametricSingleton):
@staticmethod
def depends_on(*args, **kwargs):
return "my key"
* example with a **``lambda`` wrapped with a static method**:
.. code-block:: python
class aParametricSingleton(ParametricSingleton):
depends_on = staticmethod(lambda *args, **kwargs: "my key")
"""
class PluginMetaClass(type):
"""Metaclass for auto-registering plugin pattern
.. warning::
This metaclass should not be used directly. To declare a class
using the plugin pattern, one should use the :class:`Plugin`
class instead.
"""
# ===================
# class constructor
# ===================
def __init__(mcs, name, bases, attrs):
# small hack to skip Plugin base class when initializing
if not len(attrs):
return
# Begin to register all classes that derives from Plugin base class
if not hasattr(mcs, '_plugins'):
# This branch only executes when processing the mount point itself.
# So, since this is a new plugin type, not an implementation, this
# class shouldn't be registered as a plugin. Instead, it sets up a
# list where plugins can be registered later.
mcs._plugins = []
else:
# This must be a plugin implementation, which should be registered.
# Simply appending it to the list is all that's needed to keep
# track of it later.
mcs._plugins.append(mcs)
# =================
# Plugin metadata
# =================
_plugin_name = None
_plugin_version = None
_plugin_description = None
_plugin_dependencies = None
# =====================
# Setters and getters
# =====================
@property
def plugin_name(mcs):
return mcs._plugin_name
@property
def plugin_version(mcs):
return mcs._plugin_version
@property
def plugin_description(mcs):
return mcs._plugin_description
@property
def plugin_dependencies(mcs):
return mcs._plugin_dependencies
@property
def plugins(mcs):
return mcs._plugins
# =================
# Utility methods
# =================
def get_plugins(mcs, *args, **kwargs):
"""return instances of plugins"""
return [plugin(*args, **kwargs) for plugin in mcs._plugins]
def get_plugin(mcs, name, *args, **kwargs):
"""return instance of a named plugin"""
plugin = [x for x in mcs._plugins if x.plugin_name == name]
return plugin[0] if plugin else None
# Metaclass compatible with python 2 and 3. Inherit from this for Plugins
Plugin = PluginMetaClass('Plugin', (object,), {})
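if __name__ == "__main__":
    # Minimal demonstration sketch (class names invented): the first class that
    # derives from Plugin acts as the mount point holding the registry, and
    # every class deriving from that mount point is registered automatically.
    class AntivirusPlugin(Plugin):
        _plugin_description = "mount point for antivirus plugins"
    class ClamAVPlugin(AntivirusPlugin):
        _plugin_name = "clamav"
        _plugin_version = "1.0"
    print(AntivirusPlugin.plugins)               # [<class '__main__.ClamAVPlugin'>]
    print(AntivirusPlugin.get_plugin("clamav"))  # the ClamAVPlugin class itself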
|
tests/query_test/test_queries.py | survivorli/Impala | 1,523 | 12678127 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# General Impala query tests
import pytest
import re
from copy import deepcopy
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.common.skip import SkipIfEC
from tests.common.test_dimensions import (
create_uncompressed_text_dimension, extend_exec_option_dimension,
create_beeswax_hs2_dimension, hs2_parquet_constraint)
from tests.common.test_vector import ImpalaTestVector
class TestQueries(ImpalaTestSuite):
@classmethod
def add_test_dimensions(cls):
super(TestQueries, cls).add_test_dimensions()
if cls.exploration_strategy() == 'core':
cls.ImpalaTestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format == 'parquet')
# Run these queries through both beeswax and HS2 to get coverage of both protocols.
# Don't run all combinations of table format and protocol - the dimensions should
# be orthogonal.
cls.ImpalaTestMatrix.add_dimension(create_beeswax_hs2_dimension())
cls.ImpalaTestMatrix.add_constraint(hs2_parquet_constraint)
# Adding a test dimension here to test the small query opt in exhaustive.
if cls.exploration_strategy() == 'exhaustive':
extend_exec_option_dimension(cls, "exec_single_node_rows_threshold", "100")
@classmethod
def get_workload(cls):
return 'functional-query'
def test_analytic_fns(self, vector):
# TODO: Enable some of these tests for Avro if possible
# Don't attempt to evaluate timestamp expressions with Avro tables which don't
# support a timestamp type
table_format = vector.get_value('table_format')
if table_format.file_format == 'avro':
pytest.xfail("%s doesn't support TIMESTAMP" % (table_format.file_format))
if table_format.file_format == 'hbase':
pytest.xfail("A lot of queries check for NULLs, which hbase does not recognize")
self.run_test_case('QueryTest/analytic-fns', vector)
def test_limit(self, vector):
if vector.get_value('table_format').file_format == 'hbase':
pytest.xfail("IMPALA-283 - select count(*) produces inconsistent results")
if vector.get_value('table_format').file_format == 'kudu':
pytest.xfail("Limit queries without order by clauses are non-deterministic")
self.run_test_case('QueryTest/limit', vector)
def test_top_n(self, vector):
if vector.get_value('table_format').file_format == 'hbase':
pytest.xfail(reason="IMPALA-283 - select count(*) produces inconsistent results")
# QueryTest/top-n is also run in test_sort with disable_outermost_topn = 1
self.run_test_case('QueryTest/top-n', vector)
def test_union(self, vector):
self.run_test_case('QueryTest/union', vector)
# IMPALA-3586: The passthrough and materialized children are interleaved. The batch
# size is small to test the transition between materialized and passthrough children.
query_string = ("select count(c) from ( "
"select bigint_col + 1 as c from functional.alltypes limit 15 "
"union all "
"select bigint_col as c from functional.alltypes limit 15 "
"union all "
"select bigint_col + 1 as c from functional.alltypes limit 15 "
"union all "
"(select bigint_col as c from functional.alltypes limit 15)) t")
vector.get_value('exec_option')['batch_size'] = 10
result = self.execute_query(query_string, vector.get_value('exec_option'))
assert result.data[0] == '60'
def test_sort(self, vector):
if vector.get_value('table_format').file_format == 'hbase':
pytest.xfail(reason="IMPALA-283 - select count(*) produces inconsistent results")
vector.get_value('exec_option')['disable_outermost_topn'] = 1
self.run_test_case('QueryTest/sort', vector)
# We can get the sort tests for free from the top-n file
self.run_test_case('QueryTest/top-n', vector)
def test_inline_view(self, vector):
if vector.get_value('table_format').file_format == 'hbase':
pytest.xfail("jointbl does not have columns with unique values, "
"hbase collapses them")
self.run_test_case('QueryTest/inline-view', vector)
def test_inline_view_limit(self, vector):
self.run_test_case('QueryTest/inline-view-limit', vector)
def test_subquery(self, vector):
self.run_test_case('QueryTest/subquery', vector)
def test_subquery_single_node(self, vector):
new_vector = deepcopy(vector)
new_vector.get_value('exec_option')['num_nodes'] = 1
self.run_test_case('QueryTest/subquery-single-node', new_vector)
def test_alias(self, vector):
self.run_test_case('QueryTest/alias', vector)
def test_subquery_in_constant_lhs(self, vector):
self.run_test_case('QueryTest/subquery-in-constant-lhs', vector)
def test_empty(self, vector):
self.run_test_case('QueryTest/empty', vector)
def test_views(self, vector):
if vector.get_value('table_format').file_format == "hbase":
pytest.xfail("TODO: Enable views tests for hbase")
self.run_test_case('QueryTest/views', vector)
def test_with_clause(self, vector):
if vector.get_value('table_format').file_format == "hbase":
pytest.xfail("TODO: Enable with clause tests for hbase")
self.run_test_case('QueryTest/with-clause', vector)
def test_misc(self, vector):
table_format = vector.get_value('table_format')
if table_format.file_format in ['hbase', 'rc', 'parquet', 'kudu']:
msg = ("Failing on rc/snap/block despite resolution of IMP-624,IMP-503. "
"Failing on kudu and parquet because tables do not exist")
pytest.xfail(msg)
self.run_test_case('QueryTest/misc', vector)
def test_null_data(self, vector):
if vector.get_value('table_format').file_format == 'hbase':
pytest.xfail("null data does not appear to work in hbase")
self.run_test_case('QueryTest/null_data', vector)
# Tests in this class are only run against text/none either because that's the only
# format that is supported, or the tests don't exercise the file format.
class TestQueriesTextTables(ImpalaTestSuite):
@classmethod
def add_test_dimensions(cls):
super(TestQueriesTextTables, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
@classmethod
def get_workload(cls):
return 'functional-query'
def test_overflow(self, vector):
self.run_test_case('QueryTest/overflow', vector)
def test_strict_mode(self, vector):
vector.get_value('exec_option')['strict_mode'] = 1
vector.get_value('exec_option')['abort_on_error'] = 0
self.run_test_case('QueryTest/strict-mode', vector)
vector.get_value('exec_option')['abort_on_error'] = 1
self.run_test_case('QueryTest/strict-mode-abort', vector)
def test_data_source_tables(self, vector):
self.run_test_case('QueryTest/data-source-tables', vector)
def test_distinct_estimate(self, vector):
# These results will vary slightly depending on how the values get split up
# so only run with 1 node and on text.
vector.get_value('exec_option')['num_nodes'] = 1
self.run_test_case('QueryTest/distinct-estimate', vector)
@SkipIfEC.oom
def test_random(self, vector):
# These results will vary slightly depending on how the values get split up
# so only run with 1 node and on text.
vector.get_value('exec_option')['num_nodes'] = 1
self.run_test_case('QueryTest/random', vector)
@SkipIfEC.oom
def test_values(self, vector):
self.run_test_case('QueryTest/values', vector)
# Tests in this class are only run against Parquet because the tests don't exercise the
# file format.
class TestQueriesParquetTables(ImpalaTestSuite):
@classmethod
def add_test_dimensions(cls):
super(TestQueriesParquetTables, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format == 'parquet')
@classmethod
def get_workload(cls):
return 'functional-query'
@SkipIfEC.oom
@pytest.mark.execute_serially
def test_very_large_strings(self, vector):
"""Regression test for IMPALA-1619. Doesn't need to be run on all file formats.
Executes serially to avoid large random spikes in mem usage."""
self.run_test_case('QueryTest/large_strings', vector)
def test_single_node_large_sorts(self, vector):
if self.exploration_strategy() != 'exhaustive':
pytest.skip("only run large sorts on exhaustive")
vector.get_value('exec_option')['disable_outermost_topn'] = 1
vector.get_value('exec_option')['num_nodes'] = 1
self.run_test_case('QueryTest/single-node-large-sorts', vector)
# Tests for queries in HDFS-specific tables, e.g. AllTypesAggMultiFilesNoPart.
class TestHdfsQueries(ImpalaTestSuite):
@classmethod
def add_test_dimensions(cls):
super(TestHdfsQueries, cls).add_test_dimensions()
# Kudu doesn't support AllTypesAggMultiFilesNoPart (KUDU-1271, KUDU-1570).
cls.ImpalaTestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format != 'kudu')
# Adding a test dimension here to test the small query opt in exhaustive.
if cls.exploration_strategy() == 'exhaustive':
extend_exec_option_dimension(cls, "exec_single_node_rows_threshold", "100")
@classmethod
def get_workload(cls):
return 'functional-query'
@SkipIfEC.oom
def test_hdfs_scan_node(self, vector):
self.run_test_case('QueryTest/hdfs-scan-node', vector)
def test_file_partitions(self, vector):
self.run_test_case('QueryTest/hdfs-partitions', vector)
class TestTopNReclaimQuery(ImpalaTestSuite):
"""Test class to validate that TopN periodically reclaims tuple pool memory
and runs with a lower memory footprint."""
QUERY = "select * from tpch.lineitem order by l_orderkey desc limit 10;"
# Mem limit empirically selected so that the query fails if tuple pool reclamation
# is not implemented for TopN
MEM_LIMIT = "60m"
@classmethod
  def get_workload(cls):
return 'tpch'
@classmethod
def add_test_dimensions(cls):
super(TestTopNReclaimQuery, cls).add_test_dimensions()
# The tpch tests take a long time to execute so restrict the combinations they
# execute over.
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
def test_top_n_reclaim(self, vector):
exec_options = vector.get_value('exec_option')
exec_options['mem_limit'] = self.MEM_LIMIT
exec_options['num_scanner_threads'] = 1
result = self.execute_query(self.QUERY, exec_options)
runtime_profile = str(result.runtime_profile)
num_of_times_tuple_pool_reclaimed = re.findall(
'TuplePoolReclamations: ([0-9]*)', runtime_profile)
# Confirm newly added counter is visible
assert len(num_of_times_tuple_pool_reclaimed) > 0
# Tuple pool is expected to be reclaimed for this query
for n in num_of_times_tuple_pool_reclaimed:
assert int(n) > 0
|
third_party/blink/renderer/bindings/scripts/v8_types.py | iridium-browser/iridium-browser | 575 | 12678132 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# pylint: disable=relative-import
"""Functions for type handling and type conversion (Blink/C++ <-> V8/JS).
Extends IdlType and IdlUnionType with V8-specific properties, methods, and
class methods.
Spec:
http://www.w3.org/TR/WebIDL/#es-type-mapping
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import posixpath
from idl_types import IdlAnnotatedType
from idl_types import IdlArrayOrSequenceType
from idl_types import IdlNullableType
from idl_types import IdlRecordType
from idl_types import IdlType
from idl_types import IdlTypeBase
from idl_types import IdlUnionType
from utilities import to_snake_case
import v8_attributes # for IdlType.constructor_type_name
from v8_globals import includes
from v8_utilities import binding_header_filename, extended_attribute_value_contains
################################################################################
# V8-specific handling of IDL types
################################################################################
NON_WRAPPER_TYPES = frozenset([
'EventHandler',
'NodeFilter',
'OnBeforeUnloadEventHandler',
'OnErrorEventHandler',
])
TYPED_ARRAY_TYPES = frozenset([
'Float32Array',
'Float64Array',
'Int8Array',
'Int16Array',
'Int32Array',
'Uint8Array',
'Uint8ClampedArray',
'Uint16Array',
'Uint32Array',
'BigInt64Array',
'BigUint64Array',
])
ARRAY_BUFFER_VIEW_AND_TYPED_ARRAY_TYPES = TYPED_ARRAY_TYPES.union(
frozenset(['ArrayBufferView']))
ARRAY_BUFFER_AND_VIEW_TYPES = TYPED_ARRAY_TYPES.union(
frozenset([
'ArrayBuffer',
'ArrayBufferView',
'DataView',
'SharedArrayBuffer',
]))
# We have an unfortunate hack that treats types whose name ends with
# 'Constructor' as aliases to IDL interface object. This list is used to disable
# the hack.
_CALLBACK_CONSTRUCTORS = frozenset((
'AnimatorConstructor',
'BlinkAudioWorkletProcessorConstructor',
'CustomElementConstructor',
'NoArgumentConstructor',
))
IdlType.is_array_buffer_or_view = property(
lambda self: self.base_type in ARRAY_BUFFER_AND_VIEW_TYPES)
IdlType.is_array_buffer_view_or_typed_array = property(
lambda self: self.base_type in ARRAY_BUFFER_VIEW_AND_TYPED_ARRAY_TYPES)
IdlType.is_typed_array = property(
lambda self: self.base_type in TYPED_ARRAY_TYPES)
IdlType.is_wrapper_type = property(
lambda self: (self.is_interface_type and not self.is_callback_interface and self.base_type not in NON_WRAPPER_TYPES)
)
################################################################################
# C++ types
################################################################################
CPP_TYPE_SAME_AS_IDL_TYPE = set([
'double',
'float',
])
CPP_INTEGER_CONVERSION_RULES = {
'byte': 'int8_t',
'octet': 'uint8_t',
'short': 'int16_t',
'unsigned short': 'uint16_t',
'long': 'int32_t',
'unsigned long': 'uint32_t',
'long long': 'int64_t',
'unsigned long long': 'uint64_t',
}
CPP_SPECIAL_CONVERSION_RULES = {
'EventHandler': 'EventListener*',
'OnBeforeUnloadEventHandler': 'EventListener*',
'OnErrorEventHandler': 'EventListener*',
'Promise': 'ScriptPromise',
'ScriptValue': 'ScriptValue',
# FIXME: Eliminate custom bindings for XPathNSResolver http://crbug.com/345529
'XPathNSResolver': 'XPathNSResolver*',
'boolean': 'bool',
'object': 'ScriptValue',
'unrestricted double': 'double',
'unrestricted float': 'float',
}
def string_resource_mode(idl_type):
"""Returns a V8StringResourceMode value corresponding to the IDL type.
Args:
idl_type:
A string IdlType.
"""
if idl_type.is_nullable:
return 'kTreatNullAndUndefinedAsNullString'
if idl_type.is_annotated_type:
treat_null_as = idl_type.extended_attributes.get('TreatNullAs')
if treat_null_as == 'EmptyString':
return 'kTreatNullAsEmptyString'
elif treat_null_as:
raise ValueError(
'Unknown value for [TreatNullAs]: %s' % treat_null_as)
return ''
def cpp_type(idl_type,
extended_attributes=None,
raw_type=False,
used_as_rvalue_type=False,
used_as_variadic_argument=False,
used_in_cpp_sequence=False):
"""Returns C++ type corresponding to IDL type.
|idl_type| argument is of type IdlType, while return value is a string
Args:
idl_type:
IdlType
raw_type:
bool, True if idl_type's raw/primitive C++ type should be returned.
used_as_rvalue_type:
bool, True if the C++ type is used as an argument or the return
type of a method.
used_as_variadic_argument:
bool, True if the C++ type is used as a variadic argument of a method.
used_in_cpp_sequence:
bool, True if the C++ type is used as an element of a container.
Containers can be an array, a sequence, a dictionary or a record.
"""
extended_attributes = extended_attributes or {}
idl_type = idl_type.preprocessed_type
# Nullable types
def needs_optional_wrapper():
if not idl_type.is_nullable or not used_in_cpp_sequence:
return False
# NativeValueTraits<T>::NullValue should exist in order to provide the
# implicit null value, if needed.
return not idl_type.inner_type.cpp_type_has_null_value
if needs_optional_wrapper():
inner_type = idl_type.inner_type
if inner_type.is_dictionary or inner_type.is_sequence or inner_type.is_record_type:
# TODO(jbroman, bashi): Implement this if needed.
# This is non-trivial to support because HeapVector refuses to hold
# base::Optional<>, and IDLDictionaryBase (and subclasses) have no
# integrated null state that can be distinguished from a present but
# empty dictionary. It's unclear whether this will ever come up in
# real spec WebIDL.
raise NotImplementedError(
'Sequences of nullable dictionary, sequence or record types are not yet supported.'
)
return 'base::Optional<%s>' % inner_type.cpp_type_args(
extended_attributes, raw_type, used_as_rvalue_type,
used_as_variadic_argument, used_in_cpp_sequence)
# Array or sequence types
if used_as_variadic_argument:
native_array_element_type = idl_type
else:
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
vector_type = cpp_ptr_type('Vector', 'HeapVector',
native_array_element_type.is_traceable)
vector_template_type = cpp_template_type(
vector_type,
native_array_element_type.cpp_type_args(used_in_cpp_sequence=True))
if used_as_rvalue_type:
return 'const %s&' % vector_template_type
return vector_template_type
# Record types.
if idl_type.is_record_type:
vector_type = cpp_ptr_type('Vector', 'HeapVector',
idl_type.value_type.is_traceable)
value_type = idl_type.value_type.cpp_type_args(
used_in_cpp_sequence=True)
vector_template_type = cpp_template_type(
vector_type, 'std::pair<String, %s>' % value_type)
if used_as_rvalue_type:
return 'const %s&' % vector_template_type
return vector_template_type
# Simple types
base_idl_type = idl_type.base_type
if base_idl_type in CPP_TYPE_SAME_AS_IDL_TYPE:
return base_idl_type
if base_idl_type in CPP_INTEGER_CONVERSION_RULES:
return CPP_INTEGER_CONVERSION_RULES[base_idl_type]
if base_idl_type in CPP_SPECIAL_CONVERSION_RULES:
return CPP_SPECIAL_CONVERSION_RULES[base_idl_type]
if idl_type.is_string_type:
if idl_type.has_string_context:
return 'String'
if not raw_type:
return 'const String&' if used_as_rvalue_type else 'String'
return 'V8StringResource<%s>' % string_resource_mode(idl_type)
if base_idl_type == 'ArrayBufferView' and 'FlexibleArrayBufferView' in extended_attributes:
return 'FlexibleArrayBufferView'
if base_idl_type in TYPED_ARRAY_TYPES and 'FlexibleArrayBufferView' in extended_attributes:
return 'Flexible' + base_idl_type
if base_idl_type in ARRAY_BUFFER_VIEW_AND_TYPED_ARRAY_TYPES or base_idl_type == 'DataView':
if 'AllowShared' in extended_attributes:
return cpp_template_type('MaybeShared', idl_type.implemented_as)
else:
return cpp_template_type('NotShared', idl_type.implemented_as)
if idl_type.is_interface_type or idl_type.is_dictionary:
implemented_as_class = idl_type.implemented_as
if raw_type or not used_in_cpp_sequence:
return implemented_as_class + '*'
if not used_in_cpp_sequence:
return implemented_as_class + '*'
if used_as_rvalue_type and idl_type.is_garbage_collected:
return 'const %s*' % implemented_as_class
return cpp_template_type('Member', implemented_as_class)
if idl_type.is_union_type:
# Avoid "AOrNullOrB" for cpp type of (A? or B) because we generate
        # V8AOrBOrNull to handle null for (A? or B), (A or B?) and (A or B)?
def member_cpp_name(idl_type):
if idl_type.is_nullable:
return idl_type.inner_type.name
return idl_type.name
idl_type_name = 'Or'.join(
member_cpp_name(member) for member in idl_type.member_types)
return 'const %s&' % idl_type_name if used_as_rvalue_type else idl_type_name
if idl_type.is_callback_function:
v8_type_name = 'V8' + base_idl_type
if idl_type.is_custom_callback_function:
return v8_type_name
if not used_in_cpp_sequence:
return v8_type_name + '*'
return cpp_template_type('Member', v8_type_name)
if base_idl_type == 'void':
return base_idl_type
# Default, assume native type is a pointer with same type name as idl type
return base_idl_type + '*'
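# Illustrative results of cpp_type above (the 'Foo' interface name is
# hypothetical): a plain DOMString maps to 'String' ('const String&' when used
# as an rvalue), sequence<DOMString> maps to 'Vector<String>', and a
# garbage-collected interface Foo used inside a C++ sequence maps to
# 'Member<Foo>'.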
def cpp_type_initializer(idl_type):
"""Returns a string containing a C++ initialization statement for the
corresponding type.
|idl_type| argument is of type IdlType.
"""
base_idl_type = idl_type.base_type
if idl_type.native_array_element_type:
return ''
if idl_type.is_explicit_nullable:
return ''
if idl_type.is_numeric_type:
return ' = 0'
if base_idl_type == 'boolean':
return ' = false'
if (base_idl_type in NON_WRAPPER_TYPES
or base_idl_type in CPP_SPECIAL_CONVERSION_RULES
or base_idl_type == 'any' or idl_type.is_string_type
or idl_type.is_enum):
return ''
return ' = nullptr'
# Allow access as idl_type.cpp_type if no arguments
IdlTypeBase.cpp_type = property(cpp_type)
IdlTypeBase.cpp_type_initializer = property(cpp_type_initializer)
IdlTypeBase.cpp_type_args = cpp_type
IdlUnionType.cpp_type_initializer = ''
IdlArrayOrSequenceType.native_array_element_type = property(
lambda self: self.element_type)
def cpp_template_type(template, inner_type):
"""Returns C++ template specialized to type."""
format_string = '{template}<{inner_type}>'
return format_string.format(template=template, inner_type=inner_type)
def cpp_ptr_type(old_type, new_type, is_gc_type):
if is_gc_type:
return new_type
return old_type
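# Illustrative note: these helpers are plain string formatters, e.g.
# cpp_template_type('Member', 'Node') yields 'Member<Node>', and
# cpp_ptr_type('Vector', 'HeapVector', True) yields 'HeapVector' because
# traceable element types need the garbage-collection-aware container.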
def v8_type(interface_name):
return 'V8' + interface_name
# [ImplementedAs]
# This handles [ImplementedAs] on interface types, not [ImplementedAs] in the
# interface being generated. e.g., given:
# Foo.idl: interface Foo {attribute Bar bar};
# Bar.idl: [ImplementedAs=Zork] interface Bar {};
# when generating bindings for Foo, the [ImplementedAs] on Bar is needed.
# This data is external to Foo.idl, and hence computed as global information in
# compute_interfaces_info.py to avoid having to parse IDLs of all used interfaces.
IdlType.implemented_as_interfaces = {}
def implemented_as(idl_type):
base_idl_type = idl_type.base_type
if base_idl_type in IdlType.implemented_as_interfaces:
return IdlType.implemented_as_interfaces[base_idl_type]
elif idl_type.is_callback_function or idl_type.is_callback_interface:
return 'V8%s' % base_idl_type
return base_idl_type
IdlType.implemented_as = property(implemented_as)
IdlType.set_implemented_as_interfaces = classmethod(
lambda cls, new_implemented_as_interfaces: \
cls.implemented_as_interfaces.update(new_implemented_as_interfaces)
)
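# Illustrative sketch of the lookup above (the mapping values are hypothetical):
# after IdlType.set_implemented_as_interfaces({'Bar': 'Zork'}),
# IdlType('Bar').implemented_as evaluates to 'Zork', while other (non-callback)
# interface names simply fall back to their own IDL type name.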
# [GarbageCollected]
IdlType.garbage_collected_types = set()
IdlType.is_garbage_collected = property(
lambda self: self.base_type in IdlType.garbage_collected_types)
IdlType.set_garbage_collected_types = classmethod(
lambda cls, new_garbage_collected_types: \
cls.garbage_collected_types.update(new_garbage_collected_types)
)
def is_gc_type(idl_type):
return idl_type.is_garbage_collected or idl_type.is_union_type
IdlTypeBase.is_gc_type = property(is_gc_type)
def is_traceable(idl_type):
return (idl_type.is_garbage_collected or idl_type.is_callback_function
or idl_type.cpp_type in ('ScriptValue', 'ScriptPromise'))
IdlTypeBase.is_traceable = property(is_traceable)
IdlUnionType.is_traceable = property(lambda self: True)
IdlArrayOrSequenceType.is_traceable = property(
lambda self: self.element_type.is_traceable)
IdlRecordType.is_traceable = property(
lambda self: self.value_type.is_traceable)
IdlNullableType.is_traceable = property(
lambda self: self.inner_type.is_traceable)
################################################################################
# Includes
################################################################################
INCLUDES_FOR_TYPE = {
'object':
set([
'bindings/core/v8/script_value.h', 'bindings/core/v8/idl_types.h',
'bindings/core/v8/native_value_traits_impl.h'
]),
'ArrayBufferView':
set([
'bindings/core/v8/v8_array_buffer_view.h',
'core/typed_arrays/array_buffer_view_helpers.h',
'core/typed_arrays/flexible_array_buffer_view.h'
]),
'EventHandler':
set(['bindings/core/v8/js_event_handler.h']),
'HTMLCollection':
set([
'bindings/core/v8/v8_html_collection.h', 'core/dom/class_collection.h',
'core/dom/tag_collection.h', 'core/html/html_collection.h',
'core/html/html_table_rows_collection.h',
'core/html/forms/html_data_list_options_collection.h',
'core/html/forms/html_form_controls_collection.h'
]),
'NodeList':
set([
'bindings/core/v8/v8_node_list.h', 'core/dom/name_node_list.h',
'core/dom/node_list.h', 'core/dom/static_node_list.h',
'core/html/forms/labels_node_list.h'
]),
'Promise':
set(['bindings/core/v8/script_promise.h']),
'ScriptValue':
set(['bindings/core/v8/script_value.h']),
}
def includes_for_type(idl_type, extended_attributes=None):
idl_type = idl_type.preprocessed_type
extended_attributes = extended_attributes or {}
# Simple types
base_idl_type = idl_type.base_type
if base_idl_type in INCLUDES_FOR_TYPE:
return INCLUDES_FOR_TYPE[base_idl_type]
if base_idl_type in TYPED_ARRAY_TYPES:
return INCLUDES_FOR_TYPE['ArrayBufferView'].union(
set([
'bindings/%s/v8/%s' % (component_dir[base_idl_type],
binding_header_filename(base_idl_type))
]))
if idl_type.is_basic_type:
return set([
'bindings/core/v8/idl_types.h',
'bindings/core/v8/native_value_traits_impl.h'
])
if base_idl_type.endswith('ConstructorConstructor'):
# FIXME: rename to NamedConstructor
# FIXME: replace with a [NamedConstructorAttribute] extended attribute
# Ending with 'ConstructorConstructor' indicates a named constructor,
# and these do not have header files, as they are part of the generated
# bindings for the interface
return set()
if (base_idl_type.endswith('Constructor')
and base_idl_type not in _CALLBACK_CONSTRUCTORS):
# FIXME: replace with a [ConstructorAttribute] extended attribute
base_idl_type = idl_type.constructor_type_name
if idl_type.is_custom_callback_function:
return set()
if idl_type.is_callback_function:
component = IdlType.callback_functions[base_idl_type]['component_dir']
return set([
'bindings/%s/v8/%s' % (component,
binding_header_filename(base_idl_type))
])
if base_idl_type not in component_dir:
return set()
return set([
'bindings/%s/v8/%s' % (component_dir[base_idl_type],
binding_header_filename(base_idl_type))
])
IdlType.includes_for_type = includes_for_type
def includes_for_union_type(idl_type, extended_attributes=None):
return set.union(*[
member_type.includes_for_type(extended_attributes)
for member_type in idl_type.member_types
])
IdlUnionType.includes_for_type = includes_for_union_type
def includes_for_array_or_sequence_type(idl_type, extended_attributes=None):
return set.union(
set([
'bindings/core/v8/idl_types.h',
'bindings/core/v8/native_value_traits_impl.h'
]), idl_type.element_type.includes_for_type(extended_attributes))
IdlArrayOrSequenceType.includes_for_type = includes_for_array_or_sequence_type
def includes_for_record_type(idl_type, extended_attributes=None):
return set.union(
idl_type.key_type.includes_for_type(extended_attributes),
idl_type.value_type.includes_for_type(extended_attributes))
IdlRecordType.includes_for_type = includes_for_record_type
def add_includes_for_type(idl_type, extended_attributes=None):
includes.update(idl_type.includes_for_type(extended_attributes))
IdlTypeBase.add_includes_for_type = add_includes_for_type
def includes_for_interface(interface_name):
return IdlType(interface_name).includes_for_type()
def add_includes_for_interface(interface_name):
includes.update(includes_for_interface(interface_name))
def impl_includes_for_type(idl_type, interfaces_info):
includes_for_type = set()
idl_type = idl_type.preprocessed_type
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
includes_for_type.update(
impl_includes_for_type(native_array_element_type, interfaces_info))
includes_for_type.add('platform/wtf/vector.h')
base_idl_type = idl_type.base_type
if idl_type.is_string_type:
includes_for_type.add('platform/wtf/text/wtf_string.h')
if idl_type.is_record_type:
includes_for_type.update(impl_includes_for_type(idl_type.key_type,
interfaces_info))
includes_for_type.update(impl_includes_for_type(idl_type.value_type,
interfaces_info))
if idl_type.is_callback_function:
component = IdlType.callback_functions[base_idl_type]['component_dir']
return set([
'bindings/%s/v8/%s' % (component,
binding_header_filename(base_idl_type))
])
if base_idl_type in interfaces_info:
interface_info = interfaces_info[base_idl_type]
includes_for_type.add(interface_info['include_path'])
if base_idl_type in INCLUDES_FOR_TYPE:
includes_for_type.update(INCLUDES_FOR_TYPE[base_idl_type])
if idl_type.is_array_buffer_view_or_typed_array:
return set([
'core/typed_arrays/dom_typed_array.h',
'core/typed_arrays/array_buffer_view_helpers.h'
])
return includes_for_type
def impl_includes_for_type_union(idl_type, interfaces_info):
includes_for_type = set()
for member_type in idl_type.member_types:
includes_for_type.update(
member_type.impl_includes_for_type(interfaces_info))
return includes_for_type
IdlTypeBase.impl_includes_for_type = impl_includes_for_type
IdlUnionType.impl_includes_for_type = impl_includes_for_type_union
def impl_forward_declaration_name(idl_type):
element_type = idl_type.native_array_element_type
if element_type:
return element_type.impl_forward_declaration_name
if ((idl_type.is_wrapper_type
and not idl_type.is_array_buffer_view_or_typed_array)
or idl_type.is_dictionary):
return idl_type.implemented_as
return None
IdlTypeBase.impl_forward_declaration_name = property(
impl_forward_declaration_name)
component_dir = {}
def set_component_dirs(new_component_dirs):
component_dir.update(new_component_dirs)
################################################################################
# V8 -> C++
################################################################################
# TODO(rakuco): Get rid of this definition altogether and move to NativeValueTraits<T>::nativeValue().
# That requires not requiring ExceptionState where it is not used, and we must be careful not
# to introduce any performance regressions.
V8_VALUE_TO_CPP_VALUE = {
# Basic
'DOMString':
'{v8_value}',
# Interface types
'FlexibleArrayBufferView':
'ToFlexibleArrayBufferView({isolate}, {v8_value}, {variable_name})',
'Promise':
'ScriptPromise::Cast(ScriptState::Current({isolate}), {v8_value})',
'ScriptValue':
'ScriptValue({isolate}, {v8_value})',
'Window':
'ToDOMWindow({isolate}, {v8_value})',
'XPathNSResolver':
'ToXPathNSResolver(ScriptState::Current({isolate}), {v8_value})',
}
def v8_conversion_needs_exception_state(idl_type):
return (idl_type.is_numeric_type or idl_type.is_enum
or idl_type.is_dictionary
or idl_type.is_array_buffer_view_or_typed_array
or idl_type.has_string_context or
idl_type.name in ('Boolean', 'ByteString', 'Object', 'USVString'))
IdlType.v8_conversion_needs_exception_state = property(
v8_conversion_needs_exception_state)
IdlAnnotatedType.v8_conversion_needs_exception_state = property(
v8_conversion_needs_exception_state)
IdlArrayOrSequenceType.v8_conversion_needs_exception_state = True
IdlRecordType.v8_conversion_needs_exception_state = True
IdlUnionType.v8_conversion_needs_exception_state = True
TRIVIAL_CONVERSIONS = frozenset(
['any', 'boolean', 'NodeFilter', 'XPathNSResolver', 'Promise'])
def v8_conversion_is_trivial(idl_type):
# The conversion is a simple expression that returns the converted value and
# cannot raise an exception.
return (idl_type.base_type in TRIVIAL_CONVERSIONS
or idl_type.is_wrapper_type)
IdlType.v8_conversion_is_trivial = property(v8_conversion_is_trivial)
def native_value_traits_type_name(idl_type,
extended_attributes,
in_sequence_or_record=False):
idl_type = idl_type.preprocessed_type
if idl_type.is_string_type:
# Strings are handled separately because null and/or undefined are
# processed by V8StringResource due to the [TreatNullAs] extended
# attribute and nullable string types.
name = 'IDL%s' % idl_type.name
elif idl_type.is_nullable:
inner_type = idl_type.inner_type
inner_type_nvt_type = native_value_traits_type_name(
inner_type, extended_attributes)
# The IDL compiler has special cases to handle some nullable types in operation
# parameters, dictionary fields, etc.
if in_sequence_or_record or inner_type.name == 'Object':
name = 'IDLNullable<%s>' % inner_type_nvt_type
else:
name = inner_type_nvt_type
elif idl_type.native_array_element_type:
name = 'IDLSequence<%s>' % native_value_traits_type_name(
idl_type.native_array_element_type, extended_attributes, True)
elif idl_type.is_record_type:
name = 'IDLRecord<%s, %s>' % (native_value_traits_type_name(
idl_type.key_type, extended_attributes),
native_value_traits_type_name(
idl_type.value_type,
extended_attributes, True))
elif idl_type.is_basic_type or idl_type.name in ['Object', 'Promise']:
name = 'IDL%s' % idl_type.name
elif idl_type.implemented_as is not None:
name = idl_type.implemented_as
else:
name = idl_type.name
return name
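# Illustrative examples of native_value_traits_type_name (not exhaustive): a
# plain DOMString becomes 'IDLString', sequence<DOMString> becomes
# 'IDLSequence<IDLString>', and record<DOMString, long> becomes
# 'IDLRecord<IDLString, IDLLong>'.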
def v8_value_to_cpp_value(idl_type, extended_attributes, v8_value,
variable_name, isolate, for_constructor_callback):
if idl_type.name == 'void':
return ''
# Simple types
idl_type = idl_type.preprocessed_type
base_idl_type = idl_type.as_union_type.name if idl_type.is_union_type else idl_type.base_type
if 'FlexibleArrayBufferView' in extended_attributes:
if base_idl_type not in ARRAY_BUFFER_VIEW_AND_TYPED_ARRAY_TYPES:
raise ValueError(
'Unrecognized base type for extended attribute "FlexibleArrayBufferView": %s'
% (idl_type.base_type))
if 'AllowShared' not in extended_attributes:
raise ValueError(
'"FlexibleArrayBufferView" extended attribute requires "AllowShared" on %s'
% (idl_type.base_type))
base_idl_type = 'FlexibleArrayBufferView'
if 'AllowShared' in extended_attributes and not idl_type.is_array_buffer_view_or_typed_array:
raise ValueError(
'Unrecognized base type for extended attribute "AllowShared": %s' %
(idl_type.base_type))
if idl_type.is_integer_type:
arguments = ', '.join([v8_value, 'exception_state'])
elif idl_type.v8_conversion_needs_exception_state:
arguments = ', '.join([v8_value, 'exception_state'])
else:
arguments = v8_value
if idl_type.has_string_context:
execution_context = 'bindings::ExecutionContextFromV8Wrappable(impl)'
if for_constructor_callback:
execution_context = 'CurrentExecutionContext(info.GetIsolate())'
cpp_expression_format = 'NativeValueTraits<IDL%s>::NativeValue(%s, %s, exception_state, %s)' % (
idl_type.name, isolate, v8_value, execution_context)
elif base_idl_type in V8_VALUE_TO_CPP_VALUE:
cpp_expression_format = V8_VALUE_TO_CPP_VALUE[base_idl_type]
elif idl_type.name == 'ArrayBuffer':
cpp_expression_format = (
'{v8_value}->Is{idl_type}() ? '
'V8{idl_type}::ToImpl(v8::Local<v8::{idl_type}>::Cast({v8_value})) : 0'
)
elif idl_type.is_array_buffer_view_or_typed_array or base_idl_type == 'DataView':
this_cpp_type = idl_type.cpp_type_args(
extended_attributes=extended_attributes)
if 'AllowShared' in extended_attributes:
cpp_expression_format = (
'ToMaybeShared<%s>({isolate}, {v8_value}, exception_state)' %
this_cpp_type)
else:
cpp_expression_format = (
'ToNotShared<%s>({isolate}, {v8_value}, exception_state)' %
this_cpp_type)
elif idl_type.is_union_type:
nullable = 'UnionTypeConversionMode::kNullable' if idl_type.includes_nullable_type \
else 'UnionTypeConversionMode::kNotNullable'
        # We need to consider how null propagates through the union in order
        # to generate the correct V8* class name.
this_cpp_type = idl_type.cpp_type_args(
extended_attributes=extended_attributes)
cpp_expression_format = '%s::ToImpl({isolate}, {v8_value}, {variable_name}, %s, exception_state)' % \
(v8_type(this_cpp_type), nullable)
elif idl_type.use_output_parameter_for_result:
cpp_expression_format = 'V8{idl_type}::ToImpl({isolate}, {v8_value}, {variable_name}, exception_state)'
elif idl_type.is_callback_function:
cpp_expression_format = 'V8{idl_type}::Create({v8_value}.As<v8::Function>())'
elif idl_type.v8_conversion_needs_exception_state:
# Effectively, this if branch means everything with v8_conversion_needs_exception_state == True
# except for unions and dictionary interfaces.
base_idl_type = native_value_traits_type_name(idl_type,
extended_attributes)
cpp_expression_format = (
'NativeValueTraits<{idl_type}>::NativeValue({isolate}, {arguments})'
)
else:
cpp_expression_format = (
'V8{idl_type}::ToImplWithTypeCheck({isolate}, {v8_value})')
return cpp_expression_format.format(
arguments=arguments,
idl_type=base_idl_type,
v8_value=v8_value,
variable_name=variable_name,
isolate=isolate)
# FIXME: this function should be refactored, as this takes too many flags.
def v8_value_to_local_cpp_value(idl_type,
extended_attributes,
v8_value,
variable_name,
declare_variable=True,
isolate='info.GetIsolate()',
bailout_return_value=None,
use_exception_state=False,
code_generation_target=None,
for_constructor_callback=False):
"""Returns an expression that converts a V8 value to a C++ value and stores it as a local value."""
this_cpp_type = idl_type.cpp_type_args(
extended_attributes=extended_attributes, raw_type=True)
idl_type = idl_type.preprocessed_type
cpp_value = v8_value_to_cpp_value(
idl_type,
extended_attributes,
v8_value,
variable_name,
isolate,
for_constructor_callback=for_constructor_callback)
# Optional expression that returns a value to be assigned to the local variable.
assign_expression = None
# Optional void expression executed unconditionally.
set_expression = None
# Optional expression that returns true if the conversion fails.
check_expression = None
# Optional expression used as the return value when returning. Only
# meaningful if 'check_expression' is not None.
return_expression = bailout_return_value
if 'FlexibleArrayBufferView' in extended_attributes:
if idl_type.base_type not in ARRAY_BUFFER_VIEW_AND_TYPED_ARRAY_TYPES:
raise ValueError(
'Unrecognized base type for extended attribute "FlexibleArrayBufferView": %s'
% (idl_type.base_type))
set_expression = cpp_value
elif idl_type.is_string_type or idl_type.v8_conversion_needs_exception_state:
# Types for which conversion can fail and that need error handling.
check_expression = 'exception_state.HadException()'
if idl_type.is_union_type:
set_expression = cpp_value
else:
assign_expression = cpp_value
# Note: 'not idl_type.v8_conversion_needs_exception_state' implies
# 'idl_type.is_string_type', but there are types for which both are
# true (ByteString and USVString), so using idl_type.is_string_type
# as the condition here would be wrong.
if not idl_type.v8_conversion_needs_exception_state:
if use_exception_state:
check_expression = '!%s.Prepare(exception_state)' % variable_name
else:
check_expression = '!%s.Prepare()' % variable_name
elif not idl_type.v8_conversion_is_trivial and not idl_type.is_callback_function:
return {
'error_message':
'no V8 -> C++ conversion for IDL type: %s' % idl_type.name
}
else:
assign_expression = cpp_value
# Types that don't need error handling, and simply assign a value to the
# local variable.
if (idl_type.is_explicit_nullable
and code_generation_target == 'attribute_set'):
this_cpp_type = cpp_template_type('base::Optional', this_cpp_type)
expr = '{cpp_type}({expr})'.format(
cpp_type=this_cpp_type, expr=assign_expression)
assign_expression = ("is_null "
"? base::nullopt "
": {expr}".format(expr=expr))
return {
'assign_expression': assign_expression,
'check_expression': check_expression,
'cpp_type': this_cpp_type,
'cpp_name': variable_name,
'declare_variable': declare_variable,
'return_expression': return_expression,
'set_expression': set_expression,
}
IdlTypeBase.v8_value_to_local_cpp_value = v8_value_to_local_cpp_value
def use_output_parameter_for_result(idl_type):
"""True when methods/getters which return the given idl_type should
take the output argument.
"""
return idl_type.is_union_type
IdlTypeBase.use_output_parameter_for_result = property(
use_output_parameter_for_result)
################################################################################
# C++ -> V8
################################################################################
def preprocess_idl_type(idl_type):
if idl_type.is_nullable:
return IdlNullableType(idl_type.inner_type.preprocessed_type)
if idl_type.is_enum:
# Enumerations are internally DOMStrings
return IdlType('DOMString')
if idl_type.base_type == 'any' or idl_type.is_custom_callback_function:
return IdlType('ScriptValue')
if idl_type.is_callback_function:
return idl_type
return idl_type
IdlTypeBase.preprocessed_type = property(preprocess_idl_type)
def preprocess_idl_type_and_value(idl_type, cpp_value, extended_attributes):
"""Returns IDL type and value, with preliminary type conversions applied."""
idl_type = idl_type.preprocessed_type
if idl_type.name == 'Promise':
idl_type = IdlType('ScriptValue')
if idl_type.base_type in ['long long', 'unsigned long long']:
# long long and unsigned long long are not representable in ECMAScript;
# we represent them as doubles.
is_nullable = idl_type.is_nullable
idl_type = IdlType('double')
if is_nullable:
idl_type = IdlNullableType(idl_type)
cpp_value = 'static_cast<double>(%s)' % cpp_value
# HTML5 says that unsigned reflected attributes should be in the range
# [0, 2^31). When a value isn't in this range, a default value (or 0)
# should be returned instead.
extended_attributes = extended_attributes or {}
if ('Reflect' in extended_attributes
and idl_type.base_type in ['unsigned long', 'unsigned short']):
cpp_value = cpp_value.replace('GetUnsignedIntegralAttribute',
'GetIntegralAttribute')
cpp_value = 'std::max(0, static_cast<int>(%s))' % cpp_value
return idl_type, cpp_value
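# Illustrative example of the preprocessing above: a 'long long' value is
# re-typed as 'double' (nullability preserved) and its C++ expression is wrapped
# as 'static_cast<double>(cpp_value)', since 64-bit integers are represented as
# doubles in ECMAScript.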
def v8_conversion_type(idl_type, extended_attributes):
"""Returns V8 conversion type, adding any additional includes.
The V8 conversion type is used to select the C++ -> V8 conversion function
or V8SetReturnValue* function; it can be an idl_type, a cpp_type, or a
separate name for the type of conversion (e.g., 'DOMWrapper').
"""
extended_attributes = extended_attributes or {}
# Nullable dictionaries need to be handled differently than either
# non-nullable dictionaries or unions.
if idl_type.is_dictionary and idl_type.is_nullable:
return 'NullableDictionary'
if idl_type.is_dictionary or idl_type.is_union_type:
return 'DictionaryOrUnion'
# Array or sequence types
native_array_element_type = idl_type.native_array_element_type
if native_array_element_type:
return 'FrozenArray' if idl_type.is_frozen_array else 'sequence'
# Record types.
if idl_type.is_record_type:
return 'Record'
# Simple types
base_idl_type = idl_type.base_type
# Basic types, without additional includes
if base_idl_type in CPP_INTEGER_CONVERSION_RULES:
return CPP_INTEGER_CONVERSION_RULES[base_idl_type]
if idl_type.is_string_type:
if idl_type.is_nullable:
return 'StringOrNull'
return base_idl_type
if idl_type.is_basic_type:
return base_idl_type
if base_idl_type in ['object', 'ScriptValue']:
return 'ScriptValue'
# Data type with potential additional includes
if base_idl_type in V8_SET_RETURN_VALUE: # Special V8SetReturnValue treatment
return base_idl_type
# Pointer type
return 'DOMWrapper'
IdlTypeBase.v8_conversion_type = v8_conversion_type
V8_SET_RETURN_VALUE = {
'boolean':
'V8SetReturnValueBool(info, {cpp_value})',
'DOMString':
'V8SetReturnValueString(info, {cpp_value}, info.GetIsolate())',
'ByteString':
'V8SetReturnValueString(info, {cpp_value}, info.GetIsolate())',
'USVString':
'V8SetReturnValueString(info, {cpp_value}, info.GetIsolate())',
'StringOrNull':
'V8SetReturnValueStringOrNull(info, {cpp_value}, info.GetIsolate())',
'void':
'',
# All the int types below are converted to (u)int32_t in the V8SetReturnValue{Int,Unsigned}() calls.
# The 64-bit int types have already been converted to double when V8_SET_RETURN_VALUE is used, so they are not
# listed here.
'int8_t':
'V8SetReturnValueInt(info, {cpp_value})',
'int16_t':
'V8SetReturnValueInt(info, {cpp_value})',
'int32_t':
'V8SetReturnValueInt(info, {cpp_value})',
'uint8_t':
'V8SetReturnValueUnsigned(info, {cpp_value})',
'uint16_t':
'V8SetReturnValueUnsigned(info, {cpp_value})',
'uint32_t':
'V8SetReturnValueUnsigned(info, {cpp_value})',
# No special V8SetReturnValue* function (set value directly)
'float':
'V8SetReturnValue(info, {cpp_value})',
'unrestricted float':
'V8SetReturnValue(info, {cpp_value})',
'double':
'V8SetReturnValue(info, {cpp_value})',
'unrestricted double':
'V8SetReturnValue(info, {cpp_value})',
# No special V8SetReturnValue* function, but instead convert value to V8
# and then use general V8SetReturnValue.
'sequence':
'V8SetReturnValue(info, {cpp_value})',
'FrozenArray':
'V8SetReturnValue(info, {cpp_value})',
'EventHandler':
'V8SetReturnValue(info, {cpp_value})',
'NodeFilter':
'V8SetReturnValue(info, {cpp_value})',
'OnBeforeUnloadEventHandler':
'V8SetReturnValue(info, {cpp_value})',
'OnErrorEventHandler':
'V8SetReturnValue(info, {cpp_value})',
'ScriptValue':
'V8SetReturnValue(info, {cpp_value})',
# Records.
'Record':
'V8SetReturnValue(info, ToV8({cpp_value}, info.Holder(), info.GetIsolate()))',
# DOMWrapper
'DOMWrapperForMainWorld':
'V8SetReturnValueForMainWorld(info, {cpp_value})',
'DOMWrapperFast':
'V8SetReturnValueFast(info, {cpp_value}, {script_wrappable})',
'DOMWrapperDefault':
'V8SetReturnValue(info, {cpp_value})',
# If [CheckSecurity=ReturnValue] is specified, the returned object must be
# wrapped in its own realm, which can be different from the realm of the
# receiver object.
#
# [CheckSecurity=ReturnValue] is used only for contentDocument and
# getSVGDocument attributes of HTML{IFrame,Frame,Object,Embed}Element,
# and Window.frameElement. Except for Window.frameElement, all interfaces
# support contentWindow(), so we create a new wrapper in the realm of
# contentWindow(). Note that DOMWindow* has its own realm and there is no
# need to pass |creationContext| in for ToV8(DOMWindow*).
# Window.frameElement is implemented with [Custom].
'DOMWrapperAcrossContext':
('V8SetReturnValue(info, ToV8({cpp_value}, ' +
'ToV8(impl->contentWindow(), v8::Local<v8::Object>(), ' +
'info.GetIsolate()).As<v8::Object>(), info.GetIsolate()))'),
# Note that static attributes and operations do not check whether |this| is
# an instance of the interface nor |this|'s creation context is the same as
# the current context. So we must always use the current context as the
# creation context of the DOM wrapper for the return value.
'DOMWrapperStatic':
'V8SetReturnValue(info, {cpp_value}, info.GetIsolate()->GetCurrentContext()->Global())',
# Nullable dictionaries
'NullableDictionary':
'V8SetReturnValue(info, result)',
'NullableDictionaryStatic':
'V8SetReturnValue(info, result, info.GetIsolate()->GetCurrentContext()->Global())',
# Union types or dictionaries
'DictionaryOrUnion':
'V8SetReturnValue(info, result)',
'DictionaryOrUnionStatic':
'V8SetReturnValue(info, result, info.GetIsolate()->GetCurrentContext()->Global())',
}
def v8_set_return_value(idl_type,
cpp_value,
extended_attributes=None,
script_wrappable='',
for_main_world=False,
is_static=False):
"""Returns a statement that converts a C++ value to a V8 value and sets it as a return value.
"""
def dom_wrapper_conversion_type():
if ('CheckSecurity' in extended_attributes
and extended_attribute_value_contains(
extended_attributes['CheckSecurity'], 'ReturnValue')):
return 'DOMWrapperAcrossContext'
if is_static:
return 'DOMWrapperStatic'
if not script_wrappable:
return 'DOMWrapperDefault'
if for_main_world:
return 'DOMWrapperForMainWorld'
return 'DOMWrapperFast'
idl_type, cpp_value = preprocess_idl_type_and_value(
idl_type, cpp_value, extended_attributes)
this_v8_conversion_type = idl_type.v8_conversion_type(extended_attributes)
# SetReturn-specific overrides
if this_v8_conversion_type in ('EventHandler', 'NodeFilter',
'OnBeforeUnloadEventHandler',
'OnErrorEventHandler', 'ScriptValue',
'sequence', 'FrozenArray'):
# Convert value to V8 and then use general V8SetReturnValue
cpp_value = idl_type.cpp_value_to_v8_value(
cpp_value, extended_attributes=extended_attributes)
if this_v8_conversion_type == 'DOMWrapper':
this_v8_conversion_type = dom_wrapper_conversion_type()
if is_static and this_v8_conversion_type in ('NullableDictionary',
'DictionaryOrUnion'):
this_v8_conversion_type += 'Static'
format_string = V8_SET_RETURN_VALUE[this_v8_conversion_type]
statement = format_string.format(
cpp_value=cpp_value, script_wrappable=script_wrappable)
return statement
IdlTypeBase.v8_set_return_value = v8_set_return_value
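# Illustrative output of v8_set_return_value (the 'impl->flag()' expression is
# hypothetical): for a 'boolean' return type the generated statement is
# 'V8SetReturnValueBool(info, impl->flag())'; wrapper types instead pick one of
# the DOMWrapper* variants via dom_wrapper_conversion_type().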
CPP_VALUE_TO_V8_VALUE = {
# Built-in types
'DOMString':
'V8String({isolate}, {cpp_value})',
'ByteString':
'V8String({isolate}, {cpp_value})',
'USVString':
'V8String({isolate}, {cpp_value})',
'boolean':
'v8::Boolean::New({isolate}, {cpp_value})',
# All the int types below are converted to (u)int32_t in the v8::Integer::New*() calls.
# The 64-bit int types have already been converted to double when CPP_VALUE_TO_V8_VALUE is used, so they are not
# listed here.
'int8_t':
'v8::Integer::New({isolate}, {cpp_value})',
'int16_t':
'v8::Integer::New({isolate}, {cpp_value})',
'int32_t':
'v8::Integer::New({isolate}, {cpp_value})',
'uint8_t':
'v8::Integer::NewFromUnsigned({isolate}, {cpp_value})',
'uint16_t':
'v8::Integer::NewFromUnsigned({isolate}, {cpp_value})',
'uint32_t':
'v8::Integer::NewFromUnsigned({isolate}, {cpp_value})',
'float':
'v8::Number::New({isolate}, {cpp_value})',
'unrestricted float':
'v8::Number::New({isolate}, {cpp_value})',
'double':
'v8::Number::New({isolate}, {cpp_value})',
'unrestricted double':
'v8::Number::New({isolate}, {cpp_value})',
'StringOrNull':
('({cpp_value}.IsNull() ? ' + 'v8::Null({isolate}).As<v8::Value>() : ' +
'V8String({isolate}, {cpp_value}).As<v8::Value>())'),
# Special cases
'EventHandler':
'JSEventHandler::AsV8Value({isolate}, impl, {cpp_value})',
'NodeFilter':
'ToV8({cpp_value}, {creation_context}, {isolate})',
'OnBeforeUnloadEventHandler':
'JSEventHandler::AsV8Value({isolate}, impl, {cpp_value})',
'OnErrorEventHandler':
'JSEventHandler::AsV8Value({isolate}, impl, {cpp_value})',
'Record':
'ToV8({cpp_value}, {creation_context}, {isolate})',
'ScriptValue':
'{cpp_value}.V8Value()',
# General
'sequence':
'ToV8({cpp_value}, {creation_context}, {isolate})',
'FrozenArray':
'FreezeV8Object(ToV8({cpp_value}, {creation_context}, {isolate}), {isolate})',
'DOMWrapper':
'ToV8({cpp_value}, {creation_context}, {isolate})',
# Passing nullable dictionaries isn't a pattern currently used
# anywhere in the web platform, and more work would be needed in
# the code generator to distinguish between passing null, and
# passing an object which happened to not contain any of the
# dictionary's defined attributes. For now, don't define
# NullableDictionary here, which will cause an exception to be
# thrown during code generation if an argument to a method is a
# nullable dictionary type.
#
# Union types or dictionaries
'DictionaryOrUnion':
'ToV8({cpp_value}, {creation_context}, {isolate})',
}
def cpp_value_to_v8_value(idl_type,
cpp_value,
isolate='info.GetIsolate()',
creation_context='info.Holder()',
extended_attributes=None):
"""Returns an expression that converts a C++ value to a V8 value."""
# the isolate parameter is needed for callback interfaces
idl_type, cpp_value = preprocess_idl_type_and_value(
idl_type, cpp_value, extended_attributes)
this_v8_conversion_type = idl_type.v8_conversion_type(extended_attributes)
format_string = CPP_VALUE_TO_V8_VALUE[this_v8_conversion_type]
statement = format_string.format(
cpp_value=cpp_value,
isolate=isolate,
creation_context=creation_context)
return statement
IdlTypeBase.cpp_value_to_v8_value = cpp_value_to_v8_value
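# Illustrative output of cpp_value_to_v8_value (the 'value' name is
# hypothetical): a DOMString maps to 'V8String(info.GetIsolate(), value)' and a
# 'boolean' maps to 'v8::Boolean::New(info.GetIsolate(), value)', using the
# default isolate argument.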
def literal_cpp_value(idl_type, idl_literal):
"""Converts an expression that is a valid C++ literal for this type."""
# FIXME: add validation that idl_type and idl_literal are compatible
if idl_type.base_type in ('any', 'object') and idl_literal.is_null:
return 'ScriptValue::CreateNull(script_state->GetIsolate())'
literal_value = str(idl_literal)
if idl_type.base_type in ('octet', 'unsigned short', 'unsigned long'):
return literal_value + 'u'
if idl_type.is_dictionary and literal_value == '{}':
return 'MakeGarbageCollected<{}>()'.format(idl_type.base_type)
return literal_value
def union_literal_cpp_value(idl_type, idl_literal):
if idl_literal.is_null:
return idl_type.name + '()'
elif idl_literal.idl_type == 'DOMString':
member_type = idl_type.string_member_type
elif idl_literal.idl_type in ('integer', 'float'):
member_type = idl_type.numeric_member_type
elif idl_literal.idl_type == 'boolean':
member_type = idl_type.boolean_member_type
elif idl_literal.idl_type == 'sequence':
member_type = idl_type.sequence_member_type
elif idl_literal.idl_type == 'dictionary':
member_type = idl_type.dictionary_member_type
else:
raise ValueError('Unsupported literal type: ' + idl_literal.idl_type)
return '%s::From%s(%s)' % (idl_type.cpp_type_args(), member_type.name,
member_type.literal_cpp_value(idl_literal))
def array_or_sequence_literal_cpp_value(idl_type, idl_literal):
# Only support empty sequences.
if idl_literal.value == '[]':
return cpp_type(idl_type) + '()'
raise ValueError('Unsupported literal type: ' + idl_literal.idl_type)
IdlType.literal_cpp_value = literal_cpp_value
IdlUnionType.literal_cpp_value = union_literal_cpp_value
IdlArrayOrSequenceType.literal_cpp_value = array_or_sequence_literal_cpp_value
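# Illustrative literal conversions (the 'Foo' dictionary name is hypothetical):
# an 'unsigned long' default of 5 becomes '5u', a null default for 'any' becomes
# 'ScriptValue::CreateNull(script_state->GetIsolate())', and an empty dictionary
# default '{}' becomes 'MakeGarbageCollected<Foo>()'.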
_IDL_TYPE_TO_NATIVE_VALUE_TRAITS_TAG_MAP = {
'DOMString': 'IDLString',
'USVString': 'IDLUSVString',
'DOMStringOrNull': 'IDLStringOrNull',
'USVStringOrNull': 'IDLUSVStringOrNull',
'any': 'ScriptValue',
'boolean': 'IDLBoolean',
'long': 'IDLLong',
'sequence<DOMString>': 'IDLSequence<IDLString>',
'unsigned short': 'IDLUnsignedShort',
'void': None,
}
def idl_type_to_native_value_traits_tag(idl_type):
idl_type_str = str(idl_type)
if idl_type.is_nullable:
idl_type_str += "OrNull"
if idl_type_str in _IDL_TYPE_TO_NATIVE_VALUE_TRAITS_TAG_MAP:
return _IDL_TYPE_TO_NATIVE_VALUE_TRAITS_TAG_MAP[idl_type_str]
else:
raise Exception("Type `%s' is not supported." % idl_type_str)
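# Illustrative tag lookups: IdlType('boolean') yields 'IDLBoolean', a nullable
# DOMString yields 'IDLStringOrNull', and any type missing from the map above
# raises an exception.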
################################################################################
# Utility properties for nullable types
################################################################################
def cpp_type_has_null_value(idl_type):
# - String types (String/AtomicString) represent null as a null string,
# i.e. one for which String::IsNull() returns true.
# - Enum types, as they are implemented as Strings.
# - Interface types and Dictionary types represent null as a null pointer.
    # - Union types, as their container classes can represent a null value.
# - 'Object' and 'any' type. We use ScriptValue for object type.
return (idl_type.is_string_type or idl_type.is_enum
or idl_type.is_interface_type or idl_type.is_callback_interface
or idl_type.is_callback_function
or idl_type.is_custom_callback_function or idl_type.is_dictionary
or idl_type.is_union_type or idl_type.base_type == 'object'
or idl_type.base_type == 'any')
IdlTypeBase.cpp_type_has_null_value = property(cpp_type_has_null_value)
def is_implicit_nullable(idl_type):
# Nullable type where the corresponding C++ type supports a null value.
return idl_type.is_nullable and idl_type.cpp_type_has_null_value
def is_explicit_nullable(idl_type):
    # Nullable type that isn't implicitly nullable (see above). For such types,
    # we use base::Optional<T> or similar explicit ways to represent a null value.
return idl_type.is_nullable and not idl_type.is_implicit_nullable
IdlTypeBase.is_implicit_nullable = property(is_implicit_nullable)
IdlUnionType.is_implicit_nullable = False
IdlTypeBase.is_explicit_nullable = property(is_explicit_nullable)
def includes_nullable_type_union(idl_type):
# http://heycam.github.io/webidl/#dfn-includes-a-nullable-type
return idl_type.number_of_nullable_member_types == 1
IdlTypeBase.includes_nullable_type = False
IdlNullableType.includes_nullable_type = True
IdlUnionType.includes_nullable_type = property(includes_nullable_type_union)
|
third_party/blink/renderer/build/scripts/aria_properties.py | zipated/src | 2,151 | 12678136 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import os.path
import sys
PYJSON5_DIR = os.path.join(os.path.dirname(__file__),
'..', '..', '..', '..', 'pyjson5', 'src')
sys.path.insert(0, PYJSON5_DIR)
import json5 # pylint: disable=import-error
class ARIAReader(object):
def __init__(self, json5_file_path):
self._input_files = [json5_file_path]
with open(os.path.abspath(json5_file_path)) as json5_file:
self._data = json5.loads(json5_file.read())
def attributes_list(self):
return {'data': [item[u'name'] for item in self._data['attributes']]}
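# Illustrative usage sketch (the json5 path and attribute names are
# hypothetical):
#   ARIAReader('aria_properties.json5').attributes_list()
#   -> {'data': ['aria-label', ...]}  # one entry per item under 'attributes'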
|
transformations/replace_spelling/transformation.py | ZhexiongLiu/NL-Augmenter | 583 | 12678149 |
import itertools
import random
import json
import os
from interfaces.SentenceOperation import SentenceOperation
from tasks.TaskTypes import TaskType
"""
Base Class for implementing the different input transformations a generation should be robust against.
"""
def generate_sentence(sentence, spell_errors, prob_of_typo, seed):
output = []
for word in sentence.split():
random.seed(seed)
if word.lower() in list(spell_errors.keys()) and random.choice(range(0, 100)) <= prob_of_typo:
output.append(random.choice(spell_errors[word.lower()]))
else:
output.append(word)
output = " ".join(output)
return output
def generate_sentences(text, prob=0.1, seed=0, max_outputs=1):
spell_errors = os.path.join('transformations', 'replace_spelling', 'spell_errors.json')
with open(spell_errors, 'r') as fp:
spell_errors = json.load(fp)
prob_of_typo = int(prob * 100)
perturbed_texts = []
    for idx in range(max_outputs):
new_text = generate_sentence(text, spell_errors, prob_of_typo, seed+idx)
perturbed_texts.append(new_text)
return perturbed_texts
class SpellingTransformation(SentenceOperation):
tasks = [
TaskType.TEXT_CLASSIFICATION,
TaskType.TEXT_TO_TEXT_GENERATION,
TaskType.TEXT_TAGGING,
]
languages = ["en"]
def __init__(self, seed=0, max_outputs=3):
super().__init__(seed, max_outputs=max_outputs)
def generate(self, sentence: str):
perturbed_texts = generate_sentences(text=sentence,
prob=0.20,
seed=self.seed,
max_outputs=self.max_outputs,
)
return perturbed_texts
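# Illustrative usage sketch (the input sentence is made up):
#   t = SpellingTransformation(seed=0, max_outputs=3)
#   t.generate("Andrew played in the garden.")
#   -> three perturbed variants in which words found in spell_errors.json are
#      replaced by common misspellings with probability 0.20.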
|
tensorflow_quantum/core/ops/tfq_ps_util_ops_test.py | sarvex/tensorflow-quantum | 1,501 | 12678182 |
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for ParameterShift specific C++ ops."""
import numpy as np
import tensorflow as tf
import sympy
import cirq
from tensorflow_quantum.core.ops import tfq_ps_util_ops
from tensorflow_quantum.python import util
def _complex_test_circuit():
t = sympy.Symbol('t')
r = sympy.Symbol('r')
qubits = cirq.GridQubit.rect(1, 6)
circuit_batch = [
cirq.Circuit(
cirq.Moment([cirq.H(q) for q in qubits]),
cirq.Moment([
cirq.X(qubits[4]),
cirq.PhasedXPowGate(phase_exponent=np.random.random() * t).on(
qubits[5]),
cirq.ISwapPowGate(exponent=np.random.random() * t).on(
qubits[0], qubits[1]),
cirq.FSimGate(theta=np.random.random() * t,
phi=np.random.random() * r).on(
qubits[2], qubits[3])
]), cirq.Moment([cirq.H(q) for q in qubits])),
cirq.Circuit(
cirq.FSimGate(theta=np.random.random() * t,
phi=np.random.random() * r).on(*qubits[:2]),
cirq.FSimGate(theta=np.random.random() * r,
phi=np.random.random() * t).on(qubits[1], qubits[0])),
cirq.Circuit(
cirq.Moment([
cirq.ISwapPowGate(exponent=np.random.random() *
t).on(*qubits[:2]),
cirq.PhasedXPowGate(phase_exponent=np.random.random() * r).on(
qubits[2]),
cirq.ISwapPowGate(exponent=np.random.random() *
r).on(*qubits[3:5])
]))
]
return circuit_batch
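# Note for readers: the helper above builds three circuits parameterized over
# the symbols 't' and 'r', mixing ISwapPowGate, PhasedXPowGate and FSimGate so
# the Moment-preservation tests below can check both unitary equivalence and
# Moment structure after decomposition.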
class PSDecomposeTest(tf.test.TestCase):
"""Tests on tfq_ps_decompose"""
def test_iswap_gate_test(self):
"""Test 1 ISwapPowGate decomposition."""
t = sympy.Symbol('t')
qubits = cirq.GridQubit.rect(1, 2)
circuit = cirq.Circuit(
cirq.ISwapPowGate(exponent=np.random.random() * t).on(*qubits))
inputs = util.convert_to_tensor([circuit])
outputs = tfq_ps_util_ops.tfq_ps_decompose(inputs)
decomposed_programs = util.from_tensor(outputs)
rand_resolver = {'t': np.random.random()}
self.assertAllClose(cirq.unitary(
cirq.resolve_parameters(circuit, rand_resolver)),
cirq.unitary(
cirq.resolve_parameters(decomposed_programs[0],
rand_resolver)),
atol=1e-5)
def test_phased_x_pow_gate_test(self):
"""Test 1 PhasedXPowGate decomposition."""
t = sympy.Symbol('t')
r = sympy.Symbol('r')
q = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(
cirq.PhasedXPowGate(phase_exponent=np.random.random() * r,
exponent=np.random.random() * t).on(q))
inputs = util.convert_to_tensor([circuit])
outputs = tfq_ps_util_ops.tfq_ps_decompose(inputs)
decomposed_programs = util.from_tensor(outputs)
rand_resolver = {'t': np.random.random(), 'r': np.random.random()}
self.assertAllClose(cirq.unitary(
cirq.resolve_parameters(circuit, rand_resolver)),
cirq.unitary(
cirq.resolve_parameters(decomposed_programs[0],
rand_resolver)),
atol=1e-5)
def test_fsim_gate_test(self):
"""Test 1 FSimPowGate decomposition."""
t = sympy.Symbol('t')
r = sympy.Symbol('r')
qubits = cirq.GridQubit.rect(1, 2)
circuit = cirq.Circuit(
cirq.FSimGate(theta=np.random.random() * r,
phi=np.random.random() * t).on(*qubits))
inputs = util.convert_to_tensor([circuit])
outputs = tfq_ps_util_ops.tfq_ps_decompose(inputs)
decomposed_programs = util.from_tensor(outputs)
rand_resolver = {'t': np.random.random(), 'r': np.random.random()}
self.assertAllClose(cirq.unitary(
cirq.resolve_parameters(circuit, rand_resolver)),
cirq.unitary(
cirq.resolve_parameters(decomposed_programs[0],
rand_resolver)),
atol=1e-5)
def test_decompose_with_complex_circuit(self):
"""Test decompose with complex circuit."""
names = ['CLAE', 'HRYV', 'IRKB', 'LKRV', 'PJOU', 'CJKX', 'NASW']
# Test circuit has a Moment with 1) FSimGate & PhasedXPowGate,
# 2) PhasedXPowGate & ISwapPowGate and 3) FSimGate & ISwapPowGate.
# Be careful, they are not decomposed if not parameterized.
circuit_batch = [
cirq.Circuit([
cirq.Moment([
cirq.FSimGate(
theta=0.10338130973488413 * sympy.Symbol('CLAE'),
phi=0.10338130973488413 * sympy.Symbol('IRKB')).on(
cirq.GridQubit(0, 2), cirq.GridQubit(0, 3)),
cirq.PhasedXPowGate(phase_exponent=1.0,
exponent=0.86426029696045281 *
sympy.Symbol('HRYV')).on(
cirq.GridQubit(0, 1)),
]),
cirq.Moment([
cirq.Y.on(cirq.GridQubit(0, 3)),
cirq.Z.on(cirq.GridQubit(0, 0)),
cirq.FSimGate(theta=1, phi=1).on(cirq.GridQubit(0, 1),
cirq.GridQubit(0, 2)),
]),
cirq.Moment([
(cirq.CNOT**(0.92874230274398684 *
sympy.Symbol('IRKB'))).on(
cirq.GridQubit(0, 1), cirq.GridQubit(0,
2)),
]),
cirq.Moment([
cirq.PhasedXPowGate(phase_exponent=sympy.Symbol('PJOU'),
exponent=0.2081415255258906 *
sympy.Symbol('LKRV')).on(
cirq.GridQubit(0, 2)),
(cirq.ISWAP**(0.32860954996781722 *
sympy.Symbol('PJOU'))).on(
cirq.GridQubit(0, 1),
cirq.GridQubit(0, 3)),
]),
cirq.Moment([
cirq.PhasedXPowGate(phase_exponent=sympy.Symbol('CJKX')).on(
cirq.GridQubit(0, 1)),
cirq.ZZ.on(cirq.GridQubit(0, 0), cirq.GridQubit(0, 3)),
(cirq.X**(0.6826594585474709 * sympy.Symbol('HRYV'))).on(
cirq.GridQubit(0, 2)),
]),
cirq.Moment([
(cirq.ZZ**(0.18781276022427218 * sympy.Symbol('PJOU'))).on(
cirq.GridQubit(0, 0), cirq.GridQubit(0, 3)),
]),
cirq.Moment([
cirq.Y.on(cirq.GridQubit(0, 0)),
]),
cirq.Moment([
cirq.FSimGate(
theta=0.13793763138552417 * sympy.Symbol('CJKX'),
phi=0.13793763138552417 * sympy.Symbol('PJOU')).on(
cirq.GridQubit(0, 2), cirq.GridQubit(0, 3)),
(cirq.ISWAP**(0.028165738453673095 *
sympy.Symbol('NASW'))).on(
cirq.GridQubit(0, 0),
cirq.GridQubit(0, 1)),
]),
cirq.Moment([
cirq.FSimGate(
theta=0.74356520426349459 * sympy.Symbol('CJKX'),
phi=0.74356520426349459 * sympy.Symbol('NASW')).on(
cirq.GridQubit(0, 3), cirq.GridQubit(0, 0)),
]),
cirq.Moment([
cirq.CNOT.on(cirq.GridQubit(0, 0), cirq.GridQubit(0, 2)),
cirq.SWAP.on(cirq.GridQubit(0, 3), cirq.GridQubit(0, 1)),
]),
cirq.Moment([
cirq.H.on(cirq.GridQubit(0, 3)),
cirq.H.on(cirq.GridQubit(0, 2)),
cirq.CNOT.on(cirq.GridQubit(0, 1), cirq.GridQubit(0, 0)),
]),
cirq.Moment([
cirq.CNOT.on(cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)),
cirq.YY.on(cirq.GridQubit(0, 2), cirq.GridQubit(0, 3)),
]),
cirq.Moment([
cirq.CZ.on(cirq.GridQubit(0, 1), cirq.GridQubit(0, 0)),
cirq.CNOT.on(cirq.GridQubit(0, 2), cirq.GridQubit(0, 3)),
]),
cirq.Moment([
cirq.FSimGate(theta=1, phi=1).on(cirq.GridQubit(0, 0),
cirq.GridQubit(0, 2)),
cirq.CNOT.on(cirq.GridQubit(0, 3), cirq.GridQubit(0, 1)),
]),
cirq.Moment([
cirq.FSimGate(theta=1, phi=1).on(cirq.GridQubit(0, 0),
cirq.GridQubit(0, 3)),
cirq.SWAP.on(cirq.GridQubit(0, 2), cirq.GridQubit(0, 1)),
]),
cirq.Moment([
cirq.Y.on(cirq.GridQubit(0, 0)),
cirq.PhasedXPowGate(phase_exponent=1.0).on(
cirq.GridQubit(0, 2)),
cirq.FSimGate(theta=1, phi=1).on(cirq.GridQubit(0, 1),
cirq.GridQubit(0, 3)),
]),
])
]
# Decompose programs.
inputs = util.convert_to_tensor(circuit_batch)
outputs = tfq_ps_util_ops.tfq_ps_decompose(inputs)
decomposed_programs = util.from_tensor(outputs)
self.assertEqual(len(decomposed_programs), len(circuit_batch))
        # The original programs have parameterized ISP, PXP and FSIM gates, but
        # this result has no such gates at all. All parameterized gates have at
        # most two eigenvalues. ISwap and PhasedX(1.0) still appear because they
        # are not parameterized, which doesn't affect ParameterShift
        # differentiation at all.
for program in decomposed_programs:
for moment in program:
for gate_op in moment:
# Consider parameterized gates only
if cirq.is_parameterized(gate_op.gate):
# Check I. The gate should have _eigen_components.
self.assertTrue(
hasattr(gate_op.gate, '_eigen_components'))
# Check II. The gate should have two eigen values.
self.assertEqual(len(gate_op.gate._eigen_components()),
2, gate_op.gate)
# Now all programs don't have ISWAP & PhasedXPowGate because ISWAP has
# 3 eigenvalues and PhasedXPowGate doesn't have _eigen_components.
# Check if two programs are identical.
rand_resolver = {name: np.random.random() for name in names}
self.assertAllClose(cirq.unitary(
cirq.resolve_parameters(circuit_batch[0], rand_resolver)),
cirq.unitary(
cirq.resolve_parameters(decomposed_programs[0],
rand_resolver)),
atol=1e-5)
def test_moment_preservation(self):
"""Test Moment-structure preservation."""
t = sympy.Symbol('t')
r = sympy.Symbol('r')
qubits = cirq.LineQubit.range(6)
circuit_batch = [
cirq.Circuit(
cirq.Moment([cirq.H(q) for q in qubits]),
cirq.Moment([
cirq.X(qubits[4]),
cirq.PhasedXPowGate(phase_exponent=np.random.random() *
t).on(qubits[5]),
cirq.ISwapPowGate(exponent=np.random.random() * t).on(
qubits[0], qubits[1]),
cirq.FSimGate(theta=np.random.random() * t,
phi=np.random.random() * r).on(
qubits[2], qubits[3])
]), cirq.Moment([cirq.H(q) for q in qubits]))
]
inputs = util.convert_to_tensor(circuit_batch)
outputs = tfq_ps_util_ops.tfq_ps_decompose(inputs)
decomposed_programs = util.from_tensor(outputs)
# Now all programs don't have ISWAP & PhasedXPowGate because ISWAP has
# 3 eigenvalues and PhasedXPowGate doesn't have _eigen_components.
# Check if two programs are identical.
rand_resolver = {'t': np.random.random(), 'r': np.random.random()}
self.assertAllClose(cirq.unitary(
cirq.resolve_parameters(circuit_batch[0], rand_resolver)),
cirq.unitary(
cirq.resolve_parameters(decomposed_programs[0],
rand_resolver)),
atol=1e-5)
# Check if the Moments are conserved.
max_decomposed_length = 3
n_non_decomposed_moments = 2
self.assertEqual(len(decomposed_programs[0]),
n_non_decomposed_moments + max_decomposed_length)
# Total length of Moments = 5
# The non-decomposed moments should be the same.
self.assertEqual(decomposed_programs[0][0], circuit_batch[0][0])
self.assertEqual(decomposed_programs[0][-1], circuit_batch[0][-1])
        # Check parallelized decompose gates in Moment[1]~[3].
# The target ops are replaced by the first decomposition gates. It means
# the first Moment has exactly the same number of gate ops.
self.assertEqual(len(decomposed_programs[0][1]),
len(circuit_batch[0][1]))
# From the second Moments, the Moments only have decomposition gates.
# In this example, two ISwapPowGate & one PhasedXPowGate are located.
        # Since PhasedXPowGate, ISwapPowGate and FSimGate have 3, 2 and 3 result
        # gates respectively, Moment[2] has 3 gate ops and Moment[3] has 2 gate ops.
self.assertEqual(len(decomposed_programs[0][2]), 3)
self.assertEqual(len(decomposed_programs[0][3]), 2)
def test_more_complex_moment_preservation(self):
"""Test Moment-structure preservation."""
circuit_batch = _complex_test_circuit()
inputs = util.convert_to_tensor(circuit_batch)
outputs = tfq_ps_util_ops.tfq_ps_decompose(inputs)
decomposed_programs = util.from_tensor(outputs)
# Now all programs don't have ISWAP & PhasedXPowGate because ISWAP has
# 3 eigenvalues and PhasedXPowGate doesn't have _eigen_components.
# Check if two programs are identical.
rand_resolver = {'t': np.random.random(), 'r': np.random.random()}
for i in range(3):
self.assertAllClose(cirq.unitary(
cirq.resolve_parameters(circuit_batch[i], rand_resolver)),
cirq.unitary(
cirq.resolve_parameters(
decomposed_programs[i], rand_resolver)),
atol=1e-5)
# Check if the Moments are conserved.
# Circuit 1.
max_decomposed_length = 3
n_non_decomposed_moments = 2
self.assertEqual(len(decomposed_programs[0]),
n_non_decomposed_moments + max_decomposed_length)
# Total length of Moments = 5
# The non-decomposed moments should be the same.
self.assertEqual(decomposed_programs[0][0], circuit_batch[0][0])
self.assertEqual(decomposed_programs[0][-1], circuit_batch[0][-1])
        # Check parallelized decompose gates in Moment[1]~[3].
# The target ops are replaced by the first decomposition gates. It means
# the first Moment has exactly the same number of gate ops.
self.assertEqual(len(decomposed_programs[0][1]),
len(circuit_batch[0][1]))
# From the second Moments, the Moments only have decomposition gates.
# In this example, two ISwapPowGate & one PhasedXPowGate are located.
        # Since PhasedXPowGate, ISwapPowGate and FSimGate have 3, 2 and 3 result
        # gates respectively, Moment[2] has 3 gate ops and Moment[3] has 2 gate ops.
self.assertEqual(len(decomposed_programs[0][2]), 3)
self.assertEqual(len(decomposed_programs[0][3]), 2)
# Circuit 2. two FSimGates.
self.assertEqual(len(decomposed_programs[1]), 2 * max_decomposed_length)
# Circuit 3. one PXP between two ISwapPowGates.
self.assertEqual(len(decomposed_programs[2]), max_decomposed_length)
class PSSymbolReplaceTest(tf.test.TestCase):
"""Tests tfq_ps_symbol_replace."""
def test_simple_case(self):
"""Test trivial case."""
bit = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('alpha'),
)
inputs = util.convert_to_tensor([circuit])
symbols = tf.convert_to_tensor(['alpha'])
new = tf.convert_to_tensor(['new'])
res = tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, new)
output = util.from_tensor(res)
correct_00 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('new'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('alpha'),
)
correct_01 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('new'),
cirq.Z(bit)**sympy.Symbol('alpha'),
)
correct_02 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('new'),
)
self.assertEqual(correct_00, output[0][0][0])
self.assertEqual(correct_01, output[0][0][1])
self.assertEqual(correct_02, output[0][0][2])
def test_error(self):
"""Ensure that errors happen with bad inputs."""
bit = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(cirq.X(bit)**(sympy.Symbol('alpha') * 2))
inputs = util.convert_to_tensor([[circuit]])
symbols = tf.convert_to_tensor(['test'])
replacements = tf.convert_to_tensor(['nothing'])
with self.assertRaisesRegex(Exception,
expected_regex='rank 1. Got rank 2.'):
tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, replacements)
inputs = tf.convert_to_tensor(['junk'])
with self.assertRaisesRegex(Exception,
expected_regex='Unparseable proto:'):
tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, replacements)
inputs = util.convert_to_tensor([circuit])
symbols = tf.convert_to_tensor([['test']])
replacements = tf.convert_to_tensor(['nothing'])
with self.assertRaisesRegex(Exception,
expected_regex='rank 1. Got rank 2.'):
tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, replacements)
symbols = tf.convert_to_tensor(['test'])
replacements = tf.convert_to_tensor([['nothing']])
with self.assertRaisesRegex(Exception,
expected_regex='rank 1. Got rank 2.'):
tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, replacements)
symbols = tf.convert_to_tensor(['test'])
replacements = tf.convert_to_tensor(['nothing', 'too long'])
with self.assertRaisesRegex(
Exception,
expected_regex=
'symbols.shape is not equal to replacement_symbols.shape'):
tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, replacements)
def test_weight_coefficient(self):
"""Test that scalar multiples of trivial case work."""
bit = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2.4),
cirq.Y(bit)**(sympy.Symbol('alpha') * 3.4),
cirq.Z(bit)**(sympy.Symbol('alpha') * 4.4),
)
inputs = util.convert_to_tensor([circuit])
symbols = tf.convert_to_tensor(['alpha'])
new = tf.convert_to_tensor(['new'])
res = tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, new)
output = util.from_tensor(res)
correct_00 = cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('new') * 2.4),
cirq.Y(bit)**(sympy.Symbol('alpha') * 3.4),
cirq.Z(bit)**(sympy.Symbol('alpha') * 4.4),
)
correct_01 = cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2.4),
cirq.Y(bit)**(sympy.Symbol('new') * 3.4),
cirq.Z(bit)**(sympy.Symbol('alpha') * 4.4),
)
correct_02 = cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2.4),
cirq.Y(bit)**(sympy.Symbol('alpha') * 3.4),
cirq.Z(bit)**(sympy.Symbol('new') * 4.4),
)
for i, c in enumerate([correct_00, correct_01, correct_02]):
u1 = cirq.unitary(
cirq.resolve_parameters(c,
param_resolver={
'alpha': 1.23,
'new': 4.56
}))
u2 = cirq.unitary(
cirq.resolve_parameters(output[0][0][i],
param_resolver={
'alpha': 1.23,
'new': 4.56
}))
self.assertTrue(cirq.approx_eq(u1, u2, atol=1e-5))
def test_simple_pad(self):
"""Test simple padding."""
bit = cirq.LineQubit(1)
circuit = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('alpha'),
)
circuit2 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('beta'),
cirq.Y(bit)**sympy.Symbol('beta'),
cirq.Z(bit)**sympy.Symbol('beta'),
)
circuit3 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('alpha'),
)
inputs = util.convert_to_tensor([circuit, circuit2, circuit3])
symbols = tf.convert_to_tensor(['alpha', 'beta', 'gamma'])
new = tf.convert_to_tensor(['new', 'old', 'nothing'])
res = tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, new)
output = util.from_tensor(res)
correct_00 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('new'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('alpha'),
)
correct_01 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('new'),
cirq.Z(bit)**sympy.Symbol('alpha'),
)
correct_02 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('new'),
)
self.assertEqual(correct_00, output[0][0][0])
self.assertEqual(correct_01, output[0][0][1])
self.assertEqual(correct_02, output[0][0][2])
self.assertEqual(correct_00, output[2][0][0])
self.assertEqual(correct_01, output[2][0][1])
self.assertEqual(correct_02, output[2][0][2])
correct_10 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('old'),
cirq.Y(bit)**sympy.Symbol('beta'),
cirq.Z(bit)**sympy.Symbol('beta'),
)
correct_11 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('beta'),
cirq.Y(bit)**sympy.Symbol('old'),
cirq.Z(bit)**sympy.Symbol('beta'),
)
correct_12 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('beta'),
cirq.Y(bit)**sympy.Symbol('beta'),
cirq.Z(bit)**sympy.Symbol('old'),
)
self.assertEqual(correct_10, output[1][1][0])
self.assertEqual(correct_11, output[1][1][1])
self.assertEqual(correct_12, output[1][1][2])
correct_20 = cirq.Circuit()
correct_21 = cirq.Circuit()
correct_22 = cirq.Circuit()
self.assertEqual(correct_20, output[2][2][0])
self.assertEqual(correct_21, output[2][2][1])
self.assertEqual(correct_22, output[2][2][2])
correct = cirq.Circuit()
for i in range(3):
for j in range(3):
for k in range(3):
if i != j and (not (i == 2 and j == 0)):
self.assertEqual(correct, output[i][j][k])
def test_complex_pad(self):
"""Test trickier padding."""
bit = cirq.GridQubit(0, 0)
bit2 = cirq.GridQubit(0, 1)
circuit = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('alpha'),
cirq.XX(bit, bit2)**sympy.Symbol('alpha'))
circuit2 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('beta'),
cirq.Y(bit)**sympy.Symbol('beta'),
cirq.Z(bit)**sympy.Symbol('beta'),
cirq.XX(bit, bit2)**sympy.Symbol('alpha'))
circuit3 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('alpha'),
cirq.XX(bit, bit2)**sympy.Symbol('alpha'))
inputs = util.convert_to_tensor([circuit, circuit2, circuit3])
symbols = tf.convert_to_tensor(['alpha', 'beta', 'gamma'])
new = tf.convert_to_tensor(['new', 'old', 'nothing'])
res = tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, new)
output = util.from_tensor(res)
correct_000 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('new'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('alpha'),
cirq.XX(bit, bit2)**sympy.Symbol('alpha'))
correct_001 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('new'),
cirq.Z(bit)**sympy.Symbol('alpha'),
cirq.XX(bit, bit2)**sympy.Symbol('alpha'))
correct_002 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('new'),
cirq.XX(bit, bit2)**sympy.Symbol('alpha'))
correct_003 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('alpha'),
cirq.Y(bit)**sympy.Symbol('alpha'),
cirq.Z(bit)**sympy.Symbol('alpha'),
cirq.XX(bit, bit2)**sympy.Symbol('new'))
self.assertEqual(correct_000, output[0][0][0])
self.assertEqual(correct_001, output[0][0][1])
self.assertEqual(correct_002, output[0][0][2])
self.assertEqual(correct_003, output[0][0][3])
self.assertEqual(correct_000, output[2][0][0])
self.assertEqual(correct_001, output[2][0][1])
self.assertEqual(correct_002, output[2][0][2])
self.assertEqual(correct_003, output[2][0][3])
correct_110 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('old'),
cirq.Y(bit)**sympy.Symbol('beta'),
cirq.Z(bit)**sympy.Symbol('beta'),
cirq.XX(bit, bit2)**sympy.Symbol('alpha'))
correct_111 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('beta'),
cirq.Y(bit)**sympy.Symbol('old'),
cirq.Z(bit)**sympy.Symbol('beta'),
cirq.XX(bit, bit2)**sympy.Symbol('alpha'))
correct_112 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('beta'),
cirq.Y(bit)**sympy.Symbol('beta'),
cirq.Z(bit)**sympy.Symbol('old'),
cirq.XX(bit, bit2)**sympy.Symbol('alpha'))
correct_113 = cirq.Circuit()
self.assertEqual(correct_110, output[1][1][0])
self.assertEqual(correct_111, output[1][1][1])
self.assertEqual(correct_112, output[1][1][2])
self.assertEqual(correct_113, output[1][1][3])
correct_100 = cirq.Circuit(
cirq.X(bit)**sympy.Symbol('beta'),
cirq.Y(bit)**sympy.Symbol('beta'),
cirq.Z(bit)**sympy.Symbol('beta'),
cirq.XX(bit, bit2)**sympy.Symbol('new'))
correct_101 = cirq.Circuit()
correct_102 = cirq.Circuit()
correct_103 = cirq.Circuit()
self.assertEqual(correct_100, output[1][0][0])
self.assertEqual(correct_101, output[1][0][1])
self.assertEqual(correct_102, output[1][0][2])
self.assertEqual(correct_103, output[1][0][3])
correct_220 = cirq.Circuit()
correct_221 = cirq.Circuit()
correct_222 = cirq.Circuit()
correct_223 = cirq.Circuit()
self.assertEqual(correct_220, output[2][2][0])
self.assertEqual(correct_221, output[2][2][1])
self.assertEqual(correct_222, output[2][2][2])
self.assertEqual(correct_223, output[2][2][3])
correct = cirq.Circuit()
for i in range(3):
for j in range(3):
for k in range(3):
if i != j and (not (i == 2 and j == 0)) \
and (not (i == 1 and j == 0)):
self.assertEqual(correct, output[i][j][k])
class PSWeightsFromSymbolTest(tf.test.TestCase):
"""Tests tfq_ps_weights_from_symbols."""
def test_simple(self):
"""Ensure that weight extraction works."""
bit = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(cirq.X(bit)**(sympy.Symbol('alpha') * 2))
inputs = util.convert_to_tensor([circuit])
symbols = tf.convert_to_tensor(['alpha'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
self.assertAllClose(res, np.array([[[2.0]]]))
def test_empty(self):
"""Test empty circuit. and symbol free circuit. does nothing."""
bit = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(cirq.X(bit))
circuit2 = cirq.Circuit()
inputs = util.convert_to_tensor([circuit, circuit2])
symbols = tf.convert_to_tensor(['alpha'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
self.assertAllClose(res, np.array([[[]], [[]]]))
def test_rotation_gates(self):
"""Test that rotation gates work."""
bit = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(cirq.rx(sympy.Symbol('alpha') * 5.0)(bit))
inputs = util.convert_to_tensor([circuit])
symbols = tf.convert_to_tensor(['alpha'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
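# cirq.rx(theta) equals X**(theta / pi) up to a global phase, so the weight
# extracted for the exponent alpha * 5.0 is expected to be 5.0 / pi.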
self.assertAllClose(res, np.array([[[5.0 / np.pi]]]))
def test_error(self):
"""Ensure if a symbol can't be found the op errors."""
bit = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(cirq.X(bit)**(sympy.Symbol('delta') * 2))
inputs = util.convert_to_tensor([circuit])
symbols = tf.convert_to_tensor(['alpha', 'delta'])
tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
symbols = tf.convert_to_tensor(['alpha'])
with self.assertRaisesRegex(Exception, expected_regex='sympy.Symbol'):
tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
symbols = tf.convert_to_tensor([['delta']])
with self.assertRaisesRegex(Exception,
expected_regex='rank 1. Got rank 2.'):
tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
inputs = tf.convert_to_tensor(['junk'])
symbols = tf.convert_to_tensor(['delta'])
with self.assertRaisesRegex(Exception,
expected_regex='Unparseable proto:'):
tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
inputs = util.convert_to_tensor([[circuit]])
with self.assertRaisesRegex(Exception,
expected_regex='rank 1. Got rank 2.'):
tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
def test_many_values(self):
"""Ensure that padding with few symbols and many values works."""
bit = cirq.LineQubit(1)
circuits = [
cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2.0),
cirq.Y(bit)**(sympy.Symbol('alpha') * 3.0),
cirq.Z(bit)**(sympy.Symbol('alpha')),
cirq.X(bit)**(sympy.Symbol('alpha') * 4.0)),
cirq.Circuit(cirq.X(bit)**(sympy.Symbol('alpha') * 9.0)),
cirq.Circuit(cirq.X(bit)**sympy.Symbol('beta'))
]
inputs = util.convert_to_tensor(circuits)
symbols = tf.convert_to_tensor(['alpha', 'beta'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
self.assertAllClose(
res,
np.array([[[2.0, 3.0, 1.0, 4.0], [0.0, 0.0, 0.0, 0.0]],
[[9.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]]]))
def test_many_symbols(self):
"""Ensure that padding with few values and many symbols works."""
bit = cirq.GridQubit(0, 0)
circuits = [
cirq.Circuit(cirq.X(bit)**(sympy.Symbol('alpha') * 2.0)),
cirq.Circuit(cirq.X(bit)**(sympy.Symbol('beta') * 6)),
cirq.Circuit(cirq.X(bit)**(sympy.Symbol('alpha') * 5.0)),
cirq.Circuit(cirq.X(bit)**(sympy.Symbol('gamma') * 8)),
cirq.Circuit(cirq.X(bit)**(sympy.Symbol('delta') * 9))
]
inputs = util.convert_to_tensor(circuits)
symbols = tf.convert_to_tensor(['alpha', 'beta', 'gamma', 'delta'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
self.assertAllClose(
res,
np.array([[[2.0], [0.0], [0.0], [0.0]], [[0.0], [6.0], [0.0],
[0.0]],
[[5.0], [0.0], [0.0], [0.0]], [[0.0], [0.0], [8.0],
[0.0]],
[[0.0], [0.0], [0.0], [9.0]]]))
def test_out_of_order(self):
"""Test that discovery order of symbols in circuits doesn't matter."""
bit = cirq.GridQubit(0, 0)
circuit = cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2),
cirq.Y(bit)**(sympy.Symbol('beta') * 3))
inputs = util.convert_to_tensor([circuit])
symbols = tf.convert_to_tensor(['alpha', 'beta'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
self.assertAllClose(res, np.array([[[2.0], [3.0]]]))
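# Rows of the result follow the ordering of the `symbols` tensor rather than
# the order in which the symbols are discovered in the circuit, so swapping
# the symbol order below swaps the rows of the extracted weights.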
symbols = tf.convert_to_tensor(['beta', 'alpha'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
self.assertAllClose(res, np.array([[[3.0], [2.0]]]))
def test_padding(self):
"""Ensure that the padding is correct in a complex example."""
bit = cirq.GridQubit(0, 0)
circuits = [
cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2.0),
cirq.Y(bit)**(sympy.Symbol('alpha') * 3.0),
cirq.Z(bit)**(sympy.Symbol('beta') * 4.0),
),
cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2.0),
cirq.Y(bit)**(sympy.Symbol('beta') * 3.0),
cirq.Z(bit)**(sympy.Symbol('beta') * 4.0),
),
cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2.0),
cirq.Y(bit)**(sympy.Symbol('beta') * 3.0),
cirq.Z(bit)**(sympy.Symbol('gamma') * 4.0),
)
]
inputs = util.convert_to_tensor(circuits)
symbols = tf.convert_to_tensor(['alpha', 'beta', 'gamma'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
self.assertAllClose(
res,
np.array([[[2.0, 3.0], [4.0, 0.0], [0.0, 0.0]],
[[2.0, 0.0], [3.0, 4.0], [0.0, 0.0]],
[[2.0, 0.0], [3.0, 0.0], [4.0, 0.0]]]))
def test_padding_with_non_parameterized_gates(self):
"""Ensure that the padding is correct in a complex example."""
bit = cirq.GridQubit(0, 0)
circuits = [
cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2.0),
cirq.Y(bit)**3.0,
cirq.Z(bit)**(sympy.Symbol('beta') * 4.0),
),
cirq.Circuit(
cirq.X(bit)**(sympy.Symbol('alpha') * 2.0),
cirq.Y(bit)**(sympy.Symbol('beta') * 3.0),
cirq.Z(bit)**4.0,
),
cirq.Circuit(
cirq.X(bit)**2.0,
cirq.Y(bit)**(sympy.Symbol('beta') * 3.0),
cirq.Z(bit)**(sympy.Symbol('gamma') * 4.0),
)
]
inputs = util.convert_to_tensor(circuits)
symbols = tf.convert_to_tensor(['alpha', 'beta', 'gamma'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
self.assertAllClose(
res,
np.array([[[2.0], [4.0], [0.0]], [[2.0], [3.0], [0.0]],
[[0.0], [3.0], [4.0]]]))
def test_ignorance(self):
"""Test ignorance of ISP, PXP, FSIM gates."""
circuit_batch = _complex_test_circuit()
inputs = util.convert_to_tensor(circuit_batch)
symbols = tf.convert_to_tensor(['r', 't'])
res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols)
# Because there are no weights to be gathered, the last dimension = 0
self.assertAllClose(tf.shape(res), [len(circuit_batch), 2, 0])
if __name__ == "__main__":
tf.test.main()
|
holoviews/tests/plotting/plotly/test_tiles.py | geronimos/holoviews | 864 | 12678197 | from holoviews.element import RGB, Tiles, Points, Bounds
from holoviews.element.tiles import StamenTerrain, _ATTRIBUTIONS
from .test_plot import TestPlotlyPlot, plotly_renderer
import numpy as np
class TestMapboxTilesPlot(TestPlotlyPlot):
def setUp(self):
super().setUp()
# Precompute coordinates
self.xs = [3000000, 2000000, 1000000]
self.ys = [-3000000, -2000000, -1000000]
self.x_range = (-5000000, 4000000)
self.x_center = sum(self.x_range) / 2.0
self.y_range = (-3000000, 2000000)
self.y_center = sum(self.y_range) / 2.0
self.lon_range, self.lat_range = Tiles.easting_northing_to_lon_lat(self.x_range, self.y_range)
self.lon_centers, self.lat_centers = Tiles.easting_northing_to_lon_lat(
[self.x_center], [self.y_center]
)
self.lon_center, self.lat_center = self.lon_centers[0], self.lat_centers[0]
self.lons, self.lats = Tiles.easting_northing_to_lon_lat(self.xs, self.ys)
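# Tiles.easting_northing_to_lon_lat converts Web Mercator easting/northing
# (meters) into longitude/latitude degrees; the centers computed here are
# reused by the mapbox subplot assertions in the tests below.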
def test_mapbox_tiles_defaults(self):
tiles = Tiles("").redim.range(
x=self.x_range, y=self.y_range
)
fig_dict = plotly_renderer.get_plot_state(tiles)
# Check dummy trace
self.assertEqual(len(fig_dict["data"]), 1)
dummy_trace = fig_dict["data"][0]
self.assertEqual(dummy_trace["type"], "scattermapbox")
self.assertEqual(dummy_trace["lon"], [])
self.assertEqual(dummy_trace["lat"], [])
self.assertEqual(dummy_trace["showlegend"], False)
# Check mapbox subplot
subplot = fig_dict["layout"]["mapbox"]
self.assertEqual(subplot["style"], "white-bg")
self.assertEqual(
subplot['center'], {'lat': self.lat_center, 'lon': self.lon_center}
)
# Check that xaxis and yaxis entries are not created
self.assertNotIn("xaxis", fig_dict["layout"])
self.assertNotIn("yaxis", fig_dict["layout"])
# Check no layers are introduced when an empty tile server string is
# passed
layers = fig_dict["layout"]["mapbox"].get("layers", [])
self.assertEqual(len(layers), 0)
def test_styled_mapbox_tiles(self):
tiles = Tiles().opts(mapboxstyle="dark", accesstoken="token-str").redim.range(
x=self.x_range, y=self.y_range
)
fig_dict = plotly_renderer.get_plot_state(tiles)
# Check mapbox subplot
subplot = fig_dict["layout"]["mapbox"]
self.assertEqual(subplot["style"], "dark")
self.assertEqual(subplot["accesstoken"], "token-str")
self.assertEqual(
subplot['center'], {'lat': self.lat_center, 'lon': self.lon_center}
)
def test_raster_layer(self):
tiles = StamenTerrain().redim.range(
x=self.x_range, y=self.y_range
).opts(alpha=0.7, min_zoom=3, max_zoom=7)
fig_dict = plotly_renderer.get_plot_state(tiles)
# Check dummy trace
self.assertEqual(len(fig_dict["data"]), 1)
dummy_trace = fig_dict["data"][0]
self.assertEqual(dummy_trace["type"], "scattermapbox")
self.assertEqual(dummy_trace["lon"], [])
self.assertEqual(dummy_trace["lat"], [])
self.assertEqual(dummy_trace["showlegend"], False)
# Check mapbox subplot
subplot = fig_dict["layout"]["mapbox"]
self.assertEqual(subplot["style"], "white-bg")
self.assertEqual(
subplot['center'], {'lat': self.lat_center, 'lon': self.lon_center}
)
# Check for raster layer
layers = fig_dict["layout"]["mapbox"].get("layers", [])
self.assertEqual(len(layers), 1)
layer = layers[0]
self.assertEqual(layer["source"][0].lower(), tiles.data.lower())
self.assertEqual(layer["opacity"], 0.7)
self.assertEqual(layer["sourcetype"], "raster")
self.assertEqual(layer["minzoom"], 3)
self.assertEqual(layer["maxzoom"], 7)
self.assertEqual(layer["sourceattribution"], _ATTRIBUTIONS[('stamen', 'net/t')])
def test_overlay(self):
# Base layer is mapbox vector layer
tiles = Tiles("").opts(mapboxstyle="dark", accesstoken="token-str")
# Raster tile layer
stamen_raster = StamenTerrain().opts(alpha=0.7)
# RGB layer
rgb_data = np.random.rand(10, 10, 3)
rgb = RGB(
rgb_data,
bounds=(self.x_range[0], self.y_range[0], self.x_range[1], self.y_range[1])
).opts(
opacity=0.5
)
# Points layer
points = Points([(0, 0), (self.x_range[1], self.y_range[1])]).opts(
show_legend=True
)
# Bounds
bounds = Bounds((self.x_range[0], self.y_range[0], 0, 0))
# Overlay
overlay = (tiles * stamen_raster * rgb * points * bounds).redim.range(
x=self.x_range, y=self.y_range
)
# Render to plotly figure dictionary
fig_dict = plotly_renderer.get_plot_state(overlay)
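# Expectation (taken from the assertions below): one scattermapbox trace per
# overlay element (5 in total, the first three being empty dummy traces) and
# two mapbox layers, one for the raster tiles and one for the RGB image.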
# Check number of traces and layers
traces = fig_dict["data"]
subplot = fig_dict["layout"]["mapbox"]
layers = subplot["layers"]
self.assertEqual(len(traces), 5)
self.assertEqual(len(layers), 2)
# Check vector layer
dummy_trace = traces[0]
self.assertEqual(dummy_trace["type"], "scattermapbox")
self.assertEqual(dummy_trace["lon"], [])
self.assertEqual(dummy_trace["lat"], [])
self.assertFalse(dummy_trace["showlegend"])
self.assertEqual(subplot["style"], "dark")
self.assertEqual(subplot["accesstoken"], "token-str")
self.assertEqual(
subplot['center'], {'lat': self.lat_center, 'lon': self.lon_center}
)
# Check raster layer
dummy_trace = traces[1]
raster_layer = layers[0]
self.assertEqual(dummy_trace["type"], "scattermapbox")
self.assertEqual(dummy_trace["lon"], [])
self.assertEqual(dummy_trace["lat"], [])
self.assertFalse(dummy_trace["showlegend"])
# Check raster_layer
self.assertEqual(raster_layer["below"], "traces")
self.assertEqual(raster_layer["opacity"], 0.7)
self.assertEqual(raster_layer["sourcetype"], "raster")
self.assertEqual(raster_layer["source"][0].lower(), stamen_raster.data.lower())
# Check RGB layer
dummy_trace = traces[2]
rgb_layer = layers[1]
self.assertEqual(dummy_trace["type"], "scattermapbox")
self.assertEqual(dummy_trace["lon"], [None])
self.assertEqual(dummy_trace["lat"], [None])
self.assertFalse(dummy_trace["showlegend"])
# Check rgb_layer
self.assertEqual(rgb_layer["below"], "traces")
self.assertEqual(rgb_layer["opacity"], 0.5)
self.assertEqual(rgb_layer["sourcetype"], "image")
self.assertTrue(rgb_layer["source"].startswith("data:image/png;base64,iVBOR"))
self.assertEqual(rgb_layer["coordinates"], [
[self.lon_range[0], self.lat_range[1]],
[self.lon_range[1], self.lat_range[1]],
[self.lon_range[1], self.lat_range[0]],
[self.lon_range[0], self.lat_range[0]]
])
# Check Points layer
points_trace = traces[3]
self.assertEqual(points_trace["type"], "scattermapbox")
self.assertEqual(points_trace["lon"], np.array([0, self.lon_range[1]]))
self.assertEqual(points_trace["lat"], np.array([0, self.lat_range[1]]))
self.assertEqual(points_trace["mode"], "markers")
self.assertTrue(points_trace.get("showlegend", True))
# Check Bounds layer
bounds_trace = traces[4]
self.assertEqual(bounds_trace["type"], "scattermapbox")
self.assertEqual(bounds_trace["lon"], np.array([
self.lon_range[0], self.lon_range[0], 0, 0, self.lon_range[0]
]))
self.assertEqual(bounds_trace["lat"], np.array([
self.lat_range[0], 0, 0, self.lat_range[0], self.lat_range[0]
]))
self.assertEqual(bounds_trace["mode"], "lines")
self.assertTrue(points_trace["showlegend"], False)
# No xaxis/yaxis
self.assertNotIn("xaxis", fig_dict["layout"])
self.assertNotIn("yaxis", fig_dict["layout"])
|
rllib/policy/dynamic_tf_policy.py | linyiyue/ray | 21,382 | 12678218 | from collections import namedtuple, OrderedDict
import gym
import logging
import re
from typing import Callable, Dict, List, Optional, Tuple, Type
from ray.util.debug import log_once
from ray.rllib.models.tf.tf_action_dist import TFActionDistribution
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy import TFPolicy
from ray.rllib.policy.view_requirement import ViewRequirement
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.utils import force_list
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils.debug import summarize
from ray.rllib.utils.deprecation import deprecation_warning, DEPRECATED_VALUE
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.tf_ops import get_placeholder
from ray.rllib.utils.typing import LocalOptimizer, ModelGradients, \
TensorType, TrainerConfigDict
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
# Variable scope in which created variables will be placed under.
TOWER_SCOPE_NAME = "tower"
@DeveloperAPI
class DynamicTFPolicy(TFPolicy):
"""A TFPolicy that auto-defines placeholders dynamically at runtime.
Do not sub-class this class directly (neither should you sub-class
TFPolicy), but rather use rllib.policy.tf_policy_template.build_tf_policy
to generate your custom tf (graph-mode or eager) Policy classes.
Initialization of this class occurs in two phases.
* Phase 1: the model is created and model variables are initialized.
* Phase 2: a fake batch of data is created, sent to the trajectory
postprocessor, and then used to create placeholders for the loss
function. The loss and stats functions are initialized with these
placeholders.
Initialization defines the static graph.
Attributes:
observation_space (gym.Space): observation space of the policy.
action_space (gym.Space): action space of the policy.
config (dict): config of the policy
model (ModelV2): TF model instance
dist_class (type): TF action distribution class
"""
@DeveloperAPI
def __init__(
self,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict,
loss_fn: Callable[[
Policy, ModelV2, Type[TFActionDistribution], SampleBatch
], TensorType],
*,
stats_fn: Optional[Callable[[Policy, SampleBatch], Dict[
str, TensorType]]] = None,
grad_stats_fn: Optional[Callable[[
Policy, SampleBatch, ModelGradients
], Dict[str, TensorType]]] = None,
before_loss_init: Optional[Callable[[
Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict
], None]] = None,
make_model: Optional[Callable[[
Policy, gym.spaces.Space, gym.spaces.Space, TrainerConfigDict
], ModelV2]] = None,
action_sampler_fn: Optional[Callable[[
TensorType, List[TensorType]
], Tuple[TensorType, TensorType]]] = None,
action_distribution_fn: Optional[Callable[[
Policy, ModelV2, TensorType, TensorType, TensorType
], Tuple[TensorType, type, List[TensorType]]]] = None,
existing_inputs: Optional[Dict[str, "tf1.placeholder"]] = None,
existing_model: Optional[ModelV2] = None,
get_batch_divisibility_req: Optional[Callable[[Policy],
int]] = None,
obs_include_prev_action_reward=DEPRECATED_VALUE):
"""Initializes a DynamicTFPolicy instance.
Args:
obs_space (gym.spaces.Space): Observation space of the policy.
action_space (gym.spaces.Space): Action space of the policy.
config (TrainerConfigDict): Policy-specific configuration data.
loss_fn (Callable[[Policy, ModelV2, Type[TFActionDistribution],
SampleBatch], TensorType]): Function that returns a loss tensor
for the policy graph.
stats_fn (Optional[Callable[[Policy, SampleBatch],
Dict[str, TensorType]]]): Optional function that returns a dict
of TF fetches given the policy and batch input tensors.
grad_stats_fn (Optional[Callable[[Policy, SampleBatch,
ModelGradients], Dict[str, TensorType]]]):
Optional function that returns a dict of TF fetches given the
policy, sample batch, and loss gradient tensors.
before_loss_init (Optional[Callable[
[Policy, gym.spaces.Space, gym.spaces.Space,
TrainerConfigDict], None]]): Optional function to run prior to
loss init that takes the same arguments as __init__.
make_model (Optional[Callable[[Policy, gym.spaces.Space,
gym.spaces.Space, TrainerConfigDict], ModelV2]]): Optional
function that returns a ModelV2 object given
policy, obs_space, action_space, and policy config.
All policy variables should be created in this function. If not
specified, a default model will be created.
action_sampler_fn (Optional[Callable[[Policy, ModelV2, Dict[
str, TensorType], TensorType, TensorType], Tuple[TensorType,
TensorType]]]): A callable returning a sampled action and its
log-likelihood given Policy, ModelV2, input_dict, explore,
timestep, and is_training.
action_distribution_fn (Optional[Callable[[Policy, ModelV2,
Dict[str, TensorType], TensorType, TensorType],
Tuple[TensorType, type, List[TensorType]]]]): A callable
returning distribution inputs (parameters), a dist-class to
generate an action distribution object from, and
internal-state outputs (or an empty list if not applicable).
Note: No exploration hooks have to be called from within
`action_distribution_fn`. It should only perform a simple
forward pass through some model.
If None, pass inputs through `self.model()` to get distribution
inputs.
The callable takes as inputs: Policy, ModelV2, input_dict,
explore, timestep, is_training.
existing_inputs (Optional[Dict[str, tf1.placeholder]]): When
copying a policy, this specifies an existing dict of
placeholders to use instead of defining new ones.
existing_model (Optional[ModelV2]): When copying a policy, this
specifies an existing model to clone and share weights with.
get_batch_divisibility_req (Optional[Callable[[Policy], int]]):
Optional callable that returns the divisibility requirement for
sample batches. If None, will assume a value of 1.
"""
if obs_include_prev_action_reward != DEPRECATED_VALUE:
deprecation_warning(
old="obs_include_prev_action_reward", error=False)
self.observation_space = obs_space
self.action_space = action_space
self.config = config
self.framework = "tf"
self._loss_fn = loss_fn
self._stats_fn = stats_fn
self._grad_stats_fn = grad_stats_fn
self._seq_lens = None
self._is_tower = existing_inputs is not None
dist_class = None
if action_sampler_fn or action_distribution_fn:
if not make_model:
raise ValueError(
"`make_model` is required if `action_sampler_fn` OR "
"`action_distribution_fn` is given")
else:
dist_class, logit_dim = ModelCatalog.get_action_dist(
action_space, self.config["model"])
# Setup self.model.
if existing_model:
if isinstance(existing_model, list):
self.model = existing_model[0]
# TODO: (sven) hack, but works for `target_[q_]?model`.
for i in range(1, len(existing_model)):
setattr(self, existing_model[i][0], existing_model[i][1])
elif make_model:
self.model = make_model(self, obs_space, action_space, config)
else:
self.model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=logit_dim,
model_config=self.config["model"],
framework="tf")
# Auto-update model's inference view requirements, if recurrent.
self._update_model_view_requirements_from_init_state()
# Input placeholders already given -> Use these.
if existing_inputs:
self._state_inputs = [
v for k, v in existing_inputs.items()
if k.startswith("state_in_")
]
# Placeholder for RNN time-chunk valid lengths.
if self._state_inputs:
self._seq_lens = existing_inputs[SampleBatch.SEQ_LENS]
# Create new input placeholders.
else:
self._state_inputs = [
get_placeholder(
space=vr.space,
time_axis=not isinstance(vr.shift, int),
) for k, vr in self.model.view_requirements.items()
if k.startswith("state_in_")
]
# Placeholder for RNN time-chunk valid lengths.
if self._state_inputs:
self._seq_lens = tf1.placeholder(
dtype=tf.int32, shape=[None], name="seq_lens")
# Use default settings.
# Add NEXT_OBS, STATE_IN_0.., and others.
self.view_requirements = self._get_default_view_requirements()
# Combine view_requirements for Model and Policy.
self.view_requirements.update(self.model.view_requirements)
# Disable env-info placeholder.
if SampleBatch.INFOS in self.view_requirements:
self.view_requirements[SampleBatch.INFOS].used_for_training = False
# Setup standard placeholders.
if self._is_tower:
timestep = existing_inputs["timestep"]
explore = False
self._input_dict, self._dummy_batch = \
self._get_input_dict_and_dummy_batch(
self.view_requirements, existing_inputs)
else:
action_ph = ModelCatalog.get_action_placeholder(action_space)
prev_action_ph = {}
if SampleBatch.PREV_ACTIONS not in self.view_requirements:
prev_action_ph = {
SampleBatch.PREV_ACTIONS: ModelCatalog.
get_action_placeholder(action_space, "prev_action")
}
self._input_dict, self._dummy_batch = \
self._get_input_dict_and_dummy_batch(
self.view_requirements,
dict({SampleBatch.ACTIONS: action_ph},
**prev_action_ph))
# Placeholder for (sampling steps) timestep (int).
timestep = tf1.placeholder_with_default(
tf.zeros((), dtype=tf.int64), (), name="timestep")
# Placeholder for `is_exploring` flag.
explore = tf1.placeholder_with_default(
True, (), name="is_exploring")
# Placeholder for `is_training` flag.
self._input_dict.is_training = self._get_is_training_placeholder()
# Multi-GPU towers do not need any action computing/exploration
# graphs.
sampled_action = None
sampled_action_logp = None
dist_inputs = None
self._state_out = None
if not self._is_tower:
# Create the Exploration object to use for this Policy.
self.exploration = self._create_exploration()
# Fully customized action generation (e.g., custom policy).
if action_sampler_fn:
sampled_action, sampled_action_logp = action_sampler_fn(
self,
self.model,
obs_batch=self._input_dict[SampleBatch.CUR_OBS],
state_batches=self._state_inputs,
seq_lens=self._seq_lens,
prev_action_batch=self._input_dict.get(
SampleBatch.PREV_ACTIONS),
prev_reward_batch=self._input_dict.get(
SampleBatch.PREV_REWARDS),
explore=explore,
is_training=self._input_dict.is_training)
# Distribution generation is customized, e.g., DQN, DDPG.
else:
if action_distribution_fn:
# Try new action_distribution_fn signature, supporting
# state_batches and seq_lens.
in_dict = self._input_dict
try:
dist_inputs, dist_class, self._state_out = \
action_distribution_fn(
self,
self.model,
input_dict=in_dict,
state_batches=self._state_inputs,
seq_lens=self._seq_lens,
explore=explore,
timestep=timestep,
is_training=in_dict.is_training)
# Trying the old way (to stay backward compatible).
# TODO: Remove in future.
except TypeError as e:
if "positional argument" in e.args[0] or \
"unexpected keyword argument" in e.args[0]:
dist_inputs, dist_class, self._state_out = \
action_distribution_fn(
self, self.model,
obs_batch=in_dict[SampleBatch.CUR_OBS],
state_batches=self._state_inputs,
seq_lens=self._seq_lens,
prev_action_batch=in_dict.get(
SampleBatch.PREV_ACTIONS),
prev_reward_batch=in_dict.get(
SampleBatch.PREV_REWARDS),
explore=explore,
is_training=in_dict.is_training)
else:
raise e
# Default distribution generation behavior:
# Pass through model. E.g., PG, PPO.
else:
if isinstance(self.model, tf.keras.Model):
dist_inputs, self._state_out, \
self._extra_action_fetches = \
self.model(self._input_dict)
else:
dist_inputs, self._state_out = self.model(
self._input_dict, self._state_inputs,
self._seq_lens)
action_dist = dist_class(dist_inputs, self.model)
# Using exploration to get final action (e.g. via sampling).
sampled_action, sampled_action_logp = \
self.exploration.get_exploration_action(
action_distribution=action_dist,
timestep=timestep,
explore=explore)
# Phase 1 init.
sess = tf1.get_default_session() or tf1.Session(
config=tf1.ConfigProto(**self.config["tf_session_args"]))
batch_divisibility_req = get_batch_divisibility_req(self) if \
callable(get_batch_divisibility_req) else \
(get_batch_divisibility_req or 1)
super().__init__(
observation_space=obs_space,
action_space=action_space,
config=config,
sess=sess,
obs_input=self._input_dict[SampleBatch.OBS],
action_input=self._input_dict[SampleBatch.ACTIONS],
sampled_action=sampled_action,
sampled_action_logp=sampled_action_logp,
dist_inputs=dist_inputs,
dist_class=dist_class,
loss=None, # dynamically initialized on run
loss_inputs=[],
model=self.model,
state_inputs=self._state_inputs,
state_outputs=self._state_out,
prev_action_input=self._input_dict.get(SampleBatch.PREV_ACTIONS),
prev_reward_input=self._input_dict.get(SampleBatch.PREV_REWARDS),
seq_lens=self._seq_lens,
max_seq_len=config["model"]["max_seq_len"],
batch_divisibility_req=batch_divisibility_req,
explore=explore,
timestep=timestep)
# Phase 2 init.
if before_loss_init is not None:
before_loss_init(self, obs_space, action_space, config)
# Loss initialization and model/postprocessing test calls.
if not self._is_tower:
self._initialize_loss_from_dummy_batch(
auto_remove_unneeded_view_reqs=True)
# Create MultiGPUTowerStacks, if we have at least one actual
# GPU or >1 CPUs (fake GPUs).
if len(self.devices) > 1 or any("gpu" in d for d in self.devices):
# Per-GPU graph copies created here must share vars with the
# policy. Therefore, `reuse` is set to tf1.AUTO_REUSE because
# Adam nodes are created after all of the device copies are
# created.
with tf1.variable_scope("", reuse=tf1.AUTO_REUSE):
self.multi_gpu_tower_stacks = [
TFMultiGPUTowerStack(policy=self) for i in range(
self.config.get("num_multi_gpu_tower_stacks", 1))
]
# Initialize again after loss and tower init.
self.get_session().run(tf1.global_variables_initializer())
@override(TFPolicy)
@DeveloperAPI
def copy(self,
existing_inputs: List[Tuple[str, "tf1.placeholder"]]) -> TFPolicy:
"""Creates a copy of self using existing input placeholders."""
# Note that there might be RNN state inputs at the end of the list
if len(self._loss_input_dict) != len(existing_inputs):
raise ValueError("Tensor list mismatch", self._loss_input_dict,
self._state_inputs, existing_inputs)
for i, (k, v) in enumerate(self._loss_input_dict_no_rnn.items()):
if v.shape.as_list() != existing_inputs[i].shape.as_list():
raise ValueError("Tensor shape mismatch", i, k, v.shape,
existing_inputs[i].shape)
# By convention, the loss inputs are followed by state inputs and then
# the seq len tensor.
rnn_inputs = []
for i in range(len(self._state_inputs)):
rnn_inputs.append(
("state_in_{}".format(i),
existing_inputs[len(self._loss_input_dict_no_rnn) + i]))
if rnn_inputs:
rnn_inputs.append((SampleBatch.SEQ_LENS, existing_inputs[-1]))
input_dict = OrderedDict(
[("is_exploring", self._is_exploring), ("timestep",
self._timestep)] +
[(k, existing_inputs[i])
for i, k in enumerate(self._loss_input_dict_no_rnn.keys())] +
rnn_inputs)
instance = self.__class__(
self.observation_space,
self.action_space,
self.config,
existing_inputs=input_dict,
existing_model=[
self.model,
# Deprecated: Target models should all reside under
# `policy.target_model` now.
("target_q_model", getattr(self, "target_q_model", None)),
("target_model", getattr(self, "target_model", None)),
])
instance._loss_input_dict = input_dict
losses = instance._do_loss_init(SampleBatch(input_dict))
loss_inputs = [
(k, existing_inputs[i])
for i, k in enumerate(self._loss_input_dict_no_rnn.keys())
]
TFPolicy._initialize_loss(instance, losses, loss_inputs)
if instance._grad_stats_fn:
instance._stats_fetches.update(
instance._grad_stats_fn(instance, input_dict, instance._grads))
return instance
@override(Policy)
@DeveloperAPI
def get_initial_state(self) -> List[TensorType]:
if self.model:
return self.model.get_initial_state()
else:
return []
@override(Policy)
@DeveloperAPI
def load_batch_into_buffer(
self,
batch: SampleBatch,
buffer_index: int = 0,
) -> int:
# Set the is_training flag of the batch.
batch.is_training = True
# Shortcut for 1 CPU only: Store batch in
# `self._loaded_single_cpu_batch`.
if len(self.devices) == 1 and self.devices[0] == "/cpu:0":
assert buffer_index == 0
self._loaded_single_cpu_batch = batch
return len(batch)
input_dict = self._get_loss_inputs_dict(batch, shuffle=False)
data_keys = list(self._loss_input_dict_no_rnn.values())
if self._state_inputs:
state_keys = self._state_inputs + [self._seq_lens]
else:
state_keys = []
inputs = [input_dict[k] for k in data_keys]
state_inputs = [input_dict[k] for k in state_keys]
return self.multi_gpu_tower_stacks[buffer_index].load_data(
sess=self.get_session(),
inputs=inputs,
state_inputs=state_inputs,
)
@override(Policy)
@DeveloperAPI
def get_num_samples_loaded_into_buffer(self, buffer_index: int = 0) -> int:
# Shortcut for 1 CPU only: Batch should already be stored in
# `self._loaded_single_cpu_batch`.
if len(self.devices) == 1 and self.devices[0] == "/cpu:0":
assert buffer_index == 0
return len(self._loaded_single_cpu_batch) if \
self._loaded_single_cpu_batch is not None else 0
return self.multi_gpu_tower_stacks[buffer_index].num_tuples_loaded
@override(Policy)
@DeveloperAPI
def learn_on_loaded_batch(self, offset: int = 0, buffer_index: int = 0):
# Shortcut for 1 CPU only: Batch should already be stored in
# `self._loaded_single_cpu_batch`.
if len(self.devices) == 1 and self.devices[0] == "/cpu:0":
assert buffer_index == 0
if self._loaded_single_cpu_batch is None:
raise ValueError(
"Must call Policy.load_batch_into_buffer() before "
"Policy.learn_on_loaded_batch()!")
# Get the correct slice of the already loaded batch to use,
# based on offset and batch size.
batch_size = self.config.get("sgd_minibatch_size",
self.config["train_batch_size"])
if batch_size >= len(self._loaded_single_cpu_batch):
sliced_batch = self._loaded_single_cpu_batch
else:
sliced_batch = self._loaded_single_cpu_batch.slice(
start=offset, end=offset + batch_size)
return self.learn_on_batch(sliced_batch)
return self.multi_gpu_tower_stacks[buffer_index].optimize(
self.get_session(), offset)
def _get_input_dict_and_dummy_batch(self, view_requirements,
existing_inputs):
"""Creates input_dict and dummy_batch for loss initialization.
Used for managing the Policy's input placeholders and for loss
initialization.
input_dict: str -> tf.placeholder, dummy_batch: str -> np.ndarray.
Args:
view_requirements (Dict[str, ViewRequirement]): The view requirements dict.
existing_inputs (Dict[str, tf.placeholder]): A dict of already
existing placeholders.
Returns:
Tuple[Dict[str, tf.placeholder], Dict[str, np.ndarray]]: The
input_dict/dummy_batch tuple.
"""
input_dict = {}
for view_col, view_req in view_requirements.items():
# Point state_in to the already existing self._state_inputs.
mo = re.match("state_in_(\d+)", view_col)
if mo is not None:
input_dict[view_col] = self._state_inputs[int(mo.group(1))]
# State-outs (no placeholders needed).
elif view_col.startswith("state_out_"):
continue
# Skip action dist inputs placeholder (do later).
elif view_col == SampleBatch.ACTION_DIST_INPUTS:
continue
# This is a tower, input placeholders already exist.
elif view_col in existing_inputs:
input_dict[view_col] = existing_inputs[view_col]
# All others.
else:
time_axis = not isinstance(view_req.shift, int)
if view_req.used_for_training:
# Create a +time-axis placeholder if the shift is not an
# int (range or list of ints).
flatten = view_col not in [
SampleBatch.OBS, SampleBatch.NEXT_OBS] or \
not self.config["_disable_preprocessor_api"]
input_dict[view_col] = get_placeholder(
space=view_req.space,
name=view_col,
time_axis=time_axis,
flatten=flatten,
)
dummy_batch = self._get_dummy_batch_from_view_requirements(
batch_size=32)
return SampleBatch(input_dict, seq_lens=self._seq_lens), dummy_batch
@override(Policy)
def _initialize_loss_from_dummy_batch(
self, auto_remove_unneeded_view_reqs: bool = True,
stats_fn=None) -> None:
# Create the optimizer/exploration optimizer here. Some initialization
# steps (e.g. exploration postprocessing) may need this.
if not self._optimizers:
self._optimizers = force_list(self.optimizer())
# Backward compatibility.
self._optimizer = self._optimizers[0]
# Test calls depend on variable init, so initialize model first.
self.get_session().run(tf1.global_variables_initializer())
logger.info("Testing `compute_actions` w/ dummy batch.")
actions, state_outs, extra_fetches = \
self.compute_actions_from_input_dict(
self._dummy_batch, explore=False, timestep=0)
for key, value in extra_fetches.items():
self._dummy_batch[key] = value
self._input_dict[key] = get_placeholder(value=value, name=key)
if key not in self.view_requirements:
logger.info("Adding extra-action-fetch `{}` to "
"view-reqs.".format(key))
self.view_requirements[key] = ViewRequirement(
space=gym.spaces.Box(
-1.0, 1.0, shape=value.shape[1:], dtype=value.dtype),
used_for_compute_actions=False,
)
dummy_batch = self._dummy_batch
logger.info("Testing `postprocess_trajectory` w/ dummy batch.")
self.exploration.postprocess_trajectory(self, dummy_batch,
self.get_session())
_ = self.postprocess_trajectory(dummy_batch)
# Add new columns automatically to (loss) input_dict.
for key in dummy_batch.added_keys:
if key not in self._input_dict:
self._input_dict[key] = get_placeholder(
value=dummy_batch[key], name=key)
if key not in self.view_requirements:
self.view_requirements[key] = ViewRequirement(
space=gym.spaces.Box(
-1.0,
1.0,
shape=dummy_batch[key].shape[1:],
dtype=dummy_batch[key].dtype),
used_for_compute_actions=False,
)
train_batch = SampleBatch(
dict(self._input_dict, **self._loss_input_dict))
if self._state_inputs:
train_batch[SampleBatch.SEQ_LENS] = self._seq_lens
self._loss_input_dict.update({
SampleBatch.SEQ_LENS: train_batch[SampleBatch.SEQ_LENS]
})
self._loss_input_dict.update({k: v for k, v in train_batch.items()})
if log_once("loss_init"):
logger.debug(
"Initializing loss function with dummy input:\n\n{}\n".format(
summarize(train_batch)))
losses = self._do_loss_init(train_batch)
all_accessed_keys = \
train_batch.accessed_keys | dummy_batch.accessed_keys | \
dummy_batch.added_keys | set(
self.model.view_requirements.keys())
TFPolicy._initialize_loss(self, losses, [
(k, v) for k, v in train_batch.items() if k in all_accessed_keys
] + ([(SampleBatch.SEQ_LENS, train_batch[SampleBatch.SEQ_LENS])]
if SampleBatch.SEQ_LENS in train_batch else []))
if "is_training" in self._loss_input_dict:
del self._loss_input_dict["is_training"]
# Call the grads stats fn.
# TODO: (sven) rename to simply stats_fn to match eager and torch.
if self._grad_stats_fn:
self._stats_fetches.update(
self._grad_stats_fn(self, train_batch, self._grads))
# Add new columns automatically to view-reqs.
if auto_remove_unneeded_view_reqs:
# Add those needed for postprocessing and training.
all_accessed_keys = train_batch.accessed_keys | \
dummy_batch.accessed_keys
# Tag those only needed for post-processing (with some exceptions).
for key in dummy_batch.accessed_keys:
if key not in train_batch.accessed_keys and \
key not in self.model.view_requirements and \
key not in [
SampleBatch.EPS_ID, SampleBatch.AGENT_INDEX,
SampleBatch.UNROLL_ID, SampleBatch.DONES,
SampleBatch.REWARDS, SampleBatch.INFOS]:
if key in self.view_requirements:
self.view_requirements[key].used_for_training = False
if key in self._loss_input_dict:
del self._loss_input_dict[key]
# Remove those not needed at all (leave those that are needed
# by Sampler to properly execute sample collection).
# Also always leave DONES, REWARDS, and INFOS, no matter what.
for key in list(self.view_requirements.keys()):
if key not in all_accessed_keys and key not in [
SampleBatch.EPS_ID, SampleBatch.AGENT_INDEX,
SampleBatch.UNROLL_ID, SampleBatch.DONES,
SampleBatch.REWARDS, SampleBatch.INFOS] and \
key not in self.model.view_requirements:
# If user deleted this key manually in postprocessing
# fn, warn about it and do not remove from
# view-requirements.
if key in dummy_batch.deleted_keys:
logger.warning(
"SampleBatch key '{}' was deleted manually in "
"postprocessing function! RLlib will "
"automatically remove non-used items from the "
"data stream. Remove the `del` from your "
"postprocessing function.".format(key))
else:
del self.view_requirements[key]
if key in self._loss_input_dict:
del self._loss_input_dict[key]
# Add those data_cols (again) that are missing and have
# dependencies by view_cols.
for key in list(self.view_requirements.keys()):
vr = self.view_requirements[key]
if (vr.data_col is not None
and vr.data_col not in self.view_requirements):
used_for_training = \
vr.data_col in train_batch.accessed_keys
self.view_requirements[vr.data_col] = ViewRequirement(
space=vr.space, used_for_training=used_for_training)
self._loss_input_dict_no_rnn = {
k: v
for k, v in self._loss_input_dict.items()
if (v not in self._state_inputs and v != self._seq_lens)
}
def _do_loss_init(self, train_batch: SampleBatch):
losses = self._loss_fn(self, self.model, self.dist_class, train_batch)
losses = force_list(losses)
if self._stats_fn:
self._stats_fetches.update(self._stats_fn(self, train_batch))
# Override the update ops to be those of the model.
self._update_ops = []
if not isinstance(self.model, tf.keras.Model):
self._update_ops = self.model.update_ops()
return losses
class TFMultiGPUTowerStack:
"""Optimizer that runs in parallel across multiple local devices.
TFMultiGPUTowerStack automatically splits up and loads training data
onto specified local devices (e.g. GPUs) with `load_data()`. During a call
to `optimize()`, the devices compute gradients over slices of the data in
parallel. The gradients are then averaged and applied to the shared
weights.
The data loaded is pinned in device memory until the next call to
`load_data`, so you can make multiple passes (possibly in randomized order)
over the same data once loaded.
This is similar to tf1.train.SyncReplicasOptimizer, but works within a
single TensorFlow graph, i.e. implements in-graph replicated training:
https://www.tensorflow.org/api_docs/python/tf/train/SyncReplicasOptimizer
"""
def __init__(
self,
# Deprecated.
optimizer=None,
devices=None,
input_placeholders=None,
rnn_inputs=None,
max_per_device_batch_size=None,
build_graph=None,
grad_norm_clipping=None,
# Use only `policy` argument from here on.
policy: TFPolicy = None,
):
"""Initializes a TFMultiGPUTowerStack instance.
Args:
policy (TFPolicy): The TFPolicy object that this tower stack
belongs to.
"""
# Obsoleted usage, use only `policy` arg from here on.
if policy is None:
deprecation_warning(
old="TFMultiGPUTowerStack(...)",
new="TFMultiGPUTowerStack(policy=[Policy])",
error=False,
)
self.policy = None
self.optimizers = optimizer
self.devices = devices
self.max_per_device_batch_size = max_per_device_batch_size
self.policy_copy = build_graph
else:
self.policy: TFPolicy = policy
self.optimizers: List[LocalOptimizer] = self.policy._optimizers
self.devices = self.policy.devices
self.max_per_device_batch_size = \
(max_per_device_batch_size or
policy.config.get("sgd_minibatch_size", policy.config.get(
"train_batch_size", 999999))) // len(self.devices)
input_placeholders = list(
self.policy._loss_input_dict_no_rnn.values())
rnn_inputs = []
if self.policy._state_inputs:
rnn_inputs = self.policy._state_inputs + [
self.policy._seq_lens
]
grad_norm_clipping = self.policy.config.get("grad_clip")
self.policy_copy = self.policy.copy
assert len(self.devices) > 1 or "gpu" in self.devices[0]
self.loss_inputs = input_placeholders + rnn_inputs
shared_ops = tf1.get_collection(
tf1.GraphKeys.UPDATE_OPS, scope=tf1.get_variable_scope().name)
# Then setup the per-device loss graphs that use the shared weights
self._batch_index = tf1.placeholder(tf.int32, name="batch_index")
# Dynamic batch size, which may be shrunk if there isn't enough data
self._per_device_batch_size = tf1.placeholder(
tf.int32, name="per_device_batch_size")
self._loaded_per_device_batch_size = max_per_device_batch_size
# When loading RNN input, we dynamically determine the max seq len
self._max_seq_len = tf1.placeholder(tf.int32, name="max_seq_len")
self._loaded_max_seq_len = 1
# Split on the CPU in case the data doesn't fit in GPU memory.
with tf.device("/cpu:0"):
data_splits = zip(
*[tf.split(ph, len(self.devices)) for ph in self.loss_inputs])
self._towers = []
for tower_i, (device, device_placeholders) in enumerate(
zip(self.devices, data_splits)):
self._towers.append(
self._setup_device(tower_i, device, device_placeholders,
len(input_placeholders)))
if self.policy.config["_tf_policy_handles_more_than_one_loss"]:
avgs = []
for i, optim in enumerate(self.optimizers):
avg = average_gradients([t.grads[i] for t in self._towers])
if grad_norm_clipping:
clipped = []
for grad, _ in avg:
clipped.append(grad)
clipped, _ = tf.clip_by_global_norm(
clipped, grad_norm_clipping)
for i, (grad, var) in enumerate(avg):
avg[i] = (clipped[i], var)
avgs.append(avg)
# Gather update ops for any batch norm layers.
# TODO(ekl) here we
# will use all the ops found which won't work for DQN / DDPG, but
# those aren't supported with multi-gpu right now anyways.
self._update_ops = tf1.get_collection(
tf1.GraphKeys.UPDATE_OPS, scope=tf1.get_variable_scope().name)
for op in shared_ops:
self._update_ops.remove(op) # only care about tower update ops
if self._update_ops:
logger.debug("Update ops to run on apply gradient: {}".format(
self._update_ops))
with tf1.control_dependencies(self._update_ops):
self._train_op = tf.group([
o.apply_gradients(a)
for o, a in zip(self.optimizers, avgs)
])
else:
avg = average_gradients([t.grads for t in self._towers])
if grad_norm_clipping:
clipped = []
for grad, _ in avg:
clipped.append(grad)
clipped, _ = tf.clip_by_global_norm(clipped,
grad_norm_clipping)
for i, (grad, var) in enumerate(avg):
avg[i] = (clipped[i], var)
# Gather update ops for any batch norm layers.
# TODO(ekl) here we
# will use all the ops found which won't work for DQN / DDPG, but
# those aren't supported with multi-gpu right now anyways.
self._update_ops = tf1.get_collection(
tf1.GraphKeys.UPDATE_OPS, scope=tf1.get_variable_scope().name)
for op in shared_ops:
self._update_ops.remove(op) # only care about tower update ops
if self._update_ops:
logger.debug("Update ops to run on apply gradient: {}".format(
self._update_ops))
with tf1.control_dependencies(self._update_ops):
self._train_op = self.optimizers[0].apply_gradients(avg)
def load_data(self, sess, inputs, state_inputs):
"""Bulk loads the specified inputs into device memory.
The shape of the inputs must conform to the shapes of the input
placeholders this optimizer was constructed with.
The data is split equally across all the devices. If the data is not
evenly divisible by the batch size, excess data will be discarded.
Args:
sess: TensorFlow session.
inputs: List of arrays matching the input placeholders, of shape
[BATCH_SIZE, ...].
state_inputs: List of RNN input arrays. These arrays have size
[BATCH_SIZE / MAX_SEQ_LEN, ...].
Returns:
The number of tuples loaded per device.
"""
if log_once("load_data"):
logger.info(
"Training on concatenated sample batches:\n\n{}\n".format(
summarize({
"placeholders": self.loss_inputs,
"inputs": inputs,
"state_inputs": state_inputs
})))
feed_dict = {}
assert len(self.loss_inputs) == len(inputs + state_inputs), \
(self.loss_inputs, inputs, state_inputs)
# Let's suppose we have the following input data, and 2 devices:
# 1 2 3 4 5 6 7 <- state inputs shape
# A A A B B B C C C D D D E E E F F F G G G <- inputs shape
# The data is truncated and split across devices as follows:
# |---| seq len = 3
# |---------------------------------| seq batch size = 6 seqs
# |----------------| per device batch size = 9 tuples
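# Worked example (hypothetical numbers): with max_per_device_batch_size=9,
# a loaded max seq len of 3 and 2 devices, sequences_per_minibatch below is
# 9 // 3 * 2 = 6, i.e. 3 sequences (9 tuples) are loaded per device per pass.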
if len(state_inputs) > 0:
smallest_array = state_inputs[0]
seq_len = len(inputs[0]) // len(state_inputs[0])
self._loaded_max_seq_len = seq_len
else:
smallest_array = inputs[0]
self._loaded_max_seq_len = 1
sequences_per_minibatch = (
self.max_per_device_batch_size // self._loaded_max_seq_len * len(
self.devices))
if sequences_per_minibatch < 1:
logger.warning(
("Target minibatch size is {}, however the rollout sequence "
"length is {}, hence the minibatch size will be raised to "
"{}.").format(self.max_per_device_batch_size,
self._loaded_max_seq_len,
self._loaded_max_seq_len * len(self.devices)))
sequences_per_minibatch = 1
if len(smallest_array) < sequences_per_minibatch:
# Dynamically shrink the batch size if insufficient data
sequences_per_minibatch = make_divisible_by(
len(smallest_array), len(self.devices))
if log_once("data_slicing"):
logger.info(
("Divided {} rollout sequences, each of length {}, among "
"{} devices.").format(
len(smallest_array), self._loaded_max_seq_len,
len(self.devices)))
if sequences_per_minibatch < len(self.devices):
raise ValueError(
"Must load at least 1 tuple sequence per device. Try "
"increasing `sgd_minibatch_size` or reducing `max_seq_len` "
"to ensure that at least one sequence fits per device.")
self._loaded_per_device_batch_size = (sequences_per_minibatch // len(
self.devices) * self._loaded_max_seq_len)
if len(state_inputs) > 0:
# First truncate the RNN state arrays to sequences_per_minibatch.
state_inputs = [
make_divisible_by(arr, sequences_per_minibatch)
for arr in state_inputs
]
# Then truncate the data inputs to match
inputs = [arr[:len(state_inputs[0]) * seq_len] for arr in inputs]
assert len(state_inputs[0]) * seq_len == len(inputs[0]), \
(len(state_inputs[0]), sequences_per_minibatch, seq_len,
len(inputs[0]))
for ph, arr in zip(self.loss_inputs, inputs + state_inputs):
feed_dict[ph] = arr
truncated_len = len(inputs[0])
else:
truncated_len = 0
for ph, arr in zip(self.loss_inputs, inputs):
truncated_arr = make_divisible_by(arr, sequences_per_minibatch)
feed_dict[ph] = truncated_arr
if truncated_len == 0:
truncated_len = len(truncated_arr)
sess.run([t.init_op for t in self._towers], feed_dict=feed_dict)
self.num_tuples_loaded = truncated_len
samples_per_device = truncated_len // len(self.devices)
assert samples_per_device > 0, "No data loaded?"
assert samples_per_device % self._loaded_per_device_batch_size == 0
# Return loaded samples per-device.
return samples_per_device
def optimize(self, sess, batch_index):
"""Run a single step of SGD.
Runs an SGD step over a slice of the preloaded batch with size given by
self._loaded_per_device_batch_size and offset given by the batch_index
argument.
Updates shared model weights based on the averaged per-device
gradients.
Args:
sess: TensorFlow session.
batch_index: Offset into the preloaded data. This value must be
between `0` and `tuples_per_device`. The amount of data to
process is at most `max_per_device_batch_size`.
Returns:
The outputs of extra_ops evaluated over the batch.
"""
feed_dict = {
self._batch_index: batch_index,
self._per_device_batch_size: self._loaded_per_device_batch_size,
self._max_seq_len: self._loaded_max_seq_len,
}
for tower in self._towers:
feed_dict.update(tower.loss_graph.extra_compute_grad_feed_dict())
fetches = {"train": self._train_op}
for tower_num, tower in enumerate(self._towers):
tower_fetch = tower.loss_graph._get_grad_and_stats_fetches()
fetches["tower_{}".format(tower_num)] = tower_fetch
return sess.run(fetches, feed_dict=feed_dict)
def get_device_losses(self):
return [t.loss_graph for t in self._towers]
def _setup_device(self, tower_i, device, device_input_placeholders,
num_data_in):
assert num_data_in <= len(device_input_placeholders)
with tf.device(device):
with tf1.name_scope(TOWER_SCOPE_NAME + f"_{tower_i}"):
device_input_batches = []
device_input_slices = []
for i, ph in enumerate(device_input_placeholders):
current_batch = tf1.Variable(
ph,
trainable=False,
validate_shape=False,
collections=[])
device_input_batches.append(current_batch)
if i < num_data_in:
scale = self._max_seq_len
granularity = self._max_seq_len
else:
scale = self._max_seq_len
granularity = 1
current_slice = tf.slice(
current_batch,
([self._batch_index // scale * granularity] +
[0] * len(ph.shape[1:])),
([self._per_device_batch_size // scale * granularity] +
[-1] * len(ph.shape[1:])))
current_slice.set_shape(ph.shape)
device_input_slices.append(current_slice)
graph_obj = self.policy_copy(device_input_slices)
device_grads = graph_obj.gradients(self.optimizers,
graph_obj._losses)
return Tower(
tf.group(
*[batch.initializer for batch in device_input_batches]),
device_grads, graph_obj)
# Each tower is a copy of the loss graph pinned to a specific device.
Tower = namedtuple("Tower", ["init_op", "grads", "loss_graph"])
def make_divisible_by(a, n):
if type(a) is int:
return a - a % n
return a[0:a.shape[0] - a.shape[0] % n]
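# make_divisible_by example: make_divisible_by(10, 4) returns 8 (the largest
# multiple of 4 not exceeding 10); for an array of length 10 it drops the
# trailing 2 rows instead.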
def average_gradients(tower_grads):
"""Averages gradients across towers.
Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer
list is over individual gradients. The inner list is over the
gradient calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been
averaged across all towers.
TODO(ekl): We could use NCCL if this becomes a bottleneck.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
if g is not None:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over
# below.
grads.append(expanded_g)
if not grads:
continue
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
|
app/src/thirdparty/telemetry/internal/backends/remote/trybot_browser_finder_unittest.py | ta2edchimp/big-rig | 925 | 12678226 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import StringIO
import unittest
from telemetry.internal.backends.remote import trybot_browser_finder
from telemetry.internal.browser import browser_options
from telemetry.testing import simple_mock
from telemetry.testing import system_stub
class TrybotBrowserFinderTest(unittest.TestCase):
def setUp(self):
self.log_output = StringIO.StringIO()
self.stream_handler = logging.StreamHandler(self.log_output)
logging.getLogger().addHandler(self.stream_handler)
self._real_subprocess = trybot_browser_finder.subprocess
self._real_urllib2 = trybot_browser_finder.urllib2
self._stubs = system_stub.Override(trybot_browser_finder,
['sys', 'open', 'os'])
def tearDown(self):
logging.getLogger().removeHandler(self.stream_handler)
self.log_output.close()
trybot_browser_finder.subprocess = self._real_subprocess
trybot_browser_finder.urllib2 = self._real_urllib2
self._stubs.Restore()
def _ExpectProcesses(self, args):
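# Each entry in `args` appears to be a (popen_args, (returncode, stdout,
# stderr)) pair: poll() is mocked to return arg[1][0] and communicate() to
# return the remaining (stdout, stderr) tuple.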
mock_subprocess = simple_mock.MockObject()
mock_subprocess.SetAttribute('PIPE', simple_mock.MockObject())
for arg in args:
mock_popen = simple_mock.MockObject()
mock_popen.ExpectCall('communicate').WillReturn(arg[1][1:])
mock_popen.ExpectCall('poll').WillReturn(arg[1][0])
mock_subprocess.ExpectCall(
'Popen').WithArgs(arg[0]).WillReturn(mock_popen)
trybot_browser_finder.subprocess = mock_subprocess
def _MockTryserverJson(self, bots_dict):
trybot_browser_finder.urllib2 = simple_mock.MockObject()
trybot_browser_finder.urllib2.ExpectCall('urlopen').WithArgs(
'http://build.chromium.org/p/tryserver.chromium.perf/json').WillReturn(
StringIO.StringIO(json.dumps({'builders': bots_dict})))
def test_find_all_browser_types_list(self):
finder_options = browser_options.BrowserFinderOptions(browser_type='list')
self._MockTryserverJson({
'android_nexus4_perf_bisect': 'stuff',
'mac_10_9_perf_bisect': 'otherstuff',
'win_perf_bisect_builder': 'not a trybot',
})
expected_trybots_list = [
'trybot-all',
'trybot-all-android',
'trybot-all-linux',
'trybot-all-mac',
'trybot-all-win',
'trybot-android-nexus4',
'trybot-mac-10-9'
]
self.assertEquals(
expected_trybots_list,
sorted(trybot_browser_finder.FindAllBrowserTypes(finder_options)))
def test_find_all_browser_types_trybot(self):
finder_options = browser_options.BrowserFinderOptions(
browser_type='trybot-win')
self._MockTryserverJson({
'android_nexus4_perf_bisect': 'stuff',
'mac_10_9_perf_bisect': 'otherstuff',
'win_perf_bisect_builder': 'not a trybot',
})
expected_trybots_list = [
'trybot-all',
'trybot-all-android',
'trybot-all-linux',
'trybot-all-mac',
'trybot-all-win',
'trybot-android-nexus4',
'trybot-mac-10-9'
]
self.assertEquals(
expected_trybots_list,
sorted(trybot_browser_finder.FindAllBrowserTypes(finder_options)))
def test_find_all_browser_types_non_trybot_browser(self):
finder_options = browser_options.BrowserFinderOptions(
browser_type='release')
trybot_browser_finder.urllib2 = simple_mock.MockObject()
self.assertEquals(
[],
# pylint: disable=W0212
sorted(trybot_browser_finder.FindAllBrowserTypes(finder_options)))
def test_constructor(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({
'android_nexus4_perf_bisect': 'stuff',
'mac_10_9_perf_bisect': 'otherstuff',
'win_perf_bisect_builder': 'not a trybot',
})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self.assertEquals('android', browser.target_os)
# pylint: disable=W0212
self.assertTrue('android' in browser._builder_names)
self.assertEquals(['android_nexus4_perf_bisect'],
browser._builder_names.get('android'))
def test_constructor_trybot_all(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({
'android_nexus4_perf_bisect': 'stuff',
'android_nexus5_perf_bisect': 'stuff2',
'mac_10_9_perf_bisect': 'otherstuff',
'mac_perf_bisect': 'otherstuff1',
'win_perf_bisect': 'otherstuff2',
'linux_perf_bisect': 'otherstuff3',
'win_x64_perf_bisect': 'otherstuff4',
'win_perf_bisect_builder': 'not a trybot',
})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-all', finder_options)
self.assertEquals('all', browser.target_os)
# pylint: disable=W0212
self.assertEquals(
['android', 'linux', 'mac', 'win', 'win-x64'],
sorted(browser._builder_names))
self.assertEquals(
['android_nexus4_perf_bisect', 'android_nexus5_perf_bisect'],
sorted(browser._builder_names.get('android')))
self.assertEquals(
['mac_10_9_perf_bisect', 'mac_perf_bisect'],
sorted(browser._builder_names.get('mac')))
self.assertEquals(
['linux_perf_bisect'], sorted(browser._builder_names.get('linux')))
self.assertEquals(
['win_perf_bisect'], sorted(browser._builder_names.get('win')))
self.assertEquals(
['win_x64_perf_bisect'], sorted(browser._builder_names.get('win-x64')))
def test_constructor_trybot_all_win(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({
'android_nexus4_perf_bisect': 'stuff',
'android_nexus5_perf_bisect': 'stuff2',
'win_8_perf_bisect': 'otherstuff',
'win_perf_bisect': 'otherstuff2',
'linux_perf_bisect': 'otherstuff3',
'win_x64_perf_bisect': 'otherstuff4',
'win_perf_bisect_builder': 'not a trybot',
})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-all-win', finder_options)
self.assertEquals('all', browser.target_os)
# pylint: disable=W0212
self.assertEquals(
['win', 'win-x64'],
sorted(browser._builder_names))
self.assertEquals(
['win_8_perf_bisect', 'win_perf_bisect'],
sorted(browser._builder_names.get('win')))
self.assertEquals(
['win_x64_perf_bisect'], sorted(browser._builder_names.get('win-x64')))
def test_constructor_trybot_all_android(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({
'android_nexus4_perf_bisect': 'stuff',
'android_nexus5_perf_bisect': 'stuff2',
'win_8_perf_bisect': 'otherstuff',
'win_perf_bisect': 'otherstuff2',
'linux_perf_bisect': 'otherstuff3',
'win_x64_perf_bisect': 'otherstuff4',
'win_perf_bisect_builder': 'not a trybot',
})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-all-android', finder_options)
self.assertEquals(
['android_nexus4_perf_bisect', 'android_nexus5_perf_bisect'],
sorted(browser._builder_names.get('android')))
def test_constructor_trybot_all_mac(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({
'android_nexus4_perf_bisect': 'stuff',
'win_8_perf_bisect': 'otherstuff',
'mac_perf_bisect': 'otherstuff2',
'win_perf_bisect_builder': 'not a trybot',
})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-all-mac', finder_options)
self.assertEquals('all', browser.target_os)
# pylint: disable=W0212
self.assertEquals(
['mac'],
sorted(browser._builder_names))
self.assertEquals(
['mac_perf_bisect'],
sorted(browser._builder_names.get('mac')))
def test_constructor_trybot_all_linux(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({
'android_nexus4_perf_bisect': 'stuff',
'linux_perf_bisect': 'stuff1',
'win_8_perf_bisect': 'otherstuff',
'mac_perf_bisect': 'otherstuff2',
'win_perf_bisect_builder': 'not a trybot',
})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-all-linux', finder_options)
self.assertEquals('all', browser.target_os)
# pylint: disable=W0212
self.assertEquals(
['linux'],
sorted(browser._builder_names))
self.assertEquals(
['linux_perf_bisect'],
sorted(browser._builder_names.get('linux')))
def test_no_git(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({'android_nexus4_perf_bisect': 'stuff'})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self._ExpectProcesses((
(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], (128, None, None)),
))
browser.RunRemote()
self.assertEquals(
'Must be in a git repository to send changes to trybots.\n',
self.log_output.getvalue())
def test_dirty_tree(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({'android_nexus4_perf_bisect': 'stuff'})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self._ExpectProcesses((
(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], (0, 'br', None)),
(['git', 'update-index', '--refresh', '-q'], (0, None, None,)),
(['git', 'diff-index', 'HEAD'], (0, 'dirty tree', None)),
))
browser.RunRemote()
self.assertEquals(
'Cannot send a try job with a dirty tree. Commit locally first.\n',
self.log_output.getvalue())
def test_no_local_commits(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({'android_nexus4_perf_bisect': 'stuff'})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self._ExpectProcesses((
(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], (0, 'br', None)),
(['git', 'update-index', '--refresh', '-q'], (0, None, None,)),
(['git', 'diff-index', 'HEAD'], (0, '', None)),
(['git', 'log', 'origin/master..HEAD'], (0, '', None)),
(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], (0, 'br', None)),
(['git', 'update-index', '--refresh', '-q'], (0, None, None,)),
(['git', 'diff-index', 'HEAD'], (0, '', None)),
(['git', 'log', 'origin/master..HEAD'], (0, '', None)),
))
browser.RunRemote()
self.assertEquals(
('No local changes found in chromium or blink trees. '
'browser=trybot-android-nexus4 argument sends local changes to the '
'perf trybot(s): '
'[[\'android_nexus4_perf_bisect\']].\n'),
self.log_output.getvalue())
def test_branch_checkout_fails(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({'android_nexus4_perf_bisect': 'stuff'})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self._ExpectProcesses((
(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], (0, 'br', None)),
(['git', 'update-index', '--refresh', '-q'], (0, None, None,)),
(['git', 'diff-index', 'HEAD'], (0, '', None)),
(['git', 'log', 'origin/master..HEAD'], (0, 'logs here', None)),
(['git', 'checkout', '-b', 'telemetry-tryjob'],
(1, None, 'fatal: A branch named \'telemetry-try\' already exists.')),
))
browser.RunRemote()
self.assertEquals(
('Error creating branch telemetry-tryjob. '
'Please delete it if it exists.\n'
'fatal: A branch named \'telemetry-try\' already exists.\n'),
self.log_output.getvalue())
def _GetConfigForBrowser(self, name, platform, branch, cfg_filename,
is_blink=False):
finder_options = browser_options.BrowserFinderOptions()
bot = '%s_perf_bisect' % name.replace('trybot-', '').replace('-', '_')
self._MockTryserverJson({bot: 'stuff'})
browser = trybot_browser_finder.PossibleTrybotBrowser(name, finder_options)
first_processes = ()
if is_blink:
first_processes = (
(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], (0, 'br', None)),
(['git', 'update-index', '--refresh', '-q'], (0, None, None,)),
(['git', 'diff-index', 'HEAD'], (0, '', None)),
(['git', 'log', 'origin/master..HEAD'], (0, '', None))
)
self._ExpectProcesses(first_processes + (
(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], (0, branch, None)),
(['git', 'update-index', '--refresh', '-q'], (0, None, None,)),
(['git', 'diff-index', 'HEAD'], (0, '', None)),
(['git', 'log', 'origin/master..HEAD'], (0, 'logs here', None)),
(['git', 'checkout', '-b', 'telemetry-tryjob'], (0, None, None)),
(['git', 'branch', '--set-upstream-to', 'origin/master'],
(0, None, None)),
(['git', 'commit', '-a', '-m', 'bisect config: %s' % platform],
(0, None, None)),
(['git', 'cl', 'upload', '-f', '--bypass-hooks', '-m',
'CL for perf tryjob on %s' % platform],
(0, 'stuff https://codereview.chromium.org/12345 stuff', None)),
(['git', 'cl', 'try', '-m', 'tryserver.chromium.perf', '-b', bot],
(0, None, None)),
(['git', 'checkout', branch], (0, None, None)),
(['git', 'branch', '-D', 'telemetry-tryjob'], (0, None, None))
))
self._stubs.sys.argv = [
'tools/perf/run_benchmark',
'--browser=%s' % browser,
'sunspider']
cfg = StringIO.StringIO()
self._stubs.open.files = {cfg_filename: cfg}
browser.RunRemote()
return cfg.getvalue()
def test_config_android(self):
config = self._GetConfigForBrowser(
'trybot-android-nexus4', 'android', 'somebranch',
'tools/run-perf-test.cfg')
self.assertEquals(
('config = {\n'
' "command": "./tools/perf/run_benchmark '
'--browser=android-chromium sunspider",\n'
' "max_time_minutes": "120",\n'
' "repeat_count": "1",\n'
' "target_arch": "ia32",\n'
' "truncate_percent": "0"\n'
'}'), config)
def test_config_mac(self):
config = self._GetConfigForBrowser(
'trybot-mac-10-9', 'mac', 'currentwork', 'tools/run-perf-test.cfg')
self.assertEquals(
('config = {\n'
' "command": "./tools/perf/run_benchmark '
'--browser=release sunspider",\n'
' "max_time_minutes": "120",\n'
' "repeat_count": "1",\n'
' "target_arch": "ia32",\n'
' "truncate_percent": "0"\n'
'}'), config)
def test_config_win_x64(self):
config = self._GetConfigForBrowser(
'trybot-win-x64', 'win-x64', 'currentwork', 'tools/run-perf-test.cfg')
self.assertEquals(
('config = {\n'
' "command": "python tools\\\\perf\\\\run_benchmark '
'--browser=release_x64 sunspider",\n'
' "max_time_minutes": "120",\n'
' "repeat_count": "1",\n'
' "target_arch": "x64",\n'
' "truncate_percent": "0"\n'
'}'), config)
def test_config_blink(self):
config = self._GetConfigForBrowser(
'trybot-mac-10-9', 'mac', 'blinkbranch',
'Tools/run-perf-test.cfg', True)
self.assertEquals(
('config = {\n'
' "command": "./tools/perf/run_benchmark '
'--browser=release sunspider",\n'
' "max_time_minutes": "120",\n'
' "repeat_count": "1",\n'
' "target_arch": "ia32",\n'
' "truncate_percent": "0"\n'
'}'), config)
def test_update_config_git_commit_tryboterror(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({'android_nexus4_perf_bisect': 'stuff'})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self._ExpectProcesses((
(['git', 'commit', '-a', '-m', 'bisect config: android'],
(128, 'None', 'commit failed')),
(['git', 'cl', 'upload', '-f', '--bypass-hooks', '-m',
'CL for perf tryjob on android'],
(0, 'stuff https://codereview.chromium.org/12345 stuff', None)),
(['git', 'cl', 'try', '-m', 'tryserver.chromium.perf', '-b',
'android_nexus4_perf_bisect'], (0, None, None))))
self._stubs.sys.argv = [
'tools/perf/run_benchmark',
'--browser=%s' % browser,
'sunspider']
cfg_filename = 'tools/run-perf-test.cfg'
cfg = StringIO.StringIO()
self._stubs.open.files = {cfg_filename: cfg}
self.assertRaises(trybot_browser_finder.TrybotError,
browser._UpdateConfigAndRunTryjob, 'android', cfg_filename)
def test_update_config_git_upload_tryboterror(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({'android_nexus4_perf_bisect': 'stuff'})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self._ExpectProcesses((
(['git', 'commit', '-a', '-m', 'bisect config: android'],
(0, 'None', None)),
(['git', 'cl', 'upload', '-f', '--bypass-hooks', '-m',
'CL for perf tryjob on android'],
(128, None, 'error')),
(['git', 'cl', 'try', '-m', 'tryserver.chromium.perf', '-b',
'android_nexus4_perf_bisect'], (0, None, None))))
self._stubs.sys.argv = [
'tools/perf/run_benchmark',
'--browser=%s' % browser,
'sunspider']
cfg_filename = 'tools/run-perf-test.cfg'
cfg = StringIO.StringIO()
self._stubs.open.files = {cfg_filename: cfg}
self.assertRaises(trybot_browser_finder.TrybotError,
browser._UpdateConfigAndRunTryjob, 'android', cfg_filename)
def test_update_config_git_try_tryboterror(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({'android_nexus4_perf_bisect': 'stuff'})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self._ExpectProcesses((
(['git', 'commit', '-a', '-m', 'bisect config: android'],
(0, 'None', None)),
(['git', 'cl', 'upload', '-f', '--bypass-hooks', '-m',
'CL for perf tryjob on android'],
(0, 'stuff https://codereview.chromium.org/12345 stuff', None)),
(['git', 'cl', 'try', '-m', 'tryserver.chromium.perf', '-b',
'android_nexus4_perf_bisect'], (128, None, None))))
self._stubs.sys.argv = [
'tools/perf/run_benchmark',
'--browser=%s' % browser,
'sunspider']
cfg_filename = 'tools/run-perf-test.cfg'
cfg = StringIO.StringIO()
self._stubs.open.files = {cfg_filename: cfg}
self.assertRaises(trybot_browser_finder.TrybotError,
browser._UpdateConfigAndRunTryjob, 'android', cfg_filename)
def test_update_config_git_try(self):
finder_options = browser_options.BrowserFinderOptions()
self._MockTryserverJson({'android_nexus4_perf_bisect': 'stuff'})
browser = trybot_browser_finder.PossibleTrybotBrowser(
'trybot-android-nexus4', finder_options)
self._ExpectProcesses((
(['git', 'commit', '-a', '-m', 'bisect config: android'],
(0, 'None', None)),
(['git', 'cl', 'upload', '-f', '--bypass-hooks', '-m',
'CL for perf tryjob on android'],
(0, 'stuff https://codereview.chromium.org/12345 stuff', None)),
(['git', 'cl', 'try', '-m', 'tryserver.chromium.perf', '-b',
'android_nexus4_perf_bisect'], (0, None, None))))
self._stubs.sys.argv = [
'tools/perf/run_benchmark',
'--browser=%s' % browser,
'sunspider']
cfg_filename = 'tools/run-perf-test.cfg'
cfg = StringIO.StringIO()
self._stubs.open.files = {cfg_filename: cfg}
self.assertEquals((0, 'https://codereview.chromium.org/12345'),
browser._UpdateConfigAndRunTryjob('android', cfg_filename))
|
safe_learning/tests/test_utilities.py | xin-alice/cs159_safe_learning | 169 | 12678240 | """Test the utilities."""
from __future__ import absolute_import, print_function, division
import pytest
import numpy as np
import tensorflow as tf
from numpy.testing import assert_allclose
from safe_learning.utilities import (dlqr, get_storage, set_storage,
get_feed_dict, unique_rows,
compute_trajectory)
from safe_learning import LinearSystem
def test_dlqr():
"""Test the dlqr function."""
true_k = np.array([[0.61803399]])
true_p = np.array([[1.61803399]])
k, p = dlqr(1, 1, 1, 1)
assert_allclose(k, true_k)
assert_allclose(p, true_p)
k, p = dlqr([[1]], [[1]], [[1]], [[1]])
assert_allclose(k, true_k)
assert_allclose(p, true_p)
class TestStorage(object):
"""Test the class storage."""
@pytest.fixture
def sample_class(self):
"""Sample class for testing."""
class A(object):
"""Some class."""
def __init__(self):
"""Initialize."""
super(A, self).__init__()
self.storage = {}
def method(self, value, index=None):
storage = get_storage(self.storage, index=index)
set_storage(self.storage, [('value', value)], index=index)
return storage
return A()
def test_storage(self, sample_class):
"""Test the storage."""
storage = sample_class.method(5)
assert storage is None
storage = sample_class.method(4)
assert storage['value'] == 5
storage = sample_class.method(None)
assert storage['value'] == 4
# Test index
storage = sample_class.method(3, index='test')
assert storage is None
storage = sample_class.method(4, index='test')
assert storage['value'] == 3
storage = sample_class.method(3, index='test2')
assert storage is None
storage = sample_class.method(3, index='test')
        assert storage['value'] == 4
def test_get_feed_dict():
"""Test the global get_feed_dict method."""
graph = tf.Graph()
feed_dict = get_feed_dict(graph)
# Initialized new dictionary
assert feed_dict == {}
# Test assignment
feed_dict['test'] = 5
# Make sure we keep getting the same object
assert feed_dict is get_feed_dict(graph)
def test_unique_rows():
"""Test the unique_rows function."""
a = np.array([[1, 1], [1, 2], [1, 3], [1, 2], [1, 3], [1, 4], [2, 3]])
uniques = np.array([[1, 1], [1, 2], [1, 3], [1, 4], [2, 3]])
assert_allclose(unique_rows(a), uniques)
def test_compute_trajectory():
"""Test the compute_trajectory function."""
A = np.array([[1., 0.1],
[0., 1.]])
B = np.array([[0.01],
[0.1]])
dynamics = LinearSystem((A, B))
Q = np.diag([1., 0.01])
R = np.array([[0.01]])
K, _ = dlqr(A, B, Q, R)
policy = LinearSystem([-K])
x0 = np.array([[0.1, 0.]])
with tf.Session() as sess:
res = compute_trajectory(dynamics, policy, x0, num_steps=20)
states, actions = res
assert_allclose(states[[0], :], x0)
assert_allclose(states[-1, :], np.array([0., 0.]), atol=0.01)
assert_allclose(actions, states[:-1].dot(-K.T)) |
pytorch_lightning/loggers/wandb.py | Code-Cornelius/pytorch-lightning | 15,666 | 12678269 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Weights and Biases Logger
-------------------------
"""
import operator
import os
from argparse import Namespace
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
from weakref import ReferenceType
import torch.nn as nn
from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from pytorch_lightning.loggers.base import LightningLoggerBase, rank_zero_experiment
from pytorch_lightning.utilities import _module_available, rank_zero_only
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.imports import _compare_version
from pytorch_lightning.utilities.warnings import rank_zero_warn
_WANDB_AVAILABLE = _module_available("wandb")
_WANDB_GREATER_EQUAL_0_10_22 = _compare_version("wandb", operator.ge, "0.10.22")
try:
import wandb
from wandb.wandb_run import Run
except ModuleNotFoundError:
# needed for test mocks, these tests shall be updated
wandb, Run = None, None
class WandbLogger(LightningLoggerBase):
r"""
Log using `Weights and Biases <https://docs.wandb.ai/integrations/lightning>`_.
**Installation and set-up**
Install with pip:
.. code-block:: bash
pip install wandb
Create a `WandbLogger` instance:
.. code-block:: python
from pytorch_lightning.loggers import WandbLogger
wandb_logger = WandbLogger(project="MNIST")
Pass the logger instance to the `Trainer`:
.. code-block:: python
trainer = Trainer(logger=wandb_logger)
A new W&B run will be created when training starts if you have not created one manually before with `wandb.init()`.
**Log metrics**
Log from :class:`~pytorch_lightning.core.lightning.LightningModule`:
.. code-block:: python
class LitModule(LightningModule):
def training_step(self, batch, batch_idx):
self.log("train/loss", loss)
Use directly wandb module:
.. code-block:: python
wandb.log({"train/loss": loss})
**Log hyper-parameters**
Save :class:`~pytorch_lightning.core.lightning.LightningModule` parameters:
.. code-block:: python
class LitModule(LightningModule):
def __init__(self, *args, **kwarg):
self.save_hyperparameters()
Add other config parameters:
.. code-block:: python
# add one parameter
wandb_logger.experiment.config["key"] = value
# add multiple parameters
wandb_logger.experiment.config.update({key1: val1, key2: val2})
# use directly wandb module
wandb.config["key"] = value
wandb.config.update()
**Log gradients, parameters and model topology**
Call the `watch` method for automatically tracking gradients:
.. code-block:: python
# log gradients and model topology
wandb_logger.watch(model)
# log gradients, parameter histogram and model topology
wandb_logger.watch(model, log="all")
# change log frequency of gradients and parameters (100 steps by default)
wandb_logger.watch(model, log_freq=500)
# do not log graph (in case of errors)
wandb_logger.watch(model, log_graph=False)
The `watch` method adds hooks to the model which can be removed at the end of training:
.. code-block:: python
wandb_logger.unwatch(model)
**Log model checkpoints**
Log model checkpoints at the end of training:
.. code-block:: python
wandb_logger = WandbLogger(log_model=True)
Log model checkpoints as they get created during training:
.. code-block:: python
wandb_logger = WandbLogger(log_model="all")
Custom checkpointing can be set up through :class:`~pytorch_lightning.callbacks.ModelCheckpoint`:
.. code-block:: python
# log model only if `val_accuracy` increases
wandb_logger = WandbLogger(log_model="all")
checkpoint_callback = ModelCheckpoint(monitor="val_accuracy", mode="max")
trainer = Trainer(logger=wandb_logger, callbacks=[checkpoint_callback])
`latest` and `best` aliases are automatically set to easily retrieve a model checkpoint:
.. code-block:: python
        # reference can be retrieved in artifacts panel
        # "VERSION" can be a version (ex: "v2") or an alias ("latest" or "best")
checkpoint_reference = "USER/PROJECT/MODEL-RUN_ID:VERSION"
# download checkpoint locally (if not already cached)
run = wandb.init(project="MNIST")
artifact = run.use_artifact(checkpoint_reference, type="model")
artifact_dir = artifact.download()
# load checkpoint
model = LitModule.load_from_checkpoint(Path(artifact_dir) / "model.ckpt")
**Log media**
Log text with:
.. code-block:: python
# using columns and data
columns = ["input", "label", "prediction"]
data = [["cheese", "english", "english"], ["fromage", "french", "spanish"]]
wandb_logger.log_text(key="samples", columns=columns, data=data)
# using a pandas DataFrame
wandb_logger.log_text(key="samples", dataframe=my_dataframe)
Log images with:
.. code-block:: python
# using tensors, numpy arrays or PIL images
wandb_logger.log_image(key="samples", images=[img1, img2])
# adding captions
wandb_logger.log_image(key="samples", images=[img1, img2], caption=["tree", "person"])
# using file path
wandb_logger.log_image(key="samples", images=["img_1.jpg", "img_2.jpg"])
More arguments can be passed for logging segmentation masks and bounding boxes. Refer to
`Image Overlays documentation <https://docs.wandb.ai/guides/track/log/media#image-overlays>`_.
**Log Tables**
`W&B Tables <https://docs.wandb.ai/guides/data-vis>`_ can be used to log, query and analyze tabular data.
They support any type of media (text, image, video, audio, molecule, html, etc) and are great for storing,
understanding and sharing any form of data, from datasets to model predictions.
.. code-block:: python
columns = ["caption", "image", "sound"]
data = [["cheese", wandb.Image(img_1), wandb.Audio(snd_1)], ["wine", wandb.Image(img_2), wandb.Audio(snd_2)]]
wandb_logger.log_table(key="samples", columns=columns, data=data)
See Also:
- `Demo in Google Colab <http://wandb.me/lightning>`__ with hyperparameter search and model logging
- `W&B Documentation <https://docs.wandb.ai/integrations/lightning>`__
Args:
name: Display name for the run.
save_dir: Path where data is saved (wandb dir by default).
offline: Run offline (data can be streamed later to wandb servers).
id: Sets the version, mainly used to resume a previous run.
version: Same as id.
anonymous: Enables or explicitly disables anonymous logging.
project: The name of the project to which this run will belong.
log_model: Log checkpoints created by :class:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint`
as W&B artifacts. `latest` and `best` aliases are automatically set.
* if ``log_model == 'all'``, checkpoints are logged during training.
* if ``log_model == True``, checkpoints are logged at the end of training, except when
:paramref:`~pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint.save_top_k` ``== -1``
which also logs every checkpoint during training.
* if ``log_model == False`` (default), no checkpoint is logged.
prefix: A string to put at the beginning of metric keys.
experiment: WandB experiment object. Automatically set when creating a run.
\**kwargs: Arguments passed to :func:`wandb.init` like `entity`, `group`, `tags`, etc.
Raises:
ModuleNotFoundError:
If required WandB package is not installed on the device.
MisconfigurationException:
If both ``log_model`` and ``offline`` is set to ``True``.
"""
LOGGER_JOIN_CHAR = "-"
def __init__(
self,
name: Optional[str] = None,
save_dir: Optional[str] = None,
offline: Optional[bool] = False,
id: Optional[str] = None,
anonymous: Optional[bool] = None,
version: Optional[str] = None,
project: Optional[str] = None,
log_model: Optional[bool] = False,
experiment=None,
prefix: Optional[str] = "",
**kwargs,
):
if wandb is None:
raise ModuleNotFoundError(
"You want to use `wandb` logger which is not installed yet,"
" install it with `pip install wandb`." # pragma: no-cover
)
if offline and log_model:
raise MisconfigurationException(
f"Providing log_model={log_model} and offline={offline} is an invalid configuration"
" since model checkpoints cannot be uploaded in offline mode.\n"
"Hint: Set `offline=False` to log your model."
)
if log_model and not _WANDB_GREATER_EQUAL_0_10_22:
rank_zero_warn(
f"Providing log_model={log_model} requires wandb version >= 0.10.22"
" for logging associated model metadata.\n"
"Hint: Upgrade with `pip install --upgrade wandb`."
)
super().__init__()
self._offline = offline
self._log_model = log_model
self._prefix = prefix
self._experiment = experiment
self._logged_model_time = {}
self._checkpoint_callback = None
# set wandb init arguments
anonymous_lut = {True: "allow", False: None}
self._wandb_init = dict(
name=name,
project=project,
id=version or id,
dir=save_dir,
resume="allow",
anonymous=anonymous_lut.get(anonymous, anonymous),
)
self._wandb_init.update(**kwargs)
# extract parameters
self._save_dir = self._wandb_init.get("dir")
self._name = self._wandb_init.get("name")
self._id = self._wandb_init.get("id")
def __getstate__(self):
state = self.__dict__.copy()
# args needed to reload correct experiment
state["_id"] = self._experiment.id if self._experiment is not None else None
# cannot be pickled
state["_experiment"] = None
return state
@property
@rank_zero_experiment
def experiment(self) -> Run:
r"""
Actual wandb object. To use wandb features in your
:class:`~pytorch_lightning.core.lightning.LightningModule` do the following.
Example::
.. code-block:: python
self.logger.experiment.some_wandb_function()
"""
if self._experiment is None:
if self._offline:
os.environ["WANDB_MODE"] = "dryrun"
if wandb.run is None:
self._experiment = wandb.init(**self._wandb_init)
else:
rank_zero_warn(
"There is a wandb run already in progress and newly created instances of `WandbLogger` will reuse"
" this run. If this is not desired, call `wandb.finish()` before instantiating `WandbLogger`."
)
self._experiment = wandb.run
# define default x-axis (for latest wandb versions)
if getattr(self._experiment, "define_metric", None):
self._experiment.define_metric("trainer/global_step")
self._experiment.define_metric("*", step_metric="trainer/global_step", step_sync=True)
return self._experiment
def watch(self, model: nn.Module, log: str = "gradients", log_freq: int = 100, log_graph: bool = True):
self.experiment.watch(model, log=log, log_freq=log_freq, log_graph=log_graph)
@rank_zero_only
def log_hyperparams(self, params: Union[Dict[str, Any], Namespace]) -> None:
params = self._convert_params(params)
params = self._flatten_dict(params)
params = self._sanitize_callable_params(params)
self.experiment.config.update(params, allow_val_change=True)
@rank_zero_only
def log_metrics(self, metrics: Dict[str, float], step: Optional[int] = None) -> None:
assert rank_zero_only.rank == 0, "experiment tried to log from global_rank != 0"
metrics = self._add_prefix(metrics)
if step is not None:
self.experiment.log({**metrics, "trainer/global_step": step})
else:
self.experiment.log(metrics)
@rank_zero_only
def log_table(
self,
key: str,
columns: List[str] = None,
data: List[List[Any]] = None,
dataframe: Any = None,
step: Optional[int] = None,
) -> None:
"""Log a Table containing any object type (text, image, audio, video, molecule, html, etc).
Can be defined either with `columns` and `data` or with `dataframe`.
"""
metrics = {key: wandb.Table(columns=columns, data=data, dataframe=dataframe)}
self.log_metrics(metrics, step)
@rank_zero_only
def log_text(
self,
key: str,
columns: List[str] = None,
data: List[List[str]] = None,
dataframe: Any = None,
step: Optional[int] = None,
) -> None:
"""Log text as a Table.
Can be defined either with `columns` and `data` or with `dataframe`.
"""
self.log_table(key, columns, data, dataframe, step)
@rank_zero_only
def log_image(self, key: str, images: List[Any], **kwargs: str) -> None:
"""Log images (tensors, numpy arrays, PIL Images or file paths).
Optional kwargs are lists passed to each image (ex: caption, masks, boxes).
"""
if not isinstance(images, list):
raise TypeError(f'Expected a list as "images", found {type(images)}')
        # pop `step` first so it is not validated against the number of images below
        step = kwargs.pop("step", None)
        n = len(images)
        for k, v in kwargs.items():
            if len(v) != n:
                raise ValueError(f"Expected {n} items but only found {len(v)} for {k}")
kwarg_list = [{k: kwargs[k][i] for k in kwargs.keys()} for i in range(n)]
metrics = {key: [wandb.Image(img, **kwarg) for img, kwarg in zip(images, kwarg_list)]}
self.log_metrics(metrics, step)
@property
def save_dir(self) -> Optional[str]:
"""Gets the save directory.
Returns:
The path to the save directory.
"""
return self._save_dir
@property
def name(self) -> Optional[str]:
"""Gets the name of the experiment.
Returns:
The name of the experiment if the experiment exists else the name given to the constructor.
"""
# don't create an experiment if we don't have one
return self._experiment.project_name() if self._experiment else self._name
@property
def version(self) -> Optional[str]:
"""Gets the id of the experiment.
Returns:
The id of the experiment if the experiment exists else the id given to the constructor.
"""
# don't create an experiment if we don't have one
return self._experiment.id if self._experiment else self._id
def after_save_checkpoint(self, checkpoint_callback: "ReferenceType[ModelCheckpoint]") -> None:
# log checkpoints as artifacts
if self._log_model == "all" or self._log_model is True and checkpoint_callback.save_top_k == -1:
self._scan_and_log_checkpoints(checkpoint_callback)
elif self._log_model is True:
self._checkpoint_callback = checkpoint_callback
@rank_zero_only
def finalize(self, status: str) -> None:
# log checkpoints as artifacts
if self._checkpoint_callback:
self._scan_and_log_checkpoints(self._checkpoint_callback)
def _scan_and_log_checkpoints(self, checkpoint_callback: "ReferenceType[ModelCheckpoint]") -> None:
# get checkpoints to be saved with associated score
checkpoints = {
checkpoint_callback.last_model_path: checkpoint_callback.current_score,
checkpoint_callback.best_model_path: checkpoint_callback.best_model_score,
**checkpoint_callback.best_k_models,
}
checkpoints = sorted((Path(p).stat().st_mtime, p, s) for p, s in checkpoints.items() if Path(p).is_file())
checkpoints = [
c for c in checkpoints if c[1] not in self._logged_model_time.keys() or self._logged_model_time[c[1]] < c[0]
]
# log iteratively all new checkpoints
for t, p, s in checkpoints:
metadata = (
{
"score": s,
"original_filename": Path(p).name,
"ModelCheckpoint": {
k: getattr(checkpoint_callback, k)
for k in [
"monitor",
"mode",
"save_last",
"save_top_k",
"save_weights_only",
"_every_n_train_steps",
]
# ensure it does not break if `ModelCheckpoint` args change
if hasattr(checkpoint_callback, k)
},
}
if _WANDB_GREATER_EQUAL_0_10_22
else None
)
artifact = wandb.Artifact(name=f"model-{self.experiment.id}", type="model", metadata=metadata)
artifact.add_file(p, name="model.ckpt")
aliases = ["latest", "best"] if p == checkpoint_callback.best_model_path else ["latest"]
self.experiment.log_artifact(artifact, aliases=aliases)
            # remember logged models - timestamp needed in case filename didn't change (last.ckpt or custom name)
self._logged_model_time[p] = t
|
timm/optim/optim_factory.py | vb7401/pytorch-image-models | 431 | 12678271 | """ Optimizer Factory w/ Custom Weight Decay
Hacked together by / Copyright 2020 <NAME>
"""
import torch
from torch import optim as optim
from .adafactor import Adafactor
from .adahessian import Adahessian
from .adamp import AdamP
from .lookahead import Lookahead
from .nadam import Nadam
from .novograd import NovoGrad
from .nvnovograd import NvNovoGrad
from .radam import RAdam
from .rmsprop_tf import RMSpropTF
from .sgdp import SGDP
try:
from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
has_apex = True
except ImportError:
has_apex = False
def add_weight_decay(model, weight_decay=1e-5, skip_list=()):
decay = []
no_decay = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
no_decay.append(param)
else:
decay.append(param)
return [
{'params': no_decay, 'weight_decay': 0.},
{'params': decay, 'weight_decay': weight_decay}]
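# A minimal sketch of using add_weight_decay(); `model` and the hyper-parameters
# below are assumptions for illustration. The returned groups carry their own
# 'weight_decay', so the optimizer itself is constructed with weight_decay=0 to
# avoid applying decay twice.
#
#   param_groups = add_weight_decay(model, weight_decay=1e-5)
#   optimizer = optim.SGD(param_groups, lr=0.1, momentum=0.9, weight_decay=0.)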
def create_optimizer(args, model, filter_bias_and_bn=True):
opt_lower = args.opt.lower()
weight_decay = args.weight_decay
if weight_decay and filter_bias_and_bn:
skip = {}
if hasattr(model, 'no_weight_decay'):
skip = model.no_weight_decay()
parameters = add_weight_decay(model, weight_decay, skip)
weight_decay = 0.
else:
parameters = model.parameters()
if 'fused' in opt_lower:
assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
opt_args = dict(lr=args.lr, weight_decay=weight_decay)
if hasattr(args, 'opt_eps') and args.opt_eps is not None:
opt_args['eps'] = args.opt_eps
if hasattr(args, 'opt_betas') and args.opt_betas is not None:
opt_args['betas'] = args.opt_betas
if hasattr(args, 'opt_args') and args.opt_args is not None:
opt_args.update(args.opt_args)
opt_split = opt_lower.split('_')
opt_lower = opt_split[-1]
if opt_lower == 'sgd' or opt_lower == 'nesterov':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'momentum':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'adam':
optimizer = optim.Adam(parameters, **opt_args)
elif opt_lower == 'adamw':
optimizer = optim.AdamW(parameters, **opt_args)
elif opt_lower == 'nadam':
optimizer = Nadam(parameters, **opt_args)
elif opt_lower == 'radam':
optimizer = RAdam(parameters, **opt_args)
elif opt_lower == 'adamp':
optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
elif opt_lower == 'sgdp':
optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'adadelta':
optimizer = optim.Adadelta(parameters, **opt_args)
elif opt_lower == 'adafactor':
if not args.lr:
opt_args['lr'] = None
optimizer = Adafactor(parameters, **opt_args)
elif opt_lower == 'adahessian':
optimizer = Adahessian(parameters, **opt_args)
elif opt_lower == 'rmsprop':
optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
elif opt_lower == 'rmsproptf':
optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
elif opt_lower == 'novograd':
optimizer = NovoGrad(parameters, **opt_args)
elif opt_lower == 'nvnovograd':
optimizer = NvNovoGrad(parameters, **opt_args)
elif opt_lower == 'fusedsgd':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'fusedmomentum':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'fusedadam':
optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
elif opt_lower == 'fusedadamw':
optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
elif opt_lower == 'fusedlamb':
optimizer = FusedLAMB(parameters, **opt_args)
elif opt_lower == 'fusednovograd':
opt_args.setdefault('betas', (0.95, 0.98))
optimizer = FusedNovoGrad(parameters, **opt_args)
else:
        assert False, "Invalid optimizer"
raise ValueError
if len(opt_split) > 1:
if opt_split[0] == 'lookahead':
optimizer = Lookahead(optimizer)
return optimizer
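# A minimal sketch of calling create_optimizer() outside the usual argparse flow;
# the SimpleNamespace stands in for parsed command-line args and every value below
# is an assumption chosen only for illustration.
#
#   from types import SimpleNamespace
#   args = SimpleNamespace(opt='lookahead_adamw', lr=1e-3, weight_decay=0.05,
#                          momentum=0.9, opt_eps=None, opt_betas=None, opt_args=None)
#   optimizer = create_optimizer(args, model)  # model: any nn.Module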
|
examples/pyplot/scatter2.py | hadivafaii/vedo | 836 | 12678272 | # Scatter plot of a gaussian distribution
# with varying color and point sizes
from vedo import *
from vedo.pyplot import plot
import numpy as np
n = 1000
x = np.random.randn(n)
y = np.random.randn(n)
# define what size must have each marker:
marker_sizes = np.sin(2*x)/8
# define a (r,g,b) list of colors for each marker:
marker_cols = np.c_[np.cos(2*x), np.zeros(n), np.zeros(n)]
txt0 = Text2D("A scatter plot of a\n2D gaussian distribution")
plt0 = plot(x, y, ma=0.3, lw=0, # ma = marker alpha
marker="*", # marker style
xtitle="variable A",
ytitle="variable B",
)
txt1 = Text2D("marker size proportional to sin(2x) ")
plt1 = plot(x, y, ma=0.3, lw=0,
marker="*", # marker style
ms=marker_sizes, # VARIABLE marker sizes
mc='red', # same fixed color for markers
)
txt2 = Text2D("marker size proportional to sin(2x)\nred level proportional to cos(2x)")
plt2 = plot(x, y, ma=0.3, lw=0,
marker=">", # marker style
ms=marker_sizes, # VARIABLE marker sizes
mc=marker_cols, # VARIABLE marker colors
)
show(plt0, txt0, at=0, N=3, size=(1800,500))
show(plt1, txt1, at=1)
show(plt2, txt2, at=2, interactive=True).close()
|
homeassistant/components/telegram_bot/polling.py | mtarjoianu/core | 30,023 | 12678281 | """Support for Telegram bot using polling."""
import logging
from telegram import Update
from telegram.error import NetworkError, RetryAfter, TelegramError, TimedOut
from telegram.ext import CallbackContext, TypeHandler, Updater
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
from . import BaseTelegramBotEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, bot, config):
"""Set up the Telegram polling platform."""
pollbot = PollBot(hass, bot, config)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, pollbot.start_polling)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, pollbot.stop_polling)
return True
def process_error(update: Update, context: CallbackContext):
"""Telegram bot error handler."""
try:
raise context.error
except (TimedOut, NetworkError, RetryAfter):
# Long polling timeout or connection problem. Nothing serious.
pass
except TelegramError:
_LOGGER.error('Update "%s" caused error: "%s"', update, context.error)
class PollBot(BaseTelegramBotEntity):
"""
Controls the Updater object that holds the bot and a dispatcher.
    The dispatcher is set up in __init__ to pass telegram updates to `self.handle_update`, which is provided by the base class.
"""
def __init__(self, hass, bot, config):
"""Create Updater and Dispatcher before calling super()."""
self.bot = bot
self.updater = Updater(bot=bot, workers=4)
self.dispatcher = self.updater.dispatcher
self.dispatcher.add_handler(TypeHandler(Update, self.handle_update))
self.dispatcher.add_error_handler(process_error)
super().__init__(hass, config)
def start_polling(self, event=None):
"""Start the polling task."""
_LOGGER.debug("Starting polling")
self.updater.start_polling()
def stop_polling(self, event=None):
"""Stop the polling task."""
_LOGGER.debug("Stopping polling")
self.updater.stop()
|
onnxruntime/python/tools/quantization/calibrate.py | fushwLZU/onnxruntime_test | 6,036 | 12678292 | #!/usr/bin/env python
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft, Intel Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
import numpy as np
import onnx
import onnxruntime
from onnx import helper, TensorProto, ModelProto
from onnx import onnx_pb as onnx_proto
from six import string_types
from enum import Enum
from .quant_utils import QuantType, smooth_distribution
from .registry import QLinearOpsRegistry
import abc
import itertools
class CalibrationMethod(Enum):
MinMax = 0
Entropy = 1
class CalibrationDataReader(metaclass=abc.ABCMeta):
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, 'get_next') and callable(subclass.get_next) or NotImplemented)
@abc.abstractmethod
    def get_next(self) -> dict:
        """generate the input data dict for ONNX Runtime InferenceSession run"""
raise NotImplementedError
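# A minimal sketch of a concrete reader; the input name 'input' and the batch
# source are assumptions for illustration. get_next() should return one feed
# dict per calibration sample and None once the data is exhausted.
#
#   class MyDataReader(CalibrationDataReader):
#       def __init__(self, calibration_batches):
#           self.enum_data = iter({'input': batch} for batch in calibration_batches)
#
#       def get_next(self) -> dict:
#           return next(self.enum_data, None)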
class CalibraterBase:
def __init__(self, model, op_types_to_calibrate=[], augmented_model_path='augmented_model.onnx'):
'''
:param model: ONNX model to calibrate. It can be a ModelProto or a model path
:param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
:param augmented_model_path: save augmented model to this path.
'''
if isinstance(model, string_types):
self.model = onnx.load(model)
elif isinstance(model, ModelProto):
self.model = model
else:
raise ValueError('model should be either model path or onnx.ModelProto.')
self.op_types_to_calibrate = op_types_to_calibrate
self.augmented_model_path = augmented_model_path
# augment graph
self.augment_model = None
self.augment_graph()
# Create InferenceSession
self.infer_session = None
self.execution_providers = ['CPUExecutionProvider']
self._create_inference_session()
def set_execution_providers(self, execution_providers=['CPUExecutionProvider']):
'''
        reset the execution providers used to run collect_data; this triggers re-creating the inference session.
'''
self.execution_providers = execution_providers
self._create_inference_session()
def _create_inference_session(self):
'''
create an OnnxRuntime InferenceSession.
'''
sess_options = onnxruntime.SessionOptions()
sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
self.infer_session = onnxruntime.InferenceSession(self.augmented_model_path,
sess_options=sess_options,
providers=self.execution_providers)
def select_tensors_to_calibrate(self, model):
'''
select all quantization_candidates op type nodes' input/output tensors.
returns:
tensors (set): set of tensor name.
value_infos (dict): tensor name to value info.
'''
value_infos = {vi.name: vi for vi in model.graph.value_info}
value_infos.update({ot.name: ot for ot in model.graph.output})
value_infos.update({it.name: it for it in model.graph.input})
initializer = set(init.name for init in model.graph.initializer)
tensors_to_calibrate = set()
tensor_type_to_calibrate = set([TensorProto.FLOAT, TensorProto.FLOAT16])
for node in model.graph.node:
if len(self.op_types_to_calibrate) == 0 or node.op_type in self.op_types_to_calibrate:
for tensor_name in itertools.chain(node.input, node.output):
if tensor_name in value_infos.keys():
vi = value_infos[tensor_name]
if vi.type.HasField('tensor_type') and (
vi.type.tensor_type.elem_type in tensor_type_to_calibrate) and (
tensor_name not in initializer):
tensors_to_calibrate.add(tensor_name)
return tensors_to_calibrate, value_infos
def get_augment_model(self):
'''
return: augmented onnx model
'''
return self.augment_model
def augment_graph(self):
'''
abstract method: augment the input model to prepare for collecting data. It will:
1. save augmented model to augmented_model_path.
2. set the self.augment_model
'''
raise NotImplementedError
def collect_data(self, data_reader: CalibrationDataReader):
'''
abstract method: collect the tensors that will be used for range computation. It can be called multiple times.
'''
raise NotImplementedError
def compute_range(self, data_reader: CalibrationDataReader):
'''
abstract method: compute the [min, max] range for the tensors to calibrate based on the collected data.
'''
raise NotImplementedError
class MinMaxCalibrater(CalibraterBase):
def __init__(self, model, op_types_to_calibrate=[], augmented_model_path='augmented_model.onnx'):
'''
:param model: ONNX model to calibrate. It can be a ModelProto or a model path
:param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
:param augmented_model_path: save augmented model to this path.
'''
super(MinMaxCalibrater, self).__init__(model, op_types_to_calibrate, augmented_model_path)
self.intermediate_outputs = []
self.calibrate_tensors_range = None
self.num_model_outputs = len(self.model.graph.output)
self.model_original_outputs = set(output.name for output in self.model.graph.output)
def augment_graph(self):
'''
Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in
model and ensures their outputs are stored as part of the graph output
:return: augmented ONNX model
'''
model = onnx_proto.ModelProto()
model.CopyFrom(self.model)
model = onnx.shape_inference.infer_shapes(model)
added_nodes = []
added_outputs = []
tensors, value_infos = self.select_tensors_to_calibrate(model)
for tensor in tensors:
# When doing ReduceMax/ReduceMin, ORT can't reduce on dim with value of 0 if 'keepdims' is false.
# To make the code simple, we always let keepdims to be 1.
keepdims = 1
# dim could be:
# [dim_param: "batch_size", dim_value: 256, dim_value: 36, dim_value: 64],
# [dim_value: 0],
# ...
# Please see the definition of TensorShapeProto https://github.com/onnx/onnx/blob/master/onnx/onnx.proto#L651
dim = value_infos[tensor].type.tensor_type.shape.dim
shape = (1,) if len(dim) == 1 else tuple(1 for i in range(len(dim)))
# Adding ReduceMin nodes
reduce_min_name = tensor + '_ReduceMin'
reduce_min_node = onnx.helper.make_node('ReduceMin', [tensor], [tensor + '_ReduceMin'], reduce_min_name, keepdims=keepdims)
added_nodes.append(reduce_min_node)
added_outputs.append(helper.make_tensor_value_info(reduce_min_node.output[0], TensorProto.FLOAT, shape))
# Adding ReduceMax nodes
reduce_max_name = tensor + '_ReduceMax'
reduce_max_node = onnx.helper.make_node('ReduceMax', [tensor], [tensor + '_ReduceMax'], reduce_max_name, keepdims=keepdims)
added_nodes.append(reduce_max_node)
added_outputs.append(helper.make_tensor_value_info(reduce_max_node.output[0], TensorProto.FLOAT, shape))
model.graph.node.extend(added_nodes)
model.graph.output.extend(added_outputs)
onnx.save(model, self.augmented_model_path)
self.augment_model = model
def clear_collected_data(self):
self.intermediate_outputs = []
def collect_data(self, data_reader: CalibrationDataReader):
while True:
inputs = data_reader.get_next()
if not inputs:
break
self.intermediate_outputs.append(self.infer_session.run(None, inputs))
if len(self.intermediate_outputs) == 0:
raise ValueError("No data is collected.")
self.compute_range()
self.clear_collected_data()
def merge_range(self, old_range, new_range):
if not old_range:
return new_range
for key, value in old_range.items():
min_value = min(value[0], new_range[key][0])
max_value = max(value[1], new_range[key][1])
new_range[key] = (min_value, max_value)
return new_range
def compute_range(self):
'''
        Compute the min-max range of each tensor to calibrate
        :return: dictionary mapping: {tensor name: (min_value, max_value) pair}
'''
if len(self.intermediate_outputs) == 0:
return self.calibrate_tensors_range
output_names = [self.infer_session.get_outputs()[i].name for i in range(len(self.intermediate_outputs[0]))]
output_dicts_list = [
dict(zip(output_names, intermediate_output)) for intermediate_output in self.intermediate_outputs
]
merged_output_dict = {}
for d in output_dicts_list:
for k, v in d.items():
merged_output_dict.setdefault(k, []).append(v)
added_output_names = output_names[self.num_model_outputs:]
calibrate_tensor_names = [
added_output_names[i].rpartition('_')[0] for i in range(0, len(added_output_names), 2)
] #output names
merged_added_output_dict = dict(
(i, merged_output_dict[i]) for i in merged_output_dict if i not in self.model_original_outputs)
pairs = []
for i in range(0, len(added_output_names), 2):
min_value = 0
max_value = 0
min_value_array = min(merged_added_output_dict[added_output_names[i]])
max_value_array = max(merged_added_output_dict[added_output_names[i + 1]])
if type(min_value_array) == int or min_value_array.size > 0:
min_value = float(min_value_array)
if type(max_value_array) == int or max_value_array.size > 0:
max_value = float(max_value_array)
pairs.append(tuple([min_value, max_value]))
new_calibrate_tensors_range = dict(zip(calibrate_tensor_names, pairs))
if self.calibrate_tensors_range:
self.calibrate_tensors_range = self.merge_range(self.calibrate_tensors_range, new_calibrate_tensors_range)
else:
self.calibrate_tensors_range = new_calibrate_tensors_range
return self.calibrate_tensors_range
class EntropyCalibrater(CalibraterBase):
def __init__(self, model, op_types_to_calibrate=[], augmented_model_path='augmented_model.onnx'):
'''
:param model: ONNX model to calibrate. It can be a ModelProto or a model path
:param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
:param augmented_model_path: save augmented model to this path.
'''
super(EntropyCalibrater, self).__init__(model, op_types_to_calibrate, augmented_model_path)
self.intermediate_outputs = []
self.calibrate_tensors_range = None
self.num_model_outputs = len(self.model.graph.output)
self.model_original_outputs = set(output.name for output in self.model.graph.output)
self.collector = None
def augment_graph(self):
'''
make all quantization_candidates op type nodes as part of the graph output.
:return: augmented ONNX model
'''
model = onnx_proto.ModelProto()
model.CopyFrom(self.model)
model = onnx.shape_inference.infer_shapes(model)
added_nodes = []
added_outputs = []
tensors, value_infos = self.select_tensors_to_calibrate(model)
for tensor in tensors:
added_outputs.append(value_infos[tensor])
model.graph.node.extend(added_nodes)
model.graph.output.extend(added_outputs)
onnx.save(model, self.augmented_model_path)
self.augment_model = model
def clear_collected_data(self):
self.intermediate_outputs = []
def collect_data(self, data_reader: CalibrationDataReader):
'''
        Entropy calibration: collect operators' output tensors and generate a histogram for each tensor.
'''
while True:
inputs = data_reader.get_next()
if not inputs:
break
self.intermediate_outputs.append(self.infer_session.run(None, inputs))
if len(self.intermediate_outputs) == 0:
raise ValueError("No data is collected.")
output_names = [self.infer_session.get_outputs()[i].name for i in range(len(self.intermediate_outputs[0]))]
output_dicts_list = [
dict(zip(output_names, intermediate_output)) for intermediate_output in self.intermediate_outputs
]
merged_dict = {}
for d in output_dicts_list:
for k, v in d.items():
merged_dict.setdefault(k, []).append(v)
clean_merged_dict = dict((i, merged_dict[i]) for i in merged_dict if i not in self.model_original_outputs)
if not self.collector:
self.collector = HistogramCollector()
self.collector.collect(clean_merged_dict)
self.clear_collected_data()
def compute_range(self):
'''
        Compute the optimal threshold (calibration range) of each tensor using entropy (KL-divergence) calibration
        :return: dictionary mapping: {tensor name: (low, high) threshold pair}
'''
if not self.collector:
raise ValueError("No collector created and can't generate calibration data.")
return self.collector.get_optimal_collection_result()
class CalibrationDataCollector(metaclass=abc.ABCMeta):
"""
Base class for collecting data for calibration-based quantization.
"""
@abc.abstractmethod
def collect(self, name_to_arr):
"""
Generate informative data based on given data.
name_to_arr : dict
tensor name to NDArray data
"""
raise NotImplementedError
@abc.abstractmethod
def get_optimal_collection_result(self):
"""
Get the optimal result among collection data.
"""
raise NotImplementedError
class HistogramCollector(CalibrationDataCollector):
"""
    Collects histogram data as a dict for each tensor, targeting entropy calibration.
ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py
"""
def __init__(self, num_quantized_bins=128):
self.histogram_dict = {}
self.num_quantized_bins= num_quantized_bins
def get_histogram_dict(self):
return self.histogram_dict
def collect(self, name_to_arr):
for tensor, data_arr in name_to_arr.items():
data_arr = np.asarray(data_arr)
data_arr = data_arr.flatten()
if data_arr.size > 0:
min_value = np.min(data_arr)
max_value = np.max(data_arr)
else:
min_value = 0
max_value = 0
threshold = max(abs(min_value), abs(max_value))
if tensor in self.histogram_dict:
old_histogram = self.histogram_dict[tensor]
self.histogram_dict[tensor] = self.merge_histogram(old_histogram, data_arr, min_value, max_value, threshold)
else:
# hist, hist_edges = np.histogram(data_arr, self.num_quantized_bins, range=(min_value, max_value))
hist, hist_edges = np.histogram(data_arr, self.num_quantized_bins, range=(-threshold, threshold))
self.histogram_dict[tensor] = (hist, hist_edges, min_value, max_value, threshold)
def merge_histogram(self, old_histogram, data_arr, new_min, new_max, new_threshold):
(old_hist, old_hist_edges, old_min, old_max, old_threshold) = old_histogram
if new_threshold <= old_threshold:
new_hist, _ = np.histogram(data_arr, len(old_hist), range=(-old_threshold, old_threshold))
return (new_hist + old_hist, old_hist_edges, min(old_min, new_min), max(old_max, new_max), old_threshold)
else:
if old_threshold == 0:
                hist, hist_edges = np.histogram(data_arr, len(old_hist), range=(-new_threshold, new_threshold))  # reuse the existing bin count; new_num_bins is only defined in the other branch
                hist[len(hist) // 2] += sum(old_hist)  # carry the count of old (all-zero) samples into the center bin
else:
old_num_bins = len(old_hist)
old_stride = 2 * old_threshold / old_num_bins
half_increased_bins = int((new_threshold - old_threshold) // old_stride + 1)
new_num_bins = old_num_bins + 2 * half_increased_bins
new_threshold = half_increased_bins * old_stride + old_threshold
hist, hist_edges = np.histogram(data_arr, new_num_bins, range=(-new_threshold, new_threshold))
hist[half_increased_bins:new_num_bins-half_increased_bins] += old_hist
return (hist, hist_edges, min(old_min, new_min), max(old_max, new_max), new_threshold)
def get_optimal_collection_result(self):
histogram_dict = self.histogram_dict
num_quantized_bins = self.num_quantized_bins
thresholds_dict = {} # per tensor thresholds
for tensor, histogram in histogram_dict.items():
optimal_threshold = self.get_optimal_threshold(histogram, num_quantized_bins)
thresholds_dict[tensor] = optimal_threshold
return thresholds_dict
def get_optimal_threshold(self, histogram, num_quantized_bins):
from scipy.stats import entropy
import copy
hist, hist_edges, _, _, _ = histogram
num_bins = hist.size
zero_bin_index = num_bins // 2
num_half_quantized_bin = num_quantized_bins // 2
kl_divergence = np.zeros(zero_bin_index - num_half_quantized_bin + 1)
thresholds = [(0, 0) for i in range(kl_divergence.size)]
for i in range(num_half_quantized_bin, zero_bin_index + 1, 1):
start_index = zero_bin_index - i
end_index = zero_bin_index + i + 1 if (zero_bin_index + i + 1) <= num_bins else num_bins
thresholds[i - num_half_quantized_bin] = (float(hist_edges[start_index]), float(hist_edges[end_index]))
sliced_distribution = copy.deepcopy(hist[start_index:end_index])
# reference distribution p
p = sliced_distribution.copy() # a copy of np array
left_outliers_count = sum(hist[:start_index])
right_outliers_count = sum(hist[end_index:])
p[0] += left_outliers_count
p[-1] += right_outliers_count
            # nonzeros[i] indicates whether p[i] is non-zero
nonzeros = (p != 0).astype(np.int64)
# quantize p.size bins into quantized bins (default 128 bins)
quantized_bins = np.zeros(num_quantized_bins, dtype=np.int64)
num_merged_bins = sliced_distribution.size // num_quantized_bins
# merge bins into quantized bins
for index in range(num_quantized_bins):
start = index * num_merged_bins
end = start + num_merged_bins
quantized_bins[index] = sum(sliced_distribution[start:end])
quantized_bins[-1] += sum(sliced_distribution[num_quantized_bins * num_merged_bins:])
# in order to compare p and q, we need to make length of q equals to length of p
# expand quantized bins into p.size bins
q = np.zeros(p.size, dtype=np.int64)
for index in range(num_quantized_bins):
start = index * num_merged_bins
end = start + num_merged_bins
norm = sum(nonzeros[start:end])
if norm != 0:
q[start:end] = float(quantized_bins[index]) / float(norm)
p = smooth_distribution(p)
q = smooth_distribution(q)
if isinstance(q, np.ndarray):
kl_divergence[i - num_half_quantized_bin] = entropy(p, q)
else:
kl_divergence[i - num_half_quantized_bin] = float('inf')
min_kl_divergence_idx = np.argmin(kl_divergence)
optimal_threshold = thresholds[min_kl_divergence_idx]
return optimal_threshold
def create_calibrator(model,
op_types_to_calibrate=[],
augmented_model_path='augmented_model.onnx',
calibrate_method=CalibrationMethod.MinMax):
if calibrate_method == CalibrationMethod.MinMax:
return MinMaxCalibrater(model, op_types_to_calibrate, augmented_model_path)
elif calibrate_method == CalibrationMethod.Entropy:
return EntropyCalibrater(model, op_types_to_calibrate, augmented_model_path)
raise ValueError('Unsupported calibration method {}'.format(calibrate_method))
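# Illustrative usage sketch (hypothetical, not from the upstream file): a minimal demo of
# HistogramCollector. The tensor name and shapes are made up; note that calling
# get_optimal_collection_result() additionally relies on smooth_distribution() being
# defined elsewhere in this module and on scipy being installed.
if __name__ == '__main__':
    import numpy as np
    collector = HistogramCollector(num_quantized_bins=128)
    # feed two synthetic calibration batches for a made-up tensor name
    collector.collect({'conv1_output': np.random.randn(1, 8, 4, 4).astype(np.float32)})
    collector.collect({'conv1_output': 2.0 * np.random.randn(1, 8, 4, 4).astype(np.float32)})
    hist, hist_edges, min_v, max_v, threshold = collector.get_histogram_dict()['conv1_output']
    print('bins:', hist.size, 'range:', (float(hist_edges[0]), float(hist_edges[-1])),
          'threshold:', float(threshold))
    # thresholds = collector.get_optimal_collection_result()  # needs smooth_distribution/scipy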
|
arelle/ModelRssItem.py | hamscher/Arelle | 292 | 12678296 | <gh_stars>100-1000
'''
Created on Nov 11, 2010
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
import os
from arelle import XmlUtil
from arelle.ModelObject import ModelObject
newRssWatchOptions = {
"feedSource": "",
"feedSourceUri": None,
"matchTextExpr": "",
"formulaFileUri": "",
"logFileUri": "",
"emailAddress": "",
"validateXbrlRules": False,
"validateDisclosureSystemRules": False,
"validateCalcLinkbase": False,
"validateFormulaAssertions": False,
"alertMatchedFactText": False,
"alertAssertionUnsuccessful": False,
"alertValiditionError": False,
"latestPubDate": None,
}
# Note: if adding to this list keep DialogRssWatch in sync
class ModelRssItem(ModelObject):
def init(self, modelDocument):
super(ModelRssItem, self).init(modelDocument)
try:
if (self.modelXbrl.modelManager.rssWatchOptions.latestPubDate and
self.pubDate <= self.modelXbrl.modelManager.rssWatchOptions.latestPubDate):
self.status = _("tested")
else:
self.status = _("not tested")
except AttributeError:
self.status = _("not tested")
self.results = None
self.assertions = None
# find edgar namespace
self.edgr = None
for elt in self.iterdescendants("{*}xbrlFiling"):
self.edgr = elt.qname.namespaceURI
break
if self.edgr:
edgrPrefix = "{" + self.edgr + "}"
else:
edgrPrefix = ""
self.edgrDescription = edgrPrefix + "description"
self.edgrFile = edgrPrefix + "file"
self.edgrInlineXBRL = edgrPrefix + "inlineXBRL"
self.edgrSequence = edgrPrefix + "sequence"
self.edgrType = edgrPrefix + "type"
self.edgrUrl = edgrPrefix + "url"
@property
def cikNumber(self):
return XmlUtil.text(XmlUtil.descendant(self, self.edgr, "cikNumber"))
@property
def accessionNumber(self):
return XmlUtil.text(XmlUtil.descendant(self, self.edgr, "accessionNumber"))
@property
def fileNumber(self):
return XmlUtil.text(XmlUtil.descendant(self, self.edgr, "fileNumber"))
@property
def companyName(self):
return XmlUtil.text(XmlUtil.descendant(self, self.edgr, "companyName"))
@property
def formType(self):
return XmlUtil.text(XmlUtil.descendant(self, self.edgr, "formType"))
@property
def pubDate(self):
try:
return self._pubDate
except AttributeError:
from arelle.UrlUtil import parseRfcDatetime
self._pubDate = parseRfcDatetime(XmlUtil.text(XmlUtil.descendant(self, None, "pubDate")))
return self._pubDate
@property
def filingDate(self):
try:
return self._filingDate
except AttributeError:
import datetime
self._filingDate = None
date = XmlUtil.text(XmlUtil.descendant(self, self.edgr, "filingDate"))
d = date.split("/")
if d and len(d) == 3:
self._filingDate = datetime.date(_INT(d[2]),_INT(d[0]),_INT(d[1]))
return self._filingDate
@property
def period(self):
per = XmlUtil.text(XmlUtil.descendant(self, self.edgr, "period"))
if per and len(per) == 8:
return "{0}-{1}-{2}".format(per[0:4],per[4:6],per[6:8])
return None
@property
def assignedSic(self):
return XmlUtil.text(XmlUtil.descendant(self, self.edgr, "assignedSic"))
@property
def acceptanceDatetime(self):
try:
return self._acceptanceDatetime
except AttributeError:
import datetime
self._acceptanceDatetime = None
date = XmlUtil.text(XmlUtil.descendant(self, self.edgr, "acceptanceDatetime"))
if date and len(date) == 14:
self._acceptanceDatetime = datetime.datetime(_INT(date[0:4]),_INT(date[4:6]),_INT(date[6:8]),_INT(date[8:10]),_INT(date[10:12]),_INT(date[12:14]))
return self._acceptanceDatetime
@property
def fiscalYearEnd(self):
yrEnd = XmlUtil.text(XmlUtil.descendant(self, self.edgr, "fiscalYearEnd"))
if yrEnd and len(yrEnd) == 4:
return "{0}-{1}".format(yrEnd[0:2],yrEnd[2:4])
return None
@property
def htmlUrl(self): # main filing document
htmlDocElt = XmlUtil.descendant(self, self.edgr, "xbrlFile", attrName=self.edgrSequence, attrValue="1")
if htmlDocElt is not None:
return htmlDocElt.get(self.edgrUrl)
return None
@property
def url(self):
try:
return self._url
except AttributeError:
self._url = None
for instDocElt in XmlUtil.descendants(self, self.edgr, "xbrlFile"):
if instDocElt.get(self.edgrType).endswith(".INS") or instDocElt.get(self.edgrInlineXBRL) == "true":
self._url = instDocElt.get(self.edgrUrl)
break
return self._url
@property
def enclosureUrl(self):
return XmlUtil.childAttr(self, None, "enclosure", "url")
@property
def zippedUrl(self):
enclosure = XmlUtil.childAttr(self, None, "enclosure", "url")
if enclosure:
# modify url to use zip file
_path, sep, file = (self.url or "").rpartition("/")
# return path + sep + self.accessionNumber + "-xbrl.zip" + sep + file
return enclosure + sep + file
else: # no zipped enclosure, just use unzipped file
return self.url
@property
def htmURLs(self):
try:
return self._htmURLs
except AttributeError:
self._htmURLs = [
(instDocElt.get(self.edgrDescription),instDocElt.get(self.edgrUrl))
for instDocElt in XmlUtil.descendants(self, self.edgr, "xbrlFile")
if instDocElt.get(self.edgrFile).endswith(".htm")]
return self._htmURLs
@property
def primaryDocumentURL(self):
try:
return self._primaryDocumentURL
except AttributeError:
formType = self.formType
self._primaryDocumentURL = None
for instDocElt in XmlUtil.descendants(self, self.edgr, "xbrlFile"):
if instDocElt.get(self.edgrType) == formType:
self._primaryDocumentURL = instDocElt.get(self.edgrUrl)
break
return self._primaryDocumentURL
def setResults(self, modelXbrl):
self.results = []
self.assertionUnsuccessful = False
# put error codes first, sorted, then assertion result (dict's)
self.status = "pass"
for error in modelXbrl.errors:
if isinstance(error,dict): # assertion results
self.assertions = error
for countSuccessful, countNotsuccessful in error.items():
if countNotsuccessful > 0:
self.assertionUnsuccessful = True
self.status = "unsuccessful"
else: # error code results
self.results.append(error)
self.status = "fail" # error code
self.results.sort()
@property
def propertyView(self):
return (("CIK", self.cikNumber),
("company", self.companyName),
("published", self.pubDate),
("form type", self.formType),
("filing date", self.filingDate),
("period", self.period),
("year end", self.fiscalYearEnd),
("status", self.status),
("instance", os.path.basename(self.url)),
)
def __repr__(self):
return ("rssItem[{0}]{1})".format(self.objectId(),self.propertyView))
|
neptune/internal/hardware/resources/gpu_card_indices_provider.py | Raalsky/neptune-client | 254 | 12678310 | #
# Copyright (c) 2019, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
class GPUCardIndicesProvider(object):
def __init__(self, cuda_visible_devices, gpu_card_count):
self.__cuda_visible_devices = cuda_visible_devices
self.__gpu_card_count = gpu_card_count
self.__cuda_visible_devices_regex = r"^-?\d+(,-?\d+)*$"
def get(self):
if self.__is_cuda_visible_devices_correct():
return self.__gpu_card_indices_from_cuda_visible_devices()
else:
return list(range(self.__gpu_card_count))
def __is_cuda_visible_devices_correct(self):
return self.__cuda_visible_devices is not None and re.match(
self.__cuda_visible_devices_regex, self.__cuda_visible_devices
)
def __gpu_card_indices_from_cuda_visible_devices(self):
correct_indices = []
# According to CUDA Toolkit specification.
# https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars
for gpu_index_str in self.__cuda_visible_devices.split(","):
gpu_index = int(gpu_index_str)
if 0 <= gpu_index < self.__gpu_card_count:
correct_indices.append(gpu_index)
else:
break
return list(set(correct_indices))
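# Illustrative usage sketch (hypothetical values, not part of the upstream module):
# shows how CUDA_VISIBLE_DEVICES filtering behaves.
if __name__ == "__main__":
    # "3" does not exist on a 2-card machine, so parsing stops there and only the
    # valid indices seen before it are kept (duplicates removed)
    print(GPUCardIndicesProvider(cuda_visible_devices="1,0,3", gpu_card_count=2).get())
    # an unset or malformed variable falls back to all cards: [0, 1]
    print(GPUCardIndicesProvider(cuda_visible_devices=None, gpu_card_count=2).get())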
|
tests/pre_commit_hook_test.py | paulo-sampaio/detect-secrets | 2,212 | 12678351 | <reponame>paulo-sampaio/detect-secrets
import json
import tempfile
from contextlib import contextmanager
from functools import partial
from typing import List
from unittest import mock
import pytest
from detect_secrets.core import baseline
from detect_secrets.core.secrets_collection import SecretsCollection
from detect_secrets.pre_commit_hook import main
from detect_secrets.settings import transient_settings
from testing.mocks import disable_gibberish_filter
@pytest.fixture(autouse=True)
def configure_settings():
with transient_settings({
'plugins_used': [{'name': 'Base64HighEntropyString', 'limit': 4.5}],
}):
yield
def test_file_with_secrets():
assert_commit_blocked(['test_data/files/file_with_secrets.py'])
def test_file_with_no_secrets():
assert_commit_succeeds(['test_data/files/file_with_no_secrets.py'])
def test_quit_early_if_bad_baseline():
with pytest.raises(SystemExit):
main(['test_data/files/file_with_secrets.py', '--baseline', 'does-not-exist'])
def test_quit_if_baseline_is_changed_but_not_staged():
with mock.patch(
'detect_secrets.pre_commit_hook.raise_exception_if_baseline_file_is_unstaged',
) as m:
m.side_effect = ValueError
assert_commit_blocked([
'test_data/files/file_with_no_secrets.py',
'--baseline',
'.secrets.baseline',
])
def test_baseline_filters_out_known_secrets():
secrets = SecretsCollection()
secrets.scan_file('test_data/each_secret.py')
assert secrets
with disable_gibberish_filter():
with tempfile.NamedTemporaryFile() as f:
baseline.save_to_file(secrets, f.name)
f.seek(0)
# This succeeds, because all the secrets are known.
assert_commit_succeeds([
'test_data/each_secret.py',
'--baseline',
f.name,
])
# Remove one arbitrary secret, so that it won't be the full set.
secrets.data['test_data/each_secret.py'].pop()
with tempfile.NamedTemporaryFile() as f:
baseline.save_to_file(secrets, f.name)
f.seek(0)
# Test that it isn't the case that a baseline is provided, and everything passes.
assert_commit_blocked([
'test_data/each_secret.py',
'--baseline',
f.name,
])
class TestModifiesBaselineFromVersionChange:
FILENAME = 'test_data/files/file_with_secrets.py'
def test_success(self):
with self.get_baseline_file() as f:
assert_commit_blocked_with_diff_exit_code([
# We use file_with_no_secrets so that we can be certain that the commit is blocked
# due to the version change only.
'test_data/files/file_with_no_secrets.py',
'--baseline',
f.name,
])
def test_maintains_labelled_data(self):
def label_secret(secrets):
list(secrets[self.FILENAME])[0].is_secret = True
return baseline.format_for_output(secrets)
with self.get_baseline_file(formatter=label_secret) as f:
assert_commit_blocked_with_diff_exit_code([
'test_data/files/file_with_no_secrets.py',
'--baseline',
f.name,
])
f.seek(0)
data = json.loads(f.read())
assert data['results'][self.FILENAME][0]['is_secret']
def test_maintains_slim_mode(self):
with self.get_baseline_file(
formatter=partial(baseline.format_for_output, is_slim_mode=True),
) as f:
assert_commit_blocked_with_diff_exit_code([
'test_data/files/file_with_no_secrets.py',
'--baseline',
f.name,
])
f.seek(0)
assert b'line_number' not in f.read()
@contextmanager
def get_baseline_file(self, formatter=baseline.format_for_output):
secrets = SecretsCollection()
secrets.scan_file(self.FILENAME)
with tempfile.NamedTemporaryFile() as f:
with mock.patch('detect_secrets.core.baseline.VERSION', '0.0.1'):
data = formatter(secrets)
# Simulating old version
data['plugins_used'][0]['base64_limit'] = data['plugins_used'][0].pop('limit')
baseline.save_to_file(data, f.name)
yield f
class TestLineNumberChanges:
FILENAME = 'test_data/files/file_with_secrets.py'
def test_modifies_baseline(self, modified_baseline):
with tempfile.NamedTemporaryFile() as f:
baseline.save_to_file(modified_baseline, f.name)
assert_commit_blocked_with_diff_exit_code([
self.FILENAME,
'--baseline',
f.name,
])
def test_does_not_modify_slim_baseline(self, modified_baseline):
with tempfile.NamedTemporaryFile() as f:
baseline.save_to_file(
baseline.format_for_output(modified_baseline, is_slim_mode=True),
f.name,
)
assert_commit_succeeds([
self.FILENAME,
'--baseline',
f.name,
])
@pytest.fixture
def modified_baseline(self):
secrets = SecretsCollection()
secrets.scan_file(self.FILENAME)
for _, secret in secrets:
secret.line_number += 1
yield secrets
def assert_commit_succeeds(command: List[str]):
assert main(command) == 0
def assert_commit_blocked(command: List[str]):
assert main(command) == 1
def assert_commit_blocked_with_diff_exit_code(command: List[str]):
assert main(command) == 3
|
pyNastran/dev/bdf_vectorized/cards/elements/solid/chexa20.py | ACea15/pyNastran | 293 | 12678371 | <reponame>ACea15/pyNastran<filename>pyNastran/dev/bdf_vectorized/cards/elements/solid/chexa20.py
from numpy import arange, searchsorted, array, eye, ones
from numpy.linalg import norm # type: ignore
from pyNastran.bdf.field_writer_8 import print_card_8
from pyNastran.bdf.bdf_interface.assign_type import integer, integer_or_blank
from pyNastran.dev.bdf_vectorized.cards.elements.solid.chexa8 import quad_area_centroid, volume8
from pyNastran.dev.bdf_vectorized.cards.elements.solid.solid_element import SolidElement
class CHEXA20(SolidElement):
type = 'CHEXA20'
nnodes = 20
def __init__(self, model):
"""
Defines the CHEXA object.
Parameters
----------
model : BDF
the BDF object
"""
SolidElement.__init__(self, model)
def add_card(self, card, comment=''):
i = self.i
eid = integer(card, 1, 'element_id')
if comment:
self.set_comment(eid, comment)
#: Element ID
self.element_id[i] = eid
#: Property ID
self.property_id[i] = integer(card, 2, 'property_id')
#: Node IDs
nids = array([
integer(card, 3, 'node_id_1'), integer(card, 4, 'node_id_2'),
integer(card, 5, 'node_id_3'), integer(card, 6, 'node_id_4'),
integer(card, 7, 'node_id_5'), integer(card, 8, 'node_id_6'),
integer(card, 9, 'node_id_7'), integer(card, 10, 'node_id_8'),
integer_or_blank(card, 11, 'node_id_9', 0),
integer_or_blank(card, 12, 'node_id_10', 0),
integer_or_blank(card, 13, 'node_id_11', 0),
integer_or_blank(card, 14, 'node_id_12', 0),
integer_or_blank(card, 15, 'node_id_13', 0),
integer_or_blank(card, 16, 'node_id_14', 0),
integer_or_blank(card, 17, 'node_id_15', 0),
integer_or_blank(card, 18, 'node_id_16', 0),
integer_or_blank(card, 19, 'node_id_17', 0),
integer_or_blank(card, 20, 'node_id_18', 0),
integer_or_blank(card, 21, 'node_id_19', 0),
integer_or_blank(card, 22, 'node_id_20', 0)
], dtype='int32')
self.node_ids[i, :] = nids
assert len(card) <= 23, 'len(CHEXA20 card) = %i\ncard=%s' % (len(card), card)
self.i += 1
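    # Illustrative (hypothetical) small-field bulk data entry parsed by add_card above,
    # a CHEXA20 with eid=100, pid=200, corner nodes 1-8 and mid-side nodes 9-20:
    #   CHEXA   100     200     1       2       3       4       5       6
    #   +       7       8       9       10      11      12      13      14
    #   +       15      16      17      18      19      20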
def build(self):
if self.n:
i = self.element_id.argsort()
self.element_id = self.element_id[i]
self.property_id = self.property_id[i]
self.node_ids = self.node_ids[i, :]
self._cards = []
else:
self.element_id = array([], dtype='int32')
self.property_id = array([], dtype='int32')
def update(self, maps):
"""
maps = {
'node_id' : nid_map,
'property' : pid_map,
}
"""
if self.n:
eid_map = maps['element']
nid_map = maps['node']
pid_map = maps['property']
for i, (eid, pid, nids) in enumerate(zip(self.element_id, self.property_id, self.node_ids)):
print(self.print_card(i))
self.element_id[i] = eid_map[eid]
self.property_id[i] = pid_map[pid]
self.node_ids[i, 0] = nid_map[nids[0]]
self.node_ids[i, 1] = nid_map[nids[1]]
self.node_ids[i, 2] = nid_map[nids[2]]
self.node_ids[i, 3] = nid_map[nids[3]]
self.node_ids[i, 4] = nid_map[nids[4]]
self.node_ids[i, 5] = nid_map[nids[5]]
self.node_ids[i, 6] = nid_map[nids[6]]
self.node_ids[i, 7] = nid_map[nids[7]]
self.node_ids[i, 8] = nid_map[nids[8]]
self.node_ids[i, 9] = nid_map[nids[9]]
self.node_ids[i, 10] = nid_map[nids[10]]
self.node_ids[i, 11] = nid_map[nids[11]]
self.node_ids[i, 12] = nid_map[nids[12]]
self.node_ids[i, 13] = nid_map[nids[13]]
self.node_ids[i, 14] = nid_map[nids[14]]
self.node_ids[i, 15] = nid_map[nids[15]]
self.node_ids[i, 16] = nid_map[nids[16]]
self.node_ids[i, 17] = nid_map[nids[17]]
self.node_ids[i, 18] = nid_map[nids[18]]
self.node_ids[i, 19] = nid_map[nids[19]]
def get_mass_matrix(self, i, model, positions, index0s, is_lumped=True):
nnodes = 8
ndof = 3 * nnodes
pid = self.property_id[i]
rho = self.model.elements.properties_solid.psolid.get_density_by_property_id(pid)[0]
n0, n1, n2, n3, n4, n5, n6, n7 = self.node_ids[i, :]
V = volume8(positions[self.node_ids[i, 0]],
positions[self.node_ids[i, 1]],
positions[self.node_ids[i, 2]],
positions[self.node_ids[i, 3]],
positions[self.node_ids[i, 4]],
positions[self.node_ids[i, 5]],
positions[self.node_ids[i, 6]],
positions[self.node_ids[i, 7]],
)
mass = rho * V
if is_lumped:
mi = mass / 4.
nnodes = 4
M = eye(ndof, dtype='float32')
else:
mi = mass / 20.
M = ones((ndof, ndof), dtype='float32')
for i in range(nnodes):
j = i * 3
M[j:j+3, j:j+3] = 2.
M *= mi
dofs, nijv = self.get_dofs_nijv(index0s, n0, n1, n2, n3, n4, n5, n6, n7)
return M, dofs, nijv
    def get_stiffness_matrix(self, i, model, positions, index0s):
        # stiffness matrix assembly is not implemented for CHEXA20;
        # the original stub returned undefined names (K, dofs, nijv)
        raise NotImplementedError('CHEXA20.get_stiffness_matrix is not implemented')
def get_dofs_nijv(self, index0s, n0, n1, n2, n3, n4, n5, n6, n7):
i0 = index0s[n0]
i1 = index0s[n1]
i2 = index0s[n2]
i3 = index0s[n3]
i4 = index0s[n4]
i5 = index0s[n5]
i6 = index0s[n6]
i7 = index0s[n7]
dofs = array([
i0, i0+1, i0+2,
i1, i1+1, i1+2,
i2, i2+1, i2+2,
i3, i3+1, i3+2,
i4, i4+1, i4+2,
i5, i5+1, i5+2,
i6, i6+1, i6+2,
i7, i7+1, i7+2,
], 'int32')
nijv = [
# translation
(n0, 1), (n0, 2), (n0, 3),
(n1, 1), (n1, 2), (n1, 3),
(n2, 1), (n2, 2), (n2, 3),
(n3, 1), (n3, 2), (n3, 3),
(n4, 1), (n4, 2), (n4, 3),
(n5, 1), (n5, 2), (n5, 3),
(n6, 1), (n6, 2), (n6, 3),
(n7, 1), (n7, 2), (n7, 3),
]
return dofs, nijv
def _verify(self, xref=True):
eid = self.eid
pid = self.Pid()
nids = self.node_ids
assert isinstance(eid, int)
assert isinstance(pid, int)
for i, nid in enumerate(nids):
assert isinstance(nid, int), 'nid%i is not an integer; nid=%s' %(i, nid)
if xref:
c = self.centroid()
v = self.volume()
assert isinstance(v, float)
for i in range(3):
assert isinstance(c[i], float)
def get_node_indicies(self, i=None):
if i is None:
i1 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 0])
i2 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 1])
i3 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 2])
i4 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 3])
i5 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 4])
i6 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 5])
i7 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 6])
i8 = self.model.grid.get_node_index_by_node_id(self.node_ids[:, 7])
else:
i1 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 0])
i2 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 1])
i3 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 2])
i4 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 3])
i5 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 4])
i6 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 5])
i7 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 6])
i8 = self.model.grid.get_node_index_by_node_id(self.node_ids[i, 7])
return i1, i2, i3, i4, i5, i6, i7, i8
def _get_node_locations_by_index(self, i, xyz_cid0):
"""
Parameters
----------
i : (nnodes, ) int ndarray; None -> all
node IDs
xyz_cid0 : (nnodes, 3) float ndarray; default=None -> calculate
the GRIDs in CORD2R=0
"""
grid = self.model.grid
get_node_index_by_node_id = self.model.grid.get_node_index_by_node_id
node_ids = self.node_ids
msg = ', which is required by %s' % self.type
i1, i2, i3, i4, i5, i6, i7, i8 = self.get_node_indicies(i)
n1 = xyz_cid0[i1, :]
n2 = xyz_cid0[i2, :]
n3 = xyz_cid0[i3, :]
n4 = xyz_cid0[i4, :]
n5 = xyz_cid0[i5, :]
n6 = xyz_cid0[i6, :]
n7 = xyz_cid0[i7, :]
n8 = xyz_cid0[i8, :]
return n1, n2, n3, n4, n5, n6, n7, n8
def get_volume_by_element_id(self, element_id=None, xyz_cid0=None, total=False):
"""
Gets the volume for one or more elements.
Parameters
----------
element_id : (nelements, ) int ndarray; default=None
the elements to consider
xyz_cid0 : (nnodes, 3) float ndarray; default=None -> calculate
the GRIDs in CORD2R=0
total : bool; default=False
should the volume be summed
Notes
-----
Volume for a CHEXA is the average area of two opposing faces
times the length between the centroids of those points
"""
nodes = self._get_node_locations_by_element_id(element_id, xyz_cid0)
n1, n2, n3, n4, n5, n6, n7, n8 = nodes
volume = volume8(n1, n2, n3, n4, n5, n6, n7, n8)
if total:
volume = abs(volume).sum()
else:
volume = abs(volume)
return volume
def get_centroid_volume(self, element_id=None, xyz_cid0=None, total=False):
"""
Gets the centroid and volume for one or more elements.
Parameters
----------
element_id : (nelements, ) int ndarray; default=None
the elements to consider
xyz_cid0 : (nnodes, 3) float ndarray; default=None -> calculate
the GRIDs in CORD2R=0
total : bool; default=False
should the volume be summed; centroid be averaged
.. seealso:: CHEXA20.get_volume_by_element_id() and CHEXA20.get_centroid_by_element_id() for more information.
"""
nodes = self._get_node_locations_by_element_id(element_id, xyz_cid0)
n1, n2, n3, n4, n5, n6, n7, n8 = nodes
(A1, c1) = quad_area_centroid(n1, n2, n3, n4)
(A2, c2) = quad_area_centroid(n5, n6, n7, n8)
centroid = (c1 * A1 + c2 * A2) / (A1 + A2)
volume = (A1 + A2) / 2. * norm(c1 - c2, axis=1)
if total:
centroid = centroid.mean()
volume = abs(volume).sum()
else:
volume = abs(volume)
assert volume.min() > 0.0, 'volume.min() = %f' % volume.min()
return centroid, volume
def get_centroid_by_element_id(self, element_id=None, xyz_cid0=None, total=False):
"""
Gets the centroid for one or more elements.
Parameters
----------
element_id : (nelements, ) int ndarray; default=None
the elements to consider
xyz_cid0 : (nnodes, 3) float ndarray; default=None -> calculate
the GRIDs in CORD2R=0
total : bool; default=False
should the centroid be averaged
"""
nodes = self._get_node_locations_by_element_id(element_id, xyz_cid0)
n1, n2, n3, n4, n5, n6, n7, n8 = nodes
(A1, c1) = quad_area_centroid(n1, n2, n3, n4)
(A2, c2) = quad_area_centroid(n5, n6, n7, n8)
centroid = (c1 * A1 + c2 * A2) / (A1 + A2)
if total:
centroid = centroid.mean(axis=0)
return centroid
def get_face_nodes(self, nid, nid_opposite):
raise NotImplementedError()
#nids = self.node_ids[:4]
#indx = nids.index(nid_opposite)
#nids.pop(indx)
#return nids
def write_card(self, bdf_file, size=8, element_id=None):
if self.n:
if element_id is None:
i = arange(self.n)
else:
i = searchsorted(self.element_id, element_id)
for (eid, pid, n) in zip(self.element_id[i], self.property_id[i], self.node_ids[i]):
if eid in self._comments:
bdf_file.write(self._comments[eid])
n = [ni if ni != 0 else None for ni in n]
card = ['CHEXA', eid, pid,
n[0], n[1], n[2], n[3], n[4], n[5], n[6], n[7], n[8], n[9],
n[10], n[11], n[12], n[13], n[14], n[15], n[16], n[17], n[18], n[19]]
bdf_file.write(print_card_8(card))
|
src/sensing/drivers/radar/umrr_driver/src/smartmicro/Services/basicCanServices/uatResponseService.py | P4nos/Aslan | 227 | 12678378 | import queue
import struct
from smartmicro.Protocols.udt.udtUatResponseV1 import UATv1Response
# from smartmicro.Protocols.udt.udtUatResponseV2 import UATv2Response
# from smartmicro.Protocols.udt.udtUatResponseV3 import UATv3Response
from smartmicro.Protocols.udt.udtUatResponseV4 import UATv4Response
from smartmicro.Services.basicCanServices.canService import CanIDService
class uatResponseService(CanIDService):
# ---------------------------------------------------------------------------------------------------------------- #
# function: initialization #
# ---------------------------------------------------------------------------------------------------------------- #
def __init__(self):
"""
The function provides all necessary variables and instances to deal with the udt sub-service uat responses.
"""
# init super class
CanIDService.__init__(self)
# provide receive queue
self.recvQueue = queue.Queue()
# provide decode dictionary
self.decDict = dict()
# provide udt identifier referring to uat response service
self.uatRespIdList = [17000, 17001, 17002, 17003, 17004, 17005, 17006, 17007, 17008, 17009,
17010, 17011, 17012, 17013, 17014, 17015, 17016, 17017, 17018]
# set decode functions
self.__regDecodeFunctions()
# ---------------------------------------------------------------------------------------------------------------- #
# function: __regDecodeFunctions #
# ---------------------------------------------------------------------------------------------------------------- #
def __regDecodeFunctions(self):
"""
The function registers all decode functions into one dictionary.
Returns
-------
"""
# register decode functions
self.decDict["2"] = UATv1Response.decode
# self.decDict["3"] = UATv2Response.decode
# self.decDict["4"] = UATv3Response.decode
self.decDict["5"] = UATv4Response.decode
# ---------------------------------------------------------------------------------------------------------------- #
# function: getUdtIdentifier #
# ---------------------------------------------------------------------------------------------------------------- #
def getUdtIdentifier(self):
"""
The function returns a list of used udt identifier for this response service.
Returns
-------
uatRespIdList : list
list of used uat response identifier
"""
return self.uatRespIdList
# ---------------------------------------------------------------------------------------------------------------- #
# function: getMessage #
# ---------------------------------------------------------------------------------------------------------------- #
def getMessage(self, timeout=None):
"""
The function decodes received uat responses
Parameters
----------
timeout : integer
timeout in [s]
Returns
-------
"""
# wait for header
header = self.__waitForHeader(timeout)
# decode uat response version
msgList, respVersion = self.__uatVersionCtrl(header, timeout)
# decode uat response
decResp = self.decDict[str(respVersion)](msgList)
return decResp
# ---------------------------------------------------------------------------------------------------------------- #
# function: __waitForHeader #
# ---------------------------------------------------------------------------------------------------------------- #
def __waitForHeader(self, timeout=None):
"""
The function waits for the header of the response.
Parameters
----------
timeout : integer
timeout in [s]
Returns
-------
header : bytearray
header message of the response
"""
# init default udt index
udtIndex = 0
# set default header
header = bytearray(8)
# run as long as header is not found
while udtIndex != 17000:
# get header from queue
header = self.recvQueue.get(block=True, timeout=timeout)['data']
# extract udt index
udtIndex = struct.unpack('<H', header[0:2])[0]
return header
# ---------------------------------------------------------------------------------------------------------------- #
# function: __waitForHeader #
# ---------------------------------------------------------------------------------------------------------------- #
def __uatVersionCtrl(self, header, timeout=None):
"""
The function decodes the corresponding uat version for further response decoding. Additional the corresponding
messages for the response will be collected.
Parameters
----------
header : bytearray
header message used to determine next steps
timeout : integer
timeout in [s]
Returns
-------
msgList : list
list of bytearrays
uatRespVersion : integer
current version of uat response
"""
# decode uat response version
uatRespVersion = struct.unpack("<B", header[2:3])[0]
# UDT_UATv1Response
if uatRespVersion == 2:
# repetition header plus 3 data packages
remaining_datapackages = 4
# UDT_UATv2Response
# elif uatRespVersion == 3:
# # repetition header plus 4 data packages
# remaining_datapackages = 6
# # UDT_UATv3Response
# elif uatRespVersion == 4:
# # repetition header plus 7 data packages
# remaining_datapackages = 8
# UDT_UATv4Response
elif uatRespVersion == 5:
numberOfInstructions = header[5]
# (Number of instructions * 3) data packages
remaining_datapackages = numberOfInstructions * 3
else:
raise TypeError("unsupported UDT-UAT response index received")
# provide list of response messages
msgList = [header]
for nsgIdx in range(0, remaining_datapackages):
msgList.append(self.recvQueue.get(block=True, timeout=timeout)['data'])
return msgList, uatRespVersion
# ---------------------------------------------------------------------------------------------------------------- #
# function: clearQueue #
# ---------------------------------------------------------------------------------------------------------------- #
def clearQueue(self):
"""
Flushes the recvQueue.
Returns
-------
None
"""
while self.isEmpty() is False:
self.getMessage()
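# Illustrative wiring sketch (hypothetical, not part of the original driver): a dispatcher
# is expected to push received UDT messages into recvQueue as dicts of the form
# {'data': <byte payload>}, using getUdtIdentifier() to decide which UDT indices belong to
# this service; getMessage() then blocks until a complete response (header plus its data
# packages) has arrived and returns the decoded result.
if __name__ == "__main__":
    service = uatResponseService()
    print("handled UDT indices:", service.getUdtIdentifier())
    # rough dispatcher pseudo-wiring (`can_rx` is a made-up message source):
    # for payload in can_rx:
    #     if struct.unpack('<H', payload[0:2])[0] in service.getUdtIdentifier():
    #         service.recvQueue.put({'data': payload})
    # response = service.getMessage(timeout=1)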
|
codes/models/networks/base_nets.py | sanchitvohra/EGVSR | 709 | 12678403 | import torch.nn as nn
class BaseSequenceGenerator(nn.Module):
def __init__(self):
super(BaseSequenceGenerator, self).__init__()
def generate_dummy_input(self, lr_size):
""" use for compute per-step FLOPs and speed
return random tensors that can be taken as input of <forward>
"""
return None
def forward(self, *args, **kwargs):
""" forward pass for a singe frame
"""
pass
def forward_sequence(self, lr_data):
""" forward pass for a whole sequence (for training)
"""
pass
def infer_sequence(self, lr_data, device):
""" infer for a whole sequence (for inference)
"""
pass
class BaseSequenceDiscriminator(nn.Module):
def __init__(self):
super(BaseSequenceDiscriminator, self).__init__()
def forward(self, *args, **kwargs):
""" forward pass for a singe frame
"""
pass
def forward_sequence(self, data, args_dict):
""" forward pass for a whole sequence (for training)
"""
pass
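# Minimal subclassing sketch (hypothetical, not part of the original file): shows the
# interface a concrete generator is expected to provide; the one-layer "model" and the
# input sizes are made up purely for illustration.
if __name__ == '__main__':
    import torch
    class ToySequenceGenerator(BaseSequenceGenerator):
        def __init__(self):
            super(ToySequenceGenerator, self).__init__()
            self.conv = nn.Conv2d(3, 3, 3, padding=1)
        def generate_dummy_input(self, lr_size):
            c, h, w = lr_size
            return {'lr': torch.rand(1, c, h, w)}
        def forward(self, lr):
            return self.conv(lr)
    net = ToySequenceGenerator()
    dummy = net.generate_dummy_input((3, 64, 64))
    print(net(**dummy).shape)  # -> torch.Size([1, 3, 64, 64])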
|
examples/daily/point.py | mitchkaden/meteostat-python | 133 | 12678419 | """
Example: Daily point data access
Meteorological data provided by Meteostat (https://dev.meteostat.net)
under the terms of the Creative Commons Attribution-NonCommercial
4.0 International Public License.
The code is licensed under the MIT license.
"""
from datetime import datetime
import matplotlib.pyplot as plt
from meteostat import Point, Daily
# Set time period
start = datetime(2018, 1, 1)
end = datetime(2018, 12, 31)
# Create Point for Vancouver, BC
vancouver = Point(49.2497, -123.1193, 70)
# Get daily data for 2018
data = Daily(vancouver, start, end)
data = data.fetch()
# Plot line chart including average, minimum and maximum temperature
data.plot(y=['tavg', 'tmin', 'tmax'])
plt.show()
|
gw_full_latest/CloudMQTT.py | rendikanyut/LowCostLoRaGw | 654 | 12678424 | #-------------------------------------------------------------------------------
# Copyright 2017 <NAME>, University of Pau, France.
#
# <EMAIL>
#
# This file is part of the low-cost LoRa gateway developed at University of Pau
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the program. If not, see <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------
import urllib2
import subprocess
import time
import ssl
import socket
import datetime
import sys
import os
import json
import re
import shlex
#don't generate pyc (no compilation of imported module) so change in key_* file can be done dynamically
sys.dont_write_bytecode = True
# get key definition from external file to ease
# update of cloud script in the future
import key_MQTT
MQTT_port=''
try:
key_MQTT.source_list
except AttributeError:
key_MQTT.source_list=[]
# didn't get a response from server?
connection_failure = False
# function to check connection availability with the server
def test_network_available():
response = 1
iteration = 0
global connection_failure
# we try 4 times to connect to the server.
while(response!=0 and iteration < 4) :
response = os.system("ping -q -c 1 -W 1 " + key_MQTT.MQTT_server + " > /dev/null 2>&1")
# if connection_failure == True and the connection with the server is unavailable
# don't waste more time, exit directly
if (connection_failure and response!=0) :
print('MQTT: the server is still unavailable')
iteration = 4
# print connection failure
elif (response!=0) :
print('MQTT: server unavailable, retrying to connect soon...')
# wait before retrying
time.sleep(1)
iteration += 1
if response==0:
return True
else:
return False
# send a data to the server
def send_data(data, src, nomenclatures, tdata):
global connection_failure
i=0
if data[0]=='':
data[0]=key_MQTT.project_name
if data[1]=='':
data[1]=key_MQTT.organization_name
while i < len(data)-2:
#we use mosquitto client
#be sure to have run sudo apt-get install mosquitto-clients
#the topic will be for instance waziup_UPPA_Sensor2/TC
if nomenclatures=='':
cmd = 'mosquitto_pub -h '+key_MQTT.MQTT_server+MQTT_port+' -t '+data[0]+'/'+data[1]+'/'+src+' -m \"'+data[i+2]+'\"'
else:
cmd = 'mosquitto_pub -h '+key_MQTT.MQTT_server+MQTT_port+' -t '+data[0]+'/'+data[1]+'/'+src+'/'+nomenclatures[i]+' -m \"'+data[i+2]+'\"'
i += 1
print "CloudMQTT: will issue cmd"
print(cmd)
args = shlex.split(cmd)
print args
try:
out = subprocess.check_output(args, shell=False)
if out != '':
print 'MQTT: returned msg from server is'
print out
else :
print 'MQTT: publish success'
except subprocess.CalledProcessError:
print "MQTT: publish command failed (maybe a disconnection)"
connection_failure = True
def MQTT_uploadData(nomenclatures, data, src, tdata):
connected = test_network_available()
#if we got a response from the server, send the data to it
if(connected):
print("MQTT: publishing")
send_data(data, src, nomenclatures, tdata)
else:
print("MQTT: not publishing")
# update connection_failure value
global connection_failure
connection_failure = not connected
# main
# -------------------
#
# ldata can be formatted to indicate a specifc project and organization name. Options are:
# TC/22.4/HU/85 -> use default project and organization name
# UPPA#TC/22.4/HU/85 -> use default project and organization name=UPPA
# waziup#UPPA#TC/22.4/HU/85 -> project=waziup and organization name=UPPA
#
# project and organization name must BOTH have more than 2 characters
#
def main(ldata, pdata, rdata, tdata, gwid):
# this is common code to process packet information provided by the main gateway script (i.e. post_processing_gw.py)
# these information are provided in case you need them
arr = map(int,pdata.split(','))
dst=arr[0]
ptype=arr[1]
src=arr[2]
seq=arr[3]
datalen=arr[4]
SNR=arr[5]
RSSI=arr[6]
#LoRaWAN packet
if dst==256:
src_str="0x%0.8X" % src
else:
src_str=str(src)
if (src_str in key_MQTT.source_list) or (len(key_MQTT.source_list)==0):
global MQTT_port
# check if ':' separator is present that would indicate a custom MQTT port number
if key_MQTT.MQTT_server.count(':')>0:
server_port=re.split(':',key_MQTT.MQTT_server)
key_MQTT.MQTT_server=server_port[0]
MQTT_port=' -p '+server_port[1]
else:
MQTT_port=''
#LoRaWAN (so encrypted packet) -> ptype & 0x40 == 0x40 or ptype & 0x80 == 0x80
#or encapsulated encrypted -> ptype & 0x04 == 0x04
if ptype & 0x40 == 0x40 or ptype & 0x80 == 0x80 or ptype & 0x04 == 0x04:
nomenclatures = ''
data=['','']
data.append(ldata)
MQTT_uploadData(nomenclatures, data, key_MQTT.sensor_name+src_str, tdata)
else:
# this part depends on the syntax used by the end-device
# we use: TC/22.4/HU/85...
#
# but we accept also a_str#b_str#TC/22.4/HU/85... to indicate a project and organization
# or simply 22.4 in which case, the nomemclature will be DEF
# get number of '#' separator
nsharp=ldata.count('#')
nslash=0
# no separator
if nsharp==0:
# will use default project and organisation name
data=['','']
# get number of '/' separator on ldata
nslash = ldata.count('/')
# contains ['', '', "s1", s1value, "s2", s2value, ...]
data_array = data + re.split("/", ldata)
else:
data_array = re.split("#", ldata)
# only 1 separator
if nsharp==1:
# insert '' to indicate default project
                    # as we assume that the only parameter indicates the organisation name
data_array.insert(0,'');
# if the length is greater than 2
if len(data_array[1])<3:
data_array[1]=''
# we have 2 separators
if nsharp==2:
                    # if the length of a field is greater than 2 then we take it into account
if len(data_array[0])<3:
data_array[0]=''
if len(data_array[1])<3:
data_array[1]=''
# get number of '/' separator on data_array[2]
# because ldata may contain '/'
nslash = data_array[2].count('/')
# then reconstruct data_array
data_array=[data_array[0],data_array[1]]+re.split("/", data_array[2])
# at the end data_array contains
# ["project", "organisation_name", "s1", s1value, "s2", s2value, ...]
# just in case we have an ending CR or 0
data_array[len(data_array)-1] = data_array[len(data_array)-1].replace('\n', '')
data_array[len(data_array)-1] = data_array[len(data_array)-1].replace('\0', '')
nomenclatures = []
# data to send
data = []
data.append(data_array[0]) #project (if '' default)
data.append(data_array[1]) #organization name (if '' default)
if nslash==0:
# old syntax without nomenclature key, so insert only one key
# we use DEF
nomenclatures.append("DEF")
data.append(data_array[2])
else:
# completing nomenclatures and data
i=2
while i < len(data_array)-1 :
nomenclatures.append(data_array[i])
data.append(data_array[i+1])
i += 2
#here we append the device's address to get for instance UPPA_Sensor2
#if packet come from a LoRaWAN device with 4-byte devAddr then we will have for instance UPPA_Sensor01020304
#where the devAddr is expressed in hex format
MQTT_uploadData(nomenclatures, data, key_MQTT.sensor_name+src_str, tdata)
else:
print "Source is not is source list, not sending with CloudMQTT.py"
if __name__ == "__main__":
main(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]) |
python/test/feature_assembler_test.py | xinglinsky/vmaf | 2,874 | 12678429 | <filename>python/test/feature_assembler_test.py
from __future__ import absolute_import
__copyright__ = "Copyright 2016-2020, Netflix, Inc."
__license__ = "BSD+Patent"
import unittest
from vmaf.core.feature_assembler import FeatureAssembler
from vmaf.core.feature_extractor import VmafFeatureExtractor, FeatureExtractor, \
MomentFeatureExtractor
from test.testutil import set_default_576_324_videos_for_testing
class FeatureAssemblerTest(unittest.TestCase):
def tearDown(self):
if hasattr(self, 'fassembler'):
self.fassembler.remove_results()
pass
def test_get_fextractor_subclasses(self):
fextractor_subclasses = FeatureExtractor.get_subclasses_recursively()
self.assertTrue(VmafFeatureExtractor in fextractor_subclasses)
self.assertTrue(MomentFeatureExtractor in fextractor_subclasses)
def test_feature_assembler_whole_feature(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.fassembler = FeatureAssembler(
feature_dict={'VMAF_feature': 'all'},
feature_option_dict=None,
assets=[asset, asset_original],
logger=None,
fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict=None,
optional_dict2=None,
parallelize=True,
processes=None,
)
self.fassembler.run()
results = self.fassembler.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_score'], 0.44609306249999997, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'], 4.0498253541666669, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm2_score'], 0.9345149030293786, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_ansnr_score'], 23.509571520833333, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'], 4.0498253541666669, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm2_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_ansnr_score'], 31.271439270833337, places=4)
def test_feature_assembler_whole_feature_processes(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.fassembler = FeatureAssembler(
feature_dict={'VMAF_feature': 'all'},
feature_option_dict=None,
assets=[asset, asset_original],
logger=None,
fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict=None,
optional_dict2=None,
parallelize=True,
processes=1,
)
self.fassembler.run()
results = self.fassembler.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_score'], 0.44609306249999997, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'], 4.0498253541666669, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm2_score'], 0.9345149030293786, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_ansnr_score'], 23.509571520833333, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'], 4.0498253541666669, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm2_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_ansnr_score'], 31.271439270833337, places=4)
def test_feature_assembler_selected_atom_feature(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.fassembler = FeatureAssembler(
feature_dict={'VMAF_feature': ['vif', 'motion']},
feature_option_dict=None,
assets=[asset, asset_original],
logger=None,
fifo_mode=True,
delete_workdir=True,
result_store=None,
optional_dict=None,
optional_dict2=None,
parallelize=True,
)
self.fassembler.run()
results = self.fassembler.results
self.assertAlmostEqual(results[0]['VMAF_feature_vif_score'], 0.44609306249999997, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'], 4.0498253541666669, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'], 4.0498253541666669, places=4)
with self.assertRaises(KeyError):
results[0]['VMAF_feature_ansnr_scores']
with self.assertRaises(KeyError):
results[0]['VMAF_feature_ansnr_score']
with self.assertRaises(KeyError):
results[0]['VMAF_feature_adm_scores']
with self.assertRaises(KeyError):
results[0]['VMAF_feature_adm_score']
if __name__ == '__main__':
unittest.main(verbosity=2)
|
mim/commands/install.py | zhouzaida/mim | 188 | 12678433 | <filename>mim/commands/install.py
import os
import os.path as osp
import shutil
import tempfile
from distutils.version import LooseVersion
from pkg_resources import parse_requirements, resource_filename
from typing import List
import click
import pip
from mim.click import get_official_package, param2lowercase
from mim.commands.uninstall import uninstall
from mim.utils import (
DEFAULT_URL,
MODULE2PKG,
PKG2MODULE,
PKG2PROJECT,
WHEEL_URL,
call_command,
echo_success,
echo_warning,
get_installed_version,
get_latest_version,
get_package_version,
get_release_version,
get_torch_cuda_version,
highlighted_error,
is_installed,
is_version_equal,
parse_url,
split_package_version,
)
@click.command('install')
@click.argument(
'package',
type=str,
autocompletion=get_official_package,
callback=param2lowercase)
@click.option(
'-f', '--find', 'find_url', type=str, help='Url for finding package.')
@click.option(
'--default-timeout',
'timeout',
type=int,
default=45,
    help='Set the socket timeout (default 45 seconds).')
@click.option(
'-y',
'--yes',
'is_yes',
is_flag=True,
help='Don’t ask for confirmation of uninstall deletions.')
@click.option(
'--user',
'is_user_dir',
is_flag=True,
help='Install to the Python user install directory')
@click.option(
'-e',
'--editable',
'is_editable',
is_flag=True,
help='Install a package in editable mode.')
def cli(
package: str,
find_url: str = '',
timeout: int = 30,
is_yes: bool = False,
is_user_dir: bool = False,
is_editable: bool = False,
) -> None:
"""Install package.
Example:
\b
# install latest version of mmcv-full
> mim install mmcv-full # wheel
# install 1.3.1
> mim install mmcv-full==1.3.1
# install master branch
> mim install mmcv-full -f https://github.com/open-mmlab/mmcv.git
# install latest version of mmcls
> mim install mmcls
# install 0.11.0
> mim install mmcls==0.11.0 # v0.11.0
# install master branch
> mim install mmcls -f https://github.com/open-mmlab/mmclassification.git
# install local repo
> git clone https://github.com/open-mmlab/mmclassification.git
> cd mmclassification
> mim install .
# install extension based on OpenMMLab
> mim install mmcls-project -f https://github.com/xxx/mmcls-project.git
"""
install(
package,
find_url,
timeout,
is_yes=is_yes,
is_user_dir=is_user_dir,
is_editable=is_editable)
def install(package: str,
find_url: str = '',
timeout: int = 15,
is_yes: bool = False,
is_user_dir: bool = False,
is_editable: bool = False) -> None:
"""Install a package by wheel or from github.
Args:
package (str): The name of installed package, such as mmcls.
find_url (str): Url for finding package. If finding is not provided,
program will infer the find_url as much as possible. Default: ''.
timeout (int): The socket timeout. Default: 15.
is_yes (bool): Don’t ask for confirmation of uninstall deletions.
Default: False.
        is_user_dir (bool): Install to the Python user install directory for
environment variables and user configuration. Default: False.
is_editable (bool): Install a package in editable mode. Default: False.
"""
target_pkg, target_version = split_package_version(package)
# whether install from local repo
if looks_like_path(target_pkg):
if is_installable_dir(target_pkg):
is_install_local_repo = True
else:
raise ValueError(
highlighted_error(
                    f'{target_pkg} is not an installable directory'))
else:
is_install_local_repo = False
# whether install master branch from github
is_install_master = bool(not target_version and find_url)
# get target version
if target_pkg in PKG2PROJECT:
latest_version = get_latest_version(target_pkg, timeout)
if target_version:
if LooseVersion(target_version) > LooseVersion(latest_version):
error_msg = (f'target_version=={target_version} should not be'
f' greater than latest_version=={latest_version}')
raise ValueError(highlighted_error(error_msg))
else:
target_version = latest_version
# check local environment whether package existed
if is_install_master or is_install_local_repo:
pass
elif is_installed(target_pkg) and target_version:
existed_version = get_installed_version(target_pkg)
if is_version_equal(existed_version, target_version):
echo_warning(f'{target_pkg}=={existed_version} existed.')
return None
else:
if is_yes:
uninstall(target_pkg, is_yes)
else:
confirm_msg = (f'{target_pkg}=={existed_version} has been '
f'installed, but want to install {target_pkg}=='
f'{target_version}, do you want to uninstall '
f'{target_pkg}=={existed_version} and '
f'install {target_pkg}=={target_version}? ')
if click.confirm(confirm_msg):
uninstall(target_pkg, True)
else:
echo_warning(f'skip {target_pkg}')
return None
# try to infer find_url if possible
if not find_url:
find_url = infer_find_url(target_pkg)
if is_install_local_repo:
repo_root = osp.abspath(target_pkg)
module_name, target_version = get_package_version(repo_root)
if not module_name:
raise FileNotFoundError(
highlighted_error(f'version.py is missed in {repo_root}'))
target_pkg = MODULE2PKG.get(module_name, module_name)
if target_pkg == 'mmcv' and os.getenv('MMCV_WITH_OPS', '0') == '1':
target_pkg = 'mmcv-full'
echo_success(f'installing {target_pkg} from local repo.')
install_from_repo(
repo_root,
package=target_pkg,
timeout=timeout,
is_yes=is_yes,
is_user_dir=is_user_dir,
is_editable=is_editable)
elif find_url and find_url.find('git') >= 0 or is_install_master:
install_from_github(target_pkg, target_version, find_url, timeout,
is_yes, is_user_dir, is_install_master)
else:
# if installing from wheel failed, it will try to install package by
# building from source if possible.
try:
install_from_wheel(target_pkg, target_version, find_url, timeout,
is_user_dir)
except RuntimeError as error:
if target_pkg in PKG2PROJECT:
find_url = f'{DEFAULT_URL}/{PKG2PROJECT[target_pkg]}.git'
if target_version:
target_pkg = f'{target_pkg}=={target_version}'
if is_yes:
install(target_pkg, find_url, timeout, is_yes, is_user_dir)
else:
confirm_msg = (f'install {target_pkg} from wheel, but it '
'failed. Do you want to build it from '
'source if possible?')
if click.confirm(confirm_msg):
install(target_pkg, find_url, timeout, is_yes,
is_user_dir)
else:
raise RuntimeError(
highlighted_error(
f'Failed to install {target_pkg}.'))
else:
raise RuntimeError(highlighted_error(error))
echo_success(f'Successfully installed {target_pkg}.')
def looks_like_path(name: str) -> bool:
"""Checks whether the string "looks like" a path on the filesystem.
    This does not check whether the target actually exists; it only judges from
    the appearance.
Args:
name (str): The string to be checked.
"""
if osp.sep in name:
return True
if osp.altsep is not None and osp.altsep in name:
return True
if name.startswith('.'):
return True
return False
def is_installable_dir(name: str) -> bool:
"""Check whether path is a directory containing setup.py.
Args:
name (str): The string to be checked.
"""
path = osp.abspath(name)
if osp.isdir(path):
setup_py = osp.join(path, 'setup.py')
return osp.isfile(setup_py)
else:
return False
def infer_find_url(package: str) -> str:
"""Try to infer find_url if possible.
If package is the official package, the find_url can be inferred.
Args:
package (str): The name of package, such as mmcls.
"""
find_url = ''
if package in WHEEL_URL:
torch_v, cuda_v = get_torch_cuda_version()
        # In order to avoid building mmcv-full from source, we ignore the
        # difference among micro versions because there are usually no big
        # changes between micro versions. For example, the mmcv-full built against
        # pytorch 1.8.0 also works on 1.8.1 or other micro versions.
major, minor, *_ = torch_v.split('.')
torch_v = '.'.join([major, minor, '0'])
if cuda_v.isdigit():
cuda_v = f'cu{cuda_v}'
find_url = WHEEL_URL[package].format(
cuda_version=cuda_v, torch_version=f'torch{torch_v}')
elif package in PKG2PROJECT:
find_url = (f'{DEFAULT_URL}/{PKG2PROJECT[package]}.git')
return find_url
def parse_dependencies(path: str) -> list:
"""Parse dependencies from repo/requirements/mminstall.txt.
Args:
path (str): Path of mminstall.txt.
"""
def _get_proper_version(package, version, op):
releases = get_release_version(package)
if op == '>':
for r_v in releases:
if LooseVersion(r_v) > LooseVersion(version):
return r_v
else:
raise ValueError(
highlighted_error(f'invalid min version of {package}'))
elif op == '<':
for r_v in releases[::-1]:
if LooseVersion(r_v) < LooseVersion(version):
return r_v
else:
raise ValueError(
highlighted_error(f'invalid max version of {package}'))
dependencies = []
with open(path, 'r') as fr:
for requirement in parse_requirements(fr):
pkg_name = requirement.project_name
min_version = ''
max_version = ''
for op, version in requirement.specs:
if op == '==':
min_version = max_version = version
break
elif op == '>=':
min_version = version
elif op == '>':
min_version = _get_proper_version(pkg_name, version, '>')
elif op == '<=':
max_version = version
elif op == '<':
max_version = _get_proper_version(pkg_name, version, '<')
dependencies.append([pkg_name, min_version, max_version])
return dependencies
def install_dependencies(dependencies: List[List[str]],
timeout: int = 15,
is_yes: bool = False,
is_user_dir: bool = False) -> None:
"""Install dependencies, such as mmcls depends on mmcv.
Args:
dependencies (list): The list of dependency.
timeout (int): The socket timeout. Default: 15.
is_yes (bool): Don’t ask for confirmation of uninstall deletions.
Default: False.
        is_user_dir (bool): Install to the Python user install directory for
environment variables and user configuration. Default: False.
"""
for target_pkg, min_v, max_v in dependencies:
target_version = max_v
latest_version = get_latest_version(target_pkg, timeout)
if not target_version or LooseVersion(target_version) > LooseVersion(
latest_version):
target_version = latest_version
if is_installed(target_pkg):
existed_version = get_installed_version(target_pkg)
if (LooseVersion(min_v) <= LooseVersion(existed_version) <=
LooseVersion(target_version)):
continue
echo_success(f'installing dependency: {target_pkg}')
target_pkg = f'{target_pkg}=={target_version}'
install(
target_pkg,
timeout=timeout,
is_yes=is_yes,
is_user_dir=is_user_dir)
echo_success('Successfully installed dependencies.')
def install_from_repo(repo_root: str,
*,
package: str = '',
timeout: int = 15,
is_yes: bool = False,
is_user_dir: bool = False,
is_editable: bool = False):
"""Install package from local repo.
Args:
repo_root (str): The root of repo.
package (str): The name of installed package. Default: ''.
timeout (int): The socket timeout. Default: 15.
is_yes (bool): Don’t ask for confirmation of uninstall deletions.
Default: False.
        is_user_dir (bool): Install to the Python user install directory for
environment variables and user configuration. Default: False.
is_editable (bool): Install a package in editable mode. Default: False.
"""
def copy_file_to_package():
# rename the model_zoo.yml to model-index.yml but support both of them
# for backward compatibility
filenames = ['tools', 'configs', 'model_zoo.yml', 'model-index.yml']
module_name = PKG2MODULE.get(package, package)
# configs, tools and model-index.yml will be copied to package/.mim
mim_root = resource_filename(module_name, '.mim')
os.makedirs(mim_root, exist_ok=True)
for filename in filenames:
src_path = osp.join(repo_root, filename)
dst_path = osp.join(mim_root, filename)
if osp.exists(src_path):
if osp.islink(dst_path):
os.unlink(dst_path)
if osp.isfile(src_path):
shutil.copyfile(src_path, dst_path)
elif osp.isdir(src_path):
if osp.exists(dst_path):
shutil.rmtree(dst_path)
shutil.copytree(src_path, dst_path)
def link_file_to_package():
# When user installs package with editable mode, we should create
# symlinks to package, which will synchronize the modified files.
# Besides, rename the model_zoo.yml to model-index.yml but support both
# of them for backward compatibility
filenames = ['tools', 'configs', 'model_zoo.yml', 'model-index.yml']
module_name = PKG2MODULE.get(package, package)
pkg_root = osp.join(repo_root, module_name)
# configs, tools and model-index.yml will be linked to package/.mim
mim_root = osp.join(pkg_root, '.mim')
os.makedirs(mim_root, exist_ok=True)
for filename in filenames:
src_path = osp.join(repo_root, filename)
dst_path = osp.join(mim_root, filename)
if osp.exists(dst_path):
continue
if osp.exists(src_path):
if osp.isfile(dst_path) or osp.islink(dst_path):
os.remove(dst_path)
elif osp.isdir(dst_path):
shutil.rmtree(dst_path)
os.symlink(src_path, dst_path)
# install dependencies. For example,
# install mmcls should install mmcv-full first if it is not installed or
# its(mmcv) verison does not match.
mminstall_path = osp.join(repo_root, 'requirements', 'mminstall.txt')
if osp.exists(mminstall_path):
dependencies = parse_dependencies(mminstall_path)
if dependencies:
install_dependencies(dependencies, timeout, is_yes, is_user_dir)
third_dependencies = osp.join(repo_root, 'requirements', 'build.txt')
if osp.exists(third_dependencies):
dep_cmd = [
'python', '-m', 'pip', 'install', '-r', third_dependencies,
'--default-timeout', f'{timeout}'
]
if is_user_dir:
dep_cmd.append('--user')
call_command(dep_cmd)
install_cmd = ['python', '-m', 'pip', 'install']
if is_editable:
install_cmd.append('-e')
else:
        # solving issues related to out-of-tree builds
        # more details at https://github.com/pypa/pip/issues/7555
if LooseVersion(pip.__version__) >= LooseVersion('21.1.1'):
install_cmd.append('--use-feature=in-tree-build')
install_cmd.append(repo_root)
if is_user_dir:
install_cmd.append('--user')
# The issue is caused by the import order of numpy and torch
# Please refer to github.com/pytorch/pytorch/issue/37377
os.environ['MKL_SERVICE_FORCE_INTEL'] = '1'
if package in WHEEL_URL:
echo_success(f'compiling {package} with "MMCV_WITH_OPS=1"')
os.environ['MMCV_WITH_OPS'] = '1'
call_command(install_cmd)
if is_editable:
link_file_to_package()
else:
copy_file_to_package()
def install_from_github(package: str,
version: str = '',
find_url: str = '',
timeout: int = 15,
is_yes: bool = False,
is_user_dir: bool = False,
is_install_master: bool = False) -> None:
"""Install package from github.
Args:
package (str): The name of installed package, such as mmcls.
version (str): Version of package. Default: ''.
        find_url (str): URL for finding the package. If it is not provided,
            the program will infer the find_url as far as possible. Default: ''.
timeout (int): The socket timeout. Default: 15.
is_yes (bool): Don’t ask for confirmation of uninstall deletions.
Default: False.
        is_user_dir (bool): Install to the Python user install directory for
environment variables and user configuration. Default: False.
        is_install_master (bool): Whether to install the master branch. If True,
            the master branch is installed; if False, the specified version is
            installed. Default: False.
"""
click.echo(f'installing {package} from {find_url}.')
_, repo = parse_url(find_url)
clone_cmd = ['git', 'clone', find_url]
if not is_install_master:
clone_cmd.extend(['-b', f'v{version}'])
with tempfile.TemporaryDirectory() as temp_root:
repo_root = osp.join(temp_root, repo)
clone_cmd.append(repo_root)
call_command(clone_cmd)
install_from_repo(
repo_root,
package=package,
timeout=timeout,
is_yes=is_yes,
is_user_dir=is_user_dir)
def install_from_wheel(package: str,
version: str = '',
find_url: str = '',
timeout: int = 15,
is_user_dir: bool = False) -> None:
"""Install wheel from find_url.
Args:
package (str): The name of installed package, such as mmcls.
version (str): Version of package. Default: ''.
        find_url (str): URL for finding the package. If it is not provided,
            the program will infer the find_url as far as possible. Default: ''.
timeout (int): The socket timeout. Default: 15.
        is_user_dir (bool): Install to the Python user install directory for
environment variables and user configuration. Default: False.
"""
click.echo(f'installing {package} from wheel.')
install_cmd = [
'python', '-m', 'pip', '--default-timeout', f'{timeout}', 'install'
]
if version:
install_cmd.append(f'{package}=={version}')
else:
install_cmd.append(package)
if find_url:
install_cmd.extend(['-f', find_url])
if is_user_dir:
install_cmd.append('--user')
call_command(install_cmd)
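# Example sketch of the command composed above (placeholder find_url):
#   install_from_wheel('mmcv-full', '1.4.0', find_url='<find_url>', timeout=15)
# roughly runs:
#   python -m pip --default-timeout 15 install mmcv-full==1.4.0 -f <find_url>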
|
logisland-components/logisland-processors/logisland-processor-scripting/src/main/python/processors/basic/BasicProcessor.py | FeizNouri/logisland | 101 | 12678501 | # coding: utf-8
from AbstractProcessor import AbstractProcessor
from com.hurence.logisland.record import StandardRecord
#
# Simple python processor to test ability to run python code and process some
# records.
#
# The python_processor.python_processor_script_path config property of the
# java python processor must point to a python module file. This module must
# at least contain the definition of a python class with the same name as the
# module, and this class must inherit from the logisland-provided
# python class: AbstractProcessor
#
class BasicProcessor(AbstractProcessor):
def init(self, context):
print "Inside init of BasicProcessor python code"
def process(self, context, records):
print "Inside process of BasicProcessor python code"
# Copy the records and add python_field field in it
outputRecords = []
for record in records:
copyRecord = StandardRecord(record)
# Check that one can read values coming from java
javaFieldValue = copyRecord.getField("java_field").getRawValue()
expectedValue = "java_field_value"
assert (javaFieldValue == expectedValue) , "Expected " + expectedValue + " but got " + javaFieldValue
copyRecord.setStringField('python_field', 'python_field_value')
outputRecords.append(copyRecord)
return outputRecords |
grid/coder_test.py | susannahsoon/oldperth | 302 | 12678505 | from nose.tools import *
from grid import coder
def assert_close(ll1, ll2):
'''Assert that two latitude & longitude tuples are "close".'''
try:
assert_almost_equal(ll1[0], ll2[0], places=6)
assert_almost_equal(ll1[1], ll2[1], places=6)
except AssertionError as e:
print '%s != %s' % (ll1, ll2)
raise e
def test_exact():
# It's really Park Avenue South.
assert_close(coder.code('4', '17'), (40.736518, -73.988962))
def test_interpolate():
# This is halfway between 26th & 28th.
assert_close(coder.code('9', '27'), (40.749020, -73.9995210))
def test_extrapolate():
assert_close(coder.code('A', '15'), (40.731083, -73.979847))
assert_close(coder.code('A', '20'), (40.734071, -73.977654))
def test_may_extrapolate():
assert(coder.may_extrapolate('A', '15'))
assert(coder.may_extrapolate('2', '8'))
assert(coder.may_extrapolate('A', '25'))
assert(not coder.may_extrapolate('4', '8'))
assert(not coder.may_extrapolate('D', '25'))
assert(not coder.may_extrapolate('7', '10'))
assert(not coder.may_extrapolate('B', '93')) # 723557f-c
|
tests/hwsim/nl80211.py | yoavst/hostapd-atrik | 1,991 | 12678530 |
# nl80211 definitions
# Copyright (c) 2014, <NAME> <<EMAIL>>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import binascii
import struct
nl80211_cmd = {
'GET_WIPHY': 1,
'SET_WIPHY': 2,
'NEW_WIPHY': 3,
'DEL_WIPHY': 4,
'GET_INTERFACE': 5,
'SET_INTERFACE': 6,
'NEW_INTERFACE': 7,
'DEL_INTERFACE': 8,
'GET_KEY': 9,
'SET_KEY': 10,
'NEW_KEY': 11,
'DEL_KEY': 12,
'GET_BEACON': 13,
'SET_BEACON': 14,
'START_AP': 15,
'STOP_AP': 16,
'GET_STATION': 17,
'SET_STATION': 18,
'NEW_STATION': 19,
'DEL_STATION': 20,
'GET_MPATH': 21,
'SET_MPATH': 22,
'NEW_MPATH': 23,
'DEL_MPATH': 24,
'SET_BSS': 25,
'SET_REG': 26,
'REQ_SET_REG': 27,
'GET_MESH_CONFIG': 28,
'SET_MESH_CONFIG': 29,
'SET_MGMT_EXTRA_IE[RESERVED]': 30,
'GET_REG': 31,
'GET_SCAN': 32,
'TRIGGER_SCAN': 33,
'NEW_SCAN_RESULTS': 34,
'SCAN_ABORTED': 35,
'REG_CHANGE': 36,
'AUTHENTICATE': 37,
'ASSOCIATE': 38,
'DEAUTHENTICATE': 39,
'DISASSOCIATE': 40,
'MICHAEL_MIC_FAILURE': 41,
'REG_BEACON_HINT': 42,
'JOIN_IBSS': 43,
'LEAVE_IBSS': 44,
'TESTMODE': 45,
'CONNECT': 46,
'ROAM': 47,
'DISCONNECT': 48,
'SET_WIPHY_NETNS': 49,
'GET_SURVEY': 50,
'NEW_SURVEY_RESULTS': 51,
'SET_PMKSA': 52,
'DEL_PMKSA': 53,
'FLUSH_PMKSA': 54,
'REMAIN_ON_CHANNEL': 55,
'CANCEL_REMAIN_ON_CHANNEL': 56,
'SET_TX_BITRATE_MASK': 57,
'REGISTER_FRAME': 58,
'FRAME': 59,
'FRAME_TX_STATUS': 60,
'SET_POWER_SAVE': 61,
'GET_POWER_SAVE': 62,
'SET_CQM': 63,
'NOTIFY_CQM': 64,
'SET_CHANNEL': 65,
'SET_WDS_PEER': 66,
'FRAME_WAIT_CANCEL': 67,
'JOIN_MESH': 68,
'LEAVE_MESH': 69,
'UNPROT_DEAUTHENTICATE': 70,
'UNPROT_DISASSOCIATE': 71,
'NEW_PEER_CANDIDATE': 72,
'GET_WOWLAN': 73,
'SET_WOWLAN': 74,
'START_SCHED_SCAN': 75,
'STOP_SCHED_SCAN': 76,
'SCHED_SCAN_RESULTS': 77,
'SCHED_SCAN_STOPPED': 78,
'SET_REKEY_OFFLOAD': 79,
'PMKSA_CANDIDATE': 80,
'TDLS_OPER': 81,
'TDLS_MGMT': 82,
'UNEXPECTED_FRAME': 83,
'PROBE_CLIENT': 84,
'REGISTER_BEACONS': 85,
'UNEXPECTED_4ADDR_FRAME': 86,
'SET_NOACK_MAP': 87,
'CH_SWITCH_NOTIFY': 88,
'START_P2P_DEVICE': 89,
'STOP_P2P_DEVICE': 90,
'CONN_FAILED': 91,
'SET_MCAST_RATE': 92,
'SET_MAC_ACL': 93,
'RADAR_DETECT': 94,
'GET_PROTOCOL_FEATURES': 95,
'UPDATE_FT_IES': 96,
'FT_EVENT': 97,
'CRIT_PROTOCOL_START': 98,
'CRIT_PROTOCOL_STOP': 99,
'GET_COALESCE': 100,
'SET_COALESCE': 101,
'CHANNEL_SWITCH': 102,
'VENDOR': 103,
'SET_QOS_MAP': 104,
}
nl80211_attr = {
'WIPHY': 1,
'WIPHY_NAME': 2,
'IFINDEX': 3,
'IFNAME': 4,
'IFTYPE': 5,
'MAC': 6,
'KEY_DATA': 7,
'KEY_IDX': 8,
'KEY_CIPHER': 9,
'KEY_SEQ': 10,
'KEY_DEFAULT': 11,
'BEACON_INTERVAL': 12,
'DTIM_PERIOD': 13,
'BEACON_HEAD': 14,
'BEACON_TAIL': 15,
'STA_AID': 16,
'STA_FLAGS': 17,
'STA_LISTEN_INTERVAL': 18,
'STA_SUPPORTED_RATES': 19,
'STA_VLAN': 20,
'STA_INFO': 21,
'WIPHY_BANDS': 22,
'MNTR_FLAGS': 23,
'MESH_ID': 24,
'STA_PLINK_ACTION': 25,
'MPATH_NEXT_HOP': 26,
'MPATH_INFO': 27,
'BSS_CTS_PROT': 28,
'BSS_SHORT_PREAMBLE': 29,
'BSS_SHORT_SLOT_TIME': 30,
'HT_CAPABILITY': 31,
'SUPPORTED_IFTYPES': 32,
'REG_ALPHA2': 33,
'REG_RULES': 34,
'MESH_CONFIG': 35,
'BSS_BASIC_RATES': 36,
'WIPHY_TXQ_PARAMS': 37,
'WIPHY_FREQ': 38,
'WIPHY_CHANNEL_TYPE': 39,
'KEY_DEFAULT_MGMT': 40,
'MGMT_SUBTYPE': 41,
'IE': 42,
'MAX_NUM_SCAN_SSIDS': 43,
'SCAN_FREQUENCIES': 44,
'SCAN_SSIDS': 45,
'GENERATION': 46,
'BSS': 47,
'REG_INITIATOR': 48,
'REG_TYPE': 49,
'SUPPORTED_COMMANDS': 50,
'FRAME': 51,
'SSID': 52,
'AUTH_TYPE': 53,
'REASON_CODE': 54,
'KEY_TYPE': 55,
'MAX_SCAN_IE_LEN': 56,
'CIPHER_SUITES': 57,
'FREQ_BEFORE': 58,
'FREQ_AFTER': 59,
'FREQ_FIXED': 60,
'WIPHY_RETRY_SHORT': 61,
'WIPHY_RETRY_LONG': 62,
'WIPHY_FRAG_THRESHOLD': 63,
'WIPHY_RTS_THRESHOLD': 64,
'TIMED_OUT': 65,
'USE_MFP': 66,
'STA_FLAGS2': 67,
'CONTROL_PORT': 68,
'TESTDATA': 69,
'PRIVACY': 70,
'DISCONNECTED_BY_AP': 71,
'STATUS_CODE': 72,
'CIPHER_SUITES_PAIRWISE': 73,
'CIPHER_SUITE_GROUP': 74,
'WPA_VERSIONS': 75,
'AKM_SUITES': 76,
'REQ_IE': 77,
'RESP_IE': 78,
'PREV_BSSID': 79,
'KEY': 80,
'KEYS': 81,
'PID': 82,
'4ADDR': 83,
'SURVEY_INFO': 84,
'PMKID': 85,
'MAX_NUM_PMKIDS': 86,
'DURATION': 87,
'COOKIE': 88,
'WIPHY_COVERAGE_CLASS': 89,
'TX_RATES': 90,
'FRAME_MATCH': 91,
'ACK': 92,
'PS_STATE': 93,
'CQM': 94,
'LOCAL_STATE_CHANGE': 95,
'AP_ISOLATE': 96,
'WIPHY_TX_POWER_SETTING': 97,
'WIPHY_TX_POWER_LEVEL': 98,
'TX_FRAME_TYPES': 99,
'RX_FRAME_TYPES': 100,
'FRAME_TYPE': 101,
'CONTROL_PORT_ETHERTYPE': 102,
'CONTROL_PORT_NO_ENCRYPT': 103,
'SUPPORT_IBSS_RSN': 104,
'WIPHY_ANTENNA_TX': 105,
'WIPHY_ANTENNA_RX': 106,
'MCAST_RATE': 107,
'OFFCHANNEL_TX_OK': 108,
'BSS_HT_OPMODE': 109,
'KEY_DEFAULT_TYPES': 110,
'MAX_REMAIN_ON_CHANNEL_DURATION': 111,
'MESH_SETUP': 112,
'WIPHY_ANTENNA_AVAIL_TX': 113,
'WIPHY_ANTENNA_AVAIL_RX': 114,
'SUPPORT_MESH_AUTH': 115,
'STA_PLINK_STATE': 116,
'WOWLAN_TRIGGERS': 117,
'WOWLAN_TRIGGERS_SUPPORTED': 118,
'SCHED_SCAN_INTERVAL': 119,
'INTERFACE_COMBINATIONS': 120,
'SOFTWARE_IFTYPES': 121,
'REKEY_DATA': 122,
'MAX_NUM_SCHED_SCAN_SSIDS': 123,
'MAX_SCHED_SCAN_IE_LEN': 124,
'SCAN_SUPP_RATES': 125,
'HIDDEN_SSID': 126,
'IE_PROBE_RESP': 127,
'IE_ASSOC_RESP': 128,
'STA_WME': 129,
'SUPPORT_AP_UAPSD': 130,
'ROAM_SUPPORT': 131,
'SCHED_SCAN_MATCH': 132,
'MAX_MATCH_SETS': 133,
'PMKSA_CANDIDATE': 134,
'TX_NO_CCK_RATE': 135,
'TDLS_ACTION': 136,
'TDLS_DIALOG_TOKEN': 137,
'TDLS_OPERATION': 138,
'TDLS_SUPPORT': 139,
'TDLS_EXTERNAL_SETUP': 140,
'DEVICE_AP_SME': 141,
'DONT_WAIT_FOR_ACK': 142,
'FEATURE_FLAGS': 143,
'PROBE_RESP_OFFLOAD': 144,
'PROBE_RESP': 145,
'DFS_REGION': 146,
'DISABLE_HT': 147,
'HT_CAPABILITY_MASK': 148,
'NOACK_MAP': 149,
'INACTIVITY_TIMEOUT': 150,
'RX_SIGNAL_DBM': 151,
'BG_SCAN_PERIOD': 152,
'WDEV': 153,
'USER_REG_HINT_TYPE': 154,
'CONN_FAILED_REASON': 155,
'SAE_DATA': 156,
'VHT_CAPABILITY': 157,
'SCAN_FLAGS': 158,
'CHANNEL_WIDTH': 159,
'CENTER_FREQ1': 160,
'CENTER_FREQ2': 161,
'P2P_CTWINDOW': 162,
'P2P_OPPPS': 163,
'LOCAL_MESH_POWER_MODE': 164,
'ACL_POLICY': 165,
'MAC_ADDRS': 166,
'MAC_ACL_MAX': 167,
'RADAR_EVENT': 168,
'EXT_CAPA': 169,
'EXT_CAPA_MASK': 170,
'STA_CAPABILITY': 171,
'STA_EXT_CAPABILITY': 172,
'PROTOCOL_FEATURES': 173,
'SPLIT_WIPHY_DUMP': 174,
'DISABLE_VHT': 175,
'VHT_CAPABILITY_MASK': 176,
'MDID': 177,
'IE_RIC': 178,
'CRIT_PROT_ID': 179,
'MAX_CRIT_PROT_DURATION': 180,
'PEER_AID': 181,
'COALESCE_RULE': 182,
'CH_SWITCH_COUNT': 183,
'CH_SWITCH_BLOCK_TX': 184,
'CSA_IES': 185,
'CSA_C_OFF_BEACON': 186,
'CSA_C_OFF_PRESP': 187,
'RXMGMT_FLAGS': 188,
'STA_SUPPORTED_CHANNELS': 189,
'STA_SUPPORTED_OPER_CLASSES': 190,
'HANDLE_DFS': 191,
'SUPPORT_5_MHZ': 192,
'SUPPORT_10_MHZ': 193,
'OPMODE_NOTIF': 194,
'VENDOR_ID': 195,
'VENDOR_SUBCMD': 196,
'VENDOR_DATA': 197,
'VENDOR_EVENTS': 198,
'QOS_MAP': 199,
'MAC_HINT': 200,
'WIPHY_FREQ_HINT': 201,
'MAX_AP_ASSOC_STA': 202,
}
def build_nl80211_attr(id, val):
attr = struct.pack("@HH", 4 + len(val), nl80211_attr[id]) + val
if len(attr) % 4 != 0:
attr += '\0' * (4 - (len(attr) % 4))
return attr
def build_nl80211_attr_u32(id, val):
return build_nl80211_attr(id, struct.pack("@I", val))
def build_nl80211_attr_u16(id, val):
return build_nl80211_attr(id, struct.pack("@H", val))
def build_nl80211_attr_u8(id, val):
return build_nl80211_attr(id, struct.pack("@B", val))
def build_nl80211_attr_flag(id):
return build_nl80211_attr(id, '')
def build_nl80211_attr_mac(id, val):
addr = struct.unpack('6B', binascii.unhexlify(val.replace(':','')))
aval = struct.pack('<6B', *addr)
return build_nl80211_attr(id, aval)
def parse_nl80211_attrs(msg):
attrs = {}
while len(msg) >= 4:
alen,attr = struct.unpack("@HH", msg[0:4])
if alen < 4:
raise Exception("Too short nl80211 attribute")
alen -= 4
msg = msg[4:]
if alen > len(msg):
raise Exception("nl80211 attribute underflow")
attrs[attr] = msg[0:alen]
msg = msg[alen:]
return attrs
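# Minimal illustrative sketch of how the helpers above fit together (a real
# attribute payload would be embedded in a netlink message, not used standalone).
# The padded MAC attribute is placed last because parse_nl80211_attrs() does not
# skip inter-attribute padding.
def example_attr_roundtrip():
    payload = build_nl80211_attr_u32('IFINDEX', 3)
    payload += build_nl80211_attr_mac('MAC', '02:00:00:00:00:01')
    return parse_nl80211_attrs(payload)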
|
api/osapi/app.py | alephdata/opensanctions | 102 | 12678536 | import json
import logging
from urllib.parse import urljoin
from typing import Optional
from fastapi import FastAPI, Path, Query, Form, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from followthemoney.types import registry
from starlette.responses import RedirectResponse
from followthemoney import model
from followthemoney.exc import InvalidData
from api.osapi.data import (
get_freebase_entity,
get_freebase_property,
get_matchable_schemata,
)
from opensanctions.core.entity import Entity
from opensanctions.core.logs import configure_logging
from osapi import settings
from osapi.models import EntityResponse, SearchResponse
from osapi.data import dataset, resolver
from osapi.data import get_loader, get_index, get_schemata
from osapi.data import get_freebase_type, get_freebase_types
from osapi.util import match_prefix
log = logging.getLogger(__name__)
app = FastAPI(
title="OpenSanctions Matching API",
version=settings.VERSION,
contact=settings.CONTACT,
)
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
configure_logging(level=logging.INFO)
@app.on_event("startup")
async def startup_event():
loader = get_loader()
get_index(loader)
@app.get("/")
async def index():
"""Get system configuration information."""
loader = get_loader()
index = get_index(loader)
return {
"dataset": dataset.to_dict(),
"model": model.to_dict(),
"index": {"terms": len(index.terms), "tokens": len(index.inverted)},
}
@app.get("/healthz")
async def healthz():
"""No-op basic health check."""
return {"status": "ok"}
@app.get("/entities/{entity_id}", response_model=EntityResponse)
async def get_entity(
entity_id: str = Path(None, title="The ID of the entity to retrieve")
):
"""Retrieve a single entity by its ID."""
loader = get_loader()
canonical_id = resolver.get_canonical(entity_id)
if canonical_id != entity_id:
url = app.url_path_for("get_entity", entity_id=canonical_id)
return RedirectResponse(url=url)
entity = loader.get_entity(entity_id)
if entity is None:
raise HTTPException(status_code=404, detail="No such entity!")
return entity.to_nested_dict(loader)
@app.get("/search", response_model=SearchResponse)
async def search(
q: str,
schema: str = Query(settings.BASE_SCHEMA, title="Types of entities that can match"),
limit: int = Query(10, title="Number of results to return"),
fuzzy: bool = Query(False, title="Enable n-gram matching of partial names"),
nested: bool = Query(False, title="Include adjacent entities in response"),
):
"""Search matching entities based on a simple piece of text, e.g. a name."""
loader = get_loader()
index = get_index(loader)
query = Entity(schema)
query.add("name", q)
query.add("notes", q)
results = []
for result, score in index.match_entities(query, limit=limit, fuzzy=fuzzy):
result_data = None
if nested:
result_data = result.to_nested_dict(loader)
else:
result_data = result.to_dict()
result_data["score"] = score
results.append(result_data)
return {"results": results}
@app.get("/reconcile")
def reconcile(queries: Optional[str] = None):
"""Reconciliation API, emulates Google Refine API. This endpoint can be used
to bulk match entities against the system using an end-user application like
[OpenRefine](https://openrefine.org).
See: [Reconciliation API docs](https://reconciliation-api.github.io/specs/latest/#structure-of-a-reconciliation-query)
"""
if queries is not None:
return reconcile_queries(queries)
base_url = urljoin(settings.ENDPOINT_URL, "/reconcile")
return {
"versions": ["0.2"],
"name": f"{dataset.title} ({app.title})",
"identifierSpace": "https://opensanctions.org/reference/#schema",
"schemaSpace": "https://opensanctions.org/reference/#schema",
"view": {"url": ("https://opensanctions.org/entities/{{id}}/")},
"suggest": {
"entity": {
"service_url": base_url,
"service_path": "/suggest/entity",
},
"type": {
"service_url": base_url,
"service_path": "/suggest/type",
},
"property": {
"service_url": base_url,
"service_path": "/suggest/property",
},
},
"defaultTypes": get_freebase_types(),
}
@app.post("/reconcile")
def reconcile_post(queries: str = Form("")):
"""Reconciliation API, emulates Google Refine API."""
return reconcile_queries(queries)
def reconcile_queries(queries):
# multiple requests in one query
try:
queries = json.loads(queries)
except ValueError:
raise HTTPException(status_code=400, detail="Cannot decode query")
results = {}
for k, q in queries.items():
results[k] = reconcile_query(q)
# log.info("RESULTS: %r" % results)
return results
def reconcile_query(query):
"""Reconcile operation for a single query."""
# log.info("Reconcile: %r", query)
limit = int(query.get("limit", 5))
type = query.get("type", settings.BASE_SCHEMA)
loader = get_loader()
index = get_index(loader)
proxy = Entity(type)
proxy.add("name", query.get("query"))
proxy.add("notes", query.get("query"))
for p in query.get("properties", []):
prop = model.get_qname(p.get("pid"))
if prop is None:
continue
try:
proxy.add_cast(prop.schema, prop.name, p.get("v"), fuzzy=True)
except InvalidData:
log.exception("Invalid property is set.")
results = []
# log.info("QUERY %r %s", proxy.to_dict(), limit)
for result, score in index.match_entities(proxy, limit=limit, fuzzy=True):
results.append(get_freebase_entity(result, score))
return {"result": results}
@app.get("/reconcile/suggest/entity")
def reconcile_suggest_entity(prefix: str = "", limit: int = 10):
"""Suggest an entity API, emulates Google Refine API.
This is functionally very similar to the basic search API, but returns
data in the structure assumed by the
[Reconciliation API](https://reconciliation-api.github.io/specs/latest/#suggest-services).
Searches are conducted based on name and text content, using all matchable
entities in the system index."""
loader = get_loader()
index = get_index(loader)
query = Entity(settings.BASE_SCHEMA)
query.add("name", prefix)
query.add("notes", prefix)
results = []
for result, score in index.match_entities(query, limit=limit, fuzzy=True):
results.append(get_freebase_entity(result, score))
return {
"code": "/api/status/ok",
"status": "200 OK",
"prefix": prefix,
"result": results,
}
@app.get("/reconcile/suggest/property")
def reconcile_suggest_property(prefix: str = ""):
"""Given a search prefix, return all the type/schema properties which match
the given text. This is used to auto-complete property selection for detail
filters in OpenRefine."""
matches = []
for prop in model.properties:
if not prop.schema.is_a(settings.BASE_SCHEMA):
continue
        if prop.hidden or prop.type == registry.entity:
continue
if match_prefix(prefix, prop.name, prop.label):
matches.append(get_freebase_property(prop))
return {
"code": "/api/status/ok",
"status": "200 OK",
"prefix": prefix,
"result": matches,
}
@app.get("/reconcile/suggest/type")
def suggest_type(prefix: str = ""):
"""Given a search prefix, return all the types (i.e. schema) which match
the given text. This is used to auto-complete type selection for the
configuration of reconciliation in OpenRefine."""
matches = []
for schema in get_matchable_schemata():
if match_prefix(prefix, schema.name, schema.label):
matches.append(get_freebase_type(schema))
return {
"code": "/api/status/ok",
"status": "200 OK",
"prefix": prefix,
"result": matches,
}
|
vumi/transports/mtech_ussd/tests/test_mtech_ussd.py | seidu626/vumi | 199 | 12678541 | from twisted.internet.defer import inlineCallbacks, returnValue
from vumi.utils import http_request_full
from vumi.message import TransportUserMessage
from vumi.transports.mtech_ussd import MtechUssdTransport
from vumi.transports.mtech_ussd.mtech_ussd import MtechUssdResponse
from vumi.transports.tests.helpers import TransportHelper
from vumi.tests.helpers import VumiTestCase
class TestMtechUssdTransport(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.config = {
'transport_type': 'ussd',
'ussd_string_prefix': '*120*666#',
'web_path': "/foo",
'web_host': "127.0.0.1",
'web_port': 0,
'username': 'testuser',
'password': '<PASSWORD>',
}
self.tx_helper = self.add_helper(TransportHelper(MtechUssdTransport))
self.transport = yield self.tx_helper.get_transport(self.config)
self.transport_url = self.transport.get_transport_url().rstrip('/')
self.url = "%s%s" % (self.transport_url, self.config['web_path'])
yield self.transport.session_manager.redis._purge_all() # just in case
def make_ussd_request_full(self, session_id, **kwargs):
lines = [
'<?xml version="1.0" encoding="UTF-8"?>',
'<page version="2.0">',
' <session_id>%s</session_id>' % (session_id,),
]
for k, v in kwargs.items():
lines.append(' <%s>%s</%s>' % (k, v, k))
lines.append('</page>')
data = '\n'.join(lines)
return http_request_full(self.url, data, method='POST')
def make_ussd_request(self, session_id, **kwargs):
return self.make_ussd_request_full(session_id, **kwargs).addCallback(
lambda r: r.delivered_body)
@inlineCallbacks
def reply_to_message(self, content, **kw):
[msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
yield self.tx_helper.make_dispatch_reply(msg, content, **kw)
returnValue(msg)
@inlineCallbacks
def test_empty_request(self):
response = yield http_request_full(self.url, "", method='POST')
self.assertEqual(response.code, 400)
@inlineCallbacks
def test_bad_request(self):
response = yield http_request_full(self.url, "blah", method='POST')
self.assertEqual(response.code, 400)
@inlineCallbacks
def test_inbound_new_continue(self):
sid = 'a41739890287485d968ea66e8b44bfd3'
response_d = self.make_ussd_request(
sid, mobile_number='2348085832481', page_id='0',
data='testmenu', gate='gateid')
msg = yield self.reply_to_message("OK\n1 < 2")
self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
self.assertEqual(msg['transport_type'], "ussd")
self.assertEqual(msg['transport_metadata'], {"session_id": sid})
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_NEW)
self.assertEqual(msg['from_addr'], '2348085832481')
# self.assertEqual(msg['to_addr'], '*120*666#')
self.assertEqual(msg['content'], 'testmenu')
response = yield response_d
correct_response = ''.join([
"<?xml version='1.0' encoding='UTF-8'?>",
'<page version="2.0">',
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>',
'<div>OK<br />1 < 2</div>',
'<navigation>',
'<link accesskey="*" pageId="indexX" />',
'</navigation>',
'</page>',
])
self.assertEqual(response, correct_response)
@inlineCallbacks
def test_inbound_resume_continue(self):
sid = 'a41739890287485d968ea66e8b44bfd3'
yield self.transport.save_session(sid, '2348085832481', '*120*666#')
response_d = self.make_ussd_request(sid, page_id="indexX", data="foo")
msg = yield self.reply_to_message("OK")
self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
self.assertEqual(msg['transport_type'], "ussd")
self.assertEqual(msg['transport_metadata'], {"session_id": sid})
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_RESUME)
self.assertEqual(msg['from_addr'], '2348085832481')
self.assertEqual(msg['to_addr'], '*120*666#')
self.assertEqual(msg['content'], 'foo')
response = yield response_d
correct_response = ''.join([
"<?xml version='1.0' encoding='UTF-8'?>",
'<page version="2.0">',
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>',
'<div>OK</div>',
'<navigation>',
'<link accesskey="*" pageId="indexX" />',
'</navigation>',
'</page>',
])
self.assertEqual(response, correct_response)
@inlineCallbacks
def test_nack(self):
msg = yield self.tx_helper.make_dispatch_outbound("outbound")
[nack] = yield self.tx_helper.wait_for_dispatched_events(1)
self.assertEqual(nack['user_message_id'], msg['message_id'])
self.assertEqual(nack['sent_message_id'], msg['message_id'])
self.assertEqual(nack['nack_reason'],
'Missing in_reply_to, content or session_id')
@inlineCallbacks
def test_inbound_missing_session(self):
sid = 'a41739890287485d968ea66e8b44bfd3'
response = yield self.make_ussd_request_full(
sid, page_id="indexX", data="foo")
self.assertEqual(400, response.code)
self.assertEqual('', response.delivered_body)
@inlineCallbacks
def test_inbound_new_and_resume(self):
sid = 'a41739890287485d968ea66e8b44bfd3'
response_d = self.make_ussd_request(
sid, mobile_number='2348085832481', page_id='0',
data='testmenu', gate='gateid')
msg = yield self.reply_to_message("OK\n1 < 2")
self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
self.assertEqual(msg['transport_type'], "ussd")
self.assertEqual(msg['transport_metadata'], {"session_id": sid})
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_NEW)
self.assertEqual(msg['from_addr'], '2348085832481')
# self.assertEqual(msg['to_addr'], '*120*666#')
self.assertEqual(msg['content'], 'testmenu')
response = yield response_d
correct_response = ''.join([
"<?xml version='1.0' encoding='UTF-8'?>",
'<page version="2.0">',
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>',
'<div>OK<br />1 < 2</div>',
'<navigation>',
'<link accesskey="*" pageId="indexX" />',
'</navigation>',
'</page>',
])
self.assertEqual(response, correct_response)
self.tx_helper.clear_all_dispatched()
response_d = self.make_ussd_request(sid, page_id="indexX", data="foo")
msg = yield self.reply_to_message("OK")
self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
self.assertEqual(msg['transport_type'], "ussd")
self.assertEqual(msg['transport_metadata'], {"session_id": sid})
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_RESUME)
self.assertEqual(msg['from_addr'], '2348085832481')
self.assertEqual(msg['to_addr'], 'gateid')
self.assertEqual(msg['content'], 'foo')
response = yield response_d
correct_response = ''.join([
"<?xml version='1.0' encoding='UTF-8'?>",
'<page version="2.0">',
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>',
'<div>OK</div>',
'<navigation>',
'<link accesskey="*" pageId="indexX" />',
'</navigation>',
'</page>',
])
self.assertEqual(response, correct_response)
@inlineCallbacks
def test_inbound_resume_close(self):
sid = 'a41739890287485d968ea66e8b44bfd3'
yield self.transport.save_session(sid, '2348085832481', '*120*666#')
response_d = self.make_ussd_request(sid, page_id="indexX", data="foo")
msg = yield self.reply_to_message("OK", continue_session=False)
self.assertEqual(msg['transport_name'], self.tx_helper.transport_name)
self.assertEqual(msg['transport_type'], "ussd")
self.assertEqual(msg['transport_metadata'], {"session_id": sid})
self.assertEqual(msg['session_event'],
TransportUserMessage.SESSION_RESUME)
self.assertEqual(msg['from_addr'], '2348085832481')
self.assertEqual(msg['to_addr'], '*120*666#')
self.assertEqual(msg['content'], 'foo')
response = yield response_d
correct_response = ''.join([
"<?xml version='1.0' encoding='UTF-8'?>",
'<page version="2.0">',
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>',
'<div>OK</div>',
'</page>',
])
self.assertEqual(response, correct_response)
@inlineCallbacks
def test_inbound_cancel(self):
sid = 'a41739890287485d968ea66e8b44bfd3'
yield self.transport.save_session(sid, '2348085832481', '*120*666#')
response = yield self.make_ussd_request(sid, status="1")
correct_response = ''.join([
"<?xml version='1.0' encoding='UTF-8'?>",
'<page version="2.0">',
'<session_id>a41739890287485d968ea66e8b44bfd3</session_id>',
'</page>',
])
self.assertEqual(response, correct_response)
class TestMtechUssdResponse(VumiTestCase):
def setUp(self):
self.mur = MtechUssdResponse("sid123")
def assert_message_xml(self, *lines):
xml_str = ''.join(
["<?xml version='1.0' encoding='UTF-8'?>"] + list(lines))
self.assertEqual(self.mur.to_xml(), xml_str)
def test_empty_response(self):
self.assert_message_xml(
'<page version="2.0">',
'<session_id>sid123</session_id>',
'</page>')
def test_free_text(self):
self.mur.add_text("Please enter your name")
self.mur.add_freetext_option()
self.assert_message_xml(
'<page version="2.0">',
'<session_id>sid123</session_id>',
'<div>Please enter your name</div>',
'<navigation><link accesskey="*" pageId="indexX" /></navigation>',
'</page>')
def test_menu_options(self):
self.mur.add_text("Please choose:")
self.mur.add_menu_item('chicken', '1')
self.mur.add_menu_item('beef', '2')
self.assert_message_xml(
'<page version="2.0">',
'<session_id>sid123</session_id>',
'<div>Please choose:</div>',
'<navigation>',
'<link accesskey="1" pageId="index1">chicken</link>',
'<link accesskey="2" pageId="index2">beef</link>',
'</navigation>',
'</page>')
def test_menu_options_title(self):
self.mur.add_title("LUNCH")
self.mur.add_text("Please choose:")
self.mur.add_menu_item('chicken', '1')
self.mur.add_menu_item('beef', '2')
self.assert_message_xml(
'<page version="2.0">',
'<session_id>sid123</session_id>',
'<title>LUNCH</title>',
'<div>Please choose:</div>',
'<navigation>',
'<link accesskey="1" pageId="index1">chicken</link>',
'<link accesskey="2" pageId="index2">beef</link>',
'</navigation>',
'</page>')
|
tools/telemetry/telemetry/page/actions/click_element.py | iplo/Chain | 231 | 12678596 |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from telemetry.core import util
from telemetry.core import exceptions
from telemetry.page.actions import page_action
def _EscapeSelector(selector):
return selector.replace('\'', '\\\'')
class ClickElementAction(page_action.PageAction):
def __init__(self, attributes=None):
super(ClickElementAction, self).__init__(attributes)
def RunAction(self, page, tab, previous_action):
def DoClick():
if hasattr(self, 'selector'):
code = ('document.querySelector(\'' + _EscapeSelector(self.selector) +
'\').click();')
try:
tab.ExecuteJavaScript(code)
except exceptions.EvaluateException:
raise page_action.PageActionFailed(
'Cannot find element with selector ' + self.selector)
elif hasattr(self, 'text'):
callback_code = 'function(element) { element.click(); }'
try:
util.FindElementAndPerformAction(tab, self.text, callback_code)
except exceptions.EvaluateException:
raise page_action.PageActionFailed(
'Cannot find element with text ' + self.text)
elif hasattr(self, 'xpath'):
code = ('document.evaluate("%s",'
'document,'
'null,'
'XPathResult.FIRST_ORDERED_NODE_TYPE,'
'null)'
'.singleNodeValue.click()' % re.escape(self.xpath))
try:
tab.ExecuteJavaScript(code)
except exceptions.EvaluateException:
raise page_action.PageActionFailed(
'Cannot find element with xpath ' + self.xpath)
else:
raise page_action.PageActionFailed(
'No condition given to click_element')
DoClick()
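# Example sketch of the attribute sets handled above (hypothetical selectors):
#   {'selector': '#submit-button'}  - click the first match of a CSS selector
#   {'text': 'Sign in'}             - click the element containing the given text
#   {'xpath': '//button[1]'}        - click the node located by an XPath expression
# Exactly one of selector/text/xpath is expected; otherwise PageActionFailed is raised.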
|
pygubu/builder/widgets/tkinterscrolledtext.py | larryw3i/pygubu | 1,716 | 12678598 |
# encoding: utf8
from __future__ import unicode_literals
try:
import tkinter as tk
from tkinter.scrolledtext import ScrolledText
except ImportError:
import Tkinter as tk
from ScrolledText import ScrolledText
from pygubu.builder.builderobject import BuilderObject, register_widget
from pygubu.builder.tkstdwidgets import TKText
class TkinterScrolledTextBO(TKText):
class_ = ScrolledText
register_widget('pygubu.builder.widgets.tkinterscrolledtext',
TkinterScrolledTextBO,
'ScrolledText', ('Control & Display', 'tk'))
|
data_structures/array/number_of_elements_that_can_searched_using_binary_search.py | ruler30cm/python-ds | 1,723 | 12678603 |
"""
Given an unsorted integer array, find the number of elements
that can be searched using binary search.
The idea is that an element is binary searchable if the elements to the
left of it are smaller than it and the elements to the right of it
are bigger than it.
So maintain two arrays - left_max and right_min such that at the i'th index -
* left_max[i] contains the max element between 0 and i-1 (left to right movement)
* right_min[i] contains the min element between i+1 and n-1 (right to left movement)
Now for every element in the array, if its index is i, then it is binary searchable
if left_max[i] < arr[i] < right_min[i]
"""
import sys
def get_searchable_numbers(arr, n):
left_max = [None] * n
right_min = [None] * n
left_max[0] = float('-inf')
right_min[n-1] = float('inf')
for i in range(1, n):
left_max[i] = max(left_max[i-1], arr[i-1])
for i in range(len(arr) - 2, -1, -1):
right_min[i] = min(right_min[i+1], arr[i+1])
res = []
count = 0
for i in range(0, n):
num = arr[i]
left = left_max[i]
right = right_min[i]
if left < num < right:
res.append(num)
count += 1
return count, res
if __name__ == '__main__':
#arr = [5,1,4,3,6,8,10,7,9]
arr = [4,1,3,9,8,10,11]
count, res = get_searchable_numbers(arr, len(arr))
print(count, res)
|
Robinhood.py | Travisivart/robinhood-to-csv | 256 | 12678604 | import json
import requests
import urllib
try:
from urllib.request import urlretrieve #py3
except ImportError:
from urllib import urlretrieve # py2
class Robinhood:
endpoints = {
"accounts": "https://api.robinhood.com/accounts/",
"ach_iav_auth": "https://api.robinhood.com/ach/iav/auth/",
"ach_relationships": "https://api.robinhood.com/ach/relationships/",
"ach_transfers": "https://api.robinhood.com/ach/transfers/",
"applications": "https://api.robinhood.com/applications/",
"document_requests": "https://api.robinhood.com/upload/document_requests/",
"dividends": "https://api.robinhood.com/dividends/",
"edocuments": "https://api.robinhood.com/documents/",
"employment": "https://api.robinhood.com/user/employment",
"investment_profile": "https://api.robinhood.com/user/investment_profile/",
"instruments": "https://api.robinhood.com/instruments/",
"login": "https://api.robinhood.com/oauth2/token/",
"margin_upgrades": "https://api.robinhood.com/margin/upgrades/",
"markets": "https://api.robinhood.com/markets/",
"notification_settings": "https://api.robinhood.com/settings/notifications/",
"notifications": "https://api.robinhood.com/notifications/",
"orders": "https://api.robinhood.com/orders/",
"password_reset": "https://api.robinhood.com/password_reset/request/",
"portfolios": "https://api.robinhood.com/portfolios/",
"positions": "https://api.robinhood.com/positions/",
"quotes": "https://api.robinhood.com/quotes/",
"user": "https://api.robinhood.com/user/",
"watchlists": "https://api.robinhood.com/watchlists/",
"optionsOrders":"https://api.robinhood.com/options/orders/",
"optionsPositions":"https://api.robinhood.com/options/positions/"
}
session = None
username = None
password = <PASSWORD>
headers = None
auth_token = None
positions = None
client_id = "c82SH0WZOsabOXGP2sxqcj34FxkvfnWRZBKlBjFS"
##############################
#Logging in and initializing
##############################
def __init__(self):
self.session = requests.session()
try:
self.session.proxies = urllib.getproxies() #py2
except:
self.session.proxies = urllib.request.getproxies() #py3
self.headers = {
"Accept": "*/*",
"Accept-Encoding": "gzip, deflate",
"Accept-Language": "en;q=1, fr;q=0.9, de;q=0.8, ja;q=0.7, nl;q=0.6, it;q=0.5",
"Content-Type": "application/x-www-form-urlencoded; charset=utf-8",
"X-Robinhood-API-Version": "1.0.0",
"Connection": "keep-alive",
"User-Agent": "Robinhood/823 (iPhone; iOS 7.1.2; Scale/2.00)"
}
self.session.headers = self.headers
def login(self, username, password, device_token ,mfa_code=None):
self.username = username
self.password = password
self.mfa_code = mfa_code
self.device_token = device_token
if mfa_code:
fields = {
'password' : <PASSWORD>,
'username' : self.username,
'mfa_code': self.mfa_code,
'grant_type': 'password',
'client_id': self.client_id,
'device_token': self.device_token
}
else:
fields = {
'password' : <PASSWORD>,
'username' : self.username,
'grant_type': 'password',
'client_id': self.client_id,
'device_token':self.device_token
}
try:
data = urllib.urlencode(fields) #py2
except:
data = urllib.parse.urlencode(fields) #py3
res = self.session.post(self.endpoints['login'], data=data)
res = res.json()
try:
self.auth_token = res['access_token']
except KeyError:
return res
self.headers['Authorization'] = 'Bearer ' + self.auth_token
return True
##############################
#GET DATA
##############################
def get_endpoint(self, endpoint=None):
res = self.session.get(self.endpoints[endpoint])
return json.loads(res.content.decode('utf-8'))
def get_custom_endpoint(self, endpoint=None):
res = self.session.get(endpoint)
return json.loads(res.content.decode('utf-8'))
def investment_profile(self):
self.session.get(self.endpoints['investment_profile'])
def instruments(self, stock=None):
if stock == None:
res = self.session.get(self.endpoints['instruments'])
else:
res = self.session.get(self.endpoints['instruments'], params={'query':stock.upper()})
res = res.json()
return res['results']
def quote_data(self, stock=None):
#Prompt for stock if not entered
if stock is None:
stock = raw_input("Symbol: ");
url = str(self.endpoints['quotes']) + str(stock) + "/"
#Check for validity of symbol
try:
res = json.loads((urllib.urlopen(url)).read());
if len(res) > 0:
return res;
else:
raise NameError("Invalid Symbol: " + stock);
except (ValueError):
raise NameError("Invalid Symbol: " + stock);
def get_quote(self, stock=None):
data = self.quote_data(stock)
return data["symbol"]
def print_quote(self, stock=None):
data = self.quote_data(stock)
print(data["symbol"] + ": $" + data["last_trade_price"]);
def print_quotes(self, stocks):
for i in range(len(stocks)):
self.print_quote(stocks[i]);
def ask_price(self, stock=None):
return self.quote_data(stock)['ask_price'];
def ask_size(self, stock=None):
return self.quote_data(stock)['ask_size'];
def bid_price(self, stock=None):
return self.quote_data(stock)['bid_price'];
def bid_size(self, stock=None):
return self.quote_data(stock)['bid_size'];
def last_trade_price(self, stock=None):
return self.quote_data(stock)['last_trade_price'];
def previous_close(self, stock=None):
return self.quote_data(stock)['previous_close'];
def previous_close_date(self, stock=None):
return self.quote_data(stock)['previous_close_date'];
def adjusted_previous_close(self, stock=None):
return self.quote_data(stock)['adjusted_previous_close'];
def symbol(self, stock=None):
return self.quote_data(stock)['symbol'];
def last_updated_at(self, stock=None):
return self.quote_data(stock)['updated_at'];
##############################
#PLACE ORDER
##############################
def place_order(self, instrument, quantity=1, bid_price = None, transaction=None):
# cache the account ID that's needed for placing orders
if self.positions == None:
self.positions = self.get_endpoint("positions")['results']
if bid_price == None:
bid_price = self.quote_data(instrument['symbol'])[0]['bid_price']
        # use the requested side (buy/sell) instead of hard-coding side=buy
        data = 'account=%s&instrument=%s&price=%f&quantity=%d&side=%s&symbol=%s&time_in_force=gfd&trigger=immediate&type=market' % (urllib.quote(self.positions[0]['account']), urllib.unquote(instrument['url']), float(bid_price), quantity, transaction, instrument['symbol'])
res = self.session.post(self.endpoints['orders'], data=data)
return res
def place_buy_order(self, instrument, quantity, bid_price=None):
transaction = "buy"
return self.place_order(instrument, quantity, bid_price, transaction)
def place_sell_order(self, instrument, quantity, bid_price=None):
transaction = "sell"
return self.place_order(instrument, quantity, bid_price, transaction)
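# Example usage sketch (placeholder credentials; these unofficial endpoints may have
# changed since this module was written):
#   rh = Robinhood()
#   rh.login(username='someuser', password='<PASSWORD>', device_token='<device_token>')
#   rh.print_quote('MSFT')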
|
pypyr/steps/contextmerge.py | mofm/pypyr | 261 | 12678613 | """pypyr step that merges the input mappings into context.
Whereas contextcopy and contextsetf overwrite values that are in context
already, contextmerge merges its input into context, preserving the existing
hierarchy while just updating the values where specified in the contextmerge
input.
Applies string interpolation as it merges. String interpolation applies to keys
and values.
"""
import logging
# logger means the log level will be set correctly
logger = logging.getLogger(__name__)
def run_step(context):
"""Merge hierarchy into context with substitutions.
context is a dictionary or dictionary-like.
context['contextMerge'] must exist. It's a dictionary.
Will iterate context['contextMerge'] and save the values as new keys to the
context where they exist already, and add these as new values where they
don't already exist. While it's doing so, it will leave unspecified values
in the existing hierarchy untouched.
List merging is purely additive, with no checks for uniqueness or already
existing list items. E.g context [0,1,2] with contextMerge=[2,3,4]
will result in [0,1,2,2,3,4]
Keep this in mind especially where complex types like
dicts nest inside a list - a merge will always add a new dict list item,
not merge it into whatever dicts might exist on the list already.
For example, say input context is:
key1: value1
key2: value2
key3:
k31: value31
k32: value32
contextMerge:
key2: 'aaa_{key1}_zzz'
key3:
k33: value33
key4: 'bbb_{key2}_yyy'
This will result in return context:
key1: value1
key2: aaa_value1_zzz
key3:
k31: value31
k32: value32
k33: value33
key4: <KEY>
"""
logger.debug("started")
context.assert_key_has_value(key='contextMerge', caller=__name__)
context.merge(context['contextMerge'])
logger.info("merged %d context items.", len(context['contextMerge']))
logger.debug("done")
|
kedro/framework/cli/micropkg.py | daniel-falk/kedro | 2,047 | 12678614 |
"""A collection of CLI commands for working with Kedro micro-packages."""
import re
import shutil
import sys
import tarfile
import tempfile
from importlib import import_module
from pathlib import Path
from typing import Iterable, List, Optional, Set, Tuple, Union
import click
import pkg_resources
from rope.base.project import Project
from rope.contrib import generate
from rope.refactor.move import MoveModule
from rope.refactor.rename import Rename
from kedro.framework.cli.pipeline import (
_assert_pkg_name_ok,
_check_pipeline_name,
_get_artifacts_to_package,
_sync_dirs,
)
from kedro.framework.cli.utils import (
KedroCliError,
_clean_pycache,
call,
command_with_verbosity,
env_option,
python_call,
)
from kedro.framework.startup import ProjectMetadata
_SETUP_PY_TEMPLATE = """# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name="{name}",
version="{version}",
description="Micro-package `{name}`",
packages=find_packages(),
include_package_data=True,
install_requires={install_requires},
)
"""
def _check_module_path(ctx, param, value): # pylint: disable=unused-argument
if value and not re.match(r"^[\w.]+$", value):
message = (
"The micro-package location you provided is not a valid Python module path"
)
raise KedroCliError(message)
return value
# pylint: disable=missing-function-docstring
@click.group(name="Kedro")
def micropkg_cli(): # pragma: no cover
pass
@micropkg_cli.group()
def micropkg():
"""Commands for working with micro-packages."""
@command_with_verbosity(micropkg, "pull")
@click.argument("package_path", nargs=1, required=False)
@click.option(
"--all",
"-a",
"all_flag",
is_flag=True,
help="Pull and unpack all micro-packages in the `pyproject.toml` package manifest section.",
)
@env_option(
help="Environment to install the micro-package configuration to. Defaults to `base`."
)
@click.option("--alias", type=str, default="", help="Rename the package.")
@click.option(
"-d",
"--destination",
type=click.Path(file_okay=False, dir_okay=False),
default=None,
help="Module location where to unpack under.",
)
@click.option(
"--fs-args",
type=click.Path(
exists=True, file_okay=True, dir_okay=False, readable=True, resolve_path=True
),
default=None,
help="Location of a configuration file for the fsspec filesystem used to pull the package.",
)
@click.pass_obj # this will pass the metadata as first argument
def pull_package( # pylint:disable=unused-argument, too-many-arguments
metadata: ProjectMetadata,
package_path,
env,
alias,
destination,
fs_args,
all_flag,
**kwargs,
) -> None:
"""Pull and unpack a modular pipeline and other micro-packages in your project."""
if not package_path and not all_flag:
click.secho(
"Please specify a package path or add '--all' to pull all micro-packages in the "
"`pyproject.toml` package manifest section."
)
sys.exit(1)
if all_flag:
_pull_packages_from_manifest(metadata)
return
_pull_package(
package_path,
metadata,
env=env,
alias=alias,
destination=destination,
fs_args=fs_args,
)
as_alias = f" as `{alias}`" if alias else ""
message = f"Micro-package {package_path} pulled and unpacked{as_alias}!"
click.secho(message, fg="green")
# pylint: disable=too-many-arguments, too-many-locals
def _pull_package(
package_path: str,
metadata: ProjectMetadata,
env: str = None,
alias: str = None,
destination: str = None,
fs_args: str = None,
):
with tempfile.TemporaryDirectory() as temp_dir:
temp_dir_path = Path(temp_dir).resolve()
_unpack_sdist(package_path, temp_dir_path, fs_args)
sdist_file_name = Path(package_path).name.rstrip(".tar.gz")
egg_info_file = list((temp_dir_path / sdist_file_name).glob("*.egg-info"))
if len(egg_info_file) != 1:
raise KedroCliError(
f"More than 1 or no egg-info files found from {package_path}. "
f"There has to be exactly one egg-info directory."
)
package_name = egg_info_file[0].stem
package_requirements = temp_dir_path / sdist_file_name / "setup.py"
# Finds a string representation of 'install_requires' list from setup.py
reqs_list_pattern = r"install_requires\=(.*?)\,\n"
list_reqs = re.findall(
reqs_list_pattern, package_requirements.read_text(encoding="utf-8")
)
# Finds all elements from the above string representation of a list
reqs_element_pattern = r"\'(.*?)\'"
package_reqs = re.findall(reqs_element_pattern, list_reqs[0])
if package_reqs:
requirements_txt = metadata.source_dir / "requirements.txt"
_append_package_reqs(requirements_txt, package_reqs, package_name)
_clean_pycache(temp_dir_path)
_install_files(
metadata,
package_name,
temp_dir_path / sdist_file_name,
env,
alias,
destination,
)
def _pull_packages_from_manifest(metadata: ProjectMetadata) -> None:
# pylint: disable=import-outside-toplevel
import anyconfig # for performance reasons
config_dict = anyconfig.load(metadata.config_file)
config_dict = config_dict["tool"]["kedro"]
build_specs = config_dict.get("micropkg", {}).get("pull")
if not build_specs:
click.secho(
"Nothing to pull. Please update the `pyproject.toml` package manifest section.",
fg="yellow",
)
return
for package_path, specs in build_specs.items():
if "alias" in specs:
_assert_pkg_name_ok(specs["alias"].split(".")[-1])
_pull_package(package_path, metadata, **specs)
click.secho(f"Pulled and unpacked `{package_path}`!")
click.secho("Micro-packages pulled and unpacked!", fg="green")
def _package_micropkgs_from_manifest(metadata: ProjectMetadata) -> None:
# pylint: disable=import-outside-toplevel
import anyconfig # for performance reasons
config_dict = anyconfig.load(metadata.config_file)
config_dict = config_dict["tool"]["kedro"]
build_specs = config_dict.get("micropkg", {}).get("package")
if not build_specs:
click.secho(
"Nothing to package. Please update the `pyproject.toml` package manifest section.",
fg="yellow",
)
return
for package_name, specs in build_specs.items():
if "alias" in specs:
_assert_pkg_name_ok(specs["alias"])
_package_micropkg(package_name, metadata, **specs)
click.secho(f"Packaged `{package_name}` micro-package!")
click.secho("Micro-packages packaged!", fg="green")
@micropkg.command("package")
@env_option(
help="Environment where the micro-package configuration lives. Defaults to `base`."
)
@click.option(
"--alias",
type=str,
default="",
callback=_check_pipeline_name,
help="Alternative name to package under.",
)
@click.option(
"-d",
"--destination",
type=click.Path(resolve_path=True, file_okay=False),
help="Location where to create the source distribution file. Defaults to `dist/`.",
)
@click.option(
"--all",
"-a",
"all_flag",
is_flag=True,
help="Package all micro-packages in the `pyproject.toml` package manifest section.",
)
@click.argument("module_path", nargs=1, required=False, callback=_check_module_path)
@click.pass_obj # this will pass the metadata as first argument
def package_micropkg(
metadata: ProjectMetadata, module_path, env, alias, destination, all_flag
): # pylint: disable=too-many-arguments
"""Package up a modular pipeline or micro-package as a Python source distribution."""
if not module_path and not all_flag:
click.secho(
"Please specify a micro-package name or add '--all' to package all micro-packages in "
"the `pyproject.toml` package manifest section."
)
sys.exit(1)
if all_flag:
_package_micropkgs_from_manifest(metadata)
return
result_path = _package_micropkg(
module_path, metadata, alias=alias, destination=destination, env=env
)
as_alias = f" as `{alias}`" if alias else ""
message = (
f"`{metadata.package_name}.{module_path}` packaged{as_alias}! "
f"Location: {result_path}"
)
click.secho(message, fg="green")
def _get_fsspec_filesystem(location: str, fs_args: Optional[str]):
# pylint: disable=import-outside-toplevel
import anyconfig
import fsspec
from kedro.io.core import get_protocol_and_path
protocol, _ = get_protocol_and_path(location)
fs_args_config = anyconfig.load(fs_args) if fs_args else {}
try:
return fsspec.filesystem(protocol, **fs_args_config)
except Exception as exc: # pylint: disable=broad-except
# Specified protocol is not supported by `fsspec`
# or requires extra dependencies
click.secho(str(exc), fg="red")
click.secho("Trying to use 'pip download'...", fg="red")
return None
def _unpack_sdist(location: str, destination: Path, fs_args: Optional[str]) -> None:
filesystem = _get_fsspec_filesystem(location, fs_args)
if location.endswith(".tar.gz") and filesystem and filesystem.exists(location):
with filesystem.open(location) as fs_file:
with tarfile.open(fileobj=fs_file, mode="r:gz") as tar_file:
tar_file.extractall(destination)
else:
python_call(
"pip", ["download", "--no-deps", "--dest", str(destination), location]
)
sdist_file = list(destination.glob("*.tar.gz"))
# `--no-deps` should fetch only one source distribution file, and CLI should fail if that's
# not the case.
if len(sdist_file) != 1:
file_names = [sf.name for sf in sdist_file]
raise KedroCliError(
f"More than 1 or no sdist files found: {file_names}. "
f"There has to be exactly one source distribution file."
)
with tarfile.open(sdist_file[0], "r:gz") as fs_file:
fs_file.extractall(destination)
def _rename_files(conf_source: Path, old_name: str, new_name: str):
config_files_to_rename = (
each
for each in conf_source.rglob("*")
if each.is_file() and old_name in each.name
)
for config_file in config_files_to_rename:
new_config_name = config_file.name.replace(old_name, new_name)
config_file.rename(config_file.parent / new_config_name)
def _refactor_code_for_unpacking(
project: Project,
package_path: Path,
tests_path: Path,
alias: Optional[str],
destination: Optional[str],
project_metadata: ProjectMetadata,
) -> Tuple[Path, Path]:
"""This is the reverse operation of `_refactor_code_for_package`, i.e
we go from:
<temp_dir> # also the root of the Rope project
|__ <micro_package> # or <alias>
|__ __init__.py
|__ tests # only tests for <micro_package>
|__ __init__.py
|__ tests.py
to:
<temp_dir> # also the root of the Rope project
|__ <project_package>
|__ __init__.py
|__ <path_to_micro_package>
|__ __init__.py
|__ <micro_package>
|__ __init__.py
|__ tests
|__ __init__.py
|__ <path_to_micro_package>
|__ __init__.py
|__ <micro_package>
|__ __init__.py
"""
def _move_package_with_conflicting_name(
target: Path, original_name: str, desired_name: str = None
) -> Path:
_rename_package(project, original_name, "tmp_name")
full_path = _create_nested_package(project, target)
_move_package(project, "tmp_name", target.as_posix())
desired_name = desired_name or original_name
_rename_package(project, (target / "tmp_name").as_posix(), desired_name)
return full_path
package_name = package_path.stem
package_target = Path(project_metadata.package_name)
tests_target = Path("tests")
if destination:
destination_path = Path(destination)
package_target = package_target / destination_path
tests_target = tests_target / destination_path
if alias and alias != package_name:
_rename_package(project, package_name, alias)
package_name = alias
if package_name == project_metadata.package_name:
full_path = _move_package_with_conflicting_name(package_target, package_name)
else:
full_path = _create_nested_package(project, package_target)
_move_package(project, package_name, package_target.as_posix())
refactored_package_path = full_path / package_name
if not tests_path.exists():
return refactored_package_path, tests_path
# we can't rename the tests package to <package_name>
# because it will conflict with existing top-level package;
# hence we give it a temp name, create the expected
# nested folder structure, move the contents there,
# then rename the temp name to <package_name>.
full_path = _move_package_with_conflicting_name(
tests_target, original_name="tests", desired_name=package_name
)
refactored_tests_path = full_path / package_name
return refactored_package_path, refactored_tests_path
def _install_files( # pylint: disable=too-many-arguments, too-many-locals
project_metadata: ProjectMetadata,
package_name: str,
source_path: Path,
env: str = None,
alias: str = None,
destination: str = None,
):
env = env or "base"
package_source, test_source, conf_source = _get_package_artifacts(
source_path, package_name
)
if conf_source.is_dir() and alias:
_rename_files(conf_source, package_name, alias)
module_path = alias or package_name
if destination:
module_path = f"{destination}.{module_path}"
package_dest, test_dest, conf_dest = _get_artifacts_to_package(
project_metadata, module_path=module_path, env=env
)
if conf_source.is_dir():
_sync_dirs(conf_source, conf_dest)
# `config` dir was packaged under `package_name` directory with
# `kedro micropkg package`. Since `config` was already synced,
# we don't want to copy it again when syncing the package, so we remove it.
shutil.rmtree(str(conf_source))
project = Project(source_path)
refactored_package_source, refactored_test_source = _refactor_code_for_unpacking(
project, package_source, test_source, alias, destination, project_metadata
)
project.close()
if refactored_test_source.is_dir():
_sync_dirs(refactored_test_source, test_dest)
# Sync everything under package directory, except `config`
# since it has already been copied.
if refactored_package_source.is_dir():
_sync_dirs(refactored_package_source, package_dest)
def _find_config_files(
source_config_dir: Path, glob_patterns: List[str]
) -> List[Tuple[Path, str]]:
config_files = [] # type: List[Tuple[Path, str]]
if source_config_dir.is_dir():
config_files = [
(path, path.parent.relative_to(source_config_dir).as_posix())
for glob_pattern in glob_patterns
for path in source_config_dir.glob(glob_pattern)
if path.is_file()
]
return config_files
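# Example sketch (hypothetical layout): for a source config dir containing
# parameters/my_micropkg.yml, the glob "parameters*/**/my_micropkg.yml" would yield
# [(Path(".../parameters/my_micropkg.yml"), "parameters")], i.e. each config file
# paired with its parent directory relative to the config dir.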
def _get_default_version(metadata: ProjectMetadata, micropkg_module_path: str) -> str:
# default to micropkg package version
try:
micropkg_module = import_module(
f"{metadata.package_name}.{micropkg_module_path}"
)
return micropkg_module.__version__ # type: ignore
except (AttributeError, ModuleNotFoundError):
# if micropkg version doesn't exist, take the project one
project_module = import_module(f"{metadata.package_name}")
return project_module.__version__ # type: ignore
def _package_micropkg(
micropkg_module_path: str,
metadata: ProjectMetadata,
alias: str = None,
destination: str = None,
env: str = None,
) -> Path:
micropkg_name = micropkg_module_path.split(".")[-1]
package_dir = metadata.source_dir / metadata.package_name
env = env or "base"
package_source, package_tests, package_conf = _get_artifacts_to_package(
metadata, module_path=micropkg_module_path, env=env
)
    # as the source distribution will only contain parameters, we don't list other
    # config files, so as not to confuse users and to avoid useless file copies
configs_to_package = _find_config_files(
package_conf,
[f"parameters*/**/{micropkg_name}.yml", f"parameters*/**/{micropkg_name}/**/*"],
)
source_paths = (package_source, package_tests, configs_to_package)
    # Check that the micropkg directory exists and is not empty
_validate_dir(package_source)
destination = Path(destination) if destination else metadata.project_path / "dist"
version = _get_default_version(metadata, micropkg_module_path)
_generate_sdist_file(
micropkg_name=micropkg_name,
destination=destination.resolve(),
source_paths=source_paths,
version=version,
metadata=metadata,
alias=alias,
)
_clean_pycache(package_dir)
_clean_pycache(metadata.project_path)
return destination
def _validate_dir(path: Path) -> None:
if not path.is_dir():
raise KedroCliError(f"Directory '{path}' doesn't exist.")
if not list(path.iterdir()):
raise KedroCliError(f"'{path}' is an empty directory.")
def _get_sdist_name(name, version):
return f"{name}-{version}.tar.gz"
def _sync_path_list(source: List[Tuple[Path, str]], target: Path) -> None:
for source_path, suffix in source:
target_with_suffix = (target / suffix).resolve()
_sync_dirs(source_path, target_with_suffix)
def _make_install_requires(requirements_txt: Path) -> List[str]:
"""Parses each line of requirements.txt into a version specifier valid to put in
install_requires."""
if not requirements_txt.exists():
return []
requirements = pkg_resources.parse_requirements(requirements_txt.read_text())
return [str(requirement) for requirement in requirements]
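# Hedged usage sketch (added for clarity, not part of the original module): the
# directory and requirement lines below are made up. Each parseable line is
# returned as a plain specifier string suitable for install_requires.
def _example_make_install_requires(tmp_dir: Path) -> List[str]:
    """Sketch: expected to return roughly ["pandas>=1.3", "requests~=2.28"]."""
    requirements_txt = tmp_dir / "requirements.txt"
    requirements_txt.write_text("pandas>=1.3\nrequests~=2.28\n")
    return _make_install_requires(requirements_txt)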
def _create_nested_package(project: Project, package_path: Path) -> Path:
    # fails if parts of the path exist already
packages = package_path.parts
parent = generate.create_package(project, packages[0])
nested_path = Path(project.address) / packages[0]
for package in packages[1:]:
parent = generate.create_package(project, package, sourcefolder=parent)
nested_path = nested_path / package
return nested_path
def _move_package(project: Project, source: str, target: str) -> None:
"""
Move a Python package, refactoring relevant imports along the way.
A target of empty string means moving to the root of the `project`.
Args:
project: rope.base.Project holding the scope of the refactoring.
source: Name of the Python package to be moved. Can be a fully
qualified module path relative to the `project` root, e.g.
"package.pipelines.pipeline" or "package/pipelines/pipeline".
target: Destination of the Python package to be moved. Can be a fully
qualified module path relative to the `project` root, e.g.
"package.pipelines.pipeline" or "package/pipelines/pipeline".
"""
src_folder = project.get_module(source).get_resource()
target_folder = project.get_module(target).get_resource()
change = MoveModule(project, src_folder).get_changes(dest=target_folder)
project.do(change)
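# Hedged usage sketch (added for clarity, not part of the original module): the
# package path is made up; an empty target moves the package to the root of the
# rope project, which is how the packaging helpers below use it.
def _example_move_package_to_root(project: Project) -> None:
    """Sketch: hoist a hypothetical micro-package to the project root."""
    _move_package(project, "my_project/pipelines/data_science", "")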
def _rename_package(project: Project, old_name: str, new_name: str) -> None:
"""
Rename a Python package, refactoring relevant imports along the way,
as well as references in comments.
Args:
project: rope.base.Project holding the scope of the refactoring.
old_name: Old module name. Can be a fully qualified module path,
e.g. "package.pipelines.pipeline" or "package/pipelines/pipeline",
relative to the `project` root.
new_name: New module name. Can't be a fully qualified module path.
"""
folder = project.get_folder(old_name)
change = Rename(project, folder).get_changes(new_name, docs=True)
project.do(change)
def _refactor_code_for_package(
project: Project,
package_path: Path,
tests_path: Path,
alias: Optional[str],
project_metadata: ProjectMetadata,
) -> None:
"""In order to refactor the imports properly, we need to recreate
the same nested structure as in the project. Therefore, we create:
<temp_dir> # also the root of the Rope project
|__ <project_package>
|__ __init__.py
|__ <path_to_micro_package>
|__ __init__.py
|__ <micro_package>
|__ __init__.py
|__ tests
|__ __init__.py
|__ path_to_micro_package
|__ __init__.py
|__ <micro_package>
|__ __init__.py
    We then move <micro_package> outside of the package src to the top level ("")
    in temp_dir, and rename the folder & imports if an alias is provided.
    For tests, we need to extract all the contents of <micro_package>
    into the top-level `tests` folder. This is not possible in one go with
    the Rope API, so we have to do it in a bit of a hacky way.
We rename <micro_package> to a `tmp_name` and move it at top-level ("")
in temp_dir. We remove the old `tests` folder and rename `tmp_name` to `tests`.
The final structure should be:
<temp_dir> # also the root of the Rope project
|__ <micro_package> # or <alias>
|__ __init__.py
|__ tests # only tests for <micro_package>
|__ __init__.py
|__ test.py
"""
def _move_package_with_conflicting_name(target: Path, conflicting_name: str):
tmp_name = "tmp_name"
tmp_module = target.parent / tmp_name
_rename_package(project, target.as_posix(), tmp_name)
_move_package(project, tmp_module.as_posix(), "")
shutil.rmtree(Path(project.address) / conflicting_name)
_rename_package(project, tmp_name, conflicting_name)
# Copy source in appropriate folder structure
package_target = package_path.relative_to(project_metadata.source_dir)
full_path = _create_nested_package(project, package_target)
# overwrite=True to update the __init__.py files generated by create_package
_sync_dirs(package_path, full_path, overwrite=True)
# Copy tests in appropriate folder structure
if tests_path.exists():
tests_target = tests_path.relative_to(project_metadata.source_dir)
full_path = _create_nested_package(project, tests_target)
# overwrite=True to update the __init__.py files generated by create_package
_sync_dirs(tests_path, full_path, overwrite=True)
# Refactor imports in src/package_name/.../micro_package
# and imports of `micro_package` in tests.
micro_package_name = package_target.stem
if micro_package_name == project_metadata.package_name:
_move_package_with_conflicting_name(package_target, micro_package_name)
else:
_move_package(project, package_target.as_posix(), "")
shutil.rmtree(Path(project.address) / project_metadata.package_name)
if alias:
_rename_package(project, micro_package_name, alias)
if tests_path.exists():
# we can't move the relevant tests folder as is because
# it will conflict with the top-level package <micro_package>;
# we can't rename it "tests" and move it, because it will conflict
# with the existing "tests" folder at top level;
# hence we give it a temp name, move it, delete tests/ and
# rename the temp name to tests.
_move_package_with_conflicting_name(tests_target, "tests")
_SourcePathType = Union[Path, List[Tuple[Path, str]]]
# pylint: disable=too-many-arguments,too-many-locals
def _generate_sdist_file(
micropkg_name: str,
destination: Path,
source_paths: Tuple[_SourcePathType, ...],
version: str,
metadata: ProjectMetadata,
alias: str = None,
) -> None:
package_name = alias or micropkg_name
package_source, tests_source, conf_source = source_paths
with tempfile.TemporaryDirectory() as temp_dir:
temp_dir_path = Path(temp_dir).resolve()
project = Project(temp_dir_path) # project where to do refactoring
_refactor_code_for_package(
project, package_source, tests_source, alias, metadata # type: ignore
)
project.close()
# Copy & "refactor" config
_, _, conf_target = _get_package_artifacts(temp_dir_path, package_name)
_sync_path_list(conf_source, conf_target) # type: ignore
if conf_target.is_dir() and alias:
_rename_files(conf_target, micropkg_name, alias)
# Build a setup.py on the fly
try:
install_requires = _make_install_requires(
package_source / "requirements.txt" # type: ignore
)
except Exception as exc:
click.secho("FAILED", fg="red")
cls = exc.__class__
raise KedroCliError(f"{cls.__module__}.{cls.__qualname__}: {exc}") from exc
_generate_manifest_file(temp_dir_path)
setup_file = _generate_setup_file(
package_name, version, install_requires, temp_dir_path
)
package_file = destination / _get_sdist_name(name=package_name, version=version)
if package_file.is_file():
click.secho(
f"Package file {package_file} will be overwritten!", fg="yellow"
)
# python setup.py sdist --formats=gztar --dist-dir <destination>
call(
[
sys.executable,
str(setup_file.resolve()),
"sdist",
"--formats=gztar",
"--dist-dir",
str(destination),
],
cwd=temp_dir,
)
def _generate_manifest_file(output_dir: Path):
manifest_file = output_dir / "MANIFEST.in"
manifest_file.write_text(
"""
global-include README.md
global-include config/parameters*
global-include config/**/parameters*
global-include config/parameters*/**
global-include config/parameters*/**/*
"""
)
def _generate_setup_file(
package_name: str, version: str, install_requires: List[str], output_dir: Path
) -> Path:
setup_file = output_dir / "setup.py"
setup_file_context = dict(
name=package_name, version=version, install_requires=install_requires
)
setup_file.write_text(_SETUP_PY_TEMPLATE.format(**setup_file_context))
return setup_file
def _get_package_artifacts(
source_path: Path, package_name: str
) -> Tuple[Path, Path, Path]:
"""From existing package, returns in order:
source_path, tests_path, config_path
"""
artifacts = (
source_path / package_name,
source_path / "tests",
# package_data (non-python files) needs to live inside one of the packages
source_path / package_name / "config",
)
return artifacts
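# Hedged worked example (added for clarity, not in the original source): for a
# made-up source_path of /tmp/pkg and package_name "my_micro_pkg", the returned
# tuple is (/tmp/pkg/my_micro_pkg, /tmp/pkg/tests, /tmp/pkg/my_micro_pkg/config).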
def _append_package_reqs(
requirements_txt: Path, package_reqs: List[str], package_name: str
) -> None:
"""Appends micro-package requirements to project level requirements.txt"""
incoming_reqs = _safe_parse_requirements(package_reqs)
if requirements_txt.is_file():
existing_reqs = _safe_parse_requirements(requirements_txt.read_text())
reqs_to_add = set(incoming_reqs) - set(existing_reqs)
if not reqs_to_add:
return
sorted_reqs = sorted(str(req) for req in reqs_to_add)
sep = "\n"
with open(requirements_txt, "a", encoding="utf-8") as file:
file.write(
f"\n\n# Additional requirements from micro-package `{package_name}`:\n"
)
file.write(sep.join(sorted_reqs))
click.secho(
f"Added the following requirements from micro-package `{package_name}` to "
f"requirements.txt:\n{sep.join(sorted_reqs)}"
)
else:
click.secho(
"No project requirements.txt found. Copying contents from project requirements.txt..."
)
sorted_reqs = sorted(str(req) for req in incoming_reqs)
sep = "\n"
with open(requirements_txt, "a", encoding="utf-8") as file:
file.write(sep.join(sorted_reqs))
click.secho(
"Use `kedro build-reqs` to compile and `pip install -r src/requirements.lock` to install "
"the updated list of requirements."
)
def _safe_parse_requirements(
requirements: Union[str, Iterable[str]]
) -> Set[pkg_resources.Requirement]:
"""Safely parse a requirement or set of requirements. This effectively replaces
pkg_resources.parse_requirements, which blows up with a ValueError as soon as it
encounters a requirement it cannot parse (e.g. `-r requirements.txt`). This way
we can still extract all the parseable requirements out of a set containing some
unparseable requirements.
"""
parseable_requirements = set()
for requirement in pkg_resources.yield_lines(requirements):
try:
parseable_requirements.add(pkg_resources.Requirement.parse(requirement))
except ValueError:
continue
return parseable_requirements
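# Hedged usage sketch (added for clarity, not part of the original module): the
# requirement lines are made up; the pip-style "-r" include line is skipped
# instead of raising, which is the point of the helper above.
def _example_safe_parse_requirements() -> Set[pkg_resources.Requirement]:
    """Sketch: returns only the two parseable specifiers."""
    mixed = "pandas>=1.3\n-r other-requirements.txt\nrequests~=2.28\n"
    return _safe_parse_requirements(mixed)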
|
docs/lessons/lesson01_simple.py | jayvdb/live-py-plugin | 224 | 12678664 | """ Simple plot
In this section, we want to draw the cosine and sine functions
on the same plot. Starting from the default settings, we'll
enrich the figure step by step to make it nicer.
First step is to get the data for the sine and cosine functions:
:lesson goal file: goal01.py
"""
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(-np.pi, np.pi, 256, endpoint=True)
c, s = np.cos(x), np.sin(x)
# x is now a numpy array with 256 values ranging from -pi to +pi
# (included). c is the cosine (256 values) and s is the sine
# (256 values).
# To see the plot in PyCharm, first run this file normally.
# That should show the plot in a new window. If it shows up in
# the tool window inside PyCharm, you should probably disable
# the Python Scientific mode under File: Settings.
# Next, choose Run: Start Live Turtle. That should show you two
# plots: the current plot and the goal plot.
# Can you add the sine data to make the first plot match the
# second one?
plt.plot(x, c) # Copy this line and change it.
# Once they match exactly, the goal plot should disappear.
# Then you can open lesson 2.
plt.show()
|
natlas-server/app/models/__init__.py | purplesecops/natlas | 500 | 12678695 | <filename>natlas-server/app/models/__init__.py
from app.models.agent import Agent
from app.models.agent_config import AgentConfig
from app.models.agent_script import AgentScript
from app.models.config_item import ConfigItem
from app.models.natlas_services import NatlasServices
from app.models.rescan_task import RescanTask
from app.models.scope_item import ScopeItem
from app.models.tag import Tag
from app.models.user import User
from app.models.user_invitation import UserInvitation
from app.models.scope_log import ScopeLog
__all__ = [
"Agent",
"AgentConfig",
"AgentScript",
"ConfigItem",
"NatlasServices",
"RescanTask",
"ScopeItem",
"ScopeLog",
"Tag",
"User",
"UserInvitation",
]
|
RecoTauTag/HLTProducers/python/PixelTracksL2Tau_cfi.py | ckamtsikis/cmssw | 852 | 12678699 | import FWCore.ParameterSet.Config as cms
from RecoPixelVertexing.PixelTrackFitting.pixelTracks_cfi import pixelTracks as _pixelTracks
from RecoTauTag.HLTProducers.trackingRegionsFromBeamSpotAndL2Tau_cfi import trackingRegionsFromBeamSpotAndL2Tau
# Note from new seeding framework migration
# Previously the TrackingRegion was set as a parameter of PixelTrackProducer
# Now the TrackingRegion EDProducer must be inserted in a sequence, and set as an input to HitPairEDProducer
pixelTracksL2Tau = _pixelTracks.clone(
passLabel = 'pixelTracksL2Tau'
)
|
NVIDIA/benchmarks/transformer/implementations/pytorch/fairseq/criterions/__init__.py | mengkai94/training_results_v0.6 | 140 | 12678714 | <gh_stars>100-1000
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import importlib
import os
from .fairseq_criterion import FairseqCriterion
CRITERION_REGISTRY = {}
CRITERION_CLASS_NAMES = set()
def build_criterion(args, task):
return CRITERION_REGISTRY[args.criterion](args, task)
def register_criterion(name):
"""Decorator to register a new criterion."""
def register_criterion_cls(cls):
if name in CRITERION_REGISTRY:
raise ValueError('Cannot register duplicate criterion ({})'.format(name))
if not issubclass(cls, FairseqCriterion):
raise ValueError('Criterion ({}: {}) must extend FairseqCriterion'.format(name, cls.__name__))
if cls.__name__ in CRITERION_CLASS_NAMES:
# We use the criterion class name as a unique identifier in
# checkpoints, so all criterions must have unique class names.
raise ValueError('Cannot register criterion with duplicate class name ({})'.format(cls.__name__))
CRITERION_REGISTRY[name] = cls
CRITERION_CLASS_NAMES.add(cls.__name__)
return cls
return register_criterion_cls
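# Hedged usage sketch (added for clarity, not part of the original module): how a
# new criterion could register itself via the decorator above. The class is a
# made-up stub and nothing is registered unless this helper is actually called.
def _example_register_custom_criterion():
    """Define and register a trivial criterion under the name 'example_noop'."""
    @register_criterion('example_noop')
    class ExampleNoopCriterion(FairseqCriterion):
        pass
    return CRITERION_REGISTRY['example_noop']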
# automatically import any Python files in the criterions/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
module = file[:file.find('.py')]
importlib.import_module('fairseq.criterions.' + module)
|
mnist_cifar/main.py | elony314/sparse_learning | 365 | 12678735 | <filename>mnist_cifar/main.py<gh_stars>100-1000
from __future__ import print_function
import sys
import os
import shutil
import time
import argparse
import logging
import hashlib
import copy
import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.backends.cudnn as cudnn
import numpy as np
import sparselearning
from sparselearning.core import Masking, CosineDecay, LinearDecay
from sparselearning.models import AlexNet, VGG16, LeNet_300_100, LeNet_5_Caffe, WideResNet
from sparselearning.utils import get_mnist_dataloaders, get_cifar10_dataloaders, plot_class_feature_histograms
from extensions import magnitude_variance_pruning, variance_redistribution
cudnn.benchmark = True
cudnn.deterministic = True
if not os.path.exists('./models'): os.mkdir('./models')
if not os.path.exists('./logs'): os.mkdir('./logs')
logger = None
models = {}
models['lenet5'] = (LeNet_5_Caffe,[])
models['lenet300-100'] = (LeNet_300_100,[])
models['alexnet-s'] = (AlexNet, ['s', 10])
models['alexnet-b'] = (AlexNet, ['b', 10])
models['vgg-c'] = (VGG16, ['C', 10])
models['vgg-d'] = (VGG16, ['D', 10])
models['vgg-like'] = (VGG16, ['like', 10])
models['wrn-28-2'] = (WideResNet, [28, 2, 10, 0.3])
models['wrn-22-8'] = (WideResNet, [22, 8, 10, 0.3])
models['wrn-16-8'] = (WideResNet, [16, 8, 10, 0.3])
models['wrn-16-10'] = (WideResNet, [16, 10, 10, 0.3])
def setup_logger(args):
global logger
    if logger is None:
logger = logging.getLogger()
else: # wish there was a logger.close()
for handler in logger.handlers[:]: # make a copy of the list
logger.removeHandler(handler)
args_copy = copy.deepcopy(args)
# copy to get a clean hash
# use the same log file hash if iterations or verbose are different
# these flags do not change the results
args_copy.iters = 1
args_copy.verbose = False
args_copy.log_interval = 1
args_copy.seed = 0
log_path = './logs/{0}_{1}_{2}.log'.format(args.model, args.density, hashlib.md5(str(args_copy).encode('utf-8')).hexdigest()[:8])
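    # e.g. ./logs/wrn-16-8_0.05_1a2b3c4d.log (model, density, then a short hash
    # of the remaining arguments; the hash shown here is made up)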
logger.setLevel(logging.INFO)
formatter = logging.Formatter(fmt='%(asctime)s: %(message)s', datefmt='%H:%M:%S')
fh = logging.FileHandler(log_path)
fh.setFormatter(formatter)
logger.addHandler(fh)
def print_and_log(msg):
global logger
print(msg)
logger.info(msg)
def train(args, model, device, train_loader, optimizer, epoch, lr_scheduler, mask=None):
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
if lr_scheduler is not None: lr_scheduler.step()
data, target = data.to(device), target.to(device)
if args.fp16: data = data.half()
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
if mask is not None: mask.step()
else: optimizer.step()
if batch_idx % args.log_interval == 0:
print_and_log('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader)*args.batch_size,
100. * batch_idx / len(train_loader), loss.item()))
def evaluate(args, model, device, test_loader, is_test_set=False):
model.eval()
test_loss = 0
correct = 0
n = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
if args.fp16: data = data.half()
model.t = target
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
n += target.shape[0]
test_loss /= float(n)
print_and_log('\n{}: Average loss: {:.4f}, Accuracy: {}/{} ({:.3f}%)\n'.format(
'Test evaluation' if is_test_set else 'Evaluation',
test_loss, correct, n, 100. * correct / float(n)))
return correct / float(n)
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=100, metavar='N',
help='input batch size for training (default: 100)')
parser.add_argument('--test-batch-size', type=int, default=100, metavar='N',
help='input batch size for testing (default: 100)')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train (default: 100)')
parser.add_argument('--lr', type=float, default=0.1, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=17, metavar='S', help='random seed (default: 17)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--optimizer', type=str, default='sgd', help='The optimizer to use. Default: sgd. Options: sgd, adam.')
parser.add_argument('--save-model', type=str, default='./models/model.pt', help='For Saving the current Model')
parser.add_argument('--data', type=str, default='mnist')
parser.add_argument('--decay_frequency', type=int, default=25000)
parser.add_argument('--l1', type=float, default=0.0)
parser.add_argument('--fp16', action='store_true', help='Run in fp16 mode.')
parser.add_argument('--valid_split', type=float, default=0.1)
parser.add_argument('--resume', type=str)
parser.add_argument('--start-epoch', type=int, default=1)
parser.add_argument('--model', type=str, default='')
parser.add_argument('--l2', type=float, default=5.0e-4)
parser.add_argument('--iters', type=int, default=1, help='How many times the model should be run after each other. Default=1')
parser.add_argument('--save-features', action='store_true', help='Resumes a saved model and saves its feature data to disk for plotting.')
parser.add_argument('--bench', action='store_true', help='Enables the benchmarking of layers and estimates sparse speedups')
parser.add_argument('--max-threads', type=int, default=10, help='How many threads to use for data loading.')
parser.add_argument('--decay-schedule', type=str, default='cosine', help='The decay schedule for the pruning rate. Default: cosine. Choose from: cosine, linear.')
sparselearning.core.add_sparse_args(parser)
args = parser.parse_args()
setup_logger(args)
print_and_log(args)
if args.fp16:
try:
from apex.fp16_utils import FP16_Optimizer
except:
print('WARNING: apex not installed, ignoring --fp16 option')
args.fp16 = False
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print_and_log('\n\n')
print_and_log('='*80)
torch.manual_seed(args.seed)
for i in range(args.iters):
print_and_log("\nIteration start: {0}/{1}\n".format(i+1, args.iters))
if args.data == 'mnist':
train_loader, valid_loader, test_loader = get_mnist_dataloaders(args, validation_split=args.valid_split)
else:
train_loader, valid_loader, test_loader = get_cifar10_dataloaders(args, args.valid_split, max_threads=args.max_threads)
if args.model not in models:
print('You need to select an existing model via the --model argument. Available models include: ')
for key in models:
print('\t{0}'.format(key))
raise Exception('You need to select a model')
else:
cls, cls_args = models[args.model]
model = cls(*(cls_args + [args.save_features, args.bench])).to(device)
print_and_log(model)
print_and_log('='*60)
print_and_log(args.model)
print_and_log('='*60)
print_and_log('='*60)
print_and_log('Prune mode: {0}'.format(args.prune))
print_and_log('Growth mode: {0}'.format(args.growth))
print_and_log('Redistribution mode: {0}'.format(args.redistribution))
print_and_log('='*60)
        # add custom prune/growth/redistribution here
if args.prune == 'magnitude_variance':
print('Using magnitude-variance pruning. Switching to Adam optimizer...')
args.prune = magnitude_variance_pruning
args.optimizer = 'adam'
if args.redistribution == 'variance':
print('Using variance redistribution. Switching to Adam optimizer...')
args.redistribution = variance_redistribution
args.optimizer = 'adam'
optimizer = None
if args.optimizer == 'sgd':
optimizer = optim.SGD(model.parameters(),lr=args.lr,momentum=args.momentum,weight_decay=args.l2, nesterov=True)
elif args.optimizer == 'adam':
optimizer = optim.Adam(model.parameters(),lr=args.lr,weight_decay=args.l2)
else:
print('Unknown optimizer: {0}'.format(args.optimizer))
raise Exception('Unknown optimizer.')
lr_scheduler = optim.lr_scheduler.StepLR(optimizer, args.decay_frequency, gamma=0.1)
if args.resume:
if os.path.isfile(args.resume):
print_and_log("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print_and_log("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
print_and_log('Testing...')
evaluate(args, model, device, test_loader)
model.feats = []
model.densities = []
plot_class_feature_histograms(args, model, device, train_loader, optimizer)
else:
print_and_log("=> no checkpoint found at '{}'".format(args.resume))
if args.fp16:
print('FP16')
optimizer = FP16_Optimizer(optimizer,
static_loss_scale = None,
dynamic_loss_scale = True,
dynamic_loss_args = {'init_scale': 2 ** 16})
model = model.half()
mask = None
if not args.dense:
if args.decay_schedule == 'cosine':
decay = CosineDecay(args.prune_rate, len(train_loader)*(args.epochs))
elif args.decay_schedule == 'linear':
decay = LinearDecay(args.prune_rate, len(train_loader)*(args.epochs))
mask = Masking(optimizer, decay, prune_rate=args.prune_rate, prune_mode=args.prune, growth_mode=args.growth, redistribution_mode=args.redistribution,
verbose=args.verbose, fp16=args.fp16)
mask.add_module(model, density=args.density)
for epoch in range(1, args.epochs + 1):
t0 = time.time()
train(args, model, device, train_loader, optimizer, epoch, lr_scheduler, mask)
if args.valid_split > 0.0:
val_acc = evaluate(args, model, device, valid_loader)
save_checkpoint({'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict()},
is_best=False, filename=args.save_model)
if not args.dense and epoch < args.epochs:
mask.at_end_of_epoch()
print_and_log('Current learning rate: {0}. Time taken for epoch: {1:.2f} seconds.\n'.format(optimizer.param_groups[0]['lr'], time.time() - t0))
evaluate(args, model, device, test_loader, is_test_set=True)
print_and_log("\nIteration end: {0}/{1}\n".format(i+1, args.iters))
if __name__ == '__main__':
main()
|
static_precompiler/tests/test_management.py | codexterous/django-static-precompiler | 160 | 12678751 | import os
import pytest
from django.core import management
import static_precompiler.settings
from static_precompiler.management.commands import compilestatic
def test_get_scanned_dirs():
assert compilestatic.get_scanned_dirs() == sorted(
[
os.path.join(os.path.dirname(__file__), "compilestatic"),
os.path.join(os.path.dirname(__file__), "staticfiles_dir"),
os.path.join(os.path.dirname(__file__), "staticfiles_dir_with_prefix"),
static_precompiler.settings.STATIC_ROOT,
]
)
@pytest.mark.django_db
@pytest.mark.parametrize(
"verbosity",
(
0,
1,
),
)
def test_compilestatic_command(verbosity, capsys, monkeypatch, tmpdir):
monkeypatch.setattr(
"static_precompiler.management.commands.compilestatic.get_scanned_dirs",
lambda: (os.path.join(os.path.dirname(__file__), "compilestatic"),),
)
monkeypatch.setattr("static_precompiler.settings.ROOT", tmpdir.strpath)
management.call_command("compilestatic", verbosity=verbosity)
output_path = os.path.join(tmpdir.strpath, static_precompiler.settings.OUTPUT_DIR)
compiled_files = []
for root, dirs, files in os.walk(output_path):
for filename in files:
compiled_files.append(os.path.join(root[len(output_path) :].lstrip("/"), filename))
compiled_files.sort()
assert compiled_files == [
"coffee/test.js",
"less/test.css",
"scss/test.css",
]
stdout, _ = capsys.readouterr()
if verbosity >= 1:
assert stdout == (
"Compiled 'coffee/test.coffee' to 'COMPILED/coffee/test.js'\n"
"Compiled 'less/test.less' to 'COMPILED/less/test.css'\n"
"Compiled 'scss/test.scss' to 'COMPILED/scss/test.css'\n"
)
else:
assert stdout == ""
@pytest.mark.skip("Re-enable when pytest-django>3.1.2 is released")
@pytest.mark.django_db
def test_ignore_dependencies_option(django_assert_num_queries, monkeypatch, tmpdir):
monkeypatch.setattr(
"static_precompiler.management.commands.compilestatic.get_scanned_dirs",
lambda: (os.path.join(os.path.dirname(__file__), "compilestatic"),),
)
monkeypatch.setattr("static_precompiler.settings.ROOT", tmpdir.strpath)
with django_assert_num_queries(0):
management.call_command("compilestatic", ignore_dependencies=True)
@pytest.mark.django_db
def test_delete_stale_files(monkeypatch, tmpdir):
output_path = os.path.join(tmpdir.strpath, static_precompiler.settings.OUTPUT_DIR)
if not os.path.exists(output_path):
os.makedirs(output_path)
unmanaged_file = os.path.join(tmpdir.strpath, "unmanaged.js")
with open(unmanaged_file, "w+") as f:
f.write("unmanaged")
with open(os.path.join(output_path, "stale.js"), "w+") as f:
f.write("stale")
monkeypatch.setattr(
"static_precompiler.management.commands.compilestatic.get_scanned_dirs",
lambda: (os.path.join(os.path.dirname(__file__), "compilestatic"),),
)
monkeypatch.setattr("static_precompiler.settings.ROOT", tmpdir.strpath)
management.call_command("compilestatic", delete_stale_files=True)
compiled_files = []
for root, dirs, files in os.walk(output_path):
for filename in files:
compiled_files.append(os.path.join(root[len(output_path) :].lstrip("/"), filename))
compiled_files.sort()
assert compiled_files == [
"coffee/test.js",
"less/test.css",
"scss/test.css",
]
# Files outside of `COMPILED` directory are untouched
assert os.path.exists(unmanaged_file)
|
examples/rmg/pruning_test/input.py | tza0035/RMG-Py | 250 | 12678756 | # Data sources
database(
thermoLibraries = ['primaryThermoLibrary'],
reactionLibraries = [],
seedMechanisms = [],
kineticsDepositories = ['training'],
kineticsFamilies = 'default',
kineticsEstimator = 'rate rules',
)
# List of species
species(
label='ethane',
reactive=True,
structure=SMILES("CC"),
)
species(
label='O2',
reactive=True,
structure=SMILES('[O][O]')
)
species(
label='N2',
reactive=False,
structure=SMILES('N#N'),
)
# Reaction systems
simpleReactor(
temperature=[(1000,'K'),(1500,'K')],
pressure=[(1.0,'bar'),(10.0,'bar')],
nSims=3,
initialMoleFractions={
"ethane": [0.05,0.15],
"O2": 0.1,
"N2": 0.9,
},
terminationConversion={
'ethane': 0.1,
},
terminationTime=(1e1,'s'),
balanceSpecies = "N2",
)
simulator(
atol=1e-16,
rtol=1e-8,
)
model(
toleranceKeepInEdge=0.001,
toleranceMoveToCore=0.01,
toleranceInterruptSimulation=1e8,
maximumEdgeSpecies=20,
filterReactions=True,
minCoreSizeForPrune=5,
)
options(
units='si',
generateOutputHTML=False,
generatePlots=False,
saveEdgeSpecies=False,
saveSimulationProfiles=False,
)
|
lib/forms.py | srvz/F2E.im | 340 | 12678759 | <reponame>srvz/F2E.im<gh_stars>100-1000
#
# Copyright (c) 2008 <NAME> <EMAIL>
#
# forms.py 31-Jul-2011
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""
.. _WTForms: http://wtforms.simplecodes.com/
A simple wrapper for WTForms_.
Basically we only need to map the request handler's `arguments` to the
`wtforms.form.Form` input. Quick example::
from wtforms import TextField, validators
from tornadotools.forms import Form
class SampleForm(Form):
username = TextField('Username', [
validators.Length(min=4, message="Too short")
])
email = TextField('Email', [
validators.Length(min=4, message="Not a valid mail address"),
validators.Email()
])
Then, in the `RequestHandler`::
def get(self):
form = SampleForm(self)
if form.validate():
# do something with form.username or form.email
pass
self.render('template.html', form=form)
"""
from wtforms import Form
class Form(Form):
"""
`WTForms` wrapper for Tornado.
"""
def __init__(self, formdata=None, obj=None, prefix='', **kwargs):
"""
Wrap the `formdata` with the `TornadoInputWrapper` and call the base
        constructor.
"""
self._handler = formdata
super(Form, self).__init__(TornadoInputWrapper(formdata),
obj=obj, prefix=prefix, **kwargs)
def _get_translations(self):
return TornadoLocaleWrapper(self._handler.get_user_locale())
class TornadoInputWrapper(object):
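    """
    Adapt a `tornado.web.RequestHandler` to the multidict-style interface
    (iteration, length, membership and `getlist`) that WTForms expects for
    its form data.
    """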
def __init__(self, handler):
self._handler = handler
def __iter__(self):
return iter(self._handler.request.arguments)
def __len__(self):
return len(self._handler.request.arguments)
def __contains__(self, name):
return (name in self._handler.request.arguments)
def getlist(self, name):
return self._handler.get_arguments(name)
class TornadoLocaleWrapper(object):
def __init__(self, locale):
self.locale = locale
def gettext(self, message):
return self.locale.translate(message) if self.locale else message
def ngettext(self, message, plural_message, count):
return self.locale.translate(message, plural_message, count) if self.locale else message
|
alipay/aop/api/response/AlipayOpenMiniInnerversionLastQueryResponse.py | antopen/alipay-sdk-python-all | 213 | 12678781 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenMiniInnerversionLastQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenMiniInnerversionLastQueryResponse, self).__init__()
self._app_desc = None
self._app_name = None
self._app_version = None
self._bundle_id = None
self._category_ids = None
self._english_name = None
self._logo_url = None
self._mini_app_id = None
self._service_phone = None
self._slogan = None
self._status = None
self._sub_application_type = None
@property
def app_desc(self):
return self._app_desc
@app_desc.setter
def app_desc(self, value):
self._app_desc = value
@property
def app_name(self):
return self._app_name
@app_name.setter
def app_name(self, value):
self._app_name = value
@property
def app_version(self):
return self._app_version
@app_version.setter
def app_version(self, value):
self._app_version = value
@property
def bundle_id(self):
return self._bundle_id
@bundle_id.setter
def bundle_id(self, value):
self._bundle_id = value
@property
def category_ids(self):
return self._category_ids
@category_ids.setter
def category_ids(self, value):
self._category_ids = value
@property
def english_name(self):
return self._english_name
@english_name.setter
def english_name(self, value):
self._english_name = value
@property
def logo_url(self):
return self._logo_url
@logo_url.setter
def logo_url(self, value):
self._logo_url = value
@property
def mini_app_id(self):
return self._mini_app_id
@mini_app_id.setter
def mini_app_id(self, value):
self._mini_app_id = value
@property
def service_phone(self):
return self._service_phone
@service_phone.setter
def service_phone(self, value):
self._service_phone = value
@property
def slogan(self):
return self._slogan
@slogan.setter
def slogan(self, value):
self._slogan = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def sub_application_type(self):
return self._sub_application_type
@sub_application_type.setter
def sub_application_type(self, value):
self._sub_application_type = value
def parse_response_content(self, response_content):
response = super(AlipayOpenMiniInnerversionLastQueryResponse, self).parse_response_content(response_content)
if 'app_desc' in response:
self.app_desc = response['app_desc']
if 'app_name' in response:
self.app_name = response['app_name']
if 'app_version' in response:
self.app_version = response['app_version']
if 'bundle_id' in response:
self.bundle_id = response['bundle_id']
if 'category_ids' in response:
self.category_ids = response['category_ids']
if 'english_name' in response:
self.english_name = response['english_name']
if 'logo_url' in response:
self.logo_url = response['logo_url']
if 'mini_app_id' in response:
self.mini_app_id = response['mini_app_id']
if 'service_phone' in response:
self.service_phone = response['service_phone']
if 'slogan' in response:
self.slogan = response['slogan']
if 'status' in response:
self.status = response['status']
if 'sub_application_type' in response:
self.sub_application_type = response['sub_application_type']
|
z3/exodus.py | Wikunia/hakank | 279 | 12678806 | #!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# Exodus puzzle (Dell Logic Puzzles) in Z3
#
# From
# http://brownbuffalo.sourceforge.net/ExodusClues.html
# """
# Title: Exodus
# Author: <NAME>
# Publication: Dell Logic Puzzles
# Issue: April, 1998
# Page: 14
# Stars: 2
# In preparation for Passover, five children at Hebrew school
# (Bernice,Carl,Debby,Sammy, and Ted)
# have been chosen to present
# different parts of the story of the Exodus from Egypt
# (burning bush, captivity,
# Moses's youth, Passover, or the Ten Commandments).
# Each child is a different age
# (three, five, seven, eight, or ten),
# and the family of each child has recently made its own exodus
# to America from a different country
# (Ethiopia, Kazakhstan, Lithuania, Morocco, or Yemen).
# Can you find the age of each child, his or her family's country of
# origin, and the part of the Exodus story each related?
# 1. Debby's family is from Lithuania.
# 2. The child who told the story of the Passover is two years older
# than Bernice.
# 3. The child whose family is from Yemen is younger than the child from
# the Ethiopian family.
# 4. The child from the Moroccan family is three years older than Ted.
# 5. Sammy is three years older than the child who told the story of
# Moses's youth in the house of the Pharaoh.
# 6. Carl related the story of the captivity of the Israelites in Egypt.
# 7. The five-year-old child told the story of the Ten Commandments.
# 8. The child who told the story of the burning bush is either two or
# three years older than the one whose family came from
# Kazakhstan.
#
# Determine: Age -- Child -- Country -- Story
# """
#
# This Z3 model was written by <NAME> (<EMAIL>)
# See also my Z3 page: http://hakank.org/z3/
#
from z3_utils_hakank import *
sol = Solver()
n = 5
people = range(n)
[Bernice,Carl,Debby,Sammy,Ted] = people
# variables
Story = makeIntArray(sol,"Story",n, 0,n-1)
BurningBush, Captivity, MosessYouth, Passover, TenCommandments = [Story[i] for i in range(n)]
vals = [3,5,7,8,10]
Age = makeIntArray(sol, "Age",n, min(vals),max(vals))
for i in range(n):
sol.add(Or([Age[i] == v for v in vals]))
Country = makeIntArray(sol,"Country",n, 0,n-1)
[Ethiopia, Kazakhstan, Lithuania, Morocco, Yemen] = [Country[i] for i in range(n)]
# constraints
sol.add(Distinct([Story[i] for i in range(n)]))
sol.add(Distinct([Age[i] for i in range(n)]))
sol.add(Distinct([Country[i] for i in range(n)]))
sol.add(Debby == Lithuania)
sol.add(Age[Passover] == Age[Bernice] + 2)
sol.add(Age[Yemen] < Age[Ethiopia])
sol.add(Age[Morocco] == Age[Ted] + 3)
sol.add(Age[Sammy] == Age[MosessYouth] + 3)
sol.add(Carl == Captivity)
sol.add(Age[TenCommandments] == 5)
sol.add(
Or(Age[BurningBush] == Age[Kazakhstan] + 2
,
Age[BurningBush] == Age[Kazakhstan] + 3
)
)
num_solutions = 0
while sol.check() == sat:
num_solutions += 1
mod = sol.model()
print("People :", [people[i] for i in range(n)])
print("Story :", [mod.eval(Story[i]) for i in range(n)])
print("Country:", [mod.eval(Country[i]) for i in range(n)])
print("Age :", [mod.eval(Age[i]) for i in range(n)])
print()
getDifferentSolution(sol,mod,
[Story[i] for i in range(n)],
[Age[i] for i in range(n)],
[Country[i] for i in range(n)])
print("num_solutions:", num_solutions)
|
scripts/init-account-url.py | horacexd/clist | 166 | 12678809 | #!/usr/bin/env python3
from django.db.models.signals import m2m_changed
from django.db.models import Q
from django.db import transaction
from tqdm import tqdm
from ranking.models import Account, update_account_url
def run(*args):
qs = Account.objects.filter(Q(url__isnull=True) | Q(coders__isnull=False))
total = qs.count()
iterator = qs.select_related('resource').prefetch_related('coders').iterator()
with tqdm(total=total) as pbar:
while True:
with transaction.atomic():
batch = 0
for a in iterator:
update_account_url(m2m_changed, a, action='post_save')
pbar.update()
batch += 1
total -= 1
if batch == 10000:
break
if batch == 0:
break
|
setup.py | ewels/rich-click | 185 | 12678815 | <reponame>ewels/rich-click
from setuptools import setup
setup(
name="rich-click",
install_requires=[
"click>=7",
"rich>=10.7.0",
"importlib-metadata; python_version < '3.8'",
],
extras_require={
"typer": "typer>=0.4",
"dev": "pre-commit",
},
package_data={"rich_click": ["py.typed"]},
)
|
sdk/python/tests/test_benchmark_collections.py | rpatil524/arvados | 222 | 12678843 | # Copyright (C) The Arvados Authors. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
from __future__ import absolute_import
import arvados
import sys
from . import run_test_server
from . import arvados_testutil as tutil
from . import manifest_examples
from .performance.performance_profiler import profiled
class CollectionBenchmark(run_test_server.TestCaseWithServers,
tutil.ArvadosBaseTestCase,
manifest_examples.ManifestExamples):
MAIN_SERVER = {}
TEST_BLOCK_SIZE = 0
@classmethod
def list_recursive(cls, coll, parent_name=None):
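        # Yield the full path of every file in `coll`, recursing into
        # subcollections; iterating a leaf item raises TypeError, which is the
        # signal to yield the accumulated name instead.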
if parent_name is None:
current_name = coll.stream_name()
else:
current_name = '{}/{}'.format(parent_name, coll.name)
try:
for name in coll:
for item in cls.list_recursive(coll[name], current_name):
yield item
except TypeError:
yield current_name
@classmethod
def setUpClass(cls):
super(CollectionBenchmark, cls).setUpClass()
run_test_server.authorize_with('active')
cls.api_client = arvados.api('v1')
cls.keep_client = arvados.KeepClient(api_client=cls.api_client,
local_store=cls.local_store)
@profiled
def profile_new_collection_from_manifest(self, manifest_text):
return arvados.collection.Collection(manifest_text)
@profiled
def profile_new_collection_from_server(self, uuid):
return arvados.collection.Collection(uuid)
@profiled
def profile_new_collection_copying_bytes_from_collection(self, src):
dst = arvados.collection.Collection()
with tutil.mock_keep_responses('x'*self.TEST_BLOCK_SIZE, 200):
for name in self.list_recursive(src):
with src.open(name, 'rb') as srcfile, dst.open(name, 'wb') as dstfile:
dstfile.write(srcfile.read())
dst.save_new()
@profiled
def profile_new_collection_copying_files_from_collection(self, src):
dst = arvados.collection.Collection()
with tutil.mock_keep_responses('x'*self.TEST_BLOCK_SIZE, 200):
for name in self.list_recursive(src):
dst.copy(name, name, src)
dst.save_new()
@profiled
def profile_collection_list_files(self, coll):
return sum(1 for name in self.list_recursive(coll))
def test_medium_sized_manifest(self):
"""Exercise manifest-handling code.
Currently, this test puts undue emphasis on some code paths
that don't reflect typical use because the contrived example
manifest has some unusual characteristics:
* Block size is zero.
* Every block is identical, so block caching patterns are
unrealistic.
* Every file begins and ends at a block boundary.
"""
specs = {
'streams': 100,
'files_per_stream': 100,
'blocks_per_file': 20,
'bytes_per_block': self.TEST_BLOCK_SIZE,
}
my_manifest = self.make_manifest(**specs)
coll = self.profile_new_collection_from_manifest(my_manifest)
coll.save_new()
self.profile_new_collection_from_server(coll.manifest_locator())
num_items = self.profile_collection_list_files(coll)
self.assertEqual(num_items, specs['streams'] * specs['files_per_stream'])
self.profile_new_collection_copying_bytes_from_collection(coll)
self.profile_new_collection_copying_files_from_collection(coll)
|
deps/cld/binding.gyp | LaudateCorpus1/node-cld | 206 | 12678848 | <reponame>LaudateCorpus1/node-cld
{
"targets": [
{
"target_name": "cld-c",
"type": "static_library",
"include_dirs": [
"internal",
],
"sources": [
"internal/cldutil.cc",
"internal/cldutil_shared.cc",
"internal/compact_lang_det.cc",
"internal/compact_lang_det_hint_code.cc",
"internal/compact_lang_det_impl.cc",
"internal/debug.cc",
"internal/fixunicodevalue.cc",
"internal/generated_entities.cc",
"internal/generated_language.cc",
"internal/generated_ulscript.cc",
"internal/getonescriptspan.cc",
"internal/lang_script.cc",
"internal/offsetmap.cc",
"internal/scoreonescriptspan.cc",
"internal/tote.cc",
"internal/utf8statetable.cc",
"internal/cld_generated_cjk_uni_prop_80.cc",
"internal/cld2_generated_cjk_compatible.cc",
"internal/cld_generated_cjk_delta_bi_32.cc",
"internal/generated_distinct_bi_0.cc",
"internal/cld2_generated_quad0122.cc",
"internal/cld2_generated_deltaocta0122.cc",
"internal/cld2_generated_deltaoctachrome.cc",
"internal/cld2_generated_distinctocta0122.cc",
"internal/cld2_generated_distinctoctachrome.cc",
"internal/cld2_generated_quadchrome_16.cc",
"internal/cld2_generated_quadchrome_2.cc",
"internal/cld_generated_score_quad_octa_0122.cc",
"internal/cld_generated_score_quad_octa_2.cc"
],
"defines": [],
"cflags_cc": ["-w", "-std=gnu++98"],
"cflags_cc!": ["-std=gnu++0x"],
"link_settings" : {
"ldflags": ["-z", "muldefs"]
},
"xcode_settings": {
"OTHER_CFLAGS": ["-w"],
'CLANG_CXX_LANGUAGE_STANDARD': 'c++98'
}
}
]
}
|
examples/eratosthenes/sieve_cpython.py | bennn/PyonR | 132 | 12678859 | <filename>examples/eratosthenes/sieve_cpython.py
def sieve(n):
primes = [True] * (n+1)
counter = 0
for i in range(2,n):
if primes[i]:
counter = counter + 1
for j in range(i*i, n, i):
primes[j] = False
return counter
import time
a = time.time()
print sieve(10000000)
b=time.time()
print b-a, 'seconds' |
pythonforandroid/recipes/babel/__init__.py | lstwzd/python-for-android | 6,278 | 12678861 | from pythonforandroid.recipe import PythonRecipe
class BabelRecipe(PythonRecipe):
name = 'babel'
version = '2.2.0'
url = 'https://pypi.python.org/packages/source/B/Babel/Babel-{version}.tar.gz'
depends = ['setuptools', 'pytz']
call_hostpython_via_targetpython = False
install_in_hostpython = True
recipe = BabelRecipe()
|
recognition/valid.py | w-garcia/insightface | 108 | 12678884 | <reponame>w-garcia/insightface
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import sys
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import yaml
from recognition.backbones.resnet_v1 import ResNet_v1_50
from recognition.data.generate_data import GenerateData
from recognition.models.models import MyModel
from recognition.predict import get_embeddings
tf.enable_eager_execution()
class Valid_Data:
def __init__(self, model, data):
self.model = model
self.data = data
@staticmethod
def _cal_cos_sim(emb1, emb2):
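        # The row-wise dot product below equals cosine similarity only if the
        # embeddings are already L2-normalised, which get_embeddings is
        # presumably expected to guarantee.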
return tf.reduce_sum(emb1 * emb2, axis=-1)
def _get_sim_label(self):
sims = None
labels = None
for image1, image2, label in self.data:
emb1 = get_embeddings(self.model, image1)
emb2 = get_embeddings(self.model, image2)
sim = self._cal_cos_sim(emb1, emb2)
if sims is None:
sims = sim
else:
sims = tf.concat([sims, sim], axis=0)
if labels is None:
labels = label
else:
labels = tf.concat([labels, label], axis=0)
return sims, labels
@staticmethod
def _cal_metric(sim, label, thresh):
tp = tn = fp = fn = 0
predict = tf.greater_equal(sim, thresh)
for i in range(len(predict)):
if predict[i] and label[i]:
tp += 1
elif predict[i] and not label[i]:
fp += 1
elif not predict[i] and label[i]:
fn += 1
else:
tn += 1
acc = (tp + tn) / len(predict)
p = 0 if tp + fp == 0 else tp / (tp + fp)
r = 0 if tp + fn == 0 else tp / (tp + fn)
fpr = 0 if fp + tn == 0 else fp / (fp + tn)
return acc, p, r, fpr
def _cal_metric_fpr(self, sim, label, below_fpr=0.001):
acc = p = r = thresh = 0
for t in np.linspace(-1, 1, 100):
thresh = t
acc, p, r, fpr = self._cal_metric(sim, label, thresh)
if fpr <= below_fpr:
break
return acc, p, r, thresh
def get_metric(self, thresh=0.2, below_fpr=0.001):
sim, label = self._get_sim_label()
acc, p, r, fpr = self._cal_metric(sim, label, thresh)
acc_fpr, p_fpr, r_fpr, thresh_fpr = self._cal_metric_fpr(sim, label, below_fpr)
return acc, p, r, fpr, acc_fpr, p_fpr, r_fpr, thresh_fpr
def draw_curve(self):
P = []
R = []
TPR = []
FPR = []
sim, label = self._get_sim_label()
for thresh in np.linspace(-1, 1, 100):
acc, p, r, fpr = self._cal_metric(sim, label, thresh)
P.append(p)
R.append(r)
TPR.append(r)
FPR.append(fpr)
plt.axis([0, 1, 0, 1])
plt.xlabel("R")
plt.ylabel("P")
plt.plot(R, P, color="r", linestyle="--", marker="*", linewidth=1.0)
plt.show()
plt.axis([0, 1, 0, 1])
plt.xlabel("FRP")
plt.ylabel("TPR")
plt.plot(FPR, TPR, color="r", linestyle="--", marker="*", linewidth=1.0)
plt.show()
def parse_args(argv):
parser = argparse.ArgumentParser(description='valid model')
parser.add_argument('--config_path', type=str, help='path to config path', default='configs/config.yaml')
args = parser.parse_args(argv)
return args
def main():
args = parse_args(sys.argv[1:])
# logger.info(args)
with open(args.config_path) as cfg:
config = yaml.load(cfg, Loader=yaml.FullLoader)
gd = GenerateData(config)
valid_data = gd.get_val_data(config['valid_num'])
model = MyModel(ResNet_v1_50, embedding_size=config['embedding_size'])
import os
ckpt_dir = os.path.expanduser(config['ckpt_dir'])
ckpt = tf.train.Checkpoint(backbone=model.backbone)
ckpt.restore(tf.train.latest_checkpoint(ckpt_dir)).expect_partial()
print("Restored from {}".format(tf.train.latest_checkpoint(ckpt_dir)))
vd = Valid_Data(model, valid_data)
acc, p, r, fpr, acc_fpr, p_fpr, r_fpr, thresh_fpr = vd.get_metric(0.2, 0.001)
print(acc, p, r, fpr, acc_fpr, p_fpr, r_fpr, thresh_fpr)
vd.draw_curve()
if __name__ == '__main__':
# logger.info("hello, insightface/recognition")
main()
|
fuzzers/ECP5/103-gsr/fuzzer.py | Keno/prjtrellis | 256 | 12678896 | from fuzzconfig import FuzzConfig
import nonrouting
import pytrellis
import fuzzloops
import interconnect
cfg = FuzzConfig(job="GSR", family="ECP5", device="LFE5U-45F", ncl="empty.ncl",
tiles=["MIB_R71C4:EFB0_PICB0", "MIB_R34C41:VIQ_BUF"])
def get_substs(gsrmode="ACTIVE_LOW", syncmode="NONE"):
if gsrmode == "NONE":
comment = "//"
else:
comment = ""
if syncmode == "NONE":
syncmode = "#OFF"
return dict(comment=comment, gsrmode=gsrmode, syncmode=syncmode)
def main():
pytrellis.load_database("../../../database")
cfg.setup()
empty_bitfile = cfg.build_design(cfg.ncl, {})
cfg.ncl = "gsr.ncl"
nonrouting.fuzz_enum_setting(cfg, "GSR.GSRMODE", ["NONE", "ACTIVE_LOW", "ACTIVE_HIGH"],
lambda x: get_substs(gsrmode=x), empty_bitfile, False)
nonrouting.fuzz_enum_setting(cfg, "GSR.SYNCMODE", ["NONE", "ASYNC", "SYNC"],
lambda x: get_substs(syncmode=x), empty_bitfile, False)
for rcfg, rc, prefix in [
(FuzzConfig(job="GSR", family="ECP5", device="LFE5U-25F", ncl="gsr_routing_25k.ncl",
tiles=["MIB_R50C4:EFB0_PICB0"]), "R49C4", "25K_"),
(FuzzConfig(job="GSR", family="ECP5", device="LFE5U-45F", ncl="gsr_routing.ncl",
tiles=["MIB_R71C4:EFB0_PICB0"]), "R70C4", "45K_"),
(FuzzConfig(job="GSR", family="ECP5", device="LFE5U-85F", ncl="gsr_routing_85k.ncl",
tiles=["MIB_R95C4:EFB0_PICB0"]), "R94C4", "85K_"),
]:
rcfg.setup()
interconnect.fuzz_interconnect_with_netnames(
rcfg,
["{}_JGSR_GSR".format(rc), "{}_JCLK_GSR".format(rc)],
bidir=True,
nonlocal_prefix=prefix
)
if __name__ == "__main__":
main()
|
tests/st/ops/ascend/vector/test_quantized_avg_pool_001.py | tianjiashuo/akg | 286 | 12678897 | <gh_stars>100-1000
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""quantized_avg_pool test case"""
import os
import pytest
from tests.common.base import TestBase
from tests.common.test_run.ascend.quantized_avg_pool_run import quantized_avg_pool_run
class TestQuantizedAvgPool(TestBase):
"""test case class for quantized_avg_pool"""
    def setup(self):
        """setup case parameters for test"""
        case_name = "test_akg_quantized_avg_pool_001"
        case_path = os.getcwd()
        # params init
        self.params_init(case_name, case_path)
        self.caseresult = True
self._log.info("=================%s Setup case=================", self.casename)
self.testarg_mini = [
# testflag, opfunc, (shape, dtype1, shape_list, dtype2,
# ksize, strides, padding, data_format,
# quant_algo, scale_mode, scale_sqrt), dimArgs
("qavgpool_mini_01", quantized_avg_pool_run, (
(1, 1, 16, 16, 16), "float16", ((1,), (1,)), "float16",
(1, 1, 4, 4, 1), (1, 1, 3, 3, 1), "VALID", "NC1HWC0",
[1, 0], 2, 0)),
("qavgpool_mini_02", quantized_avg_pool_run, (
(1, 1, 16, 16, 16), "float16", ((1,), (1,)), "float16",
(1, 1, 4, 4), (1, 1, 3, 3), "VALID", "NCHW", [1, 0], 2, 0)),
("qavgpool_mini_03", quantized_avg_pool_run, (
(1, 1, 16, 16, 16), "float16", ((1,), (1,)), "float16",
(1, 4, 4, 1), (1, 3, 3, 1), "VALID", "NHWC", [1, 0], 2, 0)),
("qavgpool_mini_04", quantized_avg_pool_run, (
(1, 1, 16, 16, 16), "float16", None, None,
(1, 1, 4, 4, 1), (1, 1, 3, 3, 1), "VALID", "NC1HWC0",
None, None, None)),
("qavgpool_mini_05", quantized_avg_pool_run, (
(1, 1, 16, 16, 16), "float16", ((1,), (1,)), "float16",
(1, 1, 4, 4, 1), (1, 1, 3, 3, 1), "VALID", "NC1HWC0",
[0, 0], 2, 0)),
("qavgpool_mini_06", quantized_avg_pool_run, (
(1, 1, 16, 16, 16), "float16", ((1,), (1,)), "float16",
(1, 1, 4, 4, 1), (1, 1, 3, 3, 1), "SAME", "NC1HWC0",
[1, 0], 2, 0)),
("qavgpool_mini_07", quantized_avg_pool_run, (
(1, 1, 16, 16, 16), "float16", ((1,), (1,)), "float16",
(1, 1, 4, 4), (1, 1, 3, 3), "SAME", "NCHW", [1, 0], 2, 0)),
("qavgpool_mini_08", quantized_avg_pool_run, (
(1, 1, 16, 16, 16), "float16", ((1,), (1,)), "float16",
(1, 4, 4, 1), (1, 3, 3, 1), "SAME", "NHWC", [1, 0], 2, 0)),
("qavgpool_mini_09", quantized_avg_pool_run, (
(1, 1, 16, 16, 16), "float16", None, None,
(1, 1, 4, 4, 1), (1, 1, 3, 3, 1), "SAME", "NC1HWC0",
None, None, None)),
("qavgpool_mini_10", quantized_avg_pool_run, (
(1, 1, 16, 16, 16), "float16", ((1,), (1,)), "float16",
(1, 1, 4, 4, 1), (1, 1, 3, 3, 1), "SAME", "NC1HWC0",
[0, 0], 2, 0)),
]
self.testarg_cloud = [
("qavgpool_mini_05", quantized_avg_pool_run, (
(1, 1, 64, 64, 16), "float16", None, None,
(1, 1, 4, 4, 1), (1, 1, 3, 3, 1), "VALID", "NC1HWC0",
None, None, None)),
("qavgpool_mini_05", quantized_avg_pool_run, (
(1, 1, 64, 64, 16), "float16", ((1,), (1,)), "float16",
(1, 1, 4, 4, 1), (1, 1, 3, 3, 1), "VALID", "NC1HWC0",
[0, 0], 2, 0)),
("qavgpool_cld_big", quantized_avg_pool_run, (
(32, 4, 112, 112, 16), "float16", ((1,), (1,)), "float16",
(1, 1, 3, 3, 1), (1, 1, 2, 2, 1), "SAME", "NC1HWC0",
[0, 0], 2, 0)),
("qavgpool_cld_big", quantized_avg_pool_run, (
(32, 4, 112, 112, 16), "float16", ((1,), (1,)), "float16",
(1, 1, 3, 3, 1), (1, 1, 2, 2, 1), "SAME", "NC1HWC0",
[1, 0], 2, 0)),
]
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_mini_run(self):
"""run case for mini"""
self.common_run(self.testarg_mini[0:3])
def test_cloud_run(self):
"""run case for cloud"""
self.common_run(self.testarg_cloud)
def teardown(self):
"""clean environment"""
self._log.info("=============%s Teardown===========", self.casename)
|
fengshen/data/cbart_dataloader/cbart_dataset.py | TianHongZXY/Fengshenbang-LM | 265 | 12678901 | import os
import time
import torch
import glob
import numpy as np
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
from dataclasses import dataclass
from transformers.data.data_collator import DataCollatorMixin
from fengshen.data.MMapIndexDataset import MMapIndexDataset
def safe_check(a, type='uint8'):
d = {'uint8': [0, 255],
'uint16': [0, 65535]
}
range = d[type]
for l in a:
for e in l:
assert e >= range[0] and e <= range[1]
@dataclass
class CBartDataCollator(DataCollatorMixin):
tokenizer: None
return_tensors: str = "pt"
def __init__(self, args):
self.masked_lm = args.masked_lm
self.encoder_loss_type = args.encoder_loss_type
@staticmethod
def create_decoder_inputs(encoder_inputs, encoder_labels, mask_token_id):
"""
:param encoder_inputs: list, each element is an int
:param encoder_labels: list, each element is an int
:return:
"""
decoder_inputs = []
for i, l in zip(encoder_inputs, encoder_labels):
if l == 0:
decoder_inputs.append(i)
elif l == 1:
decoder_inputs.append(mask_token_id)
else:
decoder_inputs += [mask_token_id] * (l - 1)
decoder_inputs.append(i)
return torch.tensor(decoder_inputs, dtype=torch.long)
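    # Hedged worked example (not part of the original file): with an illustrative
    # mask_token_id of 50264, encoder_inputs = [5, 6, 7] and encoder_labels = [0, 1, 2]
    # yield [5, 50264, 50264, 7]: label 0 copies the token, label 1 replaces it with a
    # single mask, and label l > 1 inserts (l - 1) masks before copying the token.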
    def torch_call(self, features):
encoder_inputs = [s[0] for s in features]
encoder_labels = [s[1] for s in features]
decoder_labels = [s[2] for s in features]
# Mask to avoid performing attention on padding token indices in encoder_inputs.
_mask = pad_sequence(
encoder_inputs, batch_first=True, padding_value=-100)
attention_mask = torch.zeros(_mask.shape, dtype=torch.float32)
attention_mask = attention_mask.masked_fill(_mask != -100, 1)
encoder_inputs = pad_sequence(encoder_inputs, batch_first=True,
padding_value=self.tokenizer.pad_token_id)
encoder_labels = pad_sequence(
encoder_labels, batch_first=True, padding_value=-100)
if self.encoder_loss_type == 1: # labels for mse loss
encoder_labels = encoder_labels.float()
decoder_labels = pad_sequence(
decoder_labels, batch_first=True, padding_value=-100)
# avoid computing loss on the first token, i.e. bos_token
decoder_labels[:, 0] = -100
# this method is for non-autoregressive decoding.
decoder_inputs = [self.create_decoder_inputs(
s[0], s[1], self.tokenizer.mask_token_id) for s in features]
# replace the eos_token_id with pad_token_id
for i, _ in enumerate(decoder_inputs):
decoder_inputs[i][-1] = self.tokenizer.pad_token_id
decoder_inputs = pad_sequence(decoder_inputs, batch_first=True,
padding_value=self.tokenizer.pad_token_id)
        # shift decoder_inputs right by one position and prepend the eos_token_id
_tmp = decoder_inputs.clone()
decoder_inputs[:, 1:] = _tmp[:, :-1]
decoder_inputs[:, 0] = self.tokenizer.eos_token_id
# construct labels for masked lm loss
masked_lm_labels = decoder_labels.clone()
masked_lm_labels[_tmp != self.tokenizer.mask_token_id] = -100
if self.masked_lm:
decoder_labels = masked_lm_labels
return {
"input_ids": encoder_inputs,
"encoder_labels": encoder_labels,
"decoder_input_ids": decoder_inputs,
"labels": decoder_labels,
"attention_mask": attention_mask,
}
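# Hedged usage sketch (not part of the original file; `args`, `tokenizer` and `dataset`
# are placeholders): the collator is meant to be plugged into a torch DataLoader, with
# `args` supplying masked_lm/encoder_loss_type and the tokenizer attached afterwards, e.g.
#   collator = CBartDataCollator(args)
#   collator.tokenizer = tokenizer
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=collator)
# Each batch is then a dict with input_ids, encoder_labels, decoder_input_ids,
# labels and attention_mask.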
class BARTDataset(Dataset):
def __init__(self, dataset, mode, tokenizer=None, num_labels=-1, insert_mode=-1, max_sentence_length=40,
encoder_loss_type=0, statistics=True):
self.encoder_loss_type = encoder_loss_type
assert mode in ["train", "test", 'dev']
self.mode = mode
if self.mode == 'test' or self.mode == 'dev':
self.is_train = False
else:
self.is_train = True
self.tokenizer = tokenizer
self.max_sentence_length = max_sentence_length + 2 # the bos and eos tokens
self.input_dataset = []
self.encoder_labels_dataset = []
self.decoder_labels_dataset = []
data_dict_path_format = '/cognitive_comp/gaoxinyu/data/{}/{}_synthetic_max_insert_label{}_insert_mode{}_*.pt'.format(
dataset, mode, num_labels - 2, insert_mode)
data_dict_paths = glob.glob(data_dict_path_format)
for data_dict_path in data_dict_paths:
if os.path.exists(data_dict_path):
print(f'''Loading data from {data_dict_path}''', flush=True)
filename = ''.join(data_dict_path.rsplit('.pt', 1))
self.input_dataset += [MMapIndexDataset(filename + "_incorrect_input_ids_list")]
self.encoder_labels_dataset += [MMapIndexDataset(
filename + "_label_ids_list")]
self.decoder_labels_dataset += [MMapIndexDataset(
filename + "_target_ids_list")]
else:
print(
f'Please create the synthetic datafile {data_dict_path} with create_synthetic_data.py.')
self.len = 0
for ds in self.input_dataset:
self.len += len(ds)
        # TODO make sure the encoder loss weighting logic applies to every rank!
if statistics:
# print('Statistics for sentence length:')
# lengths = [len(e) for e in self.decoder_labels]
# (unique, counts) = np.unique(lengths, return_counts=True)
# for k, v in zip(unique,counts):
# print(f'sentence length{k}: {v}')
# print('Statistics for sentence labels:')
labels = []
# too slow!!
# for ds in self.encoder_labels_dataset:
# for i in range(0, len(ds)):
# labels.extend(ds.__getitem__(i))
# use only one dataset to calc
for i in self.encoder_labels_dataset[0]:
labels.extend(i)
print(len(labels))
(unique, counts) = np.unique(labels, return_counts=True)
all_label_counts = 0
for k, v in zip(unique, counts):
print(f'Label {k}: {v}')
all_label_counts += v
            # ZZ: calculate weights for different labels, labels with higher numbers get lower weights proportionally!
revert_label_weights = 1 / \
np.array([v / all_label_counts for k, v in zip(unique, counts)])
self.label_weights = revert_label_weights / \
np.sum(revert_label_weights)
else:
# ZZ: if statistics is not triggered, manually assign weights to different class
if num_labels == 7:
                # the cross entropy loss weights do not need to sum to 1
self.label_weights = [0.01, 0.05, 0.1, 0.1, 0.5, 0.5, 0.5]
else:
self.label_weights = [1 / num_labels] * num_labels
print(f"label weights for encoder will be {self.label_weights}")
def __getitem__(self, idx):
for i in range(0, len(self.input_dataset)):
if idx >= len(self.input_dataset[i]):
idx -= len(self.input_dataset[i])
else:
break
return torch.tensor(self.input_dataset[i].__getitem__(idx), dtype=torch.long), \
torch.tensor(self.encoder_labels_dataset[i].__getitem__(idx), dtype=torch.long), \
torch.tensor(self.decoder_labels_dataset[i].__getitem__(idx), dtype=torch.long)
def __len__(self):
return self.len
def create_decoder_inputs(self, encoder_inputs, encoder_labels, mask_token_id):
"""
:param encoder_inputs: list, each element is an int
:param encoder_labels: list, each element is an int
        :return: 1-D torch.LongTensor with the decoder input ids
"""
decoder_inputs = []
for i, l in zip(encoder_inputs, encoder_labels):
if l == 0:
decoder_inputs.append(i)
elif l == 1:
decoder_inputs.append(mask_token_id)
else:
decoder_inputs += [mask_token_id] * (l - 1)
decoder_inputs.append(i)
return torch.tensor(decoder_inputs, dtype=torch.long)
def create_mini_batch(self, samples):
encoder_inputs = [s[0] for s in samples]
encoder_labels = [s[1] for s in samples]
decoder_labels = [s[2] for s in samples]
# Mask to avoid performing attention on padding token indices in encoder_inputs.
_mask = pad_sequence(encoder_inputs, batch_first=True, padding_value=-100)
attention_mask = torch.zeros(_mask.shape, dtype=torch.float32)
attention_mask = attention_mask.masked_fill(_mask != -100, 1)
encoder_inputs = pad_sequence(encoder_inputs, batch_first=True,
padding_value=self.tokenizer.pad_token_id)
encoder_labels = pad_sequence(encoder_labels, batch_first=True, padding_value=-100)
if self.encoder_loss_type == 1: # labels for mse loss
encoder_labels = encoder_labels.float()
decoder_labels = pad_sequence(decoder_labels, batch_first=True, padding_value=-100)
# avoid computing loss on the first token, i.e. bos_token
decoder_labels[:, 0] = -100
# this method is for non-autoregressive decoding.
decoder_inputs = [self.create_decoder_inputs(
s[0], s[1], self.tokenizer.mask_token_id) for s in samples]
# replace the eos_token_id with pad_token_id
for i, _ in enumerate(decoder_inputs):
decoder_inputs[i][-1] = self.tokenizer.pad_token_id
decoder_inputs = pad_sequence(decoder_inputs, batch_first=True,
padding_value=self.tokenizer.pad_token_id)
        # shift decoder_inputs right by one position and prepend the eos_token_id
_tmp = decoder_inputs.clone()
decoder_inputs[:, 1:] = _tmp[:, :-1]
decoder_inputs[:, 0] = self.tokenizer.eos_token_id
# construct labels for masked lm loss
masked_lm_labels = decoder_labels.clone()
masked_lm_labels[_tmp != self.tokenizer.mask_token_id] = -100
return {
"input_ids": encoder_inputs,
"encoder_labels": encoder_labels,
"decoder_input_ids": decoder_inputs,
"labels": decoder_labels,
"attention_mask": attention_mask,
}
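# Hedged usage sketch (not part of the original file; `trainset` is a placeholder):
# create_mini_batch doubles as a collate_fn, so a loader over the dataset can be built as
#   loader = torch.utils.data.DataLoader(trainset, batch_size=8, shuffle=True,
#                                        collate_fn=trainset.create_mini_batch)
# which yields the same batch dict as the collator above.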
def get_train_dev_dataset(args, tokenizer):
trainset = BARTDataset(
args.dataset, "train", tokenizer=tokenizer, num_labels=args.num_labels,
insert_mode=args.insert_mode, encoder_loss_type=args.encoder_loss_type)
testset = BARTDataset(
args.dataset, mode='dev', tokenizer=tokenizer, num_labels=args.num_labels,
insert_mode=args.insert_mode, encoder_loss_type=args.encoder_loss_type)
return trainset, testset
|
torchgan/logging/visualize.py | torchgan/torchgan | 1,300 | 12678930 | <reponame>torchgan/torchgan<filename>torchgan/logging/visualize.py
import torch
import torchvision
from ..models.model import Discriminator, Generator
from .backends import *
if TENSORBOARD_LOGGING == 1:
from tensorboardX import SummaryWriter
if VISDOM_LOGGING == 1:
import visdom
__all__ = [
"Visualize",
"LossVisualize",
"MetricVisualize",
"GradientVisualize",
"ImageVisualize",
]
class Visualize(object):
r"""Base class for all Visualizations.
Args:
visualize_list (list, optional): List of the functions needed for visualization.
visdom_port (int, optional): Port to log using ``visdom``. The visdom server needs to be
manually started at this port else an error will be thrown and the code will crash.
This is ignored if ``VISDOM_LOGGING`` is ``0``.
log_dir (str, optional): Directory where TensorboardX should store the logs. This is
ignored if ``TENSORBOARD_LOGGING`` is ``0``.
        writer (tensorboardX.SummaryWriter, optional): Send a `SummaryWriter` if you
don't want to start a new SummaryWriter.
"""
def __init__(
self, visualize_list, visdom_port=8097, log_dir=None, writer=None
):
self.logs = {}
for item in visualize_list:
name = type(item).__name__
self.logs[name] = []
self.step = 1
if TENSORBOARD_LOGGING == 1:
self._build_tensorboard(log_dir, writer)
if VISDOM_LOGGING == 1:
self._build_visdom(visdom_port)
def _build_tensorboard(self, log_dir, writer):
r"""Starts the tensorboard logging utilities.
Args:
log_dir (str, optional): Directory where TensorboardX should store the logs.
            writer (tensorboardX.SummaryWriter, optional): Send a `SummaryWriter` if you
don't want to start a new SummaryWriter.
"""
self.writer = SummaryWriter(log_dir) if writer is None else writer
def _build_visdom(self, port):
r"""Starts the visdom logging utilities.
Args:
            port (int, optional): Port to log using ``visdom``. A default server is started at port
                ``8097``, so a new server has to be started manually if the port is changed.
"""
self.vis = visdom.Visdom(port=port)
def step_update(self):
r"""Helper function which updates the step at the end of
one print iteration.
"""
self.step += 1
def log_tensorboard(self):
r"""Tensorboard logging function. Needs to be defined in the subclass
:raises NotImplementedError:
"""
raise NotImplementedError
def log_console(self):
r"""Console logging function. Needs to be defined in the subclass
:raises NotImplementedError:
"""
raise NotImplementedError
def log_visdom(self):
r"""Visdom logging function. Needs to be defined in the subclass
:raises NotImplementedError:
"""
raise NotImplementedError
def __call__(
self,
*args,
lock_console=False,
lock_tensorboard=False,
lock_visdom=False,
**kwargs
):
if not lock_console and CONSOLE_LOGGING == 1:
self.log_console(*args, **kwargs)
if not lock_tensorboard and TENSORBOARD_LOGGING == 1:
self.log_tensorboard(*args, **kwargs)
if not lock_visdom and VISDOM_LOGGING == 1:
self.log_visdom(*args, **kwargs)
self.step_update()
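# Hedged illustration (not part of the original file): subclasses only implement the
# log_console/log_tensorboard/log_visdom hooks; calling the instance fans the call out
# to every backend whose *_LOGGING flag is 1, and the lock_* keyword arguments mute a
# single backend for one step, e.g. metric_vis(lock_visdom=True).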
class LossVisualize(Visualize):
r"""This class provides the Visualizations for Generator and Discriminator Losses.
Args:
visualize_list (list, optional): List of the functions needed for visualization.
visdom_port (int, optional): Port to log using ``visdom``. The visdom server needs to be
manually started at this port else an error will be thrown and the code will crash.
This is ignored if ``VISDOM_LOGGING`` is ``0``.
log_dir (str, optional): Directory where TensorboardX should store the logs. This is
ignored if ``TENSORBOARD_LOGGING`` is ``0``.
        writer (tensorboardX.SummaryWriter, optional): Send a `SummaryWriter` if you
don't want to start a new SummaryWriter.
"""
def log_tensorboard(self, running_losses):
r"""Tensorboard logging function. This function logs the following:
- ``Running Discriminator Loss``
- ``Running Generator Loss``
- ``Running Losses``
- Loss Values of the individual Losses.
Args:
running_losses (dict): A dict with 2 items namely, ``Running Discriminator Loss``,
and ``Running Generator Loss``.
"""
self.writer.add_scalar(
"Running Discriminator Loss",
running_losses["Running Discriminator Loss"],
self.step,
)
self.writer.add_scalar(
"Running Generator Loss",
running_losses["Running Generator Loss"],
self.step,
)
self.writer.add_scalars("Running Losses", running_losses, self.step)
for name, value in self.logs.items():
val = value[-1]
if type(val) is tuple:
self.writer.add_scalar(
"Losses/{}-Generator".format(name), val[0], self.step
)
self.writer.add_scalar(
"Losses/{}-Discriminator".format(name), val[1], self.step
)
else:
self.writer.add_scalar(
"Losses/{}".format(name), val, self.step
)
def log_console(self, running_losses):
r"""Console logging function. This function logs the mean ``generator`` and ``discriminator``
losses.
Args:
running_losses (dict): A dict with 2 items namely, ``Running Discriminator Loss``,
and ``Running Generator Loss``.
"""
for name, val in running_losses.items():
print("Mean {} : {}".format(name, val))
def log_visdom(self, running_losses):
r"""Visdom logging function. This function logs the following:
- ``Running Discriminator Loss``
- ``Running Generator Loss``
- ``Running Losses``
- Loss Values of the individual Losses.
Args:
running_losses (dict): A dict with 2 items namely, ``Running Discriminator Loss``,
and ``Running Generator Loss``.
"""
self.vis.line(
[running_losses["Running Discriminator Loss"]],
[self.step],
win="Running Discriminator Loss",
update="append",
opts=dict(
title="Running Discriminator Loss",
xlabel="Time Step",
ylabel="Running Loss",
),
)
self.vis.line(
[running_losses["Running Generator Loss"]],
[self.step],
win="Running Generator Loss",
update="append",
opts=dict(
title="Running Generator Loss",
xlabel="Time Step",
ylabel="Running Loss",
),
)
self.vis.line(
[
[
running_losses["Running Discriminator Loss"],
running_losses["Running Generator Loss"],
]
],
[self.step],
win="Running Losses",
update="append",
opts=dict(
title="Running Losses",
xlabel="Time Step",
ylabel="Running Loss",
legend=["Discriminator", "Generator"],
),
)
for name, value in self.logs.items():
val = value[-1]
if type(val) is tuple:
name1 = "{}-Generator".format(name)
name2 = "{}-Discriminator".format(name)
self.vis.line(
[val[0]],
[self.step],
win=name1,
update="append",
opts=dict(
title=name1, xlabel="Time Step", ylabel="Loss Value"
),
)
self.vis.line(
[val[1]],
[self.step],
win=name2,
update="append",
opts=dict(
title=name2, xlabel="Time Step", ylabel="Loss Value"
),
)
else:
self.vis.line(
[val],
[self.step],
win=name,
update="append",
opts=dict(
title=name, xlabel="Time Step", ylabel="Loss Value"
),
)
def __call__(self, trainer, **kwargs):
running_generator_loss = (
trainer.loss_information["generator_losses"]
/ trainer.loss_information["generator_iters"]
)
running_discriminator_loss = (
trainer.loss_information["discriminator_losses"]
/ trainer.loss_information["discriminator_iters"]
)
running_losses = {
"Running Discriminator Loss": running_discriminator_loss,
"Running Generator Loss": running_generator_loss,
}
super(LossVisualize, self).__call__(running_losses, **kwargs)
class MetricVisualize(Visualize):
r"""This class provides the Visualizations for Metrics.
Args:
visualize_list (list, optional): List of the functions needed for visualization.
visdom_port (int, optional): Port to log using ``visdom``. The visdom server needs to be
manually started at this port else an error will be thrown and the code will crash.
This is ignored if ``VISDOM_LOGGING`` is ``0``.
log_dir (str, optional): Directory where TensorboardX should store the logs. This is
ignored if ``TENSORBOARD_LOGGING`` is ``0``.
        writer (tensorboardX.SummaryWriter, optional): Send a `SummaryWriter` if you
don't want to start a new SummaryWriter.
"""
def log_tensorboard(self):
r"""Tensorboard logging function. This function logs the values of the individual metrics."""
for name, value in self.logs.items():
self.writer.add_scalar(
"Metrics/{}".format(name), value[-1], self.step
)
def log_console(self):
r"""Console logging function. This function logs the mean metrics."""
for name, val in self.logs.items():
print("{} : {}".format(name, val[-1]))
def log_visdom(self):
r"""Visdom logging function. This function logs the values of the individual metrics."""
for name, value in self.logs.items():
self.vis.line(
[value[-1]],
[self.step],
win=name,
update="append",
opts=dict(
title=name, xlabel="Time Step", ylabel="Metric Value"
),
)
class GradientVisualize(Visualize):
r"""This class provides the Visualizations for the Gradients.
Args:
visualize_list (list, optional): List of the functions needed for visualization.
visdom_port (int, optional): Port to log using ``visdom``. The visdom server needs to be
manually started at this port else an error will be thrown and the code will crash.
This is ignored if ``VISDOM_LOGGING`` is ``0``.
log_dir (str, optional): Directory where TensorboardX should store the logs. This is
ignored if ``TENSORBOARD_LOGGING`` is ``0``.
        writer (tensorboardX.SummaryWriter, optional): Send a `SummaryWriter` if you
don't want to start a new SummaryWriter.
"""
def __init__(
self, visualize_list, visdom_port=8097, log_dir=None, writer=None
):
if visualize_list is None or len(visualize_list) == 0:
raise Exception("Gradient Visualizer requires list of model names")
self.logs = {}
for item in visualize_list:
self.logs[item] = [0.0]
self.step = 1
if TENSORBOARD_LOGGING == 1:
self._build_tensorboard(log_dir, writer)
if VISDOM_LOGGING == 1:
self._build_visdom(visdom_port)
def log_tensorboard(self, name):
r"""Tensorboard logging function. This function logs the values of the individual gradients.
Args:
name (str): Name of the model whose gradients are to be logged.
"""
self.writer.add_scalar(
"Gradients/{}".format(name),
self.logs[name][len(self.logs[name]) - 1],
self.step,
)
def log_console(self, name):
r"""Console logging function. This function logs the mean gradients.
Args:
name (str): Name of the model whose gradients are to be logged.
"""
print(
"{} Gradients : {}".format(
name, self.logs[name][len(self.logs[name]) - 1]
)
)
def log_visdom(self, name):
r"""Visdom logging function. This function logs the values of the individual gradients.
Args:
name (str): Name of the model whose gradients are to be logged.
"""
self.vis.line(
[self.logs[name][len(self.logs[name]) - 1]],
[self.step],
win=name,
update="append",
opts=dict(title=name, xlabel="Time Step", ylabel="Gradient"),
)
def update_grads(self, name, model, eps=1e-5):
r"""Updates the gradient logs.
Args:
name (str): Name of the model.
model (torch.nn.Module): Either a ``torchgan.models.Generator`` or a
``torchgan.models.Discriminator`` or their subclass.
eps (float, optional): Tolerance value.
"""
gradsum = 0.0
for p in model.parameters():
if p.grad is not None:
gradsum += torch.sum(p.grad ** 2).clone().item()
if gradsum > eps:
self.logs[name][len(self.logs[name]) - 1] += gradsum
model.zero_grad()
def report_end_epoch(self):
r"""Prints to the console at the end of the epoch."""
if CONSOLE_LOGGING == 1:
for key, val in self.logs.items():
print(
"{} Mean Gradients : {}".format(key, sum(val) / len(val))
)
def __call__(self, trainer, **kwargs):
for name in trainer.model_names:
super(GradientVisualize, self).__call__(name, **kwargs)
self.logs[name].append(0.0)
class ImageVisualize(Visualize):
r"""This class provides the Logging for the Images.
Args:
trainer (torchgan.trainer.Trainer): The base trainer used for training.
visdom_port (int, optional): Port to log using ``visdom``. The visdom server needs to be
manually started at this port else an error will be thrown and the code will crash.
This is ignored if ``VISDOM_LOGGING`` is ``0``.
log_dir (str, optional): Directory where TensorboardX should store the logs. This is
ignored if ``TENSORBOARD_LOGGING`` is ``0``.
        writer (tensorboardX.SummaryWriter, optional): Send a `SummaryWriter` if you
don't want to start a new SummaryWriter.
test_noise (torch.Tensor, optional): If provided then it will be used as the noise for image
sampling.
nrow (int, optional): Number of rows in which the image is to be stored.
"""
def __init__(
self,
trainer,
visdom_port=8097,
log_dir=None,
writer=None,
test_noise=None,
nrow=8,
):
super(ImageVisualize, self).__init__(
[], visdom_port=visdom_port, log_dir=log_dir, writer=writer
)
self.test_noise = []
for model in trainer.model_names:
if isinstance(getattr(trainer, model), Generator):
self.test_noise.append(
getattr(trainer, model).sampler(
trainer.sample_size, trainer.device
)
if test_noise is None
else test_noise
)
self.step = 1
self.nrow = nrow
def log_tensorboard(self, trainer, image, model):
r"""Logs a generated image in tensorboard at the end of an epoch.
Args:
trainer (torchgan.trainer.Trainer): The base trainer used for training.
image (Image): The generated image.
model (str): The name of the model which generated the ``image``.
"""
self.writer.add_image(
"Generated Samples/{}".format(model), image, self.step
)
def log_console(self, trainer, image, model):
r"""Saves a generated image at the end of an epoch. The path where the image is
being stored is controlled by the ``trainer``.
Args:
trainer (torchgan.trainer.Trainer): The base trainer used for training.
image (Image): The generated image.
model (str): The name of the model which generated the ``image``.
"""
save_path = "{}/epoch{}_{}.png".format(trainer.recon, self.step, model)
print("Generating and Saving Images to {}".format(save_path))
torchvision.utils.save_image(image, save_path)
def log_visdom(self, trainer, image, model):
r"""Logs a generated image in visdom at the end of an epoch.
Args:
trainer (torchgan.trainer.Trainer): The base trainer used for training.
image (Image): The generated image.
model (str): The name of the model which generated the ``image``.
"""
self.vis.image(
image, opts=dict(caption="Generated Samples/{}".format(model))
)
def __call__(self, trainer, **kwargs):
pos = 0
for model in trainer.model_names:
if isinstance(getattr(trainer, model), Generator):
generator = getattr(trainer, model)
with torch.no_grad():
image = generator(*self.test_noise[pos])
image = torchvision.utils.make_grid(
image, nrow=self.nrow, normalize=True, range=(-1, 1)
)
super(ImageVisualize, self).__call__(
trainer, image, model, **kwargs
)
self.step -= 1
pos = pos + 1
self.step += 1 if pos > 0 else 0
|
python/regex.py | honux77/practice | 152 | 12678939 | <filename>python/regex.py
import re
# simple code to extract serial from xml
# use regex and findall method
with open('keys.txt') as fp:
data = fp.read()
p = re.compile(r'\w{5}-\w{5}-\w{5}-\w{5}-\w{5}')
keys = p.findall(data)
with open('newkey.csv', 'w') as fp:
fp.writelines('win7pro-key\n')
for key in keys:
fp.write(key +'\n')
|
demos/place_recognition_demo/python/place_recognition_demo/place_recognition.py | APrigarina/open_model_zoo | 1,031 | 12678940 | <reponame>APrigarina/open_model_zoo
"""
Copyright (c) 2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import cv2
from tqdm import tqdm
from place_recognition_demo.common import crop_resize
from openvino.inference_engine import IECore # pylint: disable=no-name-in-module
class IEModel: # pylint: disable=too-few-public-methods
""" Class that allows working with Inference Engine model. """
def __init__(self, model_path, device, cpu_extension):
ie = IECore()
if cpu_extension and device == 'CPU':
ie.add_extension(cpu_extension, 'CPU')
self.net = ie.read_network(model_path, model_path.with_suffix('.bin'))
self.input_name = next(iter(self.net.input_info))
self.output_name = next(iter(self.net.outputs))
self.input_size = self.net.input_info[self.input_name].input_data.shape
self.exec_net = ie.load_network(network=self.net, device_name=device)
def predict(self, image):
''' Takes input image and returns L2-normalized embedding vector. '''
assert len(image.shape) == 4
image = np.transpose(image, (0, 3, 1, 2))
out = self.exec_net.infer(inputs={self.input_name: image})[self.output_name]
return out
class PlaceRecognition:
""" Class representing Place Recognition algorithm. """
def __init__(self, model_path, device, gallery_path, cpu_extension, gallery_size):
self.impaths = (list(gallery_path.rglob("*.jpg")))[:gallery_size or None]
self.model = IEModel(model_path, device, cpu_extension)
self.input_size = self.model.input_size[2:]
self.embeddings = self.compute_gallery_embeddings()
def compute_embedding(self, image):
''' Takes input image and computes embedding vector. '''
image = crop_resize(image, self.input_size)
embedding = self.model.predict(image)
return embedding
def search_in_gallery(self, embedding):
''' Takes input embedding vector and searches it in the gallery. '''
distances = np.linalg.norm(embedding - self.embeddings, axis=1, ord=2)
sorted_indexes = np.argsort(distances)
return sorted_indexes, distances
def compute_gallery_embeddings(self):
''' Computes embedding vectors for the gallery images. '''
images = []
for full_path in tqdm(self.impaths, desc='Reading gallery images.'):
image = cv2.imread(str(full_path))
if image is None:
print("ERROR: cannot process image, full_path =", str(full_path))
continue
image = crop_resize(image, self.input_size)
images.append(image)
embeddings = np.vstack([self.model.predict(image) for image in tqdm(
images, desc='Computing embeddings of gallery images.')])
return embeddings
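# Hedged usage sketch (not part of the original file; model_path, gallery_path and
# query_image are placeholders): a query image can be matched against the gallery with
#   pr = PlaceRecognition(model_path, 'CPU', gallery_path, None, gallery_size=100)
#   indexes, distances = pr.search_in_gallery(pr.compute_embedding(query_image))
#   best_match = pr.impaths[indexes[0]]
# where indexes orders the gallery by ascending L2 distance to the query embedding.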
|
terrascript/resource/cappyzawa/concourse.py | mjuenema/python-terrascript | 507 | 12679007 | # terrascript/resource/cappyzawa/concourse.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:14:28 UTC)
import terrascript
class concourse_team(terrascript.Resource):
pass
__all__ = [
"concourse_team",
]
|
crabageprediction/venv/Lib/site-packages/fontTools/ttLib/tables/_v_m_t_x.py | 13rianlucero/CrabAgePrediction | 38,667 | 12679011 | from fontTools import ttLib
superclass = ttLib.getTableClass("hmtx")
class table__v_m_t_x(superclass):
headerTag = 'vhea'
advanceName = 'height'
sideBearingName = 'tsb'
numberOfMetricsName = 'numberOfVMetrics'
|
StockAnalysisSystem/plugin/Collector/market_data_tushare_pro.py | SleepySoft/StockAnalysisSystem | 138 | 12679023 | <gh_stars>100-1000
import pandas as pd
import tushare as ts
from StockAnalysisSystem.core.config import TS_TOKEN
from StockAnalysisSystem.core.Utility.common import *
from StockAnalysisSystem.core.Utility.time_utility import *
from StockAnalysisSystem.core.Utility.CollectorUtility import *
# ------------------------------------------------------- Fields -------------------------------------------------------
FIELDS = {
'Market.SecuritiesInfo': {
'ts_code': 'TS代码',
'symbol': '股票代码',
'name': '股票名称',
'area': '所在地域',
'industry': '所属行业',
'fullname': '股票全称',
'enname': '英文全称',
        'market': '市场类型', # main board / SME board / ChiNext / STAR Market
'exchange': '交易所代码',
'curr_type': '交易货币',
        'list_status': '上市状态', # L: listed; D: delisted; P: suspended from listing
'list_date': '上市日期',
'delist_date': '退市日期',
        'is_hs': '是否沪深港通标的', # N: no; H: Shanghai-HK Stock Connect; S: Shenzhen-HK Stock Connect
},
'Market.IndexInfo': {
'ts_code': 'TS代码',
'name': '简称',
'fullname': '指数全称',
'market': '市场',
'publisher': '发布方',
'index_type': '指数风格',
'category': '指数类别',
'base_date': '基期',
'base_point': '基点',
'list_date': '发布日期',
'weight_rule': '加权方式',
'desc': '描述',
'exp_date': '终止日期',
},
'Market.TradeCalender': {
        'exchange': '交易所', # SSE: Shanghai Stock Exchange; SZSE: Shenzhen Stock Exchange
'cal_date': '日历日期',
        'is_open': '是否交易', # 0: market closed; 1: trading
'pretrade_date': '上一个交易日',
},
'Market.NamingHistory': {
'ts_code': 'TS代码',
'name': '证券名称',
'start_date': '开始日期',
'end_date': '结束日期',
'ann_date': '公告日期',
'change_reason': '变更原因',
},
'Market.IndexComponent': {
'ts_code': 'TS代码',
'symbol': '股票代码',
'name': '股票名称',
'area': '所在地域',
'industry': '所属行业',
'fullname': '股票全称',
'enname': '英文全称',
        'market': '市场类型', # main board / SME board / ChiNext / STAR Market
'exchange': '交易所代码',
'curr_type': '交易货币',
        'list_status': '上市状态', # L: listed; D: delisted; P: suspended from listing
'list_date': '上市日期',
'delist_date': '退市日期',
        'is_hs': '是否沪深港通标的', # N: no; H: Shanghai-HK Stock Connect; S: Shenzhen-HK Stock Connect
},
'Market.SecuritiesTags': {
},
}
# -------------------------------------------------------- Prob --------------------------------------------------------
def plugin_prob() -> dict:
return {
'plugin_name': 'market_data_tushare_pro',
'plugin_version': '0.0.0.1',
'tags': ['tusharepro']
}
def plugin_adapt(uri: str) -> bool:
return uri in FIELDS.keys()
def plugin_capacities() -> list:
return list(FIELDS.keys())
# ----------------------------------------------------------------------------------------------------------------------
# stock_basic: https://tushare.pro/document/2?doc_id=25
def __fetch_securities_info(**kwargs) -> pd.DataFrame or None:
result = check_execute_test_flag(**kwargs)
if result is None:
pro = ts.pro_api(TS_TOKEN)
# If we specify the exchange parameter, it raises error.
result = pro.stock_basic(fields=list(FIELDS.get('Market.SecuritiesInfo').keys()))
check_execute_dump_flag(result, **kwargs)
if result is not None:
convert_ts_date_field(result, 'list_date', 'listing_date')
convert_ts_date_field(result, 'delist_date')
# result['list_date'] = pd.to_datetime(result['list_date'], format='%Y-%m-%d')
# result['delist_date'] = pd.to_datetime(result['delist_date'], format='%Y-%m-%d')
# result['listing_date'] = pd.to_datetime(result['list_date'], format='%Y-%m-%d')
if 'code' not in result.columns:
result['code'] = result['ts_code'].apply(lambda val: val.split('.')[0])
if 'exchange' not in result.columns:
result['exchange'] = result['ts_code'].apply(lambda val: val.split('.')[1])
result['exchange'] = result['exchange'].apply(lambda val: 'SSE' if val == 'SH' else val)
result['exchange'] = result['exchange'].apply(lambda val: 'SZSE' if val == 'SZ' else val)
result['stock_identity'] = result['code'] + '.' + result['exchange']
return result
# concept_detail: https://tushare.pro/document/2?doc_id=126
def __fetch_stock_concept(**kwargs) -> pd.DataFrame or None:
ts_code = pickup_ts_code(kwargs)
result = check_execute_test_flag(**kwargs)
if result is None:
pro = ts.pro_api(TS_TOKEN)
ts_delay('concept_detail')
result = pro.concept_detail(ts_code=ts_code, fields=[
'id', 'concept_name', 'ts_code', 'name', 'in_date', 'out_date'])
check_execute_dump_flag(result, **kwargs)
if result is not None:
convert_ts_code_field(result)
# del result['ts_code']
# result['ts_concept'] = result.to_dict('records')
# result['stock_identity'] = ts_code_to_stock_identity(ts_code)
return result
# index_basic: https://tushare.pro/document/2?doc_id=94
def __fetch_indexes_info(**kwargs) -> pd.DataFrame or None:
SUPPORT_MARKETS = ['SSE', 'SZSE', 'CSI', 'CICC', 'SW', 'MSCI', 'OTH']
result = check_execute_test_flag(**kwargs)
if result is None:
pro = ts.pro_api(TS_TOKEN)
result = None
for market in SUPPORT_MARKETS:
sub_result = pro.index_basic(market=market, fields=list(FIELDS.get('Market.IndexInfo').keys()))
result = pd.concat([result, sub_result])
check_execute_dump_flag(result, **kwargs)
if result is not None:
result['exchange'] = result['market']
result['code'] = result['ts_code'].apply(lambda val: val.split('.')[0])
result['listing_date'] = pd.to_datetime(result['list_date'], format='%Y-%m-%d')
result['index_identity'] = result['code'].astype(str) + '.' + result['exchange']
return result
# trade_cal: https://tushare.pro/document/2?doc_id=26
def __fetch_trade_calender(**kwargs) -> pd.DataFrame or None:
exchange = kwargs.get('exchange', '')
if str_available(exchange) and exchange not in ['SSE', 'SZSE', 'A-SHARE']:
return None
result = check_execute_test_flag(**kwargs)
if result is None:
time_serial = kwargs.get('trade_date', None)
since, until = normalize_time_serial(time_serial, default_since(), today())
ts_since = since.strftime('%Y%m%d')
ts_until = until.strftime('%Y%m%d')
pro = ts.pro_api(TS_TOKEN)
# If we specify the exchange parameter, it raises error.
result = pro.trade_cal('', start_date=ts_since, end_date=ts_until)
check_execute_dump_flag(result, **kwargs)
if result is not None:
result.rename(columns={'exchange': 'exchange', 'cal_date': 'trade_date', 'is_open': 'status'}, inplace=True)
# Because tushare only support SSE and they are the same
if exchange == 'SZSE' or exchange == 'A-SHARE':
result.drop(result[result.exchange != 'SSE'].index, inplace=True)
result['exchange'] = exchange
else:
result.drop(result[result.exchange != exchange].index, inplace=True)
result['trade_date'] = pd.to_datetime(result['trade_date'])
return result
# namechange: https://tushare.pro/document/2?doc_id=100
def __fetch_naming_history(**kwargs):
result = check_execute_test_flag(**kwargs)
if result is None:
ts_code = pickup_ts_code(kwargs)
period = kwargs.get('naming_date')
since, until = normalize_time_serial(period, default_since(), today())
ts_since = since.strftime('%Y%m%d')
ts_until = until.strftime('%Y%m%d')
pro = ts.pro_api(TS_TOKEN)
        # Tushare rate limit: this API allows at most 100 calls per minute
ts_delay('namechange')
result = pro.namechange(ts_code=ts_code, start_date=ts_since, end_date=ts_until,
fields='ts_code,name,start_date,end_date,ann_date,change_reason')
check_execute_dump_flag(result, **kwargs)
if result is not None:
if 'start_date' in result.columns:
result['naming_date'] = pd.to_datetime(result['start_date'], format='%Y-%m-%d')
if 'stock_identity' not in result.columns:
result['stock_identity'] = result['ts_code'].apply(ts_code_to_stock_identity)
return result
# ----------------------------------------------------------------------------------------------------------------------
def query(**kwargs) -> pd.DataFrame or None:
uri = kwargs.get('uri')
if uri == 'Market.SecuritiesInfo':
return __fetch_securities_info(**kwargs)
elif uri == 'Market.IndexInfo':
return __fetch_indexes_info(**kwargs)
elif uri == 'Market.TradeCalender':
return __fetch_trade_calender(**kwargs)
elif uri == 'Market.NamingHistory':
return __fetch_naming_history(**kwargs)
elif uri == 'Market.IndexComponent':
return None
elif uri == 'Market.SecuritiesTags':
return __fetch_stock_concept(**kwargs)
else:
return None
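# Hedged usage sketch (not part of the original plugin; the dates are placeholders):
# the collector framework is expected to dispatch by uri, e.g.
#   df = query(uri='Market.TradeCalender', exchange='SSE',
#              trade_date=(datetime.datetime(2020, 1, 1), datetime.datetime(2020, 12, 31)))
# which returns a DataFrame with exchange, trade_date, status and pretrade_date columns.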
def validate(**kwargs) -> bool:
nop(kwargs)
return True
def fields() -> dict:
return FIELDS
|
airflow/providers/tableau/operators/tableau.py | ChaseKnowlden/airflow | 15,947 | 12679030 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Optional
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.tableau.hooks.tableau import (
TableauHook,
TableauJobFailedException,
TableauJobFinishCode,
)
RESOURCES_METHODS = {
'datasources': ['delete', 'refresh'],
'groups': ['delete'],
'projects': ['delete'],
'schedule': ['delete'],
'sites': ['delete'],
'subscriptions': ['delete'],
'tasks': ['delete', 'run'],
'users': ['remove'],
'workbooks': ['delete', 'refresh'],
}
class TableauOperator(BaseOperator):
"""
Execute a Tableau API Resource
https://tableau.github.io/server-client-python/docs/api-ref
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:TableauOperator`
:param resource: The name of the resource to use.
:type resource: str
:param method: The name of the resource's method to execute.
:type method: str
:param find: The reference of resource that will receive the action.
:type find: str
:param match_with: The resource field name to be matched with find parameter.
:type match_with: Optional[str]
:param site_id: The id of the site where the workbook belongs to.
:type site_id: Optional[str]
:param blocking_refresh: By default will be blocking means it will wait until it has finished.
:type blocking_refresh: bool
:param check_interval: time in seconds that the job should wait in
between each instance state checks until operation is completed
:type check_interval: float
:param tableau_conn_id: The :ref:`Tableau Connection id <howto/connection:tableau>`
containing the credentials to authenticate to the Tableau Server.
:type tableau_conn_id: str
"""
def __init__(
self,
*,
resource: str,
method: str,
find: str,
match_with: str = 'id',
site_id: Optional[str] = None,
blocking_refresh: bool = True,
check_interval: float = 20,
tableau_conn_id: str = 'tableau_default',
**kwargs,
) -> None:
super().__init__(**kwargs)
self.resource = resource
self.method = method
self.find = find
self.match_with = match_with
self.check_interval = check_interval
self.site_id = site_id
self.blocking_refresh = blocking_refresh
self.tableau_conn_id = tableau_conn_id
def execute(self, context: dict) -> str:
"""
Executes the Tableau API resource and pushes the job id or downloaded file URI to xcom.
:param context: The task context during execution.
:type context: dict
:return: the id of the job that executes the extract refresh or downloaded file URI.
:rtype: str
"""
available_resources = RESOURCES_METHODS.keys()
if self.resource not in available_resources:
error_message = f'Resource not found! Available Resources: {available_resources}'
raise AirflowException(error_message)
available_methods = RESOURCES_METHODS[self.resource]
if self.method not in available_methods:
error_message = f'Method not found! Available methods for {self.resource}: {available_methods}'
raise AirflowException(error_message)
with TableauHook(self.site_id, self.tableau_conn_id) as tableau_hook:
resource = getattr(tableau_hook.server, self.resource)
method = getattr(resource, self.method)
resource_id = self._get_resource_id(tableau_hook)
response = method(resource_id)
if self.method == 'refresh':
job_id = response.id
if self.blocking_refresh:
if not tableau_hook.wait_for_state(
job_id=job_id,
check_interval=self.check_interval,
target_state=TableauJobFinishCode.SUCCESS,
):
raise TableauJobFailedException(f'The Tableau Refresh {self.resource} Job failed!')
return job_id
def _get_resource_id(self, tableau_hook: TableauHook) -> str:
if self.match_with == 'id':
return self.find
for resource in tableau_hook.get_all(resource_name=self.resource):
if getattr(resource, self.match_with) == self.find:
resource_id = resource.id
self.log.info('Found matching with id %s', resource_id)
return resource_id
raise AirflowException(f'{self.resource} with {self.match_with} {self.find} not found!')
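# Hedged usage sketch (not part of the original module; task_id and find values are
# illustrative): refreshing a workbook by name inside a DAG might look like
#   TableauOperator(task_id='refresh_workbook', resource='workbooks', method='refresh',
#                   find='My Workbook', match_with='name',
#                   tableau_conn_id='tableau_default')
# The operator resolves the resource id via match_with, triggers the refresh and,
# because blocking_refresh defaults to True, waits for the job to reach SUCCESS.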
|
programs/test_java_lookup.py | Unknoob/buck | 8,027 | 12679039 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import unittest
from programs.buck_tool import BuckToolException
from programs.java_lookup import _get_java_path_for_highest_minor_version, get_java_path
from programs.subprocutils import which
ANY_JAVA_VERSION = 8
JAVA_VERSION_THAT_OBVIOUSLY_CANT_EXIST_LOCALLY = 500
class TestJavaPath(unittest.TestCase):
def setUp(self):
self.java_home = tempfile.mkdtemp()
self.java_exec = "java.exe" if os.name == "nt" else "java"
bin_dir = os.path.join(self.java_home, "bin")
os.mkdir(bin_dir)
open(os.path.join(bin_dir, self.java_exec), "w")
def test_with_java_home_valid(self):
os.environ["JAVA_HOME"] = self.java_home
self.assertEqual(
get_java_path(ANY_JAVA_VERSION).lower(),
os.path.join(self.java_home, "bin", self.java_exec).lower(),
)
def test_with_java_home_invalid(self):
os.environ["JAVA_HOME"] = "/nosuchfolder/89aabebc-42cb-4cd8-bcf7-d964371daf3e"
self.assertRaises(BuckToolException)
def test_without_java_home(self):
self.assertEquals(
get_java_path(JAVA_VERSION_THAT_OBVIOUSLY_CANT_EXIST_LOCALLY).lower(),
which("java").lower(),
)
def test_java_home_for_wrong_version_ignored(self):
os.environ["JAVA_HOME"] = (
"/Library/Java/JavaVirtualMachines/jdk-"
+ str(JAVA_VERSION_THAT_OBVIOUSLY_CANT_EXIST_LOCALLY + 1)
+ ".jdk/Contents/Home"
)
self.assertEquals(
get_java_path(JAVA_VERSION_THAT_OBVIOUSLY_CANT_EXIST_LOCALLY).lower(),
which("java").lower(),
)
def test_java_home_for_wrong_version_not_ignored_if_respect_java_home_set(self):
os.environ["JAVA_HOME"] = (
"/Library/Java/JavaVirtualMachines/jdk-"
+ str(JAVA_VERSION_THAT_OBVIOUSLY_CANT_EXIST_LOCALLY + 1)
+ ".jdk/Contents/Home"
)
os.environ["BUCK_RESPECT_JAVA_HOME"] = "1"
self.assertRaises(BuckToolException)
def test_java_8_highest_version_lookup(self):
java_base_path = tempfile.mkdtemp()
os.mkdir(os.path.join(java_base_path, "jdk1.7.0"))
os.mkdir(os.path.join(java_base_path, "jdk1.8.0_100"))
os.mkdir(os.path.join(java_base_path, "jdk1.8.0_200"))
os.mkdir(os.path.join(java_base_path, "jdk1.8.1"))
os.mkdir(os.path.join(java_base_path, "jdk1.8.1_100"))
self.assertEquals(
_get_java_path_for_highest_minor_version(java_base_path, 8),
os.path.join(java_base_path, "jdk1.8.1_100"),
)
def test_openjdk_8_highest_version_lookup(self):
java_base_path = tempfile.mkdtemp()
os.mkdir(os.path.join(java_base_path, "adoptopenjdk-7.jdk"))
os.mkdir(os.path.join(java_base_path, "adoptopenjdk-8.jdk"))
os.mkdir(os.path.join(java_base_path, "adoptopenjdk-9.jdk"))
os.mkdir(os.path.join(java_base_path, "adoptopenjdk-18.jdk"))
self.assertEquals(
_get_java_path_for_highest_minor_version(java_base_path, 8),
os.path.join(java_base_path, "adoptopenjdk-8.jdk"),
)
def test_java_11_highest_version_lookup(self):
java_base_path = tempfile.mkdtemp()
os.mkdir(os.path.join(java_base_path, "jdk-10.0.1"))
os.mkdir(os.path.join(java_base_path, "jdk-11.0.1"))
os.mkdir(os.path.join(java_base_path, "jdk-11.0.2"))
os.mkdir(os.path.join(java_base_path, "jdk-11.0.2_100"))
os.mkdir(os.path.join(java_base_path, "jdk-11.0.2_200"))
os.mkdir(os.path.join(java_base_path, "jdk-12"))
os.mkdir(os.path.join(java_base_path, "jdk-13"))
self.assertEquals(
_get_java_path_for_highest_minor_version(java_base_path, 11),
os.path.join(java_base_path, "jdk-11.0.2_200"),
)
def test_openjdk_11_highest_version_lookup(self):
java_base_path = tempfile.mkdtemp()
os.mkdir(os.path.join(java_base_path, "adoptopenjdk-7.jdk"))
os.mkdir(os.path.join(java_base_path, "adoptopenjdk-8.jdk"))
os.mkdir(os.path.join(java_base_path, "adoptopenjdk-9.jdk"))
os.mkdir(os.path.join(java_base_path, "adoptopenjdk-10.jdk"))
os.mkdir(os.path.join(java_base_path, "adoptopenjdk-11.jdk"))
os.mkdir(os.path.join(java_base_path, "adoptopenjdk-11.0.2.jdk"))
os.mkdir(os.path.join(java_base_path, "adoptopenjdk-12.jdk"))
self.assertEquals(
_get_java_path_for_highest_minor_version(java_base_path, 11),
os.path.join(java_base_path, "adoptopenjdk-11.0.2.jdk"),
)
self.assertEquals(
_get_java_path_for_highest_minor_version(java_base_path, 12),
os.path.join(java_base_path, "adoptopenjdk-12.jdk"),
)
def tearDown(self):
if "JAVA_HOME" in os.environ:
del os.environ["JAVA_HOME"]
if "BUCK_RESPECT_JAVA_HOME" in os.environ:
del os.environ["BUCK_RESPECT_JAVA_HOME"]
shutil.rmtree(self.java_home)
if __name__ == "__main__":
unittest.main()
|
text/src/autogluon/text/text_prediction/metrics.py | zhiqiangdon/autogluon | 4,462 | 12679055 | __all__ = ['calculate_metric_by_expr', 'infer_eval_log_metrics']
import ast
import operator as op
from autogluon.core.constants import MULTICLASS, BINARY, REGRESSION
# supported operators
operators = {ast.Add: op.add, ast.Sub: op.sub, ast.Mult: op.mul,
ast.Div: op.truediv, ast.Pow: op.pow, ast.BitXor: op.xor,
ast.USub: op.neg}
def infer_eval_log_metrics(problem_type, eval_metric=None):
"""Decide default evaluation, stopping, and logging metrics (based on type of prediction problem).
Parameters
----------
problem_type
Type of the problem. Either regression, multiclass, or binary
eval_metric
The eval metric provided by the user
Returns
-------
eval_metric
The updated evaluation metric
log_metrics
The updated logging metric
"""
if problem_type == MULTICLASS:
if eval_metric is None:
eval_metric = 'acc'
log_metrics = ['acc', 'log_loss']
elif problem_type == BINARY:
if eval_metric is None:
eval_metric = 'acc'
log_metrics = ['f1', 'mcc', 'roc_auc', 'acc', 'log_loss']
elif problem_type == REGRESSION:
if eval_metric is None:
eval_metric = 'rmse'
log_metrics = ['r2', 'rmse', 'mae']
else:
raise NotImplementedError('The problem type is not supported yet!')
if eval_metric not in log_metrics:
log_metrics.append(eval_metric)
return eval_metric, log_metrics
def eval_math_expr(expr):
"""Evaluate an expression
Parameters
----------
expr
expression
Returns
-------
ret
Returned value
Examples
--------
>>> eval_math_expr('2^6')
4
>>> eval_math_expr('2**6')
64
>>> eval_math_expr('1 + 2*3**(4^5) / (6 + -7)')
-5.0
"""
return eval_(ast.parse(expr, mode='eval').body)
def eval_(node):
if isinstance(node, ast.Num): # <number>
return node.n
elif isinstance(node, ast.BinOp): # <left> <operator> <right>
return operators[type(node.op)](eval_(node.left), eval_(node.right))
elif isinstance(node, ast.UnaryOp): # <operator> <operand> e.g., -1
return operators[type(node.op)](eval_(node.operand))
else:
raise TypeError(node)
def calculate_metric_by_expr(label_metric_scores: dict, label_names: list, expr: str) -> float:
"""Calculate the metric scores based on the given expression.
Parameters
----------
label_metric_scores
All metric scores reported in the validation phase.
It will be a dict of metric scores.
label_names
Name of the labels
expr
The expression. Supports different possibilities:
- A single metric like 'acc', 'f1', or 'auc'
This means to use this value as the final result.
If there are multiple labels, we use the average of all the individual metrics
- Combined metric, we use the syntax `label.metric_name` to describe the metric of
a given label
- expr = (acc + f1) / 2
The average of the accuracy and f1 of all labels
- expr = (label1.auc + label2.auc) / 2
The average of the auc of "label1" and the auc of "label2"
- expr = 0.8 * intent.acc + 0.2 * slot.f1
0.8 * the accuracy of a label named "intent" +
0.2 * the f1 score of a label named "slot"
- expr = 0.1 * label1.f1 + 0.9 * acc
0.1 * the F1 of label 1 + 0.9 * the average accuracy
Returns
-------
score
The returned score.
"""
original_expr = expr
possible_metric_names = set()
for label_name in label_names:
assert label_name in label_metric_scores,\
'Invalid label_metric_scores,' \
' all provided labels should be in the aggregated label metric scores. ' \
'label_names={}, label_metric_scores={}'.format(label_names, label_metric_scores)
metric_scores = label_metric_scores[label_name]
for metric_name, value, in metric_scores.items():
expr = expr.replace('{}.{}'.format(label_name, metric_name), str(value))
possible_metric_names.add(metric_name)
for metric_name in possible_metric_names:
if metric_name in expr:
avg_metric = 0
for label_name in label_names:
avg_metric += label_metric_scores[label_name][metric_name]
avg_metric /= len(label_names)
expr = expr.replace(metric_name, str(avg_metric))
try:
ret = eval_math_expr(expr)
except Exception:
raise ValueError('Cannot successfully parse the given expression. '
'The original expression = "{}". After the parsing, it becomes {} but '
                         'still cannot be evaluated.'.format(original_expr, expr))
return ret
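# Hedged worked example (not part of the original module): with
#   label_metric_scores = {'intent': {'acc': 0.9, 'f1': 0.8},
#                          'slot': {'acc': 0.7, 'f1': 0.6}}
# and label_names = ['intent', 'slot'], the expression
#   '0.8 * intent.acc + 0.2 * slot.f1' evaluates to 0.8 * 0.9 + 0.2 * 0.6 = 0.84,
# while a bare metric name such as 'acc' averages across labels: (0.9 + 0.7) / 2 = 0.8.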
|
guild/tests/samples/projects/get-started/train.py | msarahan/guildai | 694 | 12679093 | import numpy as np
# Hyperparameters
x = 0.1
noise = 0.1
print("x: %f" % x)
print("noise: %f" % noise)
# Simulated training loss
loss = np.sin(5 * x) * (1 - np.tanh(x ** 2)) + np.random.randn() * noise
print("loss: %f" % loss)
|
aztk/models/plugins/__init__.py | lachiemurray/aztk | 161 | 12679095 | <gh_stars>100-1000
from .plugin_file import *
from .plugin_configuration import *
|
test/nn/conv/test_cheb_conv.py | NucciTheBoss/pytorch_geometric | 2,350 | 12679099 | <filename>test/nn/conv/test_cheb_conv.py
import torch
from torch_geometric.nn import ChebConv
from torch_geometric.testing import is_full_test
def test_cheb_conv():
in_channels, out_channels = (16, 32)
edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
num_nodes = edge_index.max().item() + 1
edge_weight = torch.rand(edge_index.size(1))
x = torch.randn((num_nodes, in_channels))
conv = ChebConv(in_channels, out_channels, K=3)
assert conv.__repr__() == 'ChebConv(16, 32, K=3, normalization=sym)'
out1 = conv(x, edge_index)
assert out1.size() == (num_nodes, out_channels)
out2 = conv(x, edge_index, edge_weight)
assert out2.size() == (num_nodes, out_channels)
out3 = conv(x, edge_index, edge_weight, lambda_max=3.0)
assert out3.size() == (num_nodes, out_channels)
if is_full_test():
jit = torch.jit.script(conv.jittable())
assert jit(x, edge_index).tolist() == out1.tolist()
assert jit(x, edge_index, edge_weight).tolist() == out2.tolist()
assert jit(x, edge_index, edge_weight,
lambda_max=torch.tensor(3.0)).tolist() == out3.tolist()
batch = torch.tensor([0, 0, 1, 1])
edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]])
num_nodes = edge_index.max().item() + 1
edge_weight = torch.rand(edge_index.size(1))
x = torch.randn((num_nodes, in_channels))
lambda_max = torch.tensor([2.0, 3.0])
out4 = conv(x, edge_index, edge_weight, batch)
assert out4.size() == (num_nodes, out_channels)
out5 = conv(x, edge_index, edge_weight, batch, lambda_max)
assert out5.size() == (num_nodes, out_channels)
if is_full_test():
assert jit(x, edge_index, edge_weight, batch).tolist() == out4.tolist()
assert jit(x, edge_index, edge_weight, batch,
lambda_max).tolist() == out5.tolist()
|
dephell/controllers/_resolver.py | jayvdb/dephell | 1,880 | 12679100 | <filename>dephell/controllers/_resolver.py
# built-in
import re
from logging import getLogger
from typing import TYPE_CHECKING, Optional
# external
from packaging.markers import Marker
from yaspin import yaspin
# app
from ..context_tools import nullcontext
from ..models import RootDependency
from ._conflict import analyze_conflict
if TYPE_CHECKING:
# project
from dephell.controllers._graph import Graph
from dephell.controllers._mutator import Mutator
logger = getLogger('dephell.resolver')
REX_BASE_VERSION = re.compile(r'[0-9\.]+')
class Resolver:
def __init__(self, graph: 'Graph', mutator: 'Mutator') -> None:
self.graph = graph
self.mutator = mutator
def apply(self, parent, recursive: bool = False):
"""
Returns conflicting (incompatible) dependency.
"""
for new_dep in parent.dependencies:
other_dep = self.graph.get(new_dep.name)
if other_dep is None:
# add new dep to graph
other_dep = new_dep.copy()
self.graph.add(other_dep)
elif isinstance(other_dep, RootDependency):
                # if some of the dependencies cyclically depend on root
# then ignore these deps
continue
else:
# if dep is locked, but not used, let's just unlock it
if other_dep.locked and not other_dep.used:
other_dep.unlock()
# merge deps
try:
other_dep += new_dep
except TypeError: # conflict happened
return other_dep
# `recursive` used only in re-application of dependencies,
# when the graph already was built before.
if recursive:
self.apply(other_dep, recursive=True)
# check
if not other_dep.compat:
return other_dep
parent.applied = True
def unapply(self, dep, *, force: bool = True, soft: bool = False) -> None:
"""
force -- unapply deps that not applied yet
soft -- do not mark dep as not applied.
"""
if not force and not dep.applied:
return
# it must be before actual unapplying to avoid recursion on circular dependencies
if not soft:
dep.applied = False
for child in dep.dependencies:
child_name = child.name
child = self.graph.get(child_name)
if child is None:
logger.debug('child not found', extra=dict(dep=dep.name, child=child_name))
continue
# unapply current dependency for child
child.unapply(dep.name)
# unapply child because he is modified
self.unapply(child, force=False, soft=soft)
if not soft and dep.locked:
dep.unlock()
def resolve(self, debug: bool = False, silent: bool = False, level: Optional[int] = None) -> bool:
if silent:
spinner = nullcontext(type('Mock', (), {}))
else:
spinner = yaspin(text='resolving...')
with spinner as spinner:
while True:
resolved = self._resolve(debug=debug, silent=silent, level=level, spinner=spinner)
if resolved is None:
continue
self.graph.clear() # remove unused deps from graph
return resolved
def _resolve(self, debug: bool, silent: bool, level: Optional[int], spinner) -> Optional[bool]:
if silent:
logger.debug('next iteration', extra=dict(
layers=len(self.graph._layers),
mutations=self.mutator.mutations,
))
else:
spinner.text = 'layers: {layers}, mutations: {mutations}'.format(
layers=len(self.graph._layers),
mutations=self.mutator.mutations,
)
# get not applied deps
deps = self.graph.get_leafs(level=level)
# if we already build deps for all nodes in graph
if not deps:
return True
# check python version
for dep in deps:
if not dep.python_compat:
self.graph.conflict = dep
return False
no_conflicts = self._apply_deps(deps, debug=debug)
if no_conflicts:
return None
# if we have conflict, try to mutate graph
groups = self.mutator.mutate(self.graph)
# if cannot mutate
if groups is None:
return False
self.graph.conflict = None
# apply mutation
for group in groups:
dep = self.graph.get(group.name)
if dep.group.number != group.number:
logger.debug('mutated', extra=dict(
group_from=str(dep.group),
group_to=str(group),
))
self.unapply(dep)
dep.group = group
return None
def apply_envs(self, envs: set, deep: bool = True) -> None:
"""Filter out dependencies from the graph by the given envs.
deep: Helps to avoid fetching dependencies (hence the network requests).
            Set it to False for a graph that is not resolved yet to make filtering faster.
"""
if not any(root.dependencies for root in self.graph.get_layer(0)):
logger.debug('no dependencies, nothing to filter')
return
layer = self.graph.get_layer(1)
# Unapply deps that we don't need
for dep in layer:
if not dep.applied:
continue
if dep.envs & envs:
continue
if dep.inherited_envs & envs:
continue
logger.debug('unapply by envs', extra=dict(dep=dep.name, envs=envs))
# without `soft=True` all deps of this dep will be marked as unapplied
# and ignored in Requirement.from_graph.
# It's bad behavior because deps of this dep can be required for other
# deps that won't be unapplied.
if deep:
self.unapply(dep, soft=True)
dep.applied = False
# Some child deps can be unapplied from other child deps, but we need them.
# For example, if we need A, but don't need B, and A and B depends on C,
# then C will be unapplied from B. Let's return B in the graph by reapplying A.
for dep in self.graph:
if not dep.applied:
continue
if not (dep.envs | dep.inherited_envs) & envs:
continue
logger.debug('reapply', extra=dict(dep=dep.name, envs=envs))
if deep:
self.apply(dep, recursive=True)
dep.applied = True
def apply_markers(self, python) -> None:
implementation = python.implementation
if implementation == 'python':
implementation = 'cpython'
# get only base part of python version because `packagings` drops
# all markers for python prereleases
python_version = str(python.version)
match = REX_BASE_VERSION.match(python_version)
if match:
python_version = match.group()
for dep in self.graph:
if not dep.applied:
continue
if not dep.marker:
continue
fit = Marker(str(dep.marker)).evaluate(dict(
python_version=python_version,
implementation_name=implementation,
))
if fit:
continue
self.unapply(dep, soft=True)
dep.applied = False
def _apply_deps(self, deps, debug: bool = False) -> bool:
for dep in deps:
conflict = self.apply(dep)
if conflict is None:
continue
logger.debug('conflict', extra=dict(
dep=conflict.name,
constraint=conflict.constraint,
))
self.graph.conflict = conflict.copy()
if debug:
print(analyze_conflict(
resolver=self,
suffix=str(self.mutator.mutations),
))
# Dep can be partialy applied. Clean it.
self.unapply(dep)
return False
# only if all deps applied
return True
|
testing/kfctl/kfctl_second_apply.py | zhyon404/kubeflow | 9,272 | 12679118 | import logging
import os
import pytest
from kubeflow.kubeflow.ci import kfctl_go_test_utils as kfctl_util
from kubeflow.testing import util
@pytest.mark.skipif(os.getenv("JOB_TYPE") == "presubmit",
reason="test second apply doesn't run in presubmits")
def test_second_apply(record_xml_attribute, app_path):
"""Test that we can run kfctl apply again with error.
Args:
kfctl_path: The path to kfctl binary.
app_path: The app dir of kubeflow deployment.
"""
_, kfctl_path = kfctl_util.get_kfctl_go_build_dir_binary_path()
if not os.path.exists(kfctl_path):
msg = "kfctl Go binary not found: {path}".format(path=kfctl_path)
logging.error(msg)
raise RuntimeError(msg)
util.run([kfctl_path, "apply", "-V", "-f=" + os.path.join(app_path, "tmp.yaml")], cwd=app_path)
|
patroni/postgresql/validator.py | kostiantyn-nemchenko/patroni | 4,759 | 12679125 | <filename>patroni/postgresql/validator.py<gh_stars>1000+
import abc
import logging
import six
from collections import namedtuple
from urllib3.response import HTTPHeaderDict
from ..utils import parse_bool, parse_int, parse_real
logger = logging.getLogger(__name__)
class CaseInsensitiveDict(HTTPHeaderDict):
def add(self, key, val):
self[key] = val
def __getitem__(self, key):
return self._container[key.lower()][1]
def __repr__(self):
return str(dict(self.items()))
def copy(self):
return CaseInsensitiveDict(self._container.values())
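# Editor's note — a minimal usage sketch (not part of the original module):
# GUC names are case-insensitive in postgresql.conf, which is why the validator
# tables below are keyed through this dict, e.g.:
#
#     d = CaseInsensitiveDict({'DateStyle': 1})
#     assert d['datestyle'] == 1 and d['DATESTYLE'] == 1
#     d.add('datestyle', 2)   # add() overwrites instead of appending
#     assert d['DateStyle'] == 2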
class Bool(namedtuple('Bool', 'version_from,version_till')):
@staticmethod
def transform(name, value):
if parse_bool(value) is not None:
return value
logger.warning('Removing bool parameter=%s from the config due to the invalid value=%s', name, value)
@six.add_metaclass(abc.ABCMeta)
class Number(namedtuple('Number', 'version_from,version_till,min_val,max_val,unit')):
@staticmethod
@abc.abstractmethod
def parse(value, unit):
"""parse value"""
def transform(self, name, value):
num_value = self.parse(value, self.unit)
if num_value is not None:
if num_value < self.min_val:
logger.warning('Value=%s of parameter=%s is too low, increasing to %s%s',
value, name, self.min_val, self.unit or '')
return self.min_val
if num_value > self.max_val:
logger.warning('Value=%s of parameter=%s is too big, decreasing to %s%s',
value, name, self.max_val, self.unit or '')
return self.max_val
return value
logger.warning('Removing %s parameter=%s from the config due to the invalid value=%s',
self.__class__.__name__.lower(), name, value)
class Integer(Number):
@staticmethod
def parse(value, unit):
return parse_int(value, unit)
class Real(Number):
@staticmethod
def parse(value, unit):
return parse_real(value, unit)
class Enum(namedtuple('Enum', 'version_from,version_till,possible_values')):
def transform(self, name, value):
if str(value).lower() in self.possible_values:
return value
logger.warning('Removing enum parameter=%s from the config due to the invalid value=%s', name, value)
class EnumBool(Enum):
def transform(self, name, value):
if parse_bool(value) is not None:
return value
return super(EnumBool, self).transform(name, value)
class String(namedtuple('String', 'version_from,version_till')):
@staticmethod
def transform(name, value):
return value
# Format:
# key - parameter name
# value - tuple or multiple tuples if something was changing in GUC across postgres versions
parameters = CaseInsensitiveDict({
'allow_system_table_mods': Bool(90300, None),
'application_name': String(90300, None),
'archive_command': String(90300, None),
'archive_mode': (
Bool(90300, 90500),
EnumBool(90500, None, ('always',))
),
'archive_timeout': Integer(90300, None, 0, 1073741823, 's'),
'array_nulls': Bool(90300, None),
'authentication_timeout': Integer(90300, None, 1, 600, 's'),
'autovacuum': Bool(90300, None),
'autovacuum_analyze_scale_factor': Real(90300, None, 0, 100, None),
'autovacuum_analyze_threshold': Integer(90300, None, 0, 2147483647, None),
'autovacuum_freeze_max_age': Integer(90300, None, 100000, 2000000000, None),
'autovacuum_max_workers': (
Integer(90300, 90600, 1, 8388607, None),
Integer(90600, None, 1, 262143, None)
),
'autovacuum_multixact_freeze_max_age': Integer(90300, None, 10000, 2000000000, None),
'autovacuum_naptime': Integer(90300, None, 1, 2147483, 's'),
'autovacuum_vacuum_cost_delay': (
Integer(90300, 120000, -1, 100, 'ms'),
Real(120000, None, -1, 100, 'ms')
),
'autovacuum_vacuum_cost_limit': Integer(90300, None, -1, 10000, None),
'autovacuum_vacuum_insert_scale_factor': Real(130000, None, 0, 100, None),
'autovacuum_vacuum_insert_threshold': Integer(130000, None, -1, 2147483647, None),
'autovacuum_vacuum_scale_factor': Real(90300, None, 0, 100, None),
'autovacuum_vacuum_threshold': Integer(90300, None, 0, 2147483647, None),
'autovacuum_work_mem': Integer(90400, None, -1, 2147483647, 'kB'),
'backend_flush_after': Integer(90600, None, 0, 256, '8kB'),
'backslash_quote': EnumBool(90300, None, ('safe_encoding',)),
'backtrace_functions': String(130000, None),
'bgwriter_delay': Integer(90300, None, 10, 10000, 'ms'),
'bgwriter_flush_after': Integer(90600, None, 0, 256, '8kB'),
'bgwriter_lru_maxpages': (
Integer(90300, 100000, 0, 1000, None),
Integer(100000, None, 0, 1073741823, None)
),
'bgwriter_lru_multiplier': Real(90300, None, 0, 10, None),
'bonjour': Bool(90300, None),
'bonjour_name': String(90300, None),
'bytea_output': Enum(90300, None, ('escape', 'hex')),
'check_function_bodies': Bool(90300, None),
'checkpoint_completion_target': Real(90300, None, 0, 1, None),
'checkpoint_flush_after': Integer(90600, None, 0, 256, '8kB'),
'checkpoint_segments': Integer(90300, 90500, 1, 2147483647, None),
'checkpoint_timeout': (
Integer(90300, 90600, 30, 3600, 's'),
Integer(90600, None, 30, 86400, 's')
),
'checkpoint_warning': Integer(90300, None, 0, 2147483647, 's'),
'client_connection_check_interval': Integer(140000, None, 0, 2147483647, 'ms'),
'client_encoding': String(90300, None),
'client_min_messages': Enum(90300, None, ('debug5', 'debug4', 'debug3', 'debug2',
'debug1', 'log', 'notice', 'warning', 'error')),
'cluster_name': String(90500, None),
'commit_delay': Integer(90300, None, 0, 100000, None),
'commit_siblings': Integer(90300, None, 0, 1000, None),
'compute_query_id': EnumBool(140000, None, ('auto',)),
'config_file': String(90300, None),
'constraint_exclusion': EnumBool(90300, None, ('partition',)),
'cpu_index_tuple_cost': Real(90300, None, 0, 1.79769e+308, None),
'cpu_operator_cost': Real(90300, None, 0, 1.79769e+308, None),
'cpu_tuple_cost': Real(90300, None, 0, 1.79769e+308, None),
'cursor_tuple_fraction': Real(90300, None, 0, 1, None),
'data_directory': String(90300, None),
'data_sync_retry': Bool(90400, None),
'DateStyle': String(90300, None),
'db_user_namespace': Bool(90300, None),
'deadlock_timeout': Integer(90300, None, 1, 2147483647, 'ms'),
'debug_pretty_print': Bool(90300, None),
'debug_print_parse': Bool(90300, None),
'debug_print_plan': Bool(90300, None),
'debug_print_rewritten': Bool(90300, None),
'default_statistics_target': Integer(90300, None, 1, 10000, None),
'default_table_access_method': String(120000, None),
'default_tablespace': String(90300, None),
'default_text_search_config': String(90300, None),
'default_toast_compression': Enum(140000, None, ('pglz', 'lz4')),
'default_transaction_deferrable': Bool(90300, None),
'default_transaction_isolation': Enum(90300, None, ('serializable', 'repeatable read',
'read committed', 'read uncommitted')),
'default_transaction_read_only': Bool(90300, None),
'default_with_oids': Bool(90300, 120000),
'dynamic_library_path': String(90300, None),
'dynamic_shared_memory_type': (
Enum(90400, 120000, ('posix', 'sysv', 'mmap', 'none')),
Enum(120000, None, ('posix', 'sysv', 'mmap'))
),
'effective_cache_size': Integer(90300, None, 1, 2147483647, '8kB'),
'effective_io_concurrency': Integer(90300, None, 0, 1000, None),
'enable_async_append': Bool(140000, None),
'enable_bitmapscan': Bool(90300, None),
'enable_gathermerge': Bool(100000, None),
'enable_hashagg': Bool(90300, None),
'enable_hashjoin': Bool(90300, None),
'enable_incremental_sort': Bool(130000, None),
'enable_indexonlyscan': Bool(90300, None),
'enable_indexscan': Bool(90300, None),
'enable_material': Bool(90300, None),
'enable_mergejoin': Bool(90300, None),
'enable_nestloop': Bool(90300, None),
'enable_parallel_append': Bool(110000, None),
'enable_parallel_hash': Bool(110000, None),
'enable_partition_pruning': Bool(110000, None),
'enable_partitionwise_aggregate': Bool(110000, None),
'enable_partitionwise_join': Bool(110000, None),
'enable_seqscan': Bool(90300, None),
'enable_sort': Bool(90300, None),
'enable_tidscan': Bool(90300, None),
'escape_string_warning': Bool(90300, None),
'event_source': String(90300, None),
'exit_on_error': Bool(90300, None),
'extension_destdir': String(140000, None),
'external_pid_file': String(90300, None),
'extra_float_digits': Integer(90300, None, -15, 3, None),
'force_parallel_mode': EnumBool(90600, None, ('regress',)),
'from_collapse_limit': Integer(90300, None, 1, 2147483647, None),
'fsync': Bool(90300, None),
'full_page_writes': Bool(90300, None),
'geqo': Bool(90300, None),
'geqo_effort': Integer(90300, None, 1, 10, None),
'geqo_generations': Integer(90300, None, 0, 2147483647, None),
'geqo_pool_size': Integer(90300, None, 0, 2147483647, None),
'geqo_seed': Real(90300, None, 0, 1, None),
'geqo_selection_bias': Real(90300, None, 1.5, 2, None),
'geqo_threshold': Integer(90300, None, 2, 2147483647, None),
'gin_fuzzy_search_limit': Integer(90300, None, 0, 2147483647, None),
'gin_pending_list_limit': Integer(90500, None, 64, 2147483647, 'kB'),
'hash_mem_multiplier': Real(130000, None, 1, 1000, None),
'hba_file': String(90300, None),
'hot_standby': Bool(90300, None),
'hot_standby_feedback': Bool(90300, None),
'huge_pages': EnumBool(90400, None, ('try',)),
'huge_page_size': Integer(140000, None, 0, 2147483647, 'kB'),
'ident_file': String(90300, None),
'idle_in_transaction_session_timeout': Integer(90600, None, 0, 2147483647, 'ms'),
'idle_session_timeout': Integer(140000, None, 0, 2147483647, 'ms'),
'ignore_checksum_failure': Bool(90300, None),
'ignore_invalid_pages': Bool(130000, None),
'ignore_system_indexes': Bool(90300, None),
'IntervalStyle': Enum(90300, None, ('postgres', 'postgres_verbose', 'sql_standard', 'iso_8601')),
'jit': Bool(110000, None),
'jit_above_cost': Real(110000, None, -1, 1.79769e+308, None),
'jit_debugging_support': Bool(110000, None),
'jit_dump_bitcode': Bool(110000, None),
'jit_expressions': Bool(110000, None),
'jit_inline_above_cost': Real(110000, None, -1, 1.79769e+308, None),
'jit_optimize_above_cost': Real(110000, None, -1, 1.79769e+308, None),
'jit_profiling_support': Bool(110000, None),
'jit_provider': String(110000, None),
'jit_tuple_deforming': Bool(110000, None),
'join_collapse_limit': Integer(90300, None, 1, 2147483647, None),
'krb_caseins_users': Bool(90300, None),
'krb_server_keyfile': String(90300, None),
'krb_srvname': String(90300, 90400),
'lc_messages': String(90300, None),
'lc_monetary': String(90300, None),
'lc_numeric': String(90300, None),
'lc_time': String(90300, None),
'listen_addresses': String(90300, None),
'local_preload_libraries': String(90300, None),
'lock_timeout': Integer(90300, None, 0, 2147483647, 'ms'),
'lo_compat_privileges': Bool(90300, None),
'log_autovacuum_min_duration': Integer(90300, None, -1, 2147483647, 'ms'),
'log_checkpoints': Bool(90300, None),
'log_connections': Bool(90300, None),
'log_destination': String(90300, None),
'log_directory': String(90300, None),
'log_disconnections': Bool(90300, None),
'log_duration': Bool(90300, None),
'log_error_verbosity': Enum(90300, None, ('terse', 'default', 'verbose')),
'log_executor_stats': Bool(90300, None),
'log_file_mode': Integer(90300, None, 0, 511, None),
'log_filename': String(90300, None),
'logging_collector': Bool(90300, None),
'log_hostname': Bool(90300, None),
'logical_decoding_work_mem': Integer(130000, None, 64, 2147483647, 'kB'),
'log_line_prefix': String(90300, None),
'log_lock_waits': Bool(90300, None),
'log_min_duration_sample': Integer(130000, None, -1, 2147483647, 'ms'),
'log_min_duration_statement': Integer(90300, None, -1, 2147483647, 'ms'),
'log_min_error_statement': Enum(90300, None, ('debug5', 'debug4', 'debug3', 'debug2', 'debug1', 'info',
'notice', 'warning', 'error', 'log', 'fatal', 'panic')),
'log_min_messages': Enum(90300, None, ('debug5', 'debug4', 'debug3', 'debug2', 'debug1', 'info',
'notice', 'warning', 'error', 'log', 'fatal', 'panic')),
'log_parameter_max_length': Integer(130000, None, -1, 1073741823, 'B'),
'log_parameter_max_length_on_error': Integer(130000, None, -1, 1073741823, 'B'),
'log_parser_stats': Bool(90300, None),
'log_planner_stats': Bool(90300, None),
'log_recovery_conflict_waits': Bool(140000, None),
'log_replication_commands': Bool(90500, None),
'log_rotation_age': Integer(90300, None, 0, 35791394, 'min'),
'log_rotation_size': Integer(90300, None, 0, 2097151, 'kB'),
'log_statement': Enum(90300, None, ('none', 'ddl', 'mod', 'all')),
'log_statement_sample_rate': Real(130000, None, 0, 1, None),
'log_statement_stats': Bool(90300, None),
'log_temp_files': Integer(90300, None, -1, 2147483647, 'kB'),
'log_timezone': String(90300, None),
'log_transaction_sample_rate': Real(120000, None, 0, 1, None),
'log_truncate_on_rotation': Bool(90300, None),
'maintenance_io_concurrency': Integer(130000, None, 0, 1000, None),
'maintenance_work_mem': Integer(90300, None, 1024, 2147483647, 'kB'),
'max_connections': (
Integer(90300, 90600, 1, 8388607, None),
Integer(90600, None, 1, 262143, None)
),
'max_files_per_process': (
Integer(90300, 130000, 25, 2147483647, None),
Integer(130000, None, 64, 2147483647, None)
),
'max_locks_per_transaction': Integer(90300, None, 10, 2147483647, None),
'max_logical_replication_workers': Integer(100000, None, 0, 262143, None),
'max_parallel_maintenance_workers': Integer(110000, None, 0, 1024, None),
'max_parallel_workers': Integer(100000, None, 0, 1024, None),
'max_parallel_workers_per_gather': Integer(90600, None, 0, 1024, None),
'max_pred_locks_per_page': Integer(100000, None, 0, 2147483647, None),
'max_pred_locks_per_relation': Integer(100000, None, -2147483648, 2147483647, None),
'max_pred_locks_per_transaction': Integer(90300, None, 10, 2147483647, None),
'max_prepared_transactions': (
Integer(90300, 90600, 0, 8388607, None),
Integer(90600, None, 0, 262143, None)
),
'max_replication_slots': (
Integer(90400, 90600, 0, 8388607, None),
Integer(90600, None, 0, 262143, None)
),
'max_slot_wal_keep_size': Integer(130000, None, -1, 2147483647, 'MB'),
'max_stack_depth': Integer(90300, None, 100, 2147483647, 'kB'),
'max_standby_archive_delay': Integer(90300, None, -1, 2147483647, 'ms'),
'max_standby_streaming_delay': Integer(90300, None, -1, 2147483647, 'ms'),
'max_sync_workers_per_subscription': Integer(100000, None, 0, 262143, None),
'max_wal_senders': (
Integer(90300, 90600, 0, 8388607, None),
Integer(90600, None, 0, 262143, None)
),
'max_wal_size': (
Integer(90500, 100000, 2, 2147483647, '16MB'),
Integer(100000, None, 2, 2147483647, 'MB')
),
'max_worker_processes': (
Integer(90400, 90600, 1, 8388607, None),
Integer(90600, None, 0, 262143, None)
),
'min_dynamic_shared_memory': Integer(140000, None, 0, 2147483647, 'MB'),
'min_parallel_index_scan_size': Integer(100000, None, 0, 715827882, '8kB'),
'min_parallel_relation_size': Integer(90600, 100000, 0, 715827882, '8kB'),
'min_parallel_table_scan_size': Integer(100000, None, 0, 715827882, '8kB'),
'min_wal_size': (
Integer(90500, 100000, 2, 2147483647, '16MB'),
Integer(100000, None, 2, 2147483647, 'MB')
),
'old_snapshot_threshold': Integer(90600, None, -1, 86400, 'min'),
'operator_precedence_warning': Bool(90500, 140000),
'parallel_leader_participation': Bool(110000, None),
'parallel_setup_cost': Real(90600, None, 0, 1.79769e+308, None),
'parallel_tuple_cost': Real(90600, None, 0, 1.79769e+308, None),
'password_encryption': (
Bool(90300, 100000),
Enum(100000, None, ('md5', 'scram-sha-256'))
),
'plan_cache_mode': Enum(120000, None, ('auto', 'force_generic_plan', 'force_custom_plan')),
'port': Integer(90300, None, 1, 65535, None),
'post_auth_delay': Integer(90300, None, 0, 2147, 's'),
'pre_auth_delay': Integer(90300, None, 0, 60, 's'),
'quote_all_identifiers': Bool(90300, None),
'random_page_cost': Real(90300, None, 0, 1.79769e+308, None),
'recovery_init_sync_method': Enum(140000, None, ('fsync', 'syncfs')),
'remove_temp_files_after_crash': Bool(140000, None),
'replacement_sort_tuples': Integer(90600, 110000, 0, 2147483647, None),
'restart_after_crash': Bool(90300, None),
'row_security': Bool(90500, None),
'search_path': String(90300, None),
'seq_page_cost': Real(90300, None, 0, 1.79769e+308, None),
'session_preload_libraries': String(90400, None),
'session_replication_role': Enum(90300, None, ('origin', 'replica', 'local')),
'shared_buffers': Integer(90300, None, 16, 1073741823, '8kB'),
'shared_memory_type': Enum(120000, None, ('sysv', 'mmap')),
'shared_preload_libraries': String(90300, None),
'sql_inheritance': Bool(90300, 100000),
'ssl': Bool(90300, None),
'ssl_ca_file': String(90300, None),
'ssl_cert_file': String(90300, None),
'ssl_ciphers': String(90300, None),
'ssl_crl_dir': String(140000, None),
'ssl_crl_file': String(90300, None),
'ssl_dh_params_file': String(100000, None),
'ssl_ecdh_curve': String(90400, None),
'ssl_key_file': String(90300, None),
'ssl_max_protocol_version': Enum(120000, None, ('', 'tlsv1', 'tlsv1.1', 'tlsv1.2', 'tlsv1.3')),
'ssl_min_protocol_version': Enum(120000, None, ('tlsv1', 'tlsv1.1', 'tlsv1.2', 'tlsv1.3')),
'ssl_passphrase_command': String(110000, None),
'ssl_passphrase_command_supports_reload': Bool(110000, None),
'ssl_prefer_server_ciphers': Bool(90400, None),
'ssl_renegotiation_limit': Integer(90300, 90500, 0, 2147483647, 'kB'),
'standard_conforming_strings': Bool(90300, None),
'statement_timeout': Integer(90300, None, 0, 2147483647, 'ms'),
'stats_temp_directory': String(90300, None),
'superuser_reserved_connections': (
Integer(90300, 90600, 0, 8388607, None),
Integer(90600, None, 0, 262143, None)
),
'synchronize_seqscans': Bool(90300, None),
'synchronous_commit': (
EnumBool(90300, 90600, ('local', 'remote_write')),
EnumBool(90600, None, ('local', 'remote_write', 'remote_apply'))
),
'synchronous_standby_names': String(90300, None),
'syslog_facility': Enum(90300, None, ('local0', 'local1', 'local2', 'local3',
'local4', 'local5', 'local6', 'local7')),
'syslog_ident': String(90300, None),
'syslog_sequence_numbers': Bool(90600, None),
'syslog_split_messages': Bool(90600, None),
'tcp_keepalives_count': Integer(90300, None, 0, 2147483647, None),
'tcp_keepalives_idle': Integer(90300, None, 0, 2147483647, 's'),
'tcp_keepalives_interval': Integer(90300, None, 0, 2147483647, 's'),
'tcp_user_timeout': Integer(120000, None, 0, 2147483647, 'ms'),
'temp_buffers': Integer(90300, None, 100, 1073741823, '8kB'),
'temp_file_limit': Integer(90300, None, -1, 2147483647, 'kB'),
'temp_tablespaces': String(90300, None),
'TimeZone': String(90300, None),
'timezone_abbreviations': String(90300, None),
'trace_notify': Bool(90300, None),
'trace_recovery_messages': Enum(90300, None, ('debug5', 'debug4', 'debug3', 'debug2',
'debug1', 'log', 'notice', 'warning', 'error')),
'trace_sort': Bool(90300, None),
'track_activities': Bool(90300, None),
'track_activity_query_size': (
Integer(90300, 110000, 100, 102400, None),
Integer(110000, 130000, 100, 102400, 'B'),
Integer(130000, None, 100, 1048576, 'B')
),
'track_commit_timestamp': Bool(90500, None),
'track_counts': Bool(90300, None),
'track_functions': Enum(90300, None, ('none', 'pl', 'all')),
'track_io_timing': Bool(90300, None),
'track_wal_io_timing': Bool(140000, None),
'transaction_deferrable': Bool(90300, None),
'transaction_isolation': Enum(90300, None, ('serializable', 'repeatable read',
'read committed', 'read uncommitted')),
'transaction_read_only': Bool(90300, None),
'transform_null_equals': Bool(90300, None),
'unix_socket_directories': String(90300, None),
'unix_socket_group': String(90300, None),
'unix_socket_permissions': Integer(90300, None, 0, 511, None),
'update_process_title': Bool(90300, None),
'vacuum_cleanup_index_scale_factor': Real(110000, 140000, 0, 1e+10, None),
'vacuum_cost_delay': (
Integer(90300, 120000, 0, 100, 'ms'),
Real(120000, None, 0, 100, 'ms')
),
'vacuum_cost_limit': Integer(90300, None, 1, 10000, None),
'vacuum_cost_page_dirty': Integer(90300, None, 0, 10000, None),
'vacuum_cost_page_hit': Integer(90300, None, 0, 10000, None),
'vacuum_cost_page_miss': Integer(90300, None, 0, 10000, None),
'vacuum_defer_cleanup_age': Integer(90300, None, 0, 1000000, None),
'vacuum_failsafe_age': Integer(140000, None, 0, 2100000000, None),
'vacuum_freeze_min_age': Integer(90300, None, 0, 1000000000, None),
'vacuum_freeze_table_age': Integer(90300, None, 0, 2000000000, None),
'vacuum_multixact_failsafe_age': Integer(140000, None, 0, 2100000000, None),
'vacuum_multixact_freeze_min_age': Integer(90300, None, 0, 1000000000, None),
'vacuum_multixact_freeze_table_age': Integer(90300, None, 0, 2000000000, None),
'wal_buffers': Integer(90300, None, -1, 262143, '8kB'),
'wal_compression': Bool(90500, None),
'wal_consistency_checking': String(100000, None),
'wal_init_zero': Bool(120000, None),
'wal_keep_segments': Integer(90300, 130000, 0, 2147483647, None),
'wal_keep_size': Integer(130000, None, 0, 2147483647, 'MB'),
'wal_level': (
Enum(90300, 90400, ('minimal', 'archive', 'hot_standby')),
Enum(90400, 90600, ('minimal', 'archive', 'hot_standby', 'logical')),
Enum(90600, None, ('minimal', 'replica', 'logical'))
),
'wal_log_hints': Bool(90400, None),
'wal_receiver_create_temp_slot': Bool(130000, None),
'wal_receiver_status_interval': Integer(90300, None, 0, 2147483, 's'),
'wal_receiver_timeout': Integer(90300, None, 0, 2147483647, 'ms'),
'wal_recycle': Bool(120000, None),
'wal_retrieve_retry_interval': Integer(90500, None, 1, 2147483647, 'ms'),
'wal_sender_timeout': Integer(90300, None, 0, 2147483647, 'ms'),
'wal_skip_threshold': Integer(130000, None, 0, 2147483647, 'kB'),
'wal_sync_method': Enum(90300, None, ('fsync', 'fdatasync', 'open_sync', 'open_datasync')),
'wal_writer_delay': Integer(90300, None, 1, 10000, 'ms'),
'wal_writer_flush_after': Integer(90600, None, 0, 2147483647, '8kB'),
'work_mem': Integer(90300, None, 64, 2147483647, 'kB'),
'xmlbinary': Enum(90300, None, ('base64', 'hex')),
'xmloption': Enum(90300, None, ('content', 'document')),
'zero_damaged_pages': Bool(90300, None)
})
recovery_parameters = CaseInsensitiveDict({
'archive_cleanup_command': String(90300, None),
'pause_at_recovery_target': Bool(90300, 90500),
'primary_conninfo': String(90300, None),
'primary_slot_name': String(90400, None),
'promote_trigger_file': String(120000, None),
'recovery_end_command': String(90300, None),
'recovery_min_apply_delay': Integer(90400, None, 0, 2147483647, 'ms'),
'recovery_target': Enum(90400, None, ('immediate', '')),
'recovery_target_action': Enum(90500, None, ('pause', 'promote', 'shutdown')),
'recovery_target_inclusive': Bool(90300, None),
'recovery_target_lsn': String(100000, None),
'recovery_target_name': String(90400, None),
'recovery_target_time': String(90300, None),
'recovery_target_timeline': String(90300, None),
'recovery_target_xid': String(90300, None),
'restore_command': String(90300, None),
'standby_mode': Bool(90300, 120000),
'trigger_file': String(90300, 120000)
})
def _transform_parameter_value(validators, version, name, value):
validators = validators.get(name)
if validators:
for validator in (validators if isinstance(validators[0], tuple) else [validators]):
if version >= validator.version_from and\
(validator.version_till is None or version < validator.version_till):
return validator.transform(name, value)
logger.warning('Removing unexpected parameter=%s value=%s from the config', name, value)
def transform_postgresql_parameter_value(version, name, value):
if '.' in name:
return value
if name in recovery_parameters:
return None
return _transform_parameter_value(parameters, version, name, value)
def transform_recovery_parameter_value(version, name, value):
return _transform_parameter_value(recovery_parameters, version, name, value)
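# Editor's note — a hedged usage sketch (not part of the original module)
# showing how the transform helpers above behave:
#
#     transform_postgresql_parameter_value(130000, 'max_connections', '100')
#     # -> '100' (the PG 13 Integer validator accepts the value)
#
#     transform_postgresql_parameter_value(90300, 'wal_level', 'replica')
#     # -> None ('replica' is not a valid wal_level before 9.6; a warning is logged)
#
#     transform_postgresql_parameter_value(120000, 'restore_command', 'cp ...')
#     # -> None (recovery parameters are filtered out of the main GUC set)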
|
elephant/test/test_utils.py | Moritz-Alexander-Kern/elephant-1 | 121 | 12679153 | <reponame>Moritz-Alexander-Kern/elephant-1<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
Unit tests for the elephant.utils module
"""
import unittest
import neo
import numpy as np
import quantities as pq
from elephant import utils
from numpy.testing import assert_array_equal
class TestUtils(unittest.TestCase):
def test_check_neo_consistency(self):
self.assertRaises(TypeError,
utils.check_neo_consistency,
[], object_type=neo.SpikeTrain)
self.assertRaises(TypeError,
utils.check_neo_consistency,
[neo.SpikeTrain([1]*pq.s, t_stop=2*pq.s),
np.arange(2)], object_type=neo.SpikeTrain)
self.assertRaises(ValueError,
utils.check_neo_consistency,
[neo.SpikeTrain([1]*pq.s,
t_start=1*pq.s,
t_stop=2*pq.s),
neo.SpikeTrain([1]*pq.s,
t_start=0*pq.s,
t_stop=2*pq.s)],
object_type=neo.SpikeTrain)
self.assertRaises(ValueError,
utils.check_neo_consistency,
[neo.SpikeTrain([1]*pq.s, t_stop=2*pq.s),
neo.SpikeTrain([1]*pq.s, t_stop=3*pq.s)],
object_type=neo.SpikeTrain)
self.assertRaises(ValueError,
utils.check_neo_consistency,
[neo.SpikeTrain([1]*pq.ms, t_stop=2000*pq.ms),
neo.SpikeTrain([1]*pq.s, t_stop=2*pq.s)],
object_type=neo.SpikeTrain)
def test_round_binning_errors(self):
with self.assertWarns(UserWarning):
n_bins = utils.round_binning_errors(0.999999, tolerance=1e-6)
self.assertEqual(n_bins, 1)
self.assertEqual(utils.round_binning_errors(0.999999, tolerance=None),
0)
array = np.array([0, 0.7, 1 - 1e-8, 1 - 1e-9])
with self.assertWarns(UserWarning):
corrected = utils.round_binning_errors(array.copy())
assert_array_equal(corrected, [0, 0, 1, 1])
assert_array_equal(
utils.round_binning_errors(array.copy(), tolerance=None),
[0, 0, 0, 0])
if __name__ == '__main__':
unittest.main()
|
tests/medium/plugins/test_tcp_output.py | mickotronic/eventgen | 305 | 12679155 | <gh_stars>100-1000
#!/usr/bin/env python3
# encoding: utf-8
import os
import sys
from mock import MagicMock, patch
from splunk_eventgen.__main__ import parse_args
from splunk_eventgen.eventgen_core import EventGenerator
from splunk_eventgen.lib.plugins.output.tcpout import TcpOutputPlugin
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
class TestTcpOutputPlugin(object):
def test_output_data_to_tcp_port(self):
configfile = "tests/sample_eventgen_conf/medium_test/eventgen.conf.tcpoutput"
testargs = ["eventgen", "generate", configfile]
with patch.object(sys, "argv", testargs):
pargs = parse_args()
assert pargs.subcommand == "generate"
assert pargs.configfile == configfile
eventgen = EventGenerator(args=pargs)
with patch("socket.socket") as mock_requests:
sample = MagicMock()
tcpoutput = TcpOutputPlugin(sample)
mock_requests.send = MagicMock()
mock_requests.connect = MagicMock()
post_resp = MagicMock()
post_resp.raise_for_status = MagicMock()
mock_requests.post.return_value = MagicMock()
mock_requests.connect.return_value = True
eventgen.start()
tcpoutput.s.connect.assert_called_with(("127.0.0.1", 9999))
assert tcpoutput.s.send.call_count == 5
|
ikalog/inputs/input.py | fetus-hina/IkaLog | 285 | 12679161 | <filename>ikalog/inputs/input.py<gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import threading
import os
import time
import cv2
from ikalog.utils import IkaUtils
from ikalog.inputs.filters import OffsetFilter
class VideoInput(object):
##
# cap_optimal_input_resolution
# tells if the device feeds the image in an optimal resolution
# (720p or 1080p). If True, the input will perform a strict check
# on the input resolution.
cap_optimal_input_resolution = True
##
# cap_recorded_video
cap_recorded_video = False
out_width = 1280
out_height = 720
##
# _initialize_driver_func()
# Handler for source-specific initialization.
# @param self the object
def _initialize_driver_func(self):
raise
##
# _cleanup_driver_func()
# Handler for source-specific cleanup.
# @param self the object
def _cleanup_driver_func(self):
raise
##
# _select_device_by_index_func()
# @param self the object
# @param index the device_id to be selected.
def _select_device_by_index_func(self, index):
raise
##
# _select_device_by_name_func()
# @param self the object
# @param name the device name to be selected.
def _select_device_by_name_func(self, name):
raise
##
# _is_active_func()
# @param self the object
# @return True if the input is active. Otherwise False.
def _is_active_func(self):
raise
##
# _enumerate_sources_func()
# @param self the object
# @return List of enumerated devices.
def _enumerate_sources_func(self):
raise
##
# _read_frame_func()
# @param self the object
# @return the current frame of the input source.
def _read_frame_func(self):
raise
##
# is_active()
# Returns the state of the input source.
# @return True if the input source is active. Otherwise False.
def is_active(self):
return self._is_active_func()
##
# enumerate_sources()
# Returns the list of enumerated devices on the system.
# @return The list of enumerated devices.
def enumerate_sources(self):
# FIXME: Cache.
return self._enumerate_sources_func()
##
# select_source(self,index=None,name=None)
# @param index index(int) of device to be selected.
# @param name name(str) of device to be selected.
# @return True if the device is initialized and ready. Otherwise False.
def select_source(self, index=None, name=None):
by_index = (index is not None)
by_name = (name is not None)
assert by_index or by_name
assert not (by_index and by_name)
if by_index:
r = self._select_device_by_index_func(index)
else:
r = self._select_device_by_name_func(name)
self.set_frame_rate(None)  # Default framerate
return r
##
#
def _skip_frame_realtime(self):
current_tick = self.get_tick()
last_tick = self.last_tick
next_tick = current_tick
if self.fps_requested is not None:
next_tick2 = int(last_tick + (1000 / self.fps_requested * 2))
if current_tick < next_tick2:
next_tick = int(last_tick + (1000 / self.fps_requested))
while current_tick < next_tick:
time.sleep(0.05)
current_tick = self.get_tick()
return next_tick
def _skip_frame_recorded(self):
if self.frame_skip_rt:
tick = self.get_tick()
elif self.fps_requested is not None:
tick = self.get_current_timestamp() + (1000 / self.fps_requested)
else:
return
video_msec = self.get_current_timestamp()
skip = video_msec < tick
while skip:
frame_ = self._read_frame_func()
video_msec = self.get_current_timestamp()
skip = video_msec < tick
return None
##
# read_frame(self)
#
# read a frame from the input. The device should have been
# activated by select_device() method.
#
# @return Image if capture succeeded. Otherwise None.
def read_frame(self):
try:
self.lock.acquire()
if not self.is_active():
return None
next_tick = None
img = self._read_frame_func()
# Skip some frames for performance.
try:
if self.cap_recorded_video:
self._skip_frame_recorded()
else:
next_tick = self._skip_frame_realtime()
except EOFError:
pass # EOFError should be captured by the next cycle.
finally:
self.lock.release()
if img is None:
return None
if self.cap_optimal_input_resolution:
res720p = (img.shape[0] == 720) and (img.shape[1] == 1280)
res1080p = (img.shape[0] == 1080) and (img.shape[1] == 1920)
if not (res720p or res1080p):
IkaUtils.dprint(
'Invalid input resolution (%dx%d). Acceptable res: 1280x720 or 1920x1080' %
(img.shape[1], img.shape[0])
)
return None
if next_tick is not None:
self.last_tick = next_tick
# need stretch?
stretch = (
img.shape[0] != self.output_geometry[0] or
img.shape[1] != self.output_geometry[1])
if stretch:
img = cv2.resize(
img,
(self.output_geometry[1], self.output_geometry[0]),
# fixme
)
img = self._offset_filter.execute(img)
return img
def _get_current_timestamp_func(self):
return self.get_tick()
##
# reset(self)
#
# Reset the source plugin. It still tries to keep the current device active.
def reset_tick(self):
self._base_tick = int(time.time() * 1000)
self.last_tick = 0
def get_tick(self):
return int(time.time() * 1000 - self._base_tick)
def reset(self):
pass
##
# get_current_timestamp(self)
#
# Get current timestamp information.
# @return Timestamp (in msec)
def get_current_timestamp(self):
return self._get_current_timestamp_func()
def get_epoch_time(self):
return None
def set_pos_msec(self, pos_msec):
pass
# Returns the source file if the input is from a file. Otherwise None.
def get_source_file(self):
return None
# Puts file_path to be processed and returns True,
# otherwise returns False if the instance does not support this method.
def put_source_file(self, file_path):
return False
# Callback on EOFError. Returns True if a next data source is available.
def on_eof(self):
return False
##
# set_frame_rate(self, fps=None, realtime=False)
#
# Specify input frame rate desired.
# If realtime mode is enabled, the plugin will drop
# frames to perform real-time playback.
#
# @param fps frames per second to be read
# @param realtime Realtime mode if True.
def set_frame_rate(self, fps=None, realtime=False):
self.fps_requested = fps
self.frame_skip_rt = realtime
def set_offset(self, offset=None):
if offset is None:
self._offset_filter.disable()
else:
assert len(offset) == 2
self._offset_filter.offset = offset
self._offset_filter.enable()
##
# Backward compatibility.
def start_camera(self, source):
IkaUtils.dprint(
'%s: start_camera() is deprecated. Use select_source().' % self)
IkaUtils.dprint(' select_source(index=1)')
IkaUtils.dprint(' select_source(name="my capture device")')
raise Exception()
##
# Constructor.
#
def __init__(self):
self.output_geometry = (720, 1280)
self.effective_lines = 720
self.lock = threading.Lock()
self.is_realtime = True
self.reset()
self.reset_tick()
self._offset_filter = OffsetFilter(self)
self.set_frame_rate()
self._initialize_driver_func()
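# Editor's note — a hypothetical minimal subclass (not part of IkaLog) showing
# how the *_func driver hooks above are meant to be filled in:
#
#     import numpy as np
#
#     class DummyInput(VideoInput):
#         cap_optimal_input_resolution = False
#         def _initialize_driver_func(self): self._active = False
#         def _cleanup_driver_func(self): self._active = False
#         def _select_device_by_index_func(self, index): self._active = True
#         def _select_device_by_name_func(self, name): self._active = True
#         def _is_active_func(self): return self._active
#         def _enumerate_sources_func(self): return ['dummy']
#         def _read_frame_func(self): return np.zeros((720, 1280, 3), np.uint8)
#
#     source = DummyInput()
#     source.select_source(index=0)
#     frame = source.read_frame()   # 720x1280 black frame, offset filter applied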
|
tests/test_rangeset.py | runfalk/spans | 123 | 12679164 | import pickle
import pytest
from spans import \
daterangeset, datetimerangeset, floatrange, floatrangeset, intrange, \
intrangeset, strrangeset, timedeltarangeset
def test_empty():
assert not intrangeset([])
def test_non_empty():
assert intrangeset([intrange(1, 5)])
@pytest.mark.parametrize("rangeset, span", [
(intrangeset([intrange(1, 5), intrange(10, 15)]), intrange(1, 15)),
(intrangeset([]), intrange.empty()),
])
def test_span(rangeset, span):
assert rangeset.span() == span
def test_iteration():
ranges = [intrange(1, 5), intrange(10, 15)]
assert list(intrangeset(ranges)) == ranges
def test_copy():
rset = intrangeset([intrange(1, 5), intrange(10, 15)])
rcopy = rset.copy()
assert list(rset) == list(rcopy)
assert rset._list is not rcopy._list
@pytest.mark.parametrize("value", [
intrange(1, 5),
intrange(5, 10),
intrange.empty(),
1,
5,
])
def test_contains(value):
assert intrangeset([intrange(1, 10)]).contains(value)
@pytest.mark.parametrize("value", [
intrange(5, 15),
10,
])
def test_not_contains(value):
assert not intrangeset([intrange(1, 10)]).contains(value)
@pytest.mark.parametrize("rset", [
intrangeset([]),
intrangeset([intrange(1, 5)]),
])
def test_contains_empty(rset):
assert rset.contains(intrange.empty())
def test_contains_type_check():
with pytest.raises(ValueError):
intrangeset([]).contains(1.0)
with pytest.raises(ValueError):
intrangeset([]).contains(floatrangeset([]))
def test_add():
rset = intrangeset([intrange(1, 15)])
rset.add(intrange(5, 15))
assert list(rset) == [intrange(1, 15)]
with pytest.raises(TypeError):
rset.add(floatrange(1.0))
def test_remove():
rset = intrangeset([intrange(upper=1), intrange(5)])
rset.remove(intrange(10, 15))
assert rset == intrangeset([intrange(upper=1), intrange(5, 10), intrange(15)])
# Test deletion of empty set
temp = rset.copy()
temp.remove(intrange.empty())
assert rset == temp
# Test total deletion
rset.remove(intrange())
assert rset == intrangeset([])
# Test deletion on empty set
temp = intrangeset([])
temp.remove(intrange(1, 5))
assert temp == intrangeset([])
with pytest.raises(TypeError):
rset.remove(floatrange(1.0))
def test_invert():
rset = intrangeset([intrange(1, 5), intrange(10, 15)])
rset_inv = intrangeset([intrange(upper=1), intrange(5, 10), intrange(15)])
assert ~rset == rset_inv
assert rset == ~~rset
def test_union():
a = intrangeset([intrange(1, 5), intrange(20, 30)])
b = intrangeset([intrange(5, 10), intrange(20, 100)])
union = [intrange(1, 10), intrange(20, 100)]
assert list(a.union(b)) == union
assert list(a | b) == union
with pytest.raises(TypeError):
intrangeset([]).union(intrange())
assert intrangeset([]).__or__(intrange()) is NotImplemented
def test_difference():
a = intrangeset([intrange(1, 5), intrange(20, 30)])
b = intrangeset([intrange(5, 10), intrange(20, 100)])
difference = [intrange(1, 5)]
assert list(a.difference(b)) == difference
assert list(a - b) == difference
with pytest.raises(TypeError):
intrangeset([]).difference(intrange())
assert intrangeset([]).__sub__(intrange()) is NotImplemented
def test_intersection():
a = intrangeset([intrange(1, 5), intrange(20, 30)])
b = intrangeset([intrange(5, 10), intrange(20, 100)])
intersection = [intrange(20, 30)]
assert list(a.intersection(b)) == intersection
assert list(a & b) == intersection
assert not intrangeset([intrange(1, 5)]).intersection(
intrangeset([intrange(5, 10)]))
with pytest.raises(TypeError):
intrangeset([]).intersection(intrange())
assert intrangeset([]).__and__(intrange()) is NotImplemented
def test_values():
values = intrangeset([intrange(1, 5), intrange(10, 15)]).values()
assert list(values) == list(range(1, 5)) + list(range(10, 15))
@pytest.mark.parametrize("span, repr_str", [
(intrangeset([]), "intrangeset([])"),
(intrangeset([intrange(1)]), "intrangeset([intrange(1)])"),
])
def test_repr(span, repr_str):
assert repr(span) == repr_str
def test_pickling():
span = intrangeset([intrange(1, 10), intrange(20, 30)])
assert span == pickle.loads(pickle.dumps(span))
def test_equal():
range_a = intrange(1, 5)
range_b = intrange(10, 15)
assert intrangeset([range_a, range_b]) == intrangeset([range_a, range_b])
assert not intrangeset([range_a, range_b]) == intrangeset([range_a])
assert not intrangeset([range_a]) == "foo"
def test_less_than():
range_a = intrange(1, 5)
range_b = intrange(10, 15)
assert not intrangeset([range_a, range_b]) < intrangeset([range_a])
assert intrangeset([range_a, range_b]) < intrangeset([range_b])
assert not intrangeset([range_a, range_b]) <= intrangeset([range_a])
assert not intrangeset([range_a]) == "foo"
def test_greater_than():
range_a = intrange(1, 5)
range_b = intrange(10, 15)
assert intrangeset([range_a, range_b]) > intrangeset([range_a])
assert not intrangeset([range_a, range_b]) > intrangeset([range_b])
assert intrangeset([range_b]) > intrangeset([range_a, range_b])
assert intrangeset([range_a, range_b]) >= intrangeset([range_a])
def test_bug3_intersection():
"""
`Bug #3 <https://github.com/runfalk/spans/issues/3>`_
"""
range_a = intrange(1, 5)
range_b = intrange(5, 10)
range_c = intrange(10, 15)
rangeset_a = intrangeset([range_a, range_c])
rangeset_b = intrangeset([range_b])
rangeset_c = intrangeset([range_c])
rangeset_empty = intrangeset([])
assert rangeset_a.intersection(rangeset_b, rangeset_c) == rangeset_empty
def test_bug4_empty_set_iteration():
"""
`Bug #4 <https://github.com/runfalk/spans/issues/4>`_
"""
assert list(intrangeset([])) == []
@pytest.mark.parametrize("cls", [
daterangeset,
datetimerangeset,
intrangeset,
floatrangeset,
strrangeset,
timedeltarangeset,
])
def test_bug10_missing_slots_in_cls_hierarchy(cls):
"""
`Bug #10 <https://github.com/runfalk/spans/issues/10>`_
"""
for c in cls.mro():
if c is object:
continue
assert hasattr(c, "__slots__")
def test_bug14_pickle_not_working_for_rangesets():
"""
`Bug #14 <https://github.com/runfalk/spans/issues/14>`_
"""
# If __getstate__ returns a falsy value __setstate__ will not be called
# when loading the value again, which is why this bug occured
range_set = floatrangeset([])
pickled = pickle.dumps(range_set, protocol=1)
pickle.loads(pickled)
assert range_set == pickle.loads(pickled)
# We need to ensure that code pickled using protocol 1 by spans versions
# before 1.1.0 still loads
old_data = (
b"ccopy_reg\n_reconstructor\nq\x00(cspans.settypes\nfloatrangeset\n"
b"q\x01c__builtin__\nobject\nq\x02Ntq\x03Rq\x04]q\x05h\x00(cspans."
b"types\nfloatrange\nq\x06h\x02Ntq\x07Rq\x08}q\tX\x06\x00\x00\x00_"
b"rangeq\nh\x00(cspans.types\n_internal_range\nq\x0bc__builtin__\n"
b"tuple\nq\x0c(G?\xf0\x00\x00\x00\x00\x00\x00NI01\nI00\nI00\ntq\rtq"
b"\x0eRq\x0fsbab."
)
assert pickle.loads(old_data) == floatrangeset([floatrange(1.0)])
|
ch07/07_07.py | leeseedong/book-cryptocurrency | 121 | 12679166 | <reponame>leeseedong/book-cryptocurrency<gh_stars>100-1000
import pybithumb
import numpy as np
df = pybithumb.get_ohlcv("BTC")
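# Volatility-breakout setup (descriptive comment added for clarity): today's
# entry price is the open plus half of the *previous* day's high-low range
# (hence the shift(1) below); 'ror' is the per-day rate of return, counted
# only on days where the high actually broke above the target.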
df['range'] = (df['high'] - df['low']) * 0.5
df['target'] = df['open'] + df['range'].shift(1)
df['ror'] = np.where(df['high'] > df['target'],
df['close'] / df['target'],
1)
df.to_excel("trade.xlsx") |
python/math_test.py | honux77/practice | 152 | 12679178 | import math
def fact(x):
if x == 1:
return 1
else:
return x * fact(x-1);
def mysin(x):
return x - math.pow(x,3)/fact(3) + pow(x,5)/fact(5) - pow(x,7)/fact(7) \
+ math.pow(x,9)/fact(9)
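# The function above is the 5-term Taylor (Maclaurin) expansion of sine:
#   sin(x) ~= x - x^3/3! + x^5/5! - x^7/7! + x^9/9!
# so the error grows quickly once |x| gets much larger than pi/2.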
print(mysin(3.14/2), math.sin(3.14/2))
print(mysin(3.14/4), math.sin(3.14/4), 1/math.sqrt(2))
|
test/spreadsheet_test.py | hermansyah/hyou | 121 | 12679188 | # Copyright 2015 Google Inc. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import datetime
import unittest
import hyou.api
import hyou.collection
import http_mocks
class SpreadsheetReadOnlyTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = hyou.api.API(
http_mocks.ReplayHttp('unittest-sheets.json'),
discovery=False)
def setUp(self):
self.collection = hyou.collection.Collection(self.api)
self.spreadsheet = self.collection[
'<KEY>']
def test_repr(self):
self.assertEqual(
str('Spreadsheet('
'key=\'<KEY>\')'),
repr(self.spreadsheet))
def test_worksheet_accessors(self):
# iter()
self.assertEqual(
['Sheet1', 'Sheet2', 'Sheet3'],
list(self.spreadsheet))
# len()
self.assertEqual(3, len(self.spreadsheet))
# keys()
self.assertEqual(['Sheet1', 'Sheet2', 'Sheet3'],
self.spreadsheet.keys())
# values()
values = self.spreadsheet.values()
self.assertEqual(3, len(values))
self.assertEqual('Sheet1', values[0].title)
self.assertEqual('Sheet2', values[1].title)
self.assertEqual('Sheet3', values[2].title)
# items()
items = self.spreadsheet.items()
self.assertEqual(3, len(items))
self.assertEqual('Sheet1', items[0][0])
self.assertEqual('Sheet1', items[0][1].title)
self.assertEqual('Sheet2', items[1][0])
self.assertEqual('Sheet2', items[1][1].title)
self.assertEqual('Sheet3', items[2][0])
self.assertEqual('Sheet3', items[2][1].title)
# Indexing by an integer
self.assertEqual('Sheet1', self.spreadsheet[0].title)
self.assertEqual('Sheet2', self.spreadsheet[1].title)
self.assertEqual('Sheet3', self.spreadsheet[2].title)
# Indexing by a key
self.assertEqual('Sheet1', self.spreadsheet['Sheet1'].title)
self.assertEqual('Sheet2', self.spreadsheet['Sheet2'].title)
self.assertEqual('Sheet3', self.spreadsheet['Sheet3'].title)
def test_refresh(self):
self.spreadsheet.refresh()
def test_url(self):
self.assertEqual(
'https://docs.google.com/spreadsheets/d/'
'1EQKX_l9GS2HSAMqQd_IrLjy5M0IFq1SbO3uUKVlfHjU/edit',
self.spreadsheet.url)
def test_title(self):
self.assertEqual('SpreadsheetReadOnlyTest', self.spreadsheet.title)
def test_updated(self):
self.assertTrue(
isinstance(self.spreadsheet.updated, datetime.datetime))
class SpreadsheetReadWriteTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = hyou.api.API(
http_mocks.ReplayHttp('unittest-sheets.json'),
discovery=False)
def setUp(self):
self.collection = hyou.collection.Collection(self.api)
self.spreadsheet = self.collection[
'1cs7S44YeWzIx5AEJSUwP4zMsKKVsKrTi8kxNhJbqI08']
def test_set_title(self):
self.spreadsheet.title = 'SpreadsheetReadWriteTest'
def test_add_delete_worksheet(self):
worksheet = self.spreadsheet.add_worksheet('Sheet9', rows=2, cols=8)
self.assertEqual('Sheet9', worksheet.title)
self.assertEqual(2, worksheet.rows)
self.assertEqual(8, worksheet.cols)
self.spreadsheet.delete_worksheet('Sheet9')
|
scvi/dataloaders/_data_splitting.py | YosefLab/scVI | 398 | 12679213 | <filename>scvi/dataloaders/_data_splitting.py
from math import ceil, floor
from typing import Dict, List, Optional
import numpy as np
import pytorch_lightning as pl
import torch
from torch.utils.data import DataLoader, Dataset
from scvi import REGISTRY_KEYS, settings
from scvi.data import AnnDataManager
from scvi.data._utils import get_anndata_attribute
from scvi.dataloaders._ann_dataloader import AnnDataLoader, BatchSampler
from scvi.dataloaders._semi_dataloader import SemiSupervisedDataLoader
from scvi.model._utils import parse_use_gpu_arg
def validate_data_split(
n_samples: int, train_size: float, validation_size: Optional[float] = None
):
"""
Check data splitting parameters and return n_train and n_val.
Parameters
----------
n_samples
Number of samples to split
train_size
Size of train set. Needs to be: 0 < train_size <= 1.
validation_size
Size of validation set. Needs to be: 0 <= validation_size < 1.
"""
if train_size > 1.0 or train_size <= 0.0:
raise ValueError("Invalid train_size. Must be: 0 < train_size <= 1")
n_train = ceil(train_size * n_samples)
if validation_size is None:
n_val = n_samples - n_train
elif validation_size >= 1.0 or validation_size < 0.0:
raise ValueError("Invalid validation_size. Must be 0 <= validation_size < 1")
elif (train_size + validation_size) > 1:
raise ValueError("train_size + validation_size must be between 0 and 1")
else:
n_val = floor(n_samples * validation_size)
if n_train == 0:
raise ValueError(
"With n_samples={}, train_size={} and validation_size={}, the "
"resulting train set will be empty. Adjust any of the "
"aforementioned parameters.".format(n_samples, train_size, validation_size)
)
return n_train, n_val
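# Editor's note — worked examples of the splitting arithmetic above (not part
# of the original module):
#
#     validate_data_split(100, 0.9)          # -> (90, 10): the rest goes to validation
#     validate_data_split(100, 0.8, 0.1)     # -> (80, 10): the remaining 10 become test
#     validate_data_split(100, 1.0, 0.5)     # -> ValueError: sizes sum to more than 1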
class DataSplitter(pl.LightningDataModule):
"""
Creates data loaders ``train_set``, ``validation_set``, ``test_set``.
If ``train_size + validation_set < 1`` then ``test_set`` is non-empty.
Parameters
----------
adata_manager
:class:`~scvi.data.AnnDataManager` object that has been created via ``setup_anndata``.
train_size
float, or None (default is 0.9)
validation_size
float, or None (default is None)
use_gpu
Use default GPU if available (if None or True), or index of GPU to use (if int),
or name of GPU (if str, e.g., `'cuda:0'`), or use CPU (if False).
**kwargs
Keyword args for data loader. If adata has labeled data, data loader
class is :class:`~scvi.dataloaders.SemiSupervisedDataLoader`,
else data loader class is :class:`~scvi.dataloaders.AnnDataLoader`.
Examples
--------
>>> adata = scvi.data.synthetic_iid()
>>> scvi.model.SCVI.setup_anndata(adata)
>>> adata_manager = scvi.model.SCVI(adata).adata_manager
>>> splitter = DataSplitter(adata_manager)
>>> splitter.setup()
>>> train_dl = splitter.train_dataloader()
"""
def __init__(
self,
adata_manager: AnnDataManager,
train_size: float = 0.9,
validation_size: Optional[float] = None,
use_gpu: bool = False,
**kwargs,
):
super().__init__()
self.adata_manager = adata_manager
self.train_size = float(train_size)
self.validation_size = validation_size
self.data_loader_kwargs = kwargs
self.use_gpu = use_gpu
self.n_train, self.n_val = validate_data_split(
self.adata_manager.adata.n_obs, self.train_size, self.validation_size
)
def setup(self, stage: Optional[str] = None):
"""Split indices in train/test/val sets."""
n_train = self.n_train
n_val = self.n_val
random_state = np.random.RandomState(seed=settings.seed)
permutation = random_state.permutation(self.adata_manager.adata.n_obs)
self.val_idx = permutation[:n_val]
self.train_idx = permutation[n_val : (n_val + n_train)]
self.test_idx = permutation[(n_val + n_train) :]
gpus, self.device = parse_use_gpu_arg(self.use_gpu, return_device=True)
self.pin_memory = (
True if (settings.dl_pin_memory_gpu_training and gpus != 0) else False
)
def train_dataloader(self):
return AnnDataLoader(
self.adata_manager,
indices=self.train_idx,
shuffle=True,
drop_last=3,
pin_memory=self.pin_memory,
**self.data_loader_kwargs,
)
def val_dataloader(self):
if len(self.val_idx) > 0:
return AnnDataLoader(
self.adata_manager,
indices=self.val_idx,
shuffle=False,
drop_last=3,
pin_memory=self.pin_memory,
**self.data_loader_kwargs,
)
else:
pass
def test_dataloader(self):
if len(self.test_idx) > 0:
return AnnDataLoader(
self.adata_manager,
indices=self.test_idx,
shuffle=False,
drop_last=3,
pin_memory=self.pin_memory,
**self.data_loader_kwargs,
)
else:
pass
class SemiSupervisedDataSplitter(pl.LightningDataModule):
"""
Creates data loaders ``train_set``, ``validation_set``, ``test_set``.
If ``train_size + validation_set < 1`` then ``test_set`` is non-empty.
The ratio between labeled and unlabeled data in adata will be preserved
in the train/test/val sets.
Parameters
----------
adata_manager
:class:`~scvi.data.AnnDataManager` object that has been created via ``setup_anndata``.
train_size
float, or None (default is 0.9)
validation_size
float, or None (default is None)
n_samples_per_label
Number of subsamples for each label class to sample per epoch
use_gpu
Use default GPU if available (if None or True), or index of GPU to use (if int),
or name of GPU (if str, e.g., `'cuda:0'`), or use CPU (if False).
**kwargs
Keyword args for data loader. If adata has labeled data, data loader
class is :class:`~scvi.dataloaders.SemiSupervisedDataLoader`,
else data loader class is :class:`~scvi.dataloaders.AnnDataLoader`.
Examples
--------
>>> adata = scvi.data.synthetic_iid()
>>> scvi.model.SCVI.setup_anndata(adata, labels_key="labels")
>>> adata_manager = scvi.model.SCVI(adata).adata_manager
>>> unknown_label = 'label_0'
>>> splitter = SemiSupervisedDataSplitter(adata_manager)
>>> splitter.setup()
>>> train_dl = splitter.train_dataloader()
"""
def __init__(
self,
adata_manager: AnnDataManager,
train_size: float = 0.9,
validation_size: Optional[float] = None,
n_samples_per_label: Optional[int] = None,
use_gpu: bool = False,
**kwargs,
):
super().__init__()
self.adata_manager = adata_manager
self.train_size = float(train_size)
self.validation_size = validation_size
self.data_loader_kwargs = kwargs
self.n_samples_per_label = n_samples_per_label
labels_state_registry = adata_manager.get_state_registry(
REGISTRY_KEYS.LABELS_KEY
)
labels = get_anndata_attribute(
adata_manager.adata,
adata_manager.data_registry.labels.attr_name,
labels_state_registry.original_key,
).ravel()
self.unlabeled_category = labels_state_registry.unlabeled_category
self._unlabeled_indices = np.argwhere(labels == self.unlabeled_category).ravel()
self._labeled_indices = np.argwhere(labels != self.unlabeled_category).ravel()
self.data_loader_kwargs = kwargs
self.use_gpu = use_gpu
def setup(self, stage: Optional[str] = None):
"""Split indices in train/test/val sets."""
n_labeled_idx = len(self._labeled_indices)
n_unlabeled_idx = len(self._unlabeled_indices)
if n_labeled_idx != 0:
n_labeled_train, n_labeled_val = validate_data_split(
n_labeled_idx, self.train_size, self.validation_size
)
rs = np.random.RandomState(seed=settings.seed)
labeled_permutation = rs.choice(
self._labeled_indices, len(self._labeled_indices), replace=False
)
labeled_idx_val = labeled_permutation[:n_labeled_val]
labeled_idx_train = labeled_permutation[
n_labeled_val : (n_labeled_val + n_labeled_train)
]
labeled_idx_test = labeled_permutation[(n_labeled_val + n_labeled_train) :]
else:
labeled_idx_test = []
labeled_idx_train = []
labeled_idx_val = []
if n_unlabeled_idx != 0:
n_unlabeled_train, n_unlabeled_val = validate_data_split(
n_unlabeled_idx, self.train_size, self.validation_size
)
rs = np.random.RandomState(seed=settings.seed)
unlabeled_permutation = rs.choice(
self._unlabeled_indices, len(self._unlabeled_indices)
)
unlabeled_idx_val = unlabeled_permutation[:n_unlabeled_val]
unlabeled_idx_train = unlabeled_permutation[
n_unlabeled_val : (n_unlabeled_val + n_unlabeled_train)
]
unlabeled_idx_test = unlabeled_permutation[
(n_unlabeled_val + n_unlabeled_train) :
]
else:
unlabeled_idx_train = []
unlabeled_idx_val = []
unlabeled_idx_test = []
indices_train = np.concatenate((labeled_idx_train, unlabeled_idx_train))
indices_val = np.concatenate((labeled_idx_val, unlabeled_idx_val))
indices_test = np.concatenate((labeled_idx_test, unlabeled_idx_test))
self.train_idx = indices_train.astype(int)
self.val_idx = indices_val.astype(int)
self.test_idx = indices_test.astype(int)
gpus = parse_use_gpu_arg(self.use_gpu, return_device=False)
self.pin_memory = (
True if (settings.dl_pin_memory_gpu_training and gpus != 0) else False
)
if len(self._labeled_indices) != 0:
self.data_loader_class = SemiSupervisedDataLoader
dl_kwargs = {
"n_samples_per_label": self.n_samples_per_label,
}
else:
self.data_loader_class = AnnDataLoader
dl_kwargs = {}
self.data_loader_kwargs.update(dl_kwargs)
def train_dataloader(self):
return self.data_loader_class(
self.adata_manager,
indices=self.train_idx,
shuffle=True,
drop_last=3,
pin_memory=self.pin_memory,
**self.data_loader_kwargs,
)
def val_dataloader(self):
if len(self.val_idx) > 0:
return self.data_loader_class(
self.adata_manager,
indices=self.val_idx,
shuffle=False,
drop_last=3,
pin_memory=self.pin_memory,
**self.data_loader_kwargs,
)
else:
pass
def test_dataloader(self):
if len(self.test_idx) > 0:
return self.data_loader_class(
self.adata_manager,
indices=self.test_idx,
shuffle=False,
drop_last=3,
pin_memory=self.pin_memory,
**self.data_loader_kwargs,
)
else:
pass
class DeviceBackedDataSplitter(DataSplitter):
"""
Creates loaders for data that is already on device, e.g., GPU.
If ``train_size + validation_set < 1`` then ``test_set`` is non-empty.
Parameters
----------
adata_manager
:class:`~scvi.data.AnnDataManager` object that has been created via ``setup_anndata``.
train_size
float, or None (default is 0.9)
validation_size
float, or None (default is None)
use_gpu
Use default GPU if available (if None or True), or index of GPU to use (if int),
or name of GPU (if str, e.g., `'cuda:0'`), or use CPU (if False).
shuffle
if ``True``, shuffles indices before sampling for training set
shuffle_test_val
Shuffle test and validation indices.
batch_size
batch size of each iteration. If `None`, do not minibatch
Examples
--------
>>> adata = scvi.data.synthetic_iid()
>>> scvi.model.SCVI.setup_anndata(adata)
>>> adata_manager = scvi.model.SCVI(adata).adata_manager
>>> splitter = DeviceBackedDataSplitter(adata_manager)
>>> splitter.setup()
>>> train_dl = splitter.train_dataloader()
"""
def __init__(
self,
adata_manager: AnnDataManager,
train_size: float = 1.0,
validation_size: Optional[float] = None,
use_gpu: bool = False,
shuffle: bool = False,
shuffle_test_val: bool = False,
batch_size: Optional[int] = None,
**kwargs,
):
super().__init__(
adata_manager=adata_manager,
train_size=train_size,
validation_size=validation_size,
use_gpu=use_gpu,
**kwargs,
)
self.batch_size = batch_size
self.shuffle = shuffle
self.shuffle_test_val = shuffle_test_val
def setup(self, stage: Optional[str] = None):
super().setup()
if self.shuffle is False:
self.train_idx = np.sort(self.train_idx)
self.val_idx = (
np.sort(self.val_idx) if len(self.val_idx) > 0 else self.val_idx
)
self.test_idx = (
np.sort(self.test_idx) if len(self.test_idx) > 0 else self.test_idx
)
self.train_tensor_dict = self._get_tensor_dict(
self.train_idx, device=self.device
)
self.test_tensor_dict = self._get_tensor_dict(self.test_idx, device=self.device)
self.val_tensor_dict = self._get_tensor_dict(self.val_idx, device=self.device)
def _get_tensor_dict(self, indices, device):
if indices is not None and len(indices) > 0:
dl = AnnDataLoader(
self.adata_manager,
indices=indices,
batch_size=len(indices),
shuffle=False,
pin_memory=self.pin_memory,
**self.data_loader_kwargs,
)
# will only have one minibatch
for batch in dl:
tensor_dict = batch
for k, v in tensor_dict.items():
tensor_dict[k] = v.to(device)
return tensor_dict
else:
return None
def _make_dataloader(self, tensor_dict: Dict[str, torch.Tensor], shuffle):
if tensor_dict is None:
return None
dataset = _DeviceBackedDataset(tensor_dict)
indices = np.arange(len(dataset))
bs = self.batch_size if self.batch_size is not None else len(indices)
sampler = BatchSampler(shuffle=shuffle, indices=indices, batch_size=bs)
return DataLoader(dataset, sampler=sampler, batch_size=None)
def train_dataloader(self):
return self._make_dataloader(self.train_tensor_dict, self.shuffle)
def test_dataloader(self):
return self._make_dataloader(self.test_tensor_dict, self.shuffle_test_val)
def val_dataloader(self):
return self._make_dataloader(self.val_tensor_dict, self.shuffle_test_val)
class _DeviceBackedDataset(Dataset):
def __init__(self, tensor_dict: Dict[str, torch.Tensor]):
self.data = tensor_dict
def __getitem__(self, idx: List[int]) -> Dict[str, torch.Tensor]:
return_dict = {}
for key, value in self.data.items():
return_dict[key] = value[idx]
return return_dict
def __len__(self):
for _, value in self.data.items():
return len(value)
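# Usage sketch (mirrors the DeviceBackedDataSplitter docstring above; assumes an
# `adata_manager` created via `setup_anndata`, and that `batch_size=None` yields a
# single device-backed batch per split):
#     splitter = DeviceBackedDataSplitter(adata_manager, train_size=1.0, batch_size=None)
#     splitter.setup()
#     train_dl = splitter.train_dataloader()
#     batch = next(iter(train_dl))  # dict of tensors already moved to the target device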
|
html_parsing/tolko-tebe.ru_uspeykupit.py | DazEB2/SimplePyScripts | 117 | 12679218 | <gh_stars>100-1000
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
from urllib.parse import urljoin
import requests
def wait(days=0, seconds=0, microseconds=0, milliseconds=0, minutes=0, hours=0, weeks=0):
from datetime import timedelta, datetime
from itertools import cycle
import sys
import time
try:
progress_bar = cycle('|/-\\|/-\\')
today = datetime.today()
timeout_date = today + timedelta(
days=days, seconds=seconds, microseconds=microseconds,
milliseconds=milliseconds, minutes=minutes, hours=hours, weeks=weeks
)
def str_timedelta(td: timedelta) -> str:
td = str(td)
# Remove ms
# 0:01:40.123000 -> 0:01:40
if '.' in td:
td = td[:td.rindex('.')]
# 0:01:40 -> 00:01:40
if td.startswith('0:'):
td = '00:' + td[2:]
return td
while today <= timeout_date:
left = timeout_date - today
left = str_timedelta(left)
print('\r' + ' ' * 100 + '\r', end='')
print('[{}] Time left to wait: {}'.format(next(progress_bar), left), end='')
sys.stdout.flush()
# Delay 1 seconds
time.sleep(1)
today = datetime.today()
print('\r' + ' ' * 100 + '\r', end='')
except KeyboardInterrupt:
print()
print('Waiting canceled')
session = requests.session()
session.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0'
session.get('https://tolko-tebe.ru/')
headers = {
'X-Requested-With': 'XMLHttpRequest',
}
while True:
rs = session.post('https://tolko-tebe.ru/uspeykupit', headers=headers)
data = rs.json()
title = data['title']
price = data['price']
price_action = data['price_action']
mins = data['mins']
secs = data['secs']
time_left = f"{mins}:{secs}"
url_product = urljoin(rs.url, '/product/' + data['path'])
print(
f"{title!r}\n"
f" Цена {price} ₽, со скидкой {price_action} ₽\n"
f" {url_product}\n"
f" Осталось: {time_left}\n"
# f" raw_data: {data}\n"
)
wait(minutes=mins, seconds=secs)
# OUTPUT example:
# 'SKINLITE Очищающая маска, стягивающая поры'
# Цена 385 ₽, со скидкой 193 ₽
# https://tolko-tebe.ru/product/skinlite-ochischayuschaya-maska-styagivayuschaya-pory
# Осталось: 12:47
|
tests/test_scripts.py | center-for-threat-informed-defense/attack-control-framework-mappings | 269 | 12679222 | import os
import pathlib
import subprocess
import sys
import pytest
@pytest.fixture()
def attack_domain():
return "enterprise-attack"
@pytest.fixture()
def dir_location():
cwd = os.getcwd()
if "tests" in cwd:
return os.path.dirname(cwd)
else:
return cwd
@pytest.mark.parametrize("attack_version", ["v8.2", "v9.0"])
@pytest.mark.parametrize("rev", ["nist800-53-r4", "nist800-53-r5"])
def test_list_mappings(dir_location, attack_domain, attack_version, rev):
"""Tests list_mappings.py with both framework entries"""
rx_controls = pathlib.Path(dir_location, "frameworks", f"ATT&CK-{attack_version}", rev,
"stix", f"{rev}-controls.json")
rx_mappings = pathlib.Path(dir_location, "frameworks", f"ATT&CK-{attack_version}", rev,
"stix", f"{rev}-mappings.json")
output_location = pathlib.Path(dir_location, "frameworks", f"ATT&CK-{attack_version}", rev,
f"{rev}-mappings.xlsx")
script_location = f"{dir_location}/src/list_mappings.py"
child_process = subprocess.Popen([
sys.executable, script_location,
"-controls", str(rx_controls),
"-mappings", str(rx_mappings),
"-domain", attack_domain,
"-version", attack_version,
"-output", str(output_location),
])
child_process.wait(timeout=240)
assert child_process.returncode == 0
@pytest.mark.parametrize("attack_version", ["v8.2", "v9.0"])
@pytest.mark.parametrize("rev", ["nist800-53-r4", "nist800-53-r5"])
def test_mappings_to_heatmaps(dir_location, attack_domain, attack_version, rev):
"""Tests mappings_to_heatmaps.py with both framework entries"""
rx_controls = pathlib.Path(dir_location, "frameworks", f"ATT&CK-{attack_version}", rev,
"stix", f"{rev}-controls.json")
rx_mappings = pathlib.Path(dir_location, "frameworks", f"ATT&CK-{attack_version}", rev,
"stix", f"{rev}-mappings.json")
output_location = pathlib.Path(dir_location, "frameworks", f"ATT&CK-{attack_version}", rev,
"layers")
script_location = f"{dir_location}/src/mappings_to_heatmaps.py"
child_process = subprocess.Popen([
sys.executable, script_location,
"-framework", rev,
"-controls", str(rx_controls),
"-mappings", str(rx_mappings),
"-domain", attack_domain,
"-version", attack_version,
"-output", str(output_location),
"--clear",
"--build-directory",
])
child_process.wait(timeout=90)
assert child_process.returncode == 0
@pytest.mark.parametrize("attack_version", ["v8.2", "v9.0"])
@pytest.mark.parametrize("rev", ["nist800-53-r4", "nist800-53-r5"])
def test_substitute(dir_location, attack_domain, attack_version, rev):
"""Tests substitute.py with both frameworks"""
rx_controls = pathlib.Path(dir_location, "frameworks", f"ATT&CK-{attack_version}", rev,
"stix", f"{rev}-controls.json")
rx_mappings = pathlib.Path(dir_location, "frameworks", f"ATT&CK-{attack_version}", rev,
"stix", f"{rev}-mappings.json")
output_location = pathlib.Path(dir_location, "frameworks", f"ATT&CK-{attack_version}", rev,
"stix", f"{rev}-enterprise-attack.json")
script_location = f"{dir_location}/src/substitute.py"
child_process = subprocess.Popen([
sys.executable, script_location,
"-controls", str(rx_controls),
"-mappings", str(rx_mappings),
"-domain", attack_domain,
"-version", attack_version,
"-output", str(output_location),
"--allow-unmapped",
])
child_process.wait(timeout=90)
assert child_process.returncode == 0
def test_make(dir_location):
"""Test the main make.py script"""
script_location = f"{dir_location}/src/make.py"
child_process = subprocess.Popen([
sys.executable, script_location,
])
child_process.wait(timeout=1080)
assert child_process.returncode == 0
@pytest.mark.parametrize("attack_version", ["v8.2", "v9.0"])
@pytest.mark.parametrize("rev", ["nist800-53-r4", "nist800-53-r5"])
def test_parse_framework(dir_location, attack_version, rev):
"""Tests parse.py with both frameworks"""
rx_input_controls = pathlib.Path(dir_location, "frameworks", f"ATT&CK-{attack_version}", rev,
"input", f"{rev}-controls.tsv")
rx_input_mappings = pathlib.Path(dir_location, "frameworks", f"ATT&CK-{attack_version}", rev,
"input", f"{rev}-mappings.tsv")
rx_output_controls = pathlib.Path(dir_location, "frameworks", f"ATT&CK-{attack_version}", rev,
"stix", f"{rev}-controls.json")
rx_output_mappings = pathlib.Path(dir_location, "frameworks", f"ATT&CK-{attack_version}", rev,
"stix", f"{rev}-mappings.json")
config_location = pathlib.Path(dir_location, "frameworks", f"ATT&CK-{attack_version}", rev,
"input", "config.json")
script_location = f"{dir_location}/frameworks/ATT&CK-{attack_version}/{rev}/parse.py"
child_process = subprocess.Popen([
sys.executable, script_location,
"-input-controls", str(rx_input_controls),
"-input-mappings", str(rx_input_mappings),
"-output-controls", str(rx_output_controls),
"-output-mappings", str(rx_output_mappings),
"-config-location", str(config_location),
])
child_process.wait(timeout=90)
assert child_process.returncode == 0
|
misc/l2net/convert_l2net_weights_matconv_pytorch.py | mihaidusmanu/image-matching-benchmark-baselines | 103 | 12679228 | <reponame>mihaidusmanu/image-matching-benchmark-baselines
import numpy as np
import scipy.io as sio
import torch
import torch.nn.init
from misc.l2net.l2net_model import L2Net
eps = 1e-10
def check_ported(l2net_model, test_patch, img_mean):
test_patch = test_patch.transpose(3, 2, 0, 1)-img_mean
desc = l2net_model(torch.from_numpy(test_patch))
print(desc)
return desc
if __name__ == '__main__':
path_to_l2net_weights = 'descriptors/sfm-evaluation-benchmarking/third_party/l2net/matlab/L2Net-LIB+.mat'
l2net_weights = sio.loadmat(path_to_l2net_weights)
l2net_model = L2Net()
l2net_model.eval()
new_state_dict = l2net_model.state_dict().copy()
conv_layers, bn_layers = {}, {}
all_layer_weights = l2net_weights['net']['layers'][0][0][0]
img_mean = l2net_weights['pixMean']
conv_layers_to_track, bn_layers_to_track = [0,3,6,9,12,15,18], \
[1,4,7,10,13,16,19]
conv_i, bn_i = 0,0
for layer in all_layer_weights:
if 'weights' not in layer.dtype.names:
continue
layer_name = layer[0][0][0][0]
layer_value = layer['weights'][0][0][0]
if layer_name == 'conv':
conv_layers[conv_layers_to_track[conv_i]] = layer_value
conv_i+=1
elif layer_name == 'bnormPair':
bn_layers[bn_layers_to_track[bn_i]] = layer_value
bn_i+=1
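    # Map the collected MatConvNet tensors onto the PyTorch state dict: conv kernels
    # are transposed from (H, W, C_in, C_out) to (C_out, C_in, H, W), and bnorm moments
    # are unpacked into running_mean / running_var.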
for key, value in new_state_dict.items():
layer_number = int(key.split('.')[1])
if layer_number in conv_layers.keys():
if 'weight' in key:
new_state_dict[key] = torch.from_numpy(conv_layers[layer_number][0].transpose((3,2,0,1)))
elif 'bias' in key:
new_state_dict[key] = torch.from_numpy(conv_layers[layer_number][1]).squeeze()
elif layer_number in bn_layers.keys():
if 'running_mean' in key:
new_state_dict[key] = torch.from_numpy(np.array([x[0] for x in bn_layers[layer_number][2]])).squeeze()
elif 'running_var' in key:
                new_state_dict[key] = torch.from_numpy(np.array([x[1] for x in bn_layers[layer_number][2]]) ** 2 - eps).squeeze()
elif 'weight' in key:
new_state_dict[key] = torch.from_numpy(np.ones(value.size()[0])).squeeze()
else:
continue
l2net_model.load_state_dict(new_state_dict)
l2net_model.eval()
torch.save(l2net_model.state_dict(),'l2net_ported_weights_lib+.pth')
# compare desc on test patch with matlab implementation
# test_patch_batch = sio.loadmat('test_batch_img.mat')['testPatch']
# check_ported(l2net_model, test_patch_batch, img_mean)
#
# test_patch_one = sio.loadmat('test_one.mat')['testPatch']
# check_ported(l2net_model, np.expand_dims(np.expand_dims(test_patch_one, axis=2),axis=2), img_mean)
|
openpyscad/modifiers.py | GothAck/openpyscad | 105 | 12679234 | <reponame>GothAck/openpyscad<gh_stars>100-1000
# -*- coding: utf-8 -*-
class Modifier(object):
def __init__(self):
self.is_disable = False
self.is_show_only = False
self.is_debug = False
self.is_transparent = False
def turn_on_disable(self):
self.is_disable = True
def turn_off_disable(self):
self.is_disable = False
def turn_on_show_only(self):
self.is_show_only = True
def turn_off_show_only(self):
self.is_show_only = False
def turn_on_debug(self):
self.is_debug = True
def turn_off_debug(self):
self.is_debug = False
def turn_on_transparent(self):
self.is_transparent = True
def turn_off_transparent(self):
self.is_transparent = False
def get_prefix(self):
prefix = ''
if self.is_disable:
prefix += '*'
if self.is_show_only:
prefix += '!'
if self.is_debug:
prefix += '#'
if self.is_transparent:
prefix += '%'
return prefix
class ModifierMixin(object):
def __init__(self):
super(ModifierMixin, self).__init__()
self.mod = Modifier()
def turn_on_disable(self):
self.mod.is_disable = True
return self
def turn_off_disable(self):
self.mod.is_disable = False
return self
def turn_on_show_only(self):
self.mod.is_show_only = True
return self
def turn_off_show_only(self):
self.mod.is_show_only = False
return self
def turn_on_debug(self):
self.mod.is_debug = True
return self
def turn_off_debug(self):
self.mod.is_debug = False
return self
def turn_on_transparent(self):
self.mod.is_transparent = True
return self
def turn_off_transparent(self):
self.mod.is_transparent = False
return self
# Shorthand
def disable(self):
return self.turn_on_disable()
def show_only(self):
return self.turn_on_show_only()
def debug(self):
return self.turn_on_debug()
def transparent(self):
return self.turn_on_transparent()
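# Illustrative sketch (not part of the original file): any object mixing in
# ModifierMixin gets these chainable helpers, and Modifier.get_prefix() combines
# the active flags into the OpenSCAD modifier characters:
#     m = Modifier()
#     m.turn_on_debug()
#     m.turn_on_transparent()
#     m.get_prefix()  # -> '#%'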
|
chirp/plugins/events/__init__.py | dhanizael/CHIRP | 1,080 | 12679253 | """Event plugin initializer."""
from . import scan
REQUIRED_OS = "Windows"
REQUIRED_ADMIN = True
entrypoint = scan.run
|
train/tasks/segmentation/dataset/coco/parser.py | hamzaMahdi/bonnetal | 231 | 12679265 | # This file is covered by the LICENSE file in the root of this project.
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import os
import numpy as np
from PIL import Image
import random
import torchvision.transforms.functional as TF
import cv2
'''
means(rgb): [0.47037394 0.44669544 0.40731883]
stds(rgb): [0.27876515 0.27429348 0.28861644]
Num of pixels: 20354514743
Frequency: [1.03595582e-01 8.69684146e-02 1.56773018e-03 5.82611153e-03
4.81058803e-03 3.16223407e-03 7.10428246e-03 7.05528129e-03
5.76380800e-03 2.61561927e-03 6.43652977e-04 1.06484818e-03
1.07875453e-03 6.98690299e-04 3.30846713e-03 1.65507630e-03
6.01311471e-03 4.48253614e-03 3.37169861e-03 1.84147500e-03
2.59677750e-03 4.60398424e-03 1.72642509e-03 3.25079452e-03
3.17922092e-03 9.28004241e-04 3.00187903e-03 1.02122941e-03
7.74191387e-04 3.01174387e-03 3.52895713e-04 3.00067384e-04
3.31869518e-04 1.49010479e-04 6.79291802e-04 1.38228842e-04
1.80938973e-04 5.82766927e-04 1.16591352e-03 5.55644934e-04
1.83246594e-03 9.64564533e-04 2.68603416e-03 3.53508157e-04
4.86584039e-04 3.04124273e-04 6.10763335e-03 2.51745687e-03
1.19416608e-03 3.49547734e-03 1.43915212e-03 1.98661498e-03
8.55161482e-04 1.22814719e-03 8.29490195e-03 2.09027995e-03
3.95652007e-03 6.19389573e-03 5.21590882e-03 2.07798941e-03
9.07128538e-03 2.41144264e-02 3.08866224e-03 3.29269545e-03
3.44996375e-03 2.17966680e-04 5.69893272e-04 1.33344903e-03
1.06328032e-03 9.01832455e-04 3.21914572e-03 5.66035602e-05
1.64377842e-03 3.49153060e-03 2.07557215e-03 1.33823711e-03
1.73024557e-03 3.61442810e-04 3.16915293e-03 3.26746183e-05
1.69843597e-04 2.24706580e-03 1.08037029e-03 1.15556594e-03
2.19738081e-03 2.83867548e-03 4.58330597e-03 6.13085488e-03
5.53305060e-03 1.95223391e-03 1.24932391e-03 2.50343202e-03
4.28674371e-03 1.36921250e-03 3.32965639e-03 1.77840698e-03
5.10465080e-04 2.04364749e-03 1.78148449e-02 2.76140555e-03
5.15718043e-03 2.26026582e-02 1.41155564e-03 9.53189813e-03
2.24532113e-02 2.74807151e-03 1.89481003e-02 1.06579298e-03
7.92184791e-04 7.43852368e-04 5.30637362e-03 2.23005552e-03
8.45400979e-03 6.19471526e-03 4.12920107e-03 1.70490166e-03
9.71786370e-03 6.47590623e-02 1.39815155e-02 8.92733677e-03
8.67340285e-02 8.37997595e-03 1.41617307e-02 1.35923816e-02
2.34834311e-02 7.09260706e-03 4.15174260e-02 1.33029928e-02
4.80344372e-03 7.12591456e-03 3.01482646e-02 4.35955532e-03
6.39422134e-02 6.29973913e-03]
********************************************************************************
Log strategy
Weights: [3.30289772 3.44347075 4.45638856 4.38993873 4.40558454 4.43124634
4.3704214 4.37116607 4.39089505 4.43982977 4.47110532 4.46438403
4.4641625 4.47022578 4.42895632 4.45500307 4.38707112 4.4106653
4.42796692 4.45204959 4.4401263 4.40878283 4.45387203 4.42985916
4.43098019 4.46656527 4.43376055 4.46507904 4.46901983 4.43360579
4.47575828 4.47660484 4.47609518 4.47902746 4.47053574 4.47920049
4.47851516 4.47207879 4.4627746 4.47251257 4.45219224 4.46598228
4.43872198 4.47574847 4.47361754 4.47653982 4.3856233 4.44137513
4.46232492 4.42603155 4.45842983 4.44975287 4.46772733 4.46178419
4.35241406 4.44811407 4.41883936 4.38430288 4.39932503 4.44830829
4.34076057 4.12794364 4.43239948 4.42920318 4.42674297 4.4779212
4.47228467 4.4601095 4.464409 4.46698271 4.43035479 4.4805109
4.45518222 4.42609323 4.44834649 4.46003338 4.45381149 4.47562135
4.43113793 4.48089522 4.47869317 4.44563805 4.46413676 4.46293932
4.44642236 4.43632268 4.40910322 4.38526776 4.3944411 4.45029668
4.46144729 4.44159602 4.41370389 4.45954104 4.42862471 4.45304841
4.47323538 4.4488511 4.21416693 4.43753689 4.40023077 4.14827356
4.45886822 4.33387961 4.15029549 4.4377465 4.19835921 4.46436897
4.46873253 4.46950434 4.39793066 4.44590653 4.35002018 4.38429034
4.41615226 4.45421316 4.33110842 3.65425719 4.26863963 4.34291598
3.44555095 4.3511337 4.26604345 4.27425755 4.13640191 4.37059881
3.90903173 4.27844617 4.40569505 4.37009275 4.04897801 4.41257335
3.66257514 4.38268395]
Linear strategy
Weights: [0.89640442 0.91303159 0.99843227 0.99417389 0.99518941 0.99683777
0.99289572 0.99294472 0.99423619 0.99738438 0.99935635 0.99893515
0.99892125 0.99930131 0.99669153 0.99834492 0.99398689 0.99551746
0.9966283 0.99815853 0.99740322 0.99539602 0.99827357 0.99674921
0.99682078 0.999072 0.99699812 0.99897877 0.99922581 0.99698826
0.9996471 0.99969993 0.99966813 0.99985099 0.99932071 0.99986177
0.99981906 0.99941723 0.99883409 0.99944436 0.99816753 0.99903544
0.99731397 0.99964649 0.99951342 0.99969588 0.99389237 0.99748254
0.99880583 0.99650452 0.99856085 0.99801339 0.99914484 0.99877185
0.9917051 0.99790972 0.99604348 0.9938061 0.99478409 0.99792201
0.99092871 0.97588557 0.99691134 0.9967073 0.99655004 0.99978203
0.99943011 0.99866655 0.99893672 0.99909817 0.99678085 0.9999434
0.99835622 0.99650847 0.99792443 0.99866176 0.99826975 0.99963856
0.99683085 0.99996733 0.99983016 0.99775293 0.99891963 0.99884443
0.99780262 0.99716132 0.99541669 0.99386915 0.99446695 0.99804777
0.99875068 0.99749657 0.99571326 0.99863079 0.99667034 0.99822159
0.99948953 0.99795635 0.98218516 0.99723859 0.99484282 0.97739734
0.99858844 0.9904681 0.97754679 0.99725193 0.9810519 0.99893421
0.99920782 0.99925615 0.99469363 0.99776994 0.99154599 0.99380528
0.9958708 0.9982951 0.99028214 0.93524094 0.98601848 0.99107266
0.91326597 0.99162002 0.98583827 0.98640762 0.97651657 0.99290739
0.95848257 0.98669701 0.99519656 0.99287409 0.96985174 0.99564044
0.93605779 0.99370026]
Squared strategy
Weights: [0.80354088 0.83362668 0.996867 0.98838172 0.99040197 0.99368553
0.98584191 0.98593921 0.98850561 0.9947756 0.99871311 0.99787144
0.99784365 0.99860311 0.99339401 0.99669259 0.98800993 0.99105502
0.99326797 0.99632044 0.99481319 0.99081323 0.99655013 0.99350898
0.99365167 0.99814485 0.99400525 0.99795858 0.99845222 0.99398558
0.99929433 0.99939996 0.99933637 0.999702 0.99864188 0.99972356
0.99963815 0.99883481 0.99766953 0.99888902 0.99633843 0.9980718
0.99463515 0.99929311 0.99902707 0.99939184 0.98782204 0.99497142
0.99761309 0.99302126 0.99712377 0.99603072 0.99829041 0.99754521
0.983479 0.99582381 0.99210261 0.98765057 0.98959539 0.99584834
0.98193972 0.95235265 0.99383222 0.99342545 0.99311197 0.99956411
0.99886054 0.99733488 0.99787457 0.99819715 0.99357207 0.9998868
0.99671515 0.99302913 0.99585316 0.99732532 0.9965425 0.99927725
0.99367174 0.99993465 0.99966034 0.99551092 0.99784043 0.9976902
0.99561007 0.99433071 0.99085439 0.98777588 0.98896451 0.99609934
0.99750291 0.9949994 0.99144489 0.99726345 0.99335177 0.99644635
0.99897933 0.99591688 0.96468768 0.99448481 0.98971224 0.95530556
0.99717888 0.98102706 0.95559772 0.99451141 0.96246283 0.99786955
0.99841626 0.99851285 0.98941541 0.99554486 0.98316345 0.98764894
0.99175865 0.9965931 0.98065871 0.87467561 0.97223245 0.98222502
0.83405473 0.98331027 0.97187709 0.97299999 0.95358461 0.98586509
0.91868884 0.97357098 0.99041619 0.98579895 0.94061239 0.9912999
0.87620418 0.98744021]
1/w strategy
Weights: [9.65292034e+00 1.14984261e+01 6.37860798e+02 1.71640773e+02
2.07874363e+02 3.16231125e+02 1.40759971e+02 1.41737592e+02
1.73496110e+02 3.82317177e+02 1.55360808e+03 9.39092189e+02
9.26986355e+02 1.43122881e+03 3.02253865e+02 6.04198100e+02
1.66302887e+02 2.23087497e+02 2.96585535e+02 5.43039993e+02
3.85091194e+02 2.17202705e+02 5.79228263e+02 3.07616159e+02
3.14541481e+02 1.07756967e+03 3.33123573e+02 9.79202324e+02
1.29165359e+03 3.32032445e+02 2.83361805e+03 3.33247373e+03
3.01314165e+03 6.71048707e+03 1.47209973e+03 7.23385690e+03
5.52641986e+03 1.71592243e+03 8.57689188e+02 1.79967807e+03
5.45709757e+02 1.03672652e+03 3.72294698e+02 2.82870902e+03
2.05510121e+03 3.28802141e+03 1.63729272e+02 3.97224691e+02
8.37397449e+02 2.86083142e+02 6.94848751e+02 5.03366266e+02
1.16935611e+03 8.14228025e+02 1.20555831e+02 4.78402530e+02
2.52746721e+02 1.61449018e+02 1.91720775e+02 4.81232091e+02
1.10237839e+02 4.14689352e+01 3.23763716e+02 3.03701627e+02
2.89857278e+02 4.58764672e+03 1.75468373e+03 7.49929301e+02
9.40476913e+02 1.10884112e+03 3.10640456e+02 1.76636127e+04
6.08350801e+02 2.86406522e+02 4.81792541e+02 7.47246149e+02
5.77949302e+02 2.76661288e+03 3.15540735e+02 3.05954315e+04
5.88742315e+03 4.45022815e+02 9.25600003e+02 8.65369349e+02
4.55085184e+02 3.52275730e+02 2.18182645e+02 1.63109124e+02
1.80731800e+02 5.12231075e+02 8.00426523e+02 3.99450034e+02
2.33276756e+02 7.30341490e+02 3.00330389e+02 5.62297827e+02
1.95895948e+03 4.89318785e+02 5.61329299e+01 3.62133109e+02
1.93904029e+02 4.42425642e+01 7.08433228e+02 1.04910789e+02
4.45370393e+01 3.63890226e+02 5.27757115e+01 9.38259711e+02
1.26231580e+03 1.34433471e+03 1.88452263e+02 4.48417318e+02
1.18286924e+02 1.61427660e+02 2.42177012e+02 5.86540654e+02
1.02903169e+02 1.54418518e+01 7.15229535e+01 1.12015364e+02
1.15294989e+01 1.19331942e+02 7.06127883e+01 7.35705703e+01
4.25831970e+01 1.40991681e+02 2.40862658e+01 7.51709983e+01
2.08183540e+02 1.40332667e+02 3.31693940e+01 2.29380667e+02
1.56391184e+01 1.58736480e+02]
'''
IMG_EXT = ['.jpg']
LBL_EXT = ['.png']
SCALES = [1.0]
class ToLabel:
def __call__(self, label):
label = np.array(label)
return torch.from_numpy(label).long()
def load_image(file):
return Image.open(file)
def load_label(file):
return Image.open(file)
def is_image(filename):
return any(filename.endswith(ext) for ext in IMG_EXT)
def is_label(filename):
return any(filename.endswith(ext) for ext in LBL_EXT)
def resize_and_fit(img, new_h, new_w, img_type):
# check img_type
    assert(img_type == "RGB" or img_type == "L")
# get current size
w, h = img.size
# generate new img
out_img = Image.new(img_type, (new_w, new_h))
# now do size magic
curr_asp_ratio = h / w
new_asp_ratio = new_h / new_w
# do resizing according to aspect ratio
if curr_asp_ratio > new_asp_ratio:
# fit h to h
new_tmp_h = new_h
new_tmp_w = int(w * new_h / h)
else:
# fit w to w
new_tmp_w = new_w
new_tmp_h = int(h * new_w / w)
# resize the original image
    if img_type == "RGB":
tmp_img = img.resize((new_tmp_w, new_tmp_h), Image.BILINEAR)
else:
tmp_img = img.resize((new_tmp_w, new_tmp_h), Image.NEAREST)
# put in padded image
out_img.paste(tmp_img, (int((new_w-new_tmp_w)//2),
int((new_h-new_tmp_h)//2)))
return out_img
class MS_COCO(Dataset):
def __init__(self, root, subset, h, w, means, stds, crop_h=None, crop_w=None):
self.images_root = os.path.join(root, subset + "2017")
self.labels_root = os.path.join(root,
"annotations/panoptic_"+subset+"2017_remap")
self.subset = subset
assert self.subset == 'train' or self.subset == 'val'
self.w = w
self.h = h
self.means = means
self.stds = stds
if self.subset == 'train':
self.crop_h = crop_h
self.crop_w = crop_w
# check that parameters make sense
assert(self.crop_h <= self.h)
assert(self.crop_w <= self.w)
self.resize_crop_img = transforms.Resize((self.crop_h, self.crop_w),
Image.BILINEAR)
self.resize_crop_lbl = transforms.Resize((self.crop_h, self.crop_w),
Image.NEAREST)
print("Images from: ", self.images_root)
print("Labels from: ", self.labels_root)
self.filenames = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(self.images_root)) for f in fn if is_image(f)]
self.filenames.sort()
self.filenamesGt = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(self.labels_root)) for f in fn if is_label(f)]
self.filenamesGt.sort()
assert len(self.filenames) == len(self.filenamesGt)
# transformations for images
self.jitter = transforms.ColorJitter(brightness=0.05,
contrast=0.05,
saturation=0.05,
hue=0.05)
self.h_flip = TF.hflip
self.crop_param = transforms.RandomCrop.get_params
self.crop = TF.crop
# transformations for tensors
self.norm = transforms.Normalize(mean=self.means, std=self.stds)
self.tensorize_img = transforms.ToTensor()
self.tensorize_lbl = ToLabel()
def __getitem__(self, index):
filename = self.filenames[index]
filenameGt = self.filenamesGt[index]
with open(filename, 'rb') as f:
image = load_image(f).convert('RGB')
with open(filenameGt, 'rb') as f:
label = load_label(f).convert('L')
# resize (resizing is different if we are in train or valid mode)
# generate resizer
if self.subset == 'train':
new_h = self.crop_h
new_w = self.crop_w
else:
new_h = self.h
new_w = self.w
image = resize_and_fit(image, new_h, new_w, "RGB")
label = resize_and_fit(label, new_h, new_w, "L")
# augment data and tensorize
if self.subset == 'train':
# crop randomly sized patches
scale = SCALES[random.randrange(len(SCALES))]
size = (int(self.crop_h * scale), int(self.crop_w * scale))
i, j, h, w = self.crop_param(image, output_size=size)
image = self.resize_crop_img(self.crop(image, i, j, h, w))
label = self.resize_crop_lbl(self.crop(label, i, j, h, w))
# flip
if random.random() > 0.5:
image = self.h_flip(image)
label = self.h_flip(label)
# jitter
if random.random() > 0.5:
image = self.jitter(image)
# show (set workers = 0)
# cv2.imshow("train_img", np.array(image)[:, :, ::-1])
# cv2.imshow("train_lbl", LUT[np.array(label)].astype(np.float32) / 21.0)
# cv2.waitKey(0)
# if self.subset == 'val':
# show (set workers = 0)
# cv2.imshow("valid_img", np.array(image)[:, :, ::-1])
# cv2.waitKey(0)
# tensorize
image = self.tensorize_img(image)
label = self.tensorize_lbl(label)
# normalize
image = self.norm(image)
return image, label
def __len__(self):
return len(self.filenames)
class Parser():
# standard conv, BN, relu
def __init__(self, img_prop, img_means, img_stds, classes, train, location=None, batch_size=None, crop_prop=None, workers=2):
super(Parser, self).__init__()
self.img_prop = img_prop
self.img_means = img_means
self.img_stds = img_stds
self.classes = classes
self.train = train
if self.train:
# if I am training, get the dataset
self.location = location
self.batch_size = batch_size
self.crop_prop = crop_prop
self.workers = workers
# Data loading code
self.train_dataset = MS_COCO(root=self.location,
subset='train',
h=self.img_prop["height"],
w=self.img_prop["width"],
means=self.img_means,
stds=self.img_stds,
crop_h=self.crop_prop["height"],
crop_w=self.crop_prop["width"])
self.trainloader = torch.utils.data.DataLoader(self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.workers,
pin_memory=True,
drop_last=True)
assert len(self.trainloader) > 0
self.trainiter = iter(self.trainloader)
# calculate validation batch from train batch and image sizes
factor_val_over_train = float(self.img_prop["height"] * self.img_prop["width"]) / float(
self.crop_prop["height"] * self.crop_prop["width"])
self.val_batch_size = max(
1, int(self.batch_size / factor_val_over_train))
# if gpus are available make val_batch_size at least the number of gpus
if torch.cuda.is_available() and torch.cuda.device_count() > 1:
self.val_batch_size = max(
self.val_batch_size, torch.cuda.device_count())
print("Inference batch size: ", self.val_batch_size)
self.valid_dataset = MS_COCO(root=self.location,
subset='val',
h=self.img_prop["height"],
w=self.img_prop["width"],
means=self.img_means,
stds=self.img_stds)
self.validloader = torch.utils.data.DataLoader(self.valid_dataset,
batch_size=self.val_batch_size,
shuffle=False,
num_workers=self.workers,
pin_memory=True,
drop_last=True)
assert len(self.validloader) > 0
self.validiter = iter(self.validloader)
def get_train_batch(self):
        images, labels = next(self.trainiter)
return images, labels
def get_train_set(self):
return self.trainloader
def get_valid_batch(self):
        images, labels = next(self.validiter)
return images, labels
def get_valid_set(self):
return self.validloader
def get_train_size(self):
return len(self.trainloader)
def get_valid_size(self):
return len(self.validloader)
def get_img_size(self):
h = self.img_prop["height"]
w = self.img_prop["width"]
d = self.img_prop["depth"]
return h, w, d
def get_n_classes(self):
return len(self.classes)
def get_class_string(self, idx):
return self.classes[idx]
def get_means_stds(self):
return self.img_means, self.img_stds
|
test/sagemaker_tests/pytorch/inference/integration/sagemaker/test_neuron_hosting.py | johnbensnyder/deep-learning-containers | 383 | 12679279 | <filename>test/sagemaker_tests/pytorch/inference/integration/sagemaker/test_neuron_hosting.py
# Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import os
import numpy as np
import json
import pytest
import sagemaker
from sagemaker.pytorch import PyTorchModel
from sagemaker.serializers import IdentitySerializer
from sagemaker.deserializers import BytesDeserializer
from ...integration import model_neuron_dir, resnet_neuron_script, resnet_neuron_input, resnet_neuron_image_list
from ...integration.sagemaker.timeout import timeout_and_delete_endpoint
from .... import invoke_pytorch_helper_function
@pytest.mark.model("resnet")
@pytest.mark.processor("neuron")
@pytest.mark.neuron_test
def test_neuron_hosting(framework_version, ecr_image, instance_type, sagemaker_regions):
instance_type = instance_type or 'ml.inf1.xlarge'
model_dir = os.path.join(model_neuron_dir, 'model-resnet.tar.gz')
function_args = {
'framework_version': framework_version,
'instance_type': instance_type,
'model_dir': model_dir,
'resnet_script': resnet_neuron_script,
'resnet_neuron_input': resnet_neuron_input,
'resnet_neuron_image_list': resnet_neuron_image_list,
}
invoke_pytorch_helper_function(ecr_image, sagemaker_regions, _test_resnet_distributed, function_args)
def _test_resnet_distributed(
ecr_image, sagemaker_session, framework_version, instance_type, model_dir, resnet_script, resnet_neuron_input, resnet_neuron_image_list, accelerator_type=None
):
endpoint_name = sagemaker.utils.unique_name_from_base("sagemaker-pytorch-serving")
model_data = sagemaker_session.upload_data(
path=model_dir,
key_prefix="sagemaker-pytorch-serving/models",
)
pytorch = PyTorchModel(
model_data=model_data,
role='SageMakerRole',
entry_point=resnet_script,
framework_version=framework_version,
image_uri=ecr_image,
sagemaker_session=sagemaker_session,
model_server_workers=4,
env={"AWS_NEURON_VISIBLE_DEVICES": "ALL", "NEURONCORE_GROUP_SIZES":"1", "NEURON_RT_VISIBLE_CORES": "0", "NEURON_RT_LOG_LEVEL":"5", "NEURON_RTD_ADDRESS":"run"}
)
with timeout_and_delete_endpoint(endpoint_name, sagemaker_session, minutes=30):
predictor = pytorch.deploy(
initial_instance_count=1,
instance_type=instance_type,
endpoint_name=endpoint_name,
serializer=IdentitySerializer(),
deserializer=BytesDeserializer(),
)
with open(resnet_neuron_input, "rb") as f:
payload = f.read()
output = predictor.predict(data=payload)
print(output)
result = json.loads(output.decode())
print(result)
# Load names for ImageNet classes
object_categories = {}
with open(resnet_neuron_image_list, "r") as f:
for line in f:
key, val = line.strip().split(":")
object_categories[key] = val
assert("cat" in object_categories[str(np.argmax(result))]) |
isegm/model/losses.py | supervisely-ecosystem/ritm-training | 278 | 12679284 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from isegm.utils import misc
class NormalizedFocalLossSigmoid(nn.Module):
def __init__(self, axis=-1, alpha=0.25, gamma=2, max_mult=-1, eps=1e-12,
from_sigmoid=False, detach_delimeter=True,
batch_axis=0, weight=None, size_average=True,
ignore_label=-1):
super(NormalizedFocalLossSigmoid, self).__init__()
self._axis = axis
self._alpha = alpha
self._gamma = gamma
self._ignore_label = ignore_label
self._weight = weight if weight is not None else 1.0
self._batch_axis = batch_axis
self._from_logits = from_sigmoid
self._eps = eps
self._size_average = size_average
self._detach_delimeter = detach_delimeter
self._max_mult = max_mult
self._k_sum = 0
self._m_max = 0
def forward(self, pred, label):
one_hot = label > 0.5
sample_weight = label != self._ignore_label
if not self._from_logits:
pred = torch.sigmoid(pred)
alpha = torch.where(one_hot, self._alpha * sample_weight, (1 - self._alpha) * sample_weight)
pt = torch.where(sample_weight, 1.0 - torch.abs(label - pred), torch.ones_like(pred))
beta = (1 - pt) ** self._gamma
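        # Normalization step: rescale the focal term so that, averaged over the
        # valid (non-ignored) pixels of each sample, the modulating weight is ~1.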
sw_sum = torch.sum(sample_weight, dim=(-2, -1), keepdim=True)
beta_sum = torch.sum(beta, dim=(-2, -1), keepdim=True)
mult = sw_sum / (beta_sum + self._eps)
if self._detach_delimeter:
mult = mult.detach()
beta = beta * mult
if self._max_mult > 0:
beta = torch.clamp_max(beta, self._max_mult)
with torch.no_grad():
ignore_area = torch.sum(label == self._ignore_label, dim=tuple(range(1, label.dim()))).cpu().numpy()
sample_mult = torch.mean(mult, dim=tuple(range(1, mult.dim()))).cpu().numpy()
if np.any(ignore_area == 0):
self._k_sum = 0.9 * self._k_sum + 0.1 * sample_mult[ignore_area == 0].mean()
beta_pmax, _ = torch.flatten(beta, start_dim=1).max(dim=1)
beta_pmax = beta_pmax.mean().item()
self._m_max = 0.8 * self._m_max + 0.2 * beta_pmax
loss = -alpha * beta * torch.log(torch.min(pt + self._eps, torch.ones(1, dtype=torch.float).to(pt.device)))
loss = self._weight * (loss * sample_weight)
if self._size_average:
bsum = torch.sum(sample_weight, dim=misc.get_dims_with_exclusion(sample_weight.dim(), self._batch_axis))
loss = torch.sum(loss, dim=misc.get_dims_with_exclusion(loss.dim(), self._batch_axis)) / (bsum + self._eps)
else:
loss = torch.sum(loss, dim=misc.get_dims_with_exclusion(loss.dim(), self._batch_axis))
return loss
def log_states(self, sw, name, global_step):
sw.add_scalar(tag=name + '_k', value=self._k_sum, global_step=global_step)
sw.add_scalar(tag=name + '_m', value=self._m_max, global_step=global_step)
class FocalLoss(nn.Module):
def __init__(self, axis=-1, alpha=0.25, gamma=2,
from_logits=False, batch_axis=0,
weight=None, num_class=None,
eps=1e-9, size_average=True, scale=1.0,
ignore_label=-1):
super(FocalLoss, self).__init__()
self._axis = axis
self._alpha = alpha
self._gamma = gamma
self._ignore_label = ignore_label
self._weight = weight if weight is not None else 1.0
self._batch_axis = batch_axis
self._scale = scale
self._num_class = num_class
self._from_logits = from_logits
self._eps = eps
self._size_average = size_average
def forward(self, pred, label, sample_weight=None):
one_hot = label > 0.5
sample_weight = label != self._ignore_label
if not self._from_logits:
pred = torch.sigmoid(pred)
alpha = torch.where(one_hot, self._alpha * sample_weight, (1 - self._alpha) * sample_weight)
pt = torch.where(sample_weight, 1.0 - torch.abs(label - pred), torch.ones_like(pred))
beta = (1 - pt) ** self._gamma
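        # Standard focal loss term: -alpha * (1 - pt)^gamma * log(pt), with pt
        # clamped by eps for numerical stability.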
loss = -alpha * beta * torch.log(torch.min(pt + self._eps, torch.ones(1, dtype=torch.float).to(pt.device)))
loss = self._weight * (loss * sample_weight)
if self._size_average:
tsum = torch.sum(sample_weight, dim=misc.get_dims_with_exclusion(label.dim(), self._batch_axis))
loss = torch.sum(loss, dim=misc.get_dims_with_exclusion(loss.dim(), self._batch_axis)) / (tsum + self._eps)
else:
loss = torch.sum(loss, dim=misc.get_dims_with_exclusion(loss.dim(), self._batch_axis))
return self._scale * loss
class SoftIoU(nn.Module):
def __init__(self, from_sigmoid=False, ignore_label=-1):
super().__init__()
self._from_sigmoid = from_sigmoid
self._ignore_label = ignore_label
def forward(self, pred, label):
label = label.view(pred.size())
sample_weight = label != self._ignore_label
if not self._from_sigmoid:
pred = torch.sigmoid(pred)
loss = 1.0 - torch.sum(pred * label * sample_weight, dim=(1, 2, 3)) \
/ (torch.sum(torch.max(pred, label) * sample_weight, dim=(1, 2, 3)) + 1e-8)
return loss
class SigmoidBinaryCrossEntropyLoss(nn.Module):
def __init__(self, from_sigmoid=False, weight=None, batch_axis=0, ignore_label=-1):
super(SigmoidBinaryCrossEntropyLoss, self).__init__()
self._from_sigmoid = from_sigmoid
self._ignore_label = ignore_label
self._weight = weight if weight is not None else 1.0
self._batch_axis = batch_axis
def forward(self, pred, label):
label = label.view(pred.size())
sample_weight = label != self._ignore_label
label = torch.where(sample_weight, label, torch.zeros_like(label))
if not self._from_sigmoid:
loss = torch.relu(pred) - pred * label + F.softplus(-torch.abs(pred))
else:
eps = 1e-12
loss = -(torch.log(pred + eps) * label
+ torch.log(1. - pred + eps) * (1. - label))
loss = self._weight * (loss * sample_weight)
return torch.mean(loss, dim=misc.get_dims_with_exclusion(loss.dim(), self._batch_axis))
|
certbot-dns-gehirn/certbot_dns_gehirn/_internal/__init__.py | vivithemage/certbot | 16,789 | 12679290 | """Internal implementation of `~certbot_dns_gehirn.dns_gehirn` plugin."""
|
tf_quant_finance/experimental/pricing_platform/framework/market_data/volatility_surface_test.py | slowy07/tf-quant-finance | 3,138 | 12679293 | <filename>tf_quant_finance/experimental/pricing_platform/framework/market_data/volatility_surface_test.py
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for rate_curve.py."""
from absl.testing import parameterized
import tensorflow.compat.v2 as tf
import tf_quant_finance as tff
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
volatility_surface = tff.experimental.pricing_platform.framework.market_data.volatility_surface
dateslib = tff.datetime
core = tff.experimental.pricing_platform.framework.core
InterpolationMethod = core.interpolation_method.InterpolationMethod
# This function can't be moved to SetUp since that would break graph mode
# execution
def build_surface(dim, default_interp=True):
dtype = tf.float64
year = dim * [[2021, 2022, 2023, 2025, 2050]]
month = dim * [[2, 2, 2, 2, 2]]
day = dim * [[8, 8, 8, 8, 8]]
expiries = tff.datetime.dates_from_year_month_day(year, month, day)
valuation_date = [(2020, 6, 24)]
strikes = dim * [[[1500, 1550, 1510],
[1500, 1550, 1510],
[1500, 1550, 1510],
[1500, 1550, 1510],
[1500, 1550, 1510]]]
volatilities = dim * [[[0.1, 0.12, 0.13],
[0.15, 0.2, 0.15],
[0.1, 0.2, 0.1],
[0.1, 0.2, 0.1],
[0.1, 0.1, 0.3]]]
interpolator = None
if not default_interp:
expiry_times = tf.cast(
tff.datetime.convert_to_date_tensor(
valuation_date).days_until(expiries), dtype=dtype) / 365.0
interpolator_obj = tff.math.interpolation.interpolation_2d.Interpolation2D(
expiry_times, tf.convert_to_tensor(strikes, dtype=dtype),
volatilities)
interpolator = interpolator_obj.interpolate
return volatility_surface.VolatilitySurface(
valuation_date, expiries, strikes, volatilities,
interpolator=interpolator, dtype=dtype)
@test_util.run_all_in_graph_and_eager_modes
class VolatilitySurfaceTest(tf.test.TestCase, parameterized.TestCase):
def test_volatility_1d(self):
vol_surface = build_surface(1)
expiry = tff.datetime.dates_from_tuples(
[(2020, 6, 16), (2021, 6, 1), (2025, 1, 1)])
vols = vol_surface.volatility(
strike=[[1525, 1400, 1570]], expiry_dates=expiry.expand_dims(axis=0))
self.assertAllClose(
self.evaluate(vols),
[[0.14046875, 0.11547945, 0.1]], atol=1e-6)
def test_volatility_2d(self):
vol_surface = build_surface(2)
expiry = tff.datetime.dates_from_ordinals(
[[737592, 737942, 739252],
[737592, 737942, 739252]])
vols = vol_surface.volatility(
strike=[[1525, 1400, 1570], [1525, 1505, 1570]], expiry_dates=expiry)
self.assertAllClose(
self.evaluate(vols),
[[0.14046875, 0.11547945, 0.1],
[0.14046875, 0.12300392, 0.1]], atol=1e-6)
def test_volatility_2d_interpolation(self):
"""Test using externally specified interpolator."""
vol_surface = build_surface(2, False)
expiry = tff.datetime.dates_from_ordinals(
[[737592, 737942, 739252],
[737592, 737942, 739252]])
vols = vol_surface.volatility(
strike=[[1525, 1400, 1570], [1525, 1505, 1570]], expiry_dates=expiry)
self.assertAllClose(
self.evaluate(vols),
[[0.14046875, 0.11547945, 0.1],
[0.14046875, 0.12300392, 0.1]], atol=1e-6)
def test_volatility_2d_floats(self):
vol_surface = build_surface(2)
expiry = tff.datetime.dates_from_ordinals(
[[737592, 737942, 739252],
[737592, 737942, 739252]])
valuation_date = tff.datetime.convert_to_date_tensor([(2020, 6, 24)])
expiries = tf.cast(valuation_date.days_until(expiry),
dtype=vol_surface._dtype) / 365.0
vols = vol_surface.volatility(
strike=[[1525, 1400, 1570], [1525, 1505, 1570]],
expiry_times=expiries)
self.assertAllClose(
self.evaluate(vols),
[[0.14046875, 0.11547945, 0.1],
[0.14046875, 0.12300392, 0.1]], atol=1e-6)
if __name__ == '__main__':
tf.test.main()
|
Python/random-password-generator.py | OluSure/Hacktoberfest2021-1 | 215 | 12679308 | print("Hi, are you having trouble making a password?")
print("let me help you!")
number = input("give me a number from 1-3")
password = <PASSWORD>
animal = input("do you prefer dogs or cats?")
password = number + animal
color=input("what is your favorite color?")
password = color+number+animal
book=input("ok, last question. What is your favorite book?(the longer the title, the longer the password!")
password=book+password
|
test/hlt/pytest/python/test_linux_mqtt_al.py | yuanyi-thu/AIOT- | 128 | 12679313 | <gh_stars>100-1000
import pytest
import sys
import time
import json
import requests
from requests.adapters import HTTPAdapter
from requests import RequestException, ReadTimeout
from test_adapter import ts_call_single
from test_adapter import resource_release
from const import mqtt_device_info
from const import mqtt_testid
from com.huawei.iotplatform.constant.Constant import Constant
from com.huawei.iotplatform.utils.DictUtil import DictUtil
from com.huawei.iotplatform.client.invokeapi.Authentication import Authentication
from com.huawei.iotplatform.client.invokeapi.DeviceManagement import DeviceManagement
from com.huawei.iotplatform.client.invokeapiTest.DeviceManagementTest import DeviceManagementTest
from com.huawei.iotplatform.client.dto.AuthOutDTO import AuthOutDTO
from com.huawei.iotplatform.client.dto.RegDirectDeviceOutDTO import RegDirectDeviceOutDTO
def test_mqtt_al_init():
fname = sys._getframe().f_code.co_name
result = ts_call_single(mqtt_testid.TEST_MQTT_AL_INIT, fname, "192.168.1.103", "8883", "test", "test123", "YES", "CAVALID")
print(result)
assert (result)
assert (result.test_id == mqtt_testid.TEST_MQTT_AL_INIT)
assert (result.ret_code == 0)
def test_mqtt_al_install():
fname = sys._getframe().f_code.co_name
result = ts_call_single(mqtt_testid.TEST_MQTT_AL_INSTALL, fname)
assert (result)
assert (result.test_id == mqtt_testid.TEST_MQTT_AL_INSTALL)
assert (result.ret_code == 0)
def test_mqtt_al_uninstall():
fname = sys._getframe().f_code.co_name
result = ts_call_single(mqtt_testid.TEST_MQTT_AL_UNINSTALL, fname)
assert (result)
assert (result.test_id == mqtt_testid.TEST_MQTT_AL_UNINSTALL)
assert (result.ret_code == 0)
def test_mqtt_al_connect():
fname = sys._getframe().f_code.co_name
result = ts_call_single(mqtt_testid.TEST_MQTT_AL_CONNECT, fname)
assert(result)
assert(result.test_id == mqtt_testid.TEST_MQTT_AL_CONNECT)
assert(result.ret_code == 0)
def test_mqtt_al_disconnect():
fname = sys._getframe().f_code.co_name
result = ts_call_single(mqtt_testid.TEST_MQTT_AL_DISCONNECT, fname)
assert(result)
assert(result.test_id == mqtt_testid.TEST_MQTT_AL_DISCONNECT)
assert(result.ret_code == 0)
def test_mqtt_al_sub():
fname = sys._getframe().f_code.co_name
result = ts_call_single(mqtt_testid.TEST_MQTT_AL_SUBSCRIBLE, fname, "test")
assert(result)
assert(result.test_id == mqtt_testid.TEST_MQTT_AL_SUBSCRIBLE)
assert(result.ret_code == 0)
def test_mqtt_al_unsub():
fname = sys._getframe().f_code.co_name
result = ts_call_single(mqtt_testid.TEST_MQTT_AL_UNSUBSCRIBLE, fname, "test")
assert(result)
assert(result.test_id == mqtt_testid.TEST_MQTT_AL_UNSUBSCRIBLE)
assert(result.ret_code == 0)
def test_mqtt_al_pub():
fname = sys._getframe().f_code.co_name
result = ts_call_single(mqtt_testid.TEST_MQTT_AL_PBULISH, fname, "test22", "hello world")
assert (result)
assert (result.test_id == mqtt_testid.TEST_MQTT_AL_PBULISH)
assert (result.ret_code == 0)
def test_mqtt_al_checkstatus():
fname = sys._getframe().f_code.co_name
result = ts_call_single(mqtt_testid.TEST_MQTT_AL_CHECKSTATUS, fname)
assert (result)
assert (result.test_id == mqtt_testid.TEST_MQTT_AL_CHECKSTATUS)
assert (result.ret_code == 0)
if __name__ == '__main__':
print("hello world")
test_mqtt_al_init()
test_mqtt_al_install()
test_mqtt_al_connect()
test_mqtt_al_sub()
test_mqtt_al_unsub()
test_mqtt_al_pub()
test_mqtt_al_checkstatus()
test_mqtt_al_disconnect()
test_mqtt_al_uninstall()
|
lib/pymedphys/_experimental/pedromartinez/utils/running_mean.py | ethanio12345/pymedphys | 207 | 12679314 | from pymedphys._imports import numpy as np
def running_mean(x, N):
out = np.zeros_like(x, dtype=np.float64)
dim_len = x.shape[0]
for i in range(dim_len):
if N % 2 == 0:
a, b = i - (N - 1) // 2, i + (N - 1) // 2 + 2
else:
a, b = i - (N - 1) // 2, i + (N - 1) // 2 + 1
# cap indices to min and max indices
a = max(0, a)
b = min(dim_len, b)
out[i] = np.mean(x[a:b])
return out
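# Worked example (illustrative, not part of the original module): a centered
# window of size N=3 averages each element with its immediate neighbours and
# shrinks at the array edges:
#     running_mean(np.array([1.0, 2.0, 3.0, 4.0, 5.0]), 3)
#     # -> array([1.5, 2.0, 3.0, 4.0, 4.5])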
|
Python/Tests/TestData/SendToInteractive/SelectOutput.py | techkey/PTVS | 404 | 12679333 | print('first')
print('second')
if True:
x = 1
y = 2
print('hi')
|
tests/test_download_and_extract_extracaptiveflask.py | oza6ut0ne/wifipumpkin3 | 911 | 12679353 | import unittest
from wifipumpkin3.core.common.platforms import Linux
import wifipumpkin3.core.utility.constants as C
from wifipumpkin3.core.utility.collection import SettingsINI
import tempfile
import requests
import os
from os import path
from zipfile import ZipFile
class TestDownloadCaptiveFlaskTemplates(unittest.TestCase):
def setUp(self):
url = "https://github.com/mh4x0f/extra-captiveflask/archive/master.zip"
save_path = tempfile.gettempdir() + "/master.zip"
chunk_size = 128
r = requests.get(url, stream=True)
with open(save_path, "wb") as fd:
for chunk in r.iter_content(chunk_size=chunk_size):
fd.write(chunk)
self.assertTrue(path.isfile(save_path))
def test_unzip_file(self):
path_to_zip_file = tempfile.gettempdir() + "/master.zip"
with ZipFile(path_to_zip_file, "r") as zip_ref:
zip_ref.extractall(tempfile.gettempdir())
extracted_filepath = tempfile.gettempdir() + "/extra-captiveflask-master"
self.assertTrue(path.isdir(extracted_filepath))
if __name__ == "__main__":
unittest.main()
|
utils/compute_mean_std.py | tink2123/DBNet.pytorch | 709 | 12679359 | # -*- coding: utf-8 -*-
# @Time : 2019/12/7 14:46
# @Author : zhoujun
import numpy as np
import cv2
import os
import random
from tqdm import tqdm
# calculate means and std
train_txt_path = './train_val_list.txt'
CNum = 10000     # how many images to sample for computing the statistics
img_h, img_w = 640, 640
imgs = np.zeros([img_w, img_h, 3, 1])
means, stdevs = [], []
with open(train_txt_path, 'r') as f:
lines = f.readlines()
    random.shuffle(lines)   # shuffle so that images are picked at random
for i in tqdm(range(CNum)):
img_path = lines[i].split('\t')[0]
img = cv2.imread(img_path)
img = cv2.resize(img, (img_h, img_w))
img = img[:, :, :, np.newaxis]
imgs = np.concatenate((imgs, img), axis=3)
# print(i)
imgs = imgs.astype(np.float32) / 255.
for i in tqdm(range(3)):
    pixels = imgs[:, :, i, :].ravel()  # flatten to a 1-D array
means.append(np.mean(pixels))
stdevs.append(np.std(pixels))
# cv2 loads images as BGR; PIL/Skimage load RGB and would not need this reversal
means.reverse() # BGR --> RGB
stdevs.reverse()
print("normMean = {}".format(means))
print("normStd = {}".format(stdevs))
print('transforms.Normalize(normMean = {}, normStd = {})'.format(means, stdevs)) |
corehq/messaging/scheduling/migrations/0009_randomtimedevent.py | dimagilg/commcare-hq | 471 | 12679361 | # Generated by Django 1.11.7 on 2017-12-12 19:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('scheduling', '0008_timedschedule_event_type'),
]
operations = [
migrations.CreateModel(
name='RandomTimedEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order', models.IntegerField()),
('day', models.IntegerField()),
('time', models.TimeField()),
('window_length', models.PositiveIntegerField()),
('custom_content', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='scheduling.CustomContent')),
('email_content', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='scheduling.EmailContent')),
('ivr_survey_content', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='scheduling.IVRSurveyContent')),
('schedule', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='scheduling.TimedSchedule')),
('sms_content', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='scheduling.SMSContent')),
('sms_survey_content', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='scheduling.SMSSurveyContent')),
],
options={
'abstract': False,
},
),
]
|