Dataset schema (one record per function):
    repo: string (length 7 to 55)
    path: string (length 4 to 223)
    url: string (length 87 to 315)
    code: string (length 75 to 104k)
    code_tokens: list
    docstring: string (length 1 to 46.9k)
    docstring_tokens: list
    language: string (1 class)
    partition: string (3 classes)
    avg_line_len: float64 (7.91 to 980)
bcbio/bcbio-nextgen
bcbio/ngsalign/rtg.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/ngsalign/rtg.py#L45-L54
def calculate_splits(sdf_file, split_size):
    """Retrieve
    """
    counts = _sdfstats(sdf_file)["counts"]
    splits = []
    cur = 0
    for i in range(counts // split_size + (0 if counts % split_size == 0 else 1)):
        splits.append("%s-%s" % (cur, min(counts, cur + split_size)))
        cur += split_size
    return splits
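To make the split arithmetic concrete, here is a self-contained sketch that replays the same loop with hypothetical numbers (no SDF file or _sdfstats call involved):

def _splits_for(counts, split_size):
    # Same loop as calculate_splits, with the count passed in directly.
    splits, cur = [], 0
    for _ in range(counts // split_size + (0 if counts % split_size == 0 else 1)):
        splits.append("%s-%s" % (cur, min(counts, cur + split_size)))
        cur += split_size
    return splits

assert _splits_for(25, 10) == ["0-10", "10-20", "20-25"]  # min() clamps the last split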
[ "def", "calculate_splits", "(", "sdf_file", ",", "split_size", ")", ":", "counts", "=", "_sdfstats", "(", "sdf_file", ")", "[", "\"counts\"", "]", "splits", "=", "[", "]", "cur", "=", "0", "for", "i", "in", "range", "(", "counts", "//", "split_size", "+", "(", "0", "if", "counts", "%", "split_size", "==", "0", "else", "1", ")", ")", ":", "splits", ".", "append", "(", "\"%s-%s\"", "%", "(", "cur", ",", "min", "(", "counts", ",", "cur", "+", "split_size", ")", ")", ")", "cur", "+=", "split_size", "return", "splits" ]
Retrieve
[ "Retrieve" ]
python
train
32.6
google/prettytensor
prettytensor/pretty_tensor_methods.py
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/pretty_tensor_methods.py#L395-L408
def _rapply(input_layer, operation, *op_args, **op_kwargs):
  """Applies the given operation to this after expanding op_args.

  Args:
    input_layer: The input layer for this op.
    operation: An operation that takes a tensor and the supplied args.
    *op_args: Extra arguments for operation.
    **op_kwargs: Keyword arguments for the operation.

  Returns:
    A new layer with operation applied.
  """
  op_args = list(op_args)
  op_args.append(input_layer.tensor)
  return input_layer.with_tensor(operation(*op_args, **op_kwargs))
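A hedged illustration of the argument reordering (tf.maximum stands in for any operation; the layer object is hypothetical):

# Because input_layer.tensor is appended *last*, this call:
#     _rapply(layer, tf.maximum, other_tensor, name='max')
# is equivalent to:
#     layer.with_tensor(tf.maximum(other_tensor, layer.tensor, name='max'))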
[ "def", "_rapply", "(", "input_layer", ",", "operation", ",", "*", "op_args", ",", "*", "*", "op_kwargs", ")", ":", "op_args", "=", "list", "(", "op_args", ")", "op_args", ".", "append", "(", "input_layer", ".", "tensor", ")", "return", "input_layer", ".", "with_tensor", "(", "operation", "(", "*", "op_args", ",", "*", "*", "op_kwargs", ")", ")" ]
Applies the given operation to this after expanding op_args. Args: input_layer: The input layer for this op. operation: An operation that takes a tensor and the supplied args. *op_args: Extra arguments for operation. **op_kwargs: Keyword arguments for the operation. Returns: A new layer with operation applied.
[ "Applies", "the", "given", "operation", "to", "this", "after", "expanding", "op_args", "." ]
python
train
37.428571
alorence/django-modern-rpc
modernrpc/compat.py
https://github.com/alorence/django-modern-rpc/blob/6dc42857d35764b24e2c09334f4b578629a75f9e/modernrpc/compat.py#L7-L35
def _generic_convert_string(v, from_type, to_type, encoding):
    """
    Generic method to convert any argument type (string type, list, set, tuple, dict)
    to an equivalent, with string values converted to given 'to_type' (str or unicode).
    This method must be used with Python 2 interpreter only.

    :param v: The value to convert
    :param from_type: The original string type to convert
    :param to_type: The target string type to convert to
    :param encoding: The character encoding used when decoding or encoding string values
    :return:
    """
    assert six.PY2, "This function should be used with Python 2 only"
    assert from_type != to_type

    if from_type == six.binary_type and isinstance(v, six.binary_type):
        return six.text_type(v, encoding)
    elif from_type == six.text_type and isinstance(v, six.text_type):
        return v.encode(encoding)
    elif isinstance(v, (list, tuple, set)):
        return type(v)([_generic_convert_string(element, from_type, to_type, encoding) for element in v])
    elif isinstance(v, dict):
        return {k: _generic_convert_string(v, from_type, to_type, encoding) for k, v in v.iteritems()}
    return v
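A hedged Python 2 sketch of the recursion (illustrative values only):

# _generic_convert_string({b'key': [b'a', (b'b',)]},
#                         six.binary_type, six.text_type, 'utf-8')
# -> {b'key': [u'a', (u'b',)]}
# String values are decoded recursively through lists/tuples/sets,
# while dict *keys* are left untouched by the dict branch above.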
[ "def", "_generic_convert_string", "(", "v", ",", "from_type", ",", "to_type", ",", "encoding", ")", ":", "assert", "six", ".", "PY2", ",", "\"This function should be used with Python 2 only\"", "assert", "from_type", "!=", "to_type", "if", "from_type", "==", "six", ".", "binary_type", "and", "isinstance", "(", "v", ",", "six", ".", "binary_type", ")", ":", "return", "six", ".", "text_type", "(", "v", ",", "encoding", ")", "elif", "from_type", "==", "six", ".", "text_type", "and", "isinstance", "(", "v", ",", "six", ".", "text_type", ")", ":", "return", "v", ".", "encode", "(", "encoding", ")", "elif", "isinstance", "(", "v", ",", "(", "list", ",", "tuple", ",", "set", ")", ")", ":", "return", "type", "(", "v", ")", "(", "[", "_generic_convert_string", "(", "element", ",", "from_type", ",", "to_type", ",", "encoding", ")", "for", "element", "in", "v", "]", ")", "elif", "isinstance", "(", "v", ",", "dict", ")", ":", "return", "{", "k", ":", "_generic_convert_string", "(", "v", ",", "from_type", ",", "to_type", ",", "encoding", ")", "for", "k", ",", "v", "in", "v", ".", "iteritems", "(", ")", "}", "return", "v" ]
Generic method to convert any argument type (string type, list, set, tuple, dict) to an equivalent, with string values converted to given 'to_type' (str or unicode). This method must be used with Python 2 interpreter only. :param v: The value to convert :param from_type: The original string type to convert :param to_type: The target string type to convert to :param encoding: When :return:
[ "Generic", "method", "to", "convert", "any", "argument", "type", "(", "string", "type", "list", "set", "tuple", "dict", ")", "to", "an", "equivalent", "with", "string", "values", "converted", "to", "given", "to_type", "(", "str", "or", "unicode", ")", ".", "This", "method", "must", "be", "used", "with", "Python", "2", "interpreter", "only", "." ]
python
train
37.793103
dropbox/stone
stone/backends/python_types.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backends/python_types.py#L779-L822
def _generate_enumerated_subtypes_tag_mapping(self, ns, data_type):
    """
    Generates attributes needed for serializing and deserializing structs
    with enumerated subtypes. These assignments are made after all the
    Python class definitions to ensure that all references exist.
    """
    assert data_type.has_enumerated_subtypes()

    # Generate _tag_to_subtype_ attribute: Map from string type tag to
    # the validator of the referenced subtype. Used on deserialization
    # to look up the subtype for a given tag.
    tag_to_subtype_items = []
    for tags, subtype in data_type.get_all_subtypes_with_tags():
        tag_to_subtype_items.append("{}: {}".format(
            tags,
            generate_validator_constructor(ns, subtype)))

    self.generate_multiline_list(
        tag_to_subtype_items,
        before='{}._tag_to_subtype_ = '.format(data_type.name),
        delim=('{', '}'),
        compact=False)

    # Generate _pytype_to_tag_and_subtype_: Map from Python class to a
    # tuple of (type tag, subtype). Used on serialization to lookup how a
    # class should be encoded based on the root struct's enumerated
    # subtypes.
    items = []
    for tag, subtype in data_type.get_all_subtypes_with_tags():
        items.append("{0}: ({1}, {2})".format(
            fmt_class(subtype.name),
            tag,
            generate_validator_constructor(ns, subtype)))
    self.generate_multiline_list(
        items,
        before='{}._pytype_to_tag_and_subtype_ = '.format(data_type.name),
        delim=('{', '}'),
        compact=False)

    # Generate _is_catch_all_ attribute:
    self.emit('{}._is_catch_all_ = {!r}'.format(
        data_type.name, data_type.is_catch_all()))
    self.emit()
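As a hedged sketch (not taken from the repository), the emitted assignments for a hypothetical root struct Shape with subtypes Circle and Square would look roughly like this, where bv.Struct(...) stands in for whatever generate_validator_constructor produces:

# Shape._tag_to_subtype_ = {
#     ('circle',): bv.Struct(Circle),
#     ('square',): bv.Struct(Square),
# }
# Shape._pytype_to_tag_and_subtype_ = {
#     Circle: (('circle',), bv.Struct(Circle)),
#     Square: (('square',), bv.Struct(Square)),
# }
# Shape._is_catch_all_ = False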
[ "def", "_generate_enumerated_subtypes_tag_mapping", "(", "self", ",", "ns", ",", "data_type", ")", ":", "assert", "data_type", ".", "has_enumerated_subtypes", "(", ")", "# Generate _tag_to_subtype_ attribute: Map from string type tag to", "# the validator of the referenced subtype. Used on deserialization", "# to look up the subtype for a given tag.", "tag_to_subtype_items", "=", "[", "]", "for", "tags", ",", "subtype", "in", "data_type", ".", "get_all_subtypes_with_tags", "(", ")", ":", "tag_to_subtype_items", ".", "append", "(", "\"{}: {}\"", ".", "format", "(", "tags", ",", "generate_validator_constructor", "(", "ns", ",", "subtype", ")", ")", ")", "self", ".", "generate_multiline_list", "(", "tag_to_subtype_items", ",", "before", "=", "'{}._tag_to_subtype_ = '", ".", "format", "(", "data_type", ".", "name", ")", ",", "delim", "=", "(", "'{'", ",", "'}'", ")", ",", "compact", "=", "False", ")", "# Generate _pytype_to_tag_and_subtype_: Map from Python class to a", "# tuple of (type tag, subtype). Used on serialization to lookup how a", "# class should be encoded based on the root struct's enumerated", "# subtypes.", "items", "=", "[", "]", "for", "tag", ",", "subtype", "in", "data_type", ".", "get_all_subtypes_with_tags", "(", ")", ":", "items", ".", "append", "(", "\"{0}: ({1}, {2})\"", ".", "format", "(", "fmt_class", "(", "subtype", ".", "name", ")", ",", "tag", ",", "generate_validator_constructor", "(", "ns", ",", "subtype", ")", ")", ")", "self", ".", "generate_multiline_list", "(", "items", ",", "before", "=", "'{}._pytype_to_tag_and_subtype_ = '", ".", "format", "(", "data_type", ".", "name", ")", ",", "delim", "=", "(", "'{'", ",", "'}'", ")", ",", "compact", "=", "False", ")", "# Generate _is_catch_all_ attribute:", "self", ".", "emit", "(", "'{}._is_catch_all_ = {!r}'", ".", "format", "(", "data_type", ".", "name", ",", "data_type", ".", "is_catch_all", "(", ")", ")", ")", "self", ".", "emit", "(", ")" ]
Generates attributes needed for serializing and deserializing structs with enumerated subtypes. These assignments are made after all the Python class definitions to ensure that all references exist.
[ "Generates", "attributes", "needed", "for", "serializing", "and", "deserializing", "structs", "with", "enumerated", "subtypes", ".", "These", "assignments", "are", "made", "after", "all", "the", "Python", "class", "definitions", "to", "ensure", "that", "all", "references", "exist", "." ]
python
train
41.840909
Shinichi-Nakagawa/pitchpx
pitchpx/game/game.py
https://github.com/Shinichi-Nakagawa/pitchpx/blob/5747402a0b3416f5e910b479e100df858f0b6440/pitchpx/game/game.py#L47-L95
def row(self):
    """
    Game Dataset(Row)

    :return: {
        'retro_game_id': Retrosheet Game id
        'game_type': Game Type(S/R/F/D/L/W)
        'game_type_des': Game Type Description
            (Spring Training or Regular Season or Wild-card Game
             or Divisional Series or LCS or World Series)
        'st_fl': Spring Training FLAG(T or F)
        'regseason_fl': Regular Season FLAG(T or F)
        'playoff_fl': Play Off Flag(T or F)
        'local_game_time': Game Time(UTC -5)
        'game_id': Game Id
        'home_team_id': Home Team Id
        'home_team_lg': Home Team league(AL or NL)
        'away_team_id': Away Team Id
        'away_team_lg': Away Team league(AL or NL)
        'home_team_name': Home Team Name
        'away_team_name': Away Team Name
        'home_team_name_full': Home Team Name(Full Name)
        'away_team_name_full': Away Team Name(Full Name)
        'interleague_fl': Inter League Flag(T or F)
        'park_id': Park Id
        'park_name': Park Name
        'park_loc': Park Location
    }
    """
    row = OrderedDict()
    row['retro_game_id'] = self.retro_game_id
    row['game_type'] = self.game_type
    row['game_type_des'] = self.game_type_des
    row['st_fl'] = self.st_fl
    row['regseason_fl'] = self.regseason_fl
    row['playoff_fl'] = self.playoff_fl
    row['local_game_time'] = self.local_game_time
    row['game_id'] = self.game_id
    row['home_team_id'] = self.home_team_id
    row['home_team_lg'] = self.home_team_lg
    row['away_team_id'] = self.away_team_id
    row['away_team_lg'] = self.away_team_lg
    row['home_team_name'] = self.home_team_name
    row['away_team_name'] = self.away_team_name
    row['home_team_name_full'] = self.home_team_name_full
    row['away_team_name_full'] = self.away_team_name_full
    row['interleague_fl'] = self.interleague_fl
    row['park_id'] = self.park_id
    row['park_name'] = self.park_name
    row['park_loc'] = self.park_loc
    return row
[ "def", "row", "(", "self", ")", ":", "row", "=", "OrderedDict", "(", ")", "row", "[", "'retro_game_id'", "]", "=", "self", ".", "retro_game_id", "row", "[", "'game_type'", "]", "=", "self", ".", "game_type", "row", "[", "'game_type_des'", "]", "=", "self", ".", "game_type_des", "row", "[", "'st_fl'", "]", "=", "self", ".", "st_fl", "row", "[", "'regseason_fl'", "]", "=", "self", ".", "regseason_fl", "row", "[", "'playoff_fl'", "]", "=", "self", ".", "playoff_fl", "row", "[", "'local_game_time'", "]", "=", "self", ".", "local_game_time", "row", "[", "'game_id'", "]", "=", "self", ".", "game_id", "row", "[", "'home_team_id'", "]", "=", "self", ".", "home_team_id", "row", "[", "'home_team_lg'", "]", "=", "self", ".", "home_team_lg", "row", "[", "'away_team_id'", "]", "=", "self", ".", "away_team_id", "row", "[", "'away_team_lg'", "]", "=", "self", ".", "away_team_lg", "row", "[", "'home_team_name'", "]", "=", "self", ".", "home_team_name", "row", "[", "'away_team_name'", "]", "=", "self", ".", "away_team_name", "row", "[", "'home_team_name_full'", "]", "=", "self", ".", "home_team_name_full", "row", "[", "'away_team_name_full'", "]", "=", "self", ".", "away_team_name_full", "row", "[", "'interleague_fl'", "]", "=", "self", ".", "interleague_fl", "row", "[", "'park_id'", "]", "=", "self", ".", "park_id", "row", "[", "'park_name'", "]", "=", "self", ".", "park_name", "row", "[", "'park_loc'", "]", "=", "self", ".", "park_loc", "return", "row" ]
Game Dataset(Row) :return: { 'retro_game_id': Retrosheet Game id 'game_type': Game Type(S/R/F/D/L/W) 'game_type_des': Game Type Description (Spring Training or Regular Season or Wild-card Game or Divisional Series or LCS or World Series) 'st_fl': Spring Training FLAG(T or F) 'regseason_fl': Regular Season FLAG(T or F) 'playoff_fl': Play Off Flag(T or F) 'local_game_time': Game Time(UTC -5) 'game_id': Game Id 'home_team_id': Home Team Id 'home_team_lg': Home Team league(AL or NL) 'away_team_id': Away Team Id 'away_team_lg': Away Team league(AL or NL) 'home_team_name': Home Team Name 'away_team_name': Away Team Name 'home_team_name_full': Home Team Name(Full Name) 'away_team_name_full': Away Team Name(Full Name) 'interleague_fl': Inter League Flag(T or F) 'park_id': Park Id 'park_name': Park Name 'park_loc': Park Location }
[ "Game", "Dataset", "(", "Row", ")", ":", "return", ":", "{", "retro_game_id", ":", "Retrosheet", "Game", "id", "game_type", ":", "Game", "Type", "(", "S", "/", "R", "/", "F", "/", "D", "/", "L", "/", "W", ")", "game_type_des", ":", "Game", "Type", "Description", "(", "Spring", "Training", "or", "Regular", "Season", "or", "Wild", "-", "card", "Game", "or", "Divisional", "Series", "or", "LCS", "or", "World", "Series", ")", "st_fl", ":", "Spring", "Training", "FLAG", "(", "T", "or", "F", ")", "regseason_fl", ":", "Regular", "Season", "FLAG", "(", "T", "or", "F", ")", "playoff_fl", ":", "Play", "Off", "Flag", "(", "T", "or", "F", ")", "local_game_time", ":", "Game", "Time", "(", "UTC", "-", "5", ")", "game_id", ":", "Game", "Id", "home_team_id", ":", "Home", "Team", "Id", "home_team_lg", ":", "Home", "Team", "league", "(", "AL", "or", "NL", ")", "away_team_id", ":", "Away", "Team", "Id", "away_team_lg", ":", "Away", "Team", "league", "(", "AL", "or", "NL", ")", "home_team_name", ":", "Home", "Team", "Name", "away_team_name", ":", "Away", "Team", "Name", "home_team_name_full", ":", "Home", "Team", "Name", "(", "Full", "Name", ")", "away_team_name_full", ":", "Away", "Team", "Name", "(", "Full", "Name", ")", "interleague_fl", ":", "Inter", "League", "Flag", "(", "T", "or", "F", ")", "park_id", ":", "Park", "Id", "park_name", ":", "Park", "Name", "park_loc", ":", "Park", "Location", "}" ]
python
train
42.836735
genepattern/genepattern-notebook
genepattern/remote_widgets.py
https://github.com/genepattern/genepattern-notebook/blob/953168bd08c5332412438cbc5bb59993a07a6911/genepattern/remote_widgets.py#L53-L73
def get(self, server):
    """
    Returns a registered GPServer object with a matching GenePattern server url or index
    Returns None if no matching result was found

    :param server:
    :return:
    """
    # Handle indexes
    if isinstance(server, int):
        if server >= len(self.sessions):
            return None
        else:
            return self.sessions[server]

    # Handle server URLs
    index = self._get_index(server)
    if index == -1:
        return None
    else:
        return self.sessions[index]
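A brief usage sketch (the registry object and URL are hypothetical):

# sessions.get(0)   -> first registered GPServer, or None if out of range
# sessions.get('https://cloud.genepattern.org/gp')
#                   -> GPServer registered under that URL, or None if absent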
[ "def", "get", "(", "self", ",", "server", ")", ":", "# Handle indexes", "if", "isinstance", "(", "server", ",", "int", ")", ":", "if", "server", ">=", "len", "(", "self", ".", "sessions", ")", ":", "return", "None", "else", ":", "return", "self", ".", "sessions", "[", "server", "]", "# Handle server URLs", "index", "=", "self", ".", "_get_index", "(", "server", ")", "if", "index", "==", "-", "1", ":", "return", "None", "else", ":", "return", "self", ".", "sessions", "[", "index", "]" ]
Returns a registered GPServer object with a matching GenePattern server url or index Returns None if no matching result was found :param server: :return:
[ "Returns", "a", "registered", "GPServer", "object", "with", "a", "matching", "GenePattern", "server", "url", "or", "index", "Returns", "None", "if", "no", "matching", "result", "was", "found", ":", "param", "server", ":", ":", "return", ":" ]
python
valid
27.714286
happyleavesaoc/gstreamer-player
gsp/__init__.py
https://github.com/happyleavesaoc/gstreamer-player/blob/750edd95d4be4d2f8eee3aa3cb86d4781758f5fb/gsp/__init__.py#L99-L124
def media(self, uri):
    """Play a media file."""
    try:
        local_path, _ = urllib.request.urlretrieve(uri)
        metadata = mutagen.File(local_path, easy=True)
        if metadata.tags:
            self._tags = metadata.tags
        title = self._tags.get(TAG_TITLE, [])
        self._manager[ATTR_TITLE] = title[0] if len(title) else ''
        artist = self._tags.get(TAG_ARTIST, [])
        self._manager[ATTR_ARTIST] = artist[0] if len(artist) else ''
        album = self._tags.get(TAG_ALBUM, [])
        self._manager[ATTR_ALBUM] = album[0] if len(album) else ''
        local_uri = 'file://{}'.format(local_path)
    # urllib.error.HTTPError
    except Exception:  # pylint: disable=broad-except
        local_uri = uri
    self._player.set_state(Gst.State.NULL)
    self._player.set_property(PROP_URI, local_uri)
    self._player.set_state(Gst.State.PLAYING)
    self.state = STATE_PLAYING
    self._manager[ATTR_URI] = uri
    self._manager[ATTR_DURATION] = self._duration()
    self._manager[ATTR_VOLUME] = self._player.get_property(PROP_VOLUME)
    _LOGGER.info('playing %s (as %s)', uri, local_uri)
[ "def", "media", "(", "self", ",", "uri", ")", ":", "try", ":", "local_path", ",", "_", "=", "urllib", ".", "request", ".", "urlretrieve", "(", "uri", ")", "metadata", "=", "mutagen", ".", "File", "(", "local_path", ",", "easy", "=", "True", ")", "if", "metadata", ".", "tags", ":", "self", ".", "_tags", "=", "metadata", ".", "tags", "title", "=", "self", ".", "_tags", ".", "get", "(", "TAG_TITLE", ",", "[", "]", ")", "self", ".", "_manager", "[", "ATTR_TITLE", "]", "=", "title", "[", "0", "]", "if", "len", "(", "title", ")", "else", "''", "artist", "=", "self", ".", "_tags", ".", "get", "(", "TAG_ARTIST", ",", "[", "]", ")", "self", ".", "_manager", "[", "ATTR_ARTIST", "]", "=", "artist", "[", "0", "]", "if", "len", "(", "artist", ")", "else", "''", "album", "=", "self", ".", "_tags", ".", "get", "(", "TAG_ALBUM", ",", "[", "]", ")", "self", ".", "_manager", "[", "ATTR_ALBUM", "]", "=", "album", "[", "0", "]", "if", "len", "(", "album", ")", "else", "''", "local_uri", "=", "'file://{}'", ".", "format", "(", "local_path", ")", "# urllib.error.HTTPError", "except", "Exception", ":", "# pylint: disable=broad-except", "local_uri", "=", "uri", "self", ".", "_player", ".", "set_state", "(", "Gst", ".", "State", ".", "NULL", ")", "self", ".", "_player", ".", "set_property", "(", "PROP_URI", ",", "local_uri", ")", "self", ".", "_player", ".", "set_state", "(", "Gst", ".", "State", ".", "PLAYING", ")", "self", ".", "state", "=", "STATE_PLAYING", "self", ".", "_manager", "[", "ATTR_URI", "]", "=", "uri", "self", ".", "_manager", "[", "ATTR_DURATION", "]", "=", "self", ".", "_duration", "(", ")", "self", ".", "_manager", "[", "ATTR_VOLUME", "]", "=", "self", ".", "_player", ".", "get_property", "(", "PROP_VOLUME", ")", "_LOGGER", ".", "info", "(", "'playing %s (as %s)'", ",", "uri", ",", "local_uri", ")" ]
Play a media file.
[ "Play", "a", "media", "file", "." ]
python
train
45.884615
blockstack/blockstack-core
blockstack/lib/nameset/__init__.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/nameset/__init__.py#L230-L269
def state_transition(history_id_key, table_name, always_set=[], may_spend_tokens=False):
    """
    Decorator for the check() method on state-transition operations.
    Make sure that:
    * there is a __table__ field set, which names the table in which this record is stored.
    * there is a __history_id_key__ field set, which identifies the table record's primary key.

    Any fields named in @always_set will always be set when the transition
    is applied.  That is, fields set here *must* be set on transition, and
    *will* be set in the database, even if they have prior values in the
    affected name record that might constrain which rows to update.
    """
    def wrap(check):
        def wrapped_check(state_engine, nameop, block_id, checked_ops):
            rc = check(state_engine, nameop, block_id, checked_ops)
            if rc:
                # put fields in place
                nameop['__table__'] = table_name
                nameop['__history_id_key__'] = history_id_key
                nameop['__state_transition__'] = True
                nameop['__always_set__'] = always_set

                if not may_spend_tokens:
                    state_transition_put_account_payment_info(nameop, None, None, None)
                elif '__account_payment_info__' not in nameop:
                    raise Exception('Operation spends tokens, but no payment account information is set')

                # sanity check
                invariant_tags = state_transition_invariant_tags()
                for tag in invariant_tags:
                    assert tag in nameop, "BUG: missing invariant tag '%s'" % tag

                # sanity check---all required consensus fields must be present
                for required_field in CONSENSUS_FIELDS_REQUIRED:
                    assert required_field in nameop, 'BUG: missing required consensus field {}'.format(required_field)

            return rc
        return wrapped_check
    return wrap
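A hedged sketch of how the decorator is meant to wrap a check() function; the operation and field names below are invented for illustration:

# @state_transition('name', 'name_records', always_set=['expire_block'])
# def check(state_engine, nameop, block_id, checked_ops):
#     ...  # operation-specific validation
#
# When check() returns True, wrapped_check() has stamped nameop with
# __table__, __history_id_key__, __state_transition__ and __always_set__,
# and has verified the invariant and consensus fields.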
[ "def", "state_transition", "(", "history_id_key", ",", "table_name", ",", "always_set", "=", "[", "]", ",", "may_spend_tokens", "=", "False", ")", ":", "def", "wrap", "(", "check", ")", ":", "def", "wrapped_check", "(", "state_engine", ",", "nameop", ",", "block_id", ",", "checked_ops", ")", ":", "rc", "=", "check", "(", "state_engine", ",", "nameop", ",", "block_id", ",", "checked_ops", ")", "if", "rc", ":", "# put fields in place ", "nameop", "[", "'__table__'", "]", "=", "table_name", "nameop", "[", "'__history_id_key__'", "]", "=", "history_id_key", "nameop", "[", "'__state_transition__'", "]", "=", "True", "nameop", "[", "'__always_set__'", "]", "=", "always_set", "if", "not", "may_spend_tokens", ":", "state_transition_put_account_payment_info", "(", "nameop", ",", "None", ",", "None", ",", "None", ")", "elif", "'__account_payment_info__'", "not", "in", "nameop", ":", "raise", "Exception", "(", "'Operation spends tokens, but no payment account information is set'", ")", "# sanity check", "invariant_tags", "=", "state_transition_invariant_tags", "(", ")", "for", "tag", "in", "invariant_tags", ":", "assert", "tag", "in", "nameop", ",", "\"BUG: missing invariant tag '%s'\"", "%", "tag", "# sanity check---all required consensus fields must be present", "for", "required_field", "in", "CONSENSUS_FIELDS_REQUIRED", ":", "assert", "required_field", "in", "nameop", ",", "'BUG: missing required consensus field {}'", ".", "format", "(", "required_field", ")", "return", "rc", "return", "wrapped_check", "return", "wrap" ]
Decorator for the check() method on state-transition operations. Make sure that: * there is a __table__ field set, which names the table in which this record is stored. * there is a __history_id_key__ field set, which identifies the table record's primary key. Any fields named in @always_set will always be set when the transition is applied. That is, fields set here *must* be set on transition, and *will* be set in the database, even if they have prior values in the affected name record that might constrain which rows to update.
[ "Decorator", "for", "the", "check", "()", "method", "on", "state", "-", "transition", "operations", ".", "Make", "sure", "that", ":", "*", "there", "is", "a", "__table__", "field", "set", "which", "names", "the", "table", "in", "which", "this", "record", "is", "stored", ".", "*", "there", "is", "a", "__history_id_key__", "field", "set", "which", "identifies", "the", "table", "record", "s", "primary", "key", "." ]
python
train
48.275
edx/edx-val
edxval/api.py
https://github.com/edx/edx-val/blob/30df48061e77641edb5272895b7c7f7f25eb7aa7/edxval/api.py#L1106-L1167
def import_transcript_from_fs(edx_video_id, language_code, file_name, provider, resource_fs, static_dir):
    """
    Imports transcript file from file system and creates transcript record in DS.

    Arguments:
        edx_video_id (str): Video id of the video.
        language_code (unicode): Language code of the requested transcript.
        file_name (unicode): File name of the transcript file.
        provider (unicode): Transcript provider.
        resource_fs (OSFS): Import file system.
        static_dir (str): The Directory to retrieve transcript file.
    """
    file_format = None
    transcript_data = get_video_transcript_data(edx_video_id, language_code)

    # First check if transcript record does not exist.
    if not transcript_data:
        # Read file from import file system and attach it to transcript record in DS.
        try:
            with resource_fs.open(combine(static_dir, file_name), 'rb') as f:
                file_content = f.read()
                file_content = file_content.decode('utf-8-sig')
        except ResourceNotFound as exc:
            # Don't raise exception in case transcript file is not found in course OLX.
            logger.warn(
                '[edx-val] "%s" transcript "%s" for video "%s" is not found.',
                language_code,
                file_name,
                edx_video_id
            )
            return
        except UnicodeDecodeError:
            # Don't raise exception in case transcript contains non-utf8 content.
            logger.warn(
                '[edx-val] "%s" transcript "%s" for video "%s" contains a non-utf8 file content.',
                language_code,
                file_name,
                edx_video_id
            )
            return

        # Get file format from transcript content.
        try:
            file_format = get_transcript_format(file_content)
        except Error as ex:
            # Don't raise exception, just don't create transcript record.
            logger.warn(
                '[edx-val] Error while getting transcript format for video=%s -- language_code=%s --file_name=%s',
                edx_video_id,
                language_code,
                file_name
            )
            return

        # Create transcript record.
        create_video_transcript(
            video_id=edx_video_id,
            language_code=language_code,
            file_format=file_format,
            content=ContentFile(file_content),
            provider=provider
        )
[ "def", "import_transcript_from_fs", "(", "edx_video_id", ",", "language_code", ",", "file_name", ",", "provider", ",", "resource_fs", ",", "static_dir", ")", ":", "file_format", "=", "None", "transcript_data", "=", "get_video_transcript_data", "(", "edx_video_id", ",", "language_code", ")", "# First check if transcript record does not exist.", "if", "not", "transcript_data", ":", "# Read file from import file system and attach it to transcript record in DS.", "try", ":", "with", "resource_fs", ".", "open", "(", "combine", "(", "static_dir", ",", "file_name", ")", ",", "'rb'", ")", "as", "f", ":", "file_content", "=", "f", ".", "read", "(", ")", "file_content", "=", "file_content", ".", "decode", "(", "'utf-8-sig'", ")", "except", "ResourceNotFound", "as", "exc", ":", "# Don't raise exception in case transcript file is not found in course OLX.", "logger", ".", "warn", "(", "'[edx-val] \"%s\" transcript \"%s\" for video \"%s\" is not found.'", ",", "language_code", ",", "file_name", ",", "edx_video_id", ")", "return", "except", "UnicodeDecodeError", ":", "# Don't raise exception in case transcript contains non-utf8 content.", "logger", ".", "warn", "(", "'[edx-val] \"%s\" transcript \"%s\" for video \"%s\" contains a non-utf8 file content.'", ",", "language_code", ",", "file_name", ",", "edx_video_id", ")", "return", "# Get file format from transcript content.", "try", ":", "file_format", "=", "get_transcript_format", "(", "file_content", ")", "except", "Error", "as", "ex", ":", "# Don't raise exception, just don't create transcript record.", "logger", ".", "warn", "(", "'[edx-val] Error while getting transcript format for video=%s -- language_code=%s --file_name=%s'", ",", "edx_video_id", ",", "language_code", ",", "file_name", ")", "return", "# Create transcript record.", "create_video_transcript", "(", "video_id", "=", "edx_video_id", ",", "language_code", "=", "language_code", ",", "file_format", "=", "file_format", ",", "content", "=", "ContentFile", "(", "file_content", ")", ",", "provider", "=", "provider", ")" ]
Imports transcript file from file system and creates transcript record in DS. Arguments: edx_video_id (str): Video id of the video. language_code (unicode): Language code of the requested transcript. file_name (unicode): File name of the transcript file. provider (unicode): Transcript provider. resource_fs (OSFS): Import file system. static_dir (str): The Directory to retrieve transcript file.
[ "Imports", "transcript", "file", "from", "file", "system", "and", "creates", "transcript", "record", "in", "DS", "." ]
python
train
39.532258
scott-maddox/openbandparams
src/openbandparams/iii_v_zinc_blende_strained.py
https://github.com/scott-maddox/openbandparams/blob/bc24e59187326bcb8948117434536082c9055777/src/openbandparams/iii_v_zinc_blende_strained.py#L160-L166
def CBO_L(self, **kwargs):
    '''
    Returns the strain-shifted L-valley conduction band offset (CBO),
    assuming the strain affects all conduction band valleys equally.
    '''
    return (self.unstrained.CBO_L(**kwargs) +
            self.CBO_strain_shift(**kwargs))
[ "def", "CBO_L", "(", "self", ",", "*", "*", "kwargs", ")", ":", "return", "(", "self", ".", "unstrained", ".", "CBO_L", "(", "*", "*", "kwargs", ")", "+", "self", ".", "CBO_strain_shift", "(", "*", "*", "kwargs", ")", ")" ]
Returns the strain-shifted L-valley conduction band offset (CBO), assuming the strain affects all conduction band valleys equally.
[ "Returns", "the", "strain", "-", "shifted", "L", "-", "valley", "conduction", "band", "offset", "(", "CBO", ")", "assuming", "the", "strain", "affects", "all", "conduction", "band", "valleys", "equally", "." ]
python
train
41.428571
kfdm/wanikani
wanikani/core.py
https://github.com/kfdm/wanikani/blob/209f9b34b2832c2b9c9b12077f4a4382c047f710/wanikani/core.py#L202-L222
def vocabulary(self, levels=None):
    """
    :param levels: An optional argument of declaring a single or
        comma-delimited list of levels is available, as seen in the example
        as 1. An example of a comma-delimited list of levels is 1,2,5,9.
    :type levels: str or None

    http://www.wanikani.com/api/v1.2#vocabulary-list
    """
    url = WANIKANI_BASE.format(self.api_key, 'vocabulary')
    if levels:
        url += '/{0}'.format(levels)
    data = self.get(url)
    if 'general' in data['requested_information']:
        for item in data['requested_information']['general']:
            yield Vocabulary(item)
    else:
        for item in data['requested_information']:
            yield Vocabulary(item)
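A short usage sketch (the client class name is assumed from this module; the API key is a placeholder):

# client = WaniKani('my-api-key')
# for vocab in client.vocabulary(levels='1,2,5,9'):
#     print(vocab)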
[ "def", "vocabulary", "(", "self", ",", "levels", "=", "None", ")", ":", "url", "=", "WANIKANI_BASE", ".", "format", "(", "self", ".", "api_key", ",", "'vocabulary'", ")", "if", "levels", ":", "url", "+=", "'/{0}'", ".", "format", "(", "levels", ")", "data", "=", "self", ".", "get", "(", "url", ")", "if", "'general'", "in", "data", "[", "'requested_information'", "]", ":", "for", "item", "in", "data", "[", "'requested_information'", "]", "[", "'general'", "]", ":", "yield", "Vocabulary", "(", "item", ")", "else", ":", "for", "item", "in", "data", "[", "'requested_information'", "]", ":", "yield", "Vocabulary", "(", "item", ")" ]
:param levels: An optional argument of declaring a single or comma-delimited list of levels is available, as seen in the example as 1. An example of a comma-delimited list of levels is 1,2,5,9. :type levels: str or None http://www.wanikani.com/api/v1.2#vocabulary-list
[ ":", "param", "levels", ":", "An", "optional", "argument", "of", "declaring", "a", "single", "or", "comma", "-", "delimited", "list", "of", "levels", "is", "available", "as", "seen", "in", "the", "example", "as", "1", ".", "An", "example", "of", "a", "comma", "-", "delimited", "list", "of", "levels", "is", "1", "2", "5", "9", ".", ":", "type", "levels", ":", "str", "or", "None" ]
python
train
37.047619
GoogleCloudPlatform/appengine-gcs-client
python/src/cloudstorage/rest_api.py
https://github.com/GoogleCloudPlatform/appengine-gcs-client/blob/d11078331ecd915d753c886e96a80133599f3f98/python/src/cloudstorage/rest_api.py#L236-L277
def urlfetch_async(self, url, method='GET', headers=None,
                   payload=None, deadline=None, callback=None,
                   follow_redirects=False):
  """Make an async urlfetch() call.

  This is an async wrapper around urlfetch(). It adds an authentication
  header.

  Args:
    url: the url to fetch.
    method: the method in which to fetch.
    headers: the http headers.
    payload: the data to submit in the fetch.
    deadline: the deadline in which to make the call.
    callback: the call to make once completed.
    follow_redirects: whether or not to follow redirects.

  Yields:
    This returns a Future despite not being decorated with @ndb.tasklet!
  """
  headers = {} if headers is None else dict(headers)
  headers.update(self.user_agent)
  try:
    self.token = yield self.get_token_async()
  except app_identity.InternalError, e:
    if os.environ.get('DATACENTER', '').endswith('sandman'):
      self.token = None
      logging.warning('Could not fetch an authentication token in sandman '
                      'based Appengine devel setup; proceeding without one.')
    else:
      raise e
  if self.token:
    headers['authorization'] = 'OAuth ' + self.token

  deadline = deadline or self.retry_params.urlfetch_timeout

  ctx = ndb.get_context()
  resp = yield ctx.urlfetch(
      url, payload=payload, method=method, headers=headers,
      follow_redirects=follow_redirects, deadline=deadline,
      callback=callback)
  raise ndb.Return(resp)
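Since the method yields a Future without being an @ndb.tasklet itself, a caller consumes it from inside a tasklet; this is a hedged sketch with hypothetical names:

# @ndb.tasklet
# def fetch_listing(api, bucket_url):
#   resp = yield api.urlfetch_async(bucket_url, method='GET')
#   raise ndb.Return(resp.content)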
[ "def", "urlfetch_async", "(", "self", ",", "url", ",", "method", "=", "'GET'", ",", "headers", "=", "None", ",", "payload", "=", "None", ",", "deadline", "=", "None", ",", "callback", "=", "None", ",", "follow_redirects", "=", "False", ")", ":", "headers", "=", "{", "}", "if", "headers", "is", "None", "else", "dict", "(", "headers", ")", "headers", ".", "update", "(", "self", ".", "user_agent", ")", "try", ":", "self", ".", "token", "=", "yield", "self", ".", "get_token_async", "(", ")", "except", "app_identity", ".", "InternalError", ",", "e", ":", "if", "os", ".", "environ", ".", "get", "(", "'DATACENTER'", ",", "''", ")", ".", "endswith", "(", "'sandman'", ")", ":", "self", ".", "token", "=", "None", "logging", ".", "warning", "(", "'Could not fetch an authentication token in sandman '", "'based Appengine devel setup; proceeding without one.'", ")", "else", ":", "raise", "e", "if", "self", ".", "token", ":", "headers", "[", "'authorization'", "]", "=", "'OAuth '", "+", "self", ".", "token", "deadline", "=", "deadline", "or", "self", ".", "retry_params", ".", "urlfetch_timeout", "ctx", "=", "ndb", ".", "get_context", "(", ")", "resp", "=", "yield", "ctx", ".", "urlfetch", "(", "url", ",", "payload", "=", "payload", ",", "method", "=", "method", ",", "headers", "=", "headers", ",", "follow_redirects", "=", "follow_redirects", ",", "deadline", "=", "deadline", ",", "callback", "=", "callback", ")", "raise", "ndb", ".", "Return", "(", "resp", ")" ]
Make an async urlfetch() call. This is an async wrapper around urlfetch(). It adds an authentication header. Args: url: the url to fetch. method: the method in which to fetch. headers: the http headers. payload: the data to submit in the fetch. deadline: the deadline in which to make the call. callback: the call to make once completed. follow_redirects: whether or not to follow redirects. Yields: This returns a Future despite not being decorated with @ndb.tasklet!
[ "Make", "an", "async", "urlfetch", "()", "call", "." ]
python
train
36.071429
Clinical-Genomics/scout
scout/commands/base.py
https://github.com/Clinical-Genomics/scout/blob/90a551e2e1653a319e654c2405c2866f93d0ebb9/scout/commands/base.py#L57-L111
def cli(context, mongodb, username, password, authdb, host, port, loglevel, config, demo):
    """scout: manage interactions with a scout instance."""
    # log_format = "%(message)s" if sys.stdout.isatty() else None
    log_format = None
    coloredlogs.install(level=loglevel, fmt=log_format)
    LOG.info("Running scout version %s", __version__)
    LOG.debug("Debug logging enabled.")

    mongo_config = {}
    cli_config = {}
    if config:
        LOG.debug("Use config file %s", config)
        with open(config, 'r') as in_handle:
            cli_config = yaml.load(in_handle)

    mongo_config['mongodb'] = (mongodb or cli_config.get('mongodb') or 'scout')
    if demo:
        mongo_config['mongodb'] = 'scout-demo'

    mongo_config['host'] = (host or cli_config.get('host') or 'localhost')
    mongo_config['port'] = (port or cli_config.get('port') or 27017)
    mongo_config['username'] = username or cli_config.get('username')
    mongo_config['password'] = password or cli_config.get('password')
    mongo_config['authdb'] = authdb or cli_config.get('authdb') or mongo_config['mongodb']
    mongo_config['omim_api_key'] = cli_config.get('omim_api_key')

    if context.invoked_subcommand in ('setup', 'serve'):
        mongo_config['adapter'] = None
    else:
        LOG.info("Setting database name to %s", mongo_config['mongodb'])
        LOG.debug("Setting host to %s", mongo_config['host'])
        LOG.debug("Setting port to %s", mongo_config['port'])

        try:
            client = get_connection(**mongo_config)
        except ConnectionFailure:
            context.abort()

        database = client[mongo_config['mongodb']]

        LOG.info("Setting up a mongo adapter")
        mongo_config['client'] = client
        adapter = MongoAdapter(database)
        mongo_config['adapter'] = adapter

        LOG.info("Check if authenticated...")
        try:
            for ins_obj in adapter.institutes():
                pass
        except OperationFailure as err:
            LOG.info("User not authenticated")
            context.abort()

    context.obj = mongo_config
[ "def", "cli", "(", "context", ",", "mongodb", ",", "username", ",", "password", ",", "authdb", ",", "host", ",", "port", ",", "loglevel", ",", "config", ",", "demo", ")", ":", "# log_format = \"%(message)s\" if sys.stdout.isatty() else None", "log_format", "=", "None", "coloredlogs", ".", "install", "(", "level", "=", "loglevel", ",", "fmt", "=", "log_format", ")", "LOG", ".", "info", "(", "\"Running scout version %s\"", ",", "__version__", ")", "LOG", ".", "debug", "(", "\"Debug logging enabled.\"", ")", "mongo_config", "=", "{", "}", "cli_config", "=", "{", "}", "if", "config", ":", "LOG", ".", "debug", "(", "\"Use config file %s\"", ",", "config", ")", "with", "open", "(", "config", ",", "'r'", ")", "as", "in_handle", ":", "cli_config", "=", "yaml", ".", "load", "(", "in_handle", ")", "mongo_config", "[", "'mongodb'", "]", "=", "(", "mongodb", "or", "cli_config", ".", "get", "(", "'mongodb'", ")", "or", "'scout'", ")", "if", "demo", ":", "mongo_config", "[", "'mongodb'", "]", "=", "'scout-demo'", "mongo_config", "[", "'host'", "]", "=", "(", "host", "or", "cli_config", ".", "get", "(", "'host'", ")", "or", "'localhost'", ")", "mongo_config", "[", "'port'", "]", "=", "(", "port", "or", "cli_config", ".", "get", "(", "'port'", ")", "or", "27017", ")", "mongo_config", "[", "'username'", "]", "=", "username", "or", "cli_config", ".", "get", "(", "'username'", ")", "mongo_config", "[", "'password'", "]", "=", "password", "or", "cli_config", ".", "get", "(", "'password'", ")", "mongo_config", "[", "'authdb'", "]", "=", "authdb", "or", "cli_config", ".", "get", "(", "'authdb'", ")", "or", "mongo_config", "[", "'mongodb'", "]", "mongo_config", "[", "'omim_api_key'", "]", "=", "cli_config", ".", "get", "(", "'omim_api_key'", ")", "if", "context", ".", "invoked_subcommand", "in", "(", "'setup'", ",", "'serve'", ")", ":", "mongo_config", "[", "'adapter'", "]", "=", "None", "else", ":", "LOG", ".", "info", "(", "\"Setting database name to %s\"", ",", "mongo_config", "[", "'mongodb'", "]", ")", "LOG", ".", "debug", "(", "\"Setting host to %s\"", ",", "mongo_config", "[", "'host'", "]", ")", "LOG", ".", "debug", "(", "\"Setting port to %s\"", ",", "mongo_config", "[", "'port'", "]", ")", "try", ":", "client", "=", "get_connection", "(", "*", "*", "mongo_config", ")", "except", "ConnectionFailure", ":", "context", ".", "abort", "(", ")", "database", "=", "client", "[", "mongo_config", "[", "'mongodb'", "]", "]", "LOG", ".", "info", "(", "\"Setting up a mongo adapter\"", ")", "mongo_config", "[", "'client'", "]", "=", "client", "adapter", "=", "MongoAdapter", "(", "database", ")", "mongo_config", "[", "'adapter'", "]", "=", "adapter", "LOG", ".", "info", "(", "\"Check if authenticated...\"", ")", "try", ":", "for", "ins_obj", "in", "adapter", ".", "institutes", "(", ")", ":", "pass", "except", "OperationFailure", "as", "err", ":", "LOG", ".", "info", "(", "\"User not authenticated\"", ")", "context", ".", "abort", "(", ")", "context", ".", "obj", "=", "mongo_config" ]
scout: manage interactions with a scout instance.
[ "scout", ":", "manage", "interactions", "with", "a", "scout", "instance", "." ]
python
test
37.509091
tradenity/python-sdk
tradenity/resources/store_credit_payment.py
https://github.com/tradenity/python-sdk/blob/d13fbe23f4d6ff22554c6d8d2deaf209371adaf1/tradenity/resources/store_credit_payment.py#L424-L444
def delete_store_credit_payment_by_id(cls, store_credit_payment_id, **kwargs):
    """Delete StoreCreditPayment

    Delete an instance of StoreCreditPayment by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.delete_store_credit_payment_by_id(store_credit_payment_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str store_credit_payment_id: ID of storeCreditPayment to delete. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._delete_store_credit_payment_by_id_with_http_info(store_credit_payment_id, **kwargs)
    else:
        (data) = cls._delete_store_credit_payment_by_id_with_http_info(store_credit_payment_id, **kwargs)
        return data
[ "def", "delete_store_credit_payment_by_id", "(", "cls", ",", "store_credit_payment_id", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async'", ")", ":", "return", "cls", ".", "_delete_store_credit_payment_by_id_with_http_info", "(", "store_credit_payment_id", ",", "*", "*", "kwargs", ")", "else", ":", "(", "data", ")", "=", "cls", ".", "_delete_store_credit_payment_by_id_with_http_info", "(", "store_credit_payment_id", ",", "*", "*", "kwargs", ")", "return", "data" ]
Delete StoreCreditPayment Delete an instance of StoreCreditPayment by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.delete_store_credit_payment_by_id(store_credit_payment_id, async=True) >>> result = thread.get() :param async bool :param str store_credit_payment_id: ID of storeCreditPayment to delete. (required) :return: None If the method is called asynchronously, returns the request thread.
[ "Delete", "StoreCreditPayment" ]
python
train
48.095238
pgmpy/pgmpy
pgmpy/models/BayesianModel.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/models/BayesianModel.py#L364-L391
def check_model(self):
    """
    Check the model for various errors. This method checks for the following
    errors.

    * Checks if the sum of the probabilities for each state is equal to 1 (tol=0.01).
    * Checks if the CPDs associated with nodes are consistent with their parents.

    Returns
    -------
    check: boolean
        True if all the checks are passed
    """
    for node in self.nodes():
        cpd = self.get_cpds(node=node)

        if cpd is None:
            raise ValueError('No CPD associated with {}'.format(node))
        elif isinstance(cpd, (TabularCPD, ContinuousFactor)):
            evidence = cpd.get_evidence()
            parents = self.get_parents(node)
            if set(evidence if evidence else []) != set(parents if parents else []):
                raise ValueError("CPD associated with {node} doesn't have "
                                 "proper parents associated with it.".format(node=node))
            if not cpd.is_valid_cpd():
                raise ValueError("Sum or integral of conditional probabilites for node {node}"
                                 " is not equal to 1.".format(node=node))
    return True
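A minimal usage sketch of the checks above; it assumes the BayesianModel and TabularCPD constructors from the same pgmpy version:

from pgmpy.models import BayesianModel
from pgmpy.factors.discrete import TabularCPD

# Two-node network: Rain -> Grass, with CPDs whose columns each sum to 1.
model = BayesianModel([('Rain', 'Grass')])
cpd_rain = TabularCPD('Rain', 2, [[0.8], [0.2]])
cpd_grass = TabularCPD('Grass', 2, [[0.9, 0.1], [0.1, 0.9]],
                       evidence=['Rain'], evidence_card=[2])
model.add_cpds(cpd_rain, cpd_grass)
assert model.check_model()  # every node has a valid CPD with proper parents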
[ "def", "check_model", "(", "self", ")", ":", "for", "node", "in", "self", ".", "nodes", "(", ")", ":", "cpd", "=", "self", ".", "get_cpds", "(", "node", "=", "node", ")", "if", "cpd", "is", "None", ":", "raise", "ValueError", "(", "'No CPD associated with {}'", ".", "format", "(", "node", ")", ")", "elif", "isinstance", "(", "cpd", ",", "(", "TabularCPD", ",", "ContinuousFactor", ")", ")", ":", "evidence", "=", "cpd", ".", "get_evidence", "(", ")", "parents", "=", "self", ".", "get_parents", "(", "node", ")", "if", "set", "(", "evidence", "if", "evidence", "else", "[", "]", ")", "!=", "set", "(", "parents", "if", "parents", "else", "[", "]", ")", ":", "raise", "ValueError", "(", "\"CPD associated with {node} doesn't have \"", "\"proper parents associated with it.\"", ".", "format", "(", "node", "=", "node", ")", ")", "if", "not", "cpd", ".", "is_valid_cpd", "(", ")", ":", "raise", "ValueError", "(", "\"Sum or integral of conditional probabilites for node {node}\"", "\" is not equal to 1.\"", ".", "format", "(", "node", "=", "node", ")", ")", "return", "True" ]
Check the model for various errors. This method checks for the following errors. * Checks if the sum of the probabilities for each state is equal to 1 (tol=0.01). * Checks if the CPDs associated with nodes are consistent with their parents. Returns ------- check: boolean True if all the checks are passed
[ "Check", "the", "model", "for", "various", "errors", ".", "This", "method", "checks", "for", "the", "following", "errors", "." ]
python
train
44.25
Yipit/eventlib
eventlib/api.py
https://github.com/Yipit/eventlib/blob/0cf29e5251a59fcbfc727af5f5157a3bb03832e2/eventlib/api.py#L26-L41
def _register_handler(event, fun, external=False):
    """Register a function to be an event handler"""
    registry = core.HANDLER_REGISTRY
    if external:
        registry = core.EXTERNAL_HANDLER_REGISTRY

    if not isinstance(event, basestring):
        # If not basestring, it is a BaseEvent subclass.
        # This occurs when class methods are registered as handlers
        event = core.parse_event_to_name(event)

    if event in registry:
        registry[event].append(fun)
    else:
        registry[event] = [fun]

    return fun
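A behavior sketch with hypothetical handler names:

# _register_handler('myapp.UserCreated', send_welcome_email)
# _register_handler('myapp.UserCreated', update_stats)
# core.HANDLER_REGISTRY['myapp.UserCreated']
# -> [send_welcome_email, update_stats]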
[ "def", "_register_handler", "(", "event", ",", "fun", ",", "external", "=", "False", ")", ":", "registry", "=", "core", ".", "HANDLER_REGISTRY", "if", "external", ":", "registry", "=", "core", ".", "EXTERNAL_HANDLER_REGISTRY", "if", "not", "isinstance", "(", "event", ",", "basestring", ")", ":", "# If not basestring, it is a BaseEvent subclass.", "# This occurs when class methods are registered as handlers", "event", "=", "core", ".", "parse_event_to_name", "(", "event", ")", "if", "event", "in", "registry", ":", "registry", "[", "event", "]", ".", "append", "(", "fun", ")", "else", ":", "registry", "[", "event", "]", "=", "[", "fun", "]", "return", "fun" ]
Register a function to be an event handler
[ "Register", "a", "function", "to", "be", "an", "event", "handler" ]
python
train
33
kblomqvist/yasha
yasha/cmsis.py
https://github.com/kblomqvist/yasha/blob/aebda08f45458611a59497fb7505f0881b73fbd5/yasha/cmsis.py#L75-L94
def from_element(self, element, defaults={}):
    """Populate object variables from SVD element"""
    if isinstance(defaults, SvdElement):
        defaults = vars(defaults)
    for key in self.props:
        try:
            value = element.find(key).text
        except AttributeError:  # Maybe it's attribute?
            default = defaults[key] if key in defaults else None
            value = element.get(key, default)
        if value is not None:
            if key in self.props_to_integer:
                try:
                    value = int(value)
                except ValueError:  # It has to be hex
                    value = int(value, 16)
            elif key in self.props_to_boolean:
                value = value.lower() in ("yes", "true", "t", "1")
        setattr(self, key, value)
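A hedged sketch of the lookup order, using an invented subclass and SVD fragment:

# Given a subclass with props = ('name', 'size', 'access') and
# props_to_integer = ('size',), parsing the attribute-only element
#     <field name="EN" size="0x1" access="read-write"/>
# takes the AttributeError path (element.find() returns None, since the
# values are attributes rather than child elements), then sets
# name='EN', size=1 (int('0x1') fails, int('0x1', 16) succeeds),
# access='read-write'.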
[ "def", "from_element", "(", "self", ",", "element", ",", "defaults", "=", "{", "}", ")", ":", "if", "isinstance", "(", "defaults", ",", "SvdElement", ")", ":", "defaults", "=", "vars", "(", "defaults", ")", "for", "key", "in", "self", ".", "props", ":", "try", ":", "value", "=", "element", ".", "find", "(", "key", ")", ".", "text", "except", "AttributeError", ":", "# Maybe it's attribute?", "default", "=", "defaults", "[", "key", "]", "if", "key", "in", "defaults", "else", "None", "value", "=", "element", ".", "get", "(", "key", ",", "default", ")", "if", "value", "is", "not", "None", ":", "if", "key", "in", "self", ".", "props_to_integer", ":", "try", ":", "value", "=", "int", "(", "value", ")", "except", "ValueError", ":", "# It has to be hex", "value", "=", "int", "(", "value", ",", "16", ")", "elif", "key", "in", "self", ".", "props_to_boolean", ":", "value", "=", "value", ".", "lower", "(", ")", "in", "(", "\"yes\"", ",", "\"true\"", ",", "\"t\"", ",", "\"1\"", ")", "setattr", "(", "self", ",", "key", ",", "value", ")" ]
Populate object variables from SVD element
[ "Populate", "object", "variables", "from", "SVD", "element" ]
python
train
42.9
saltstack/salt
salt/cloud/clouds/msazure.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/msazure.py#L2549-L2573
def list_storage_containers(kwargs=None, storage_conn=None, call=None):
    '''
    .. versionadded:: 2015.8.0

    List containers associated with the storage account

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_storage_containers my-azure
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_storage_containers function must be called with -f or --function.'
        )

    if not storage_conn:
        storage_conn = get_storage_conn(conn_kwargs=kwargs)

    data = storage_conn.list_containers()
    ret = {}
    for item in data.containers:
        ret[item.name] = object_to_dict(item)
    return ret
[ "def", "list_storage_containers", "(", "kwargs", "=", "None", ",", "storage_conn", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The list_storage_containers function must be called with -f or --function.'", ")", "if", "not", "storage_conn", ":", "storage_conn", "=", "get_storage_conn", "(", "conn_kwargs", "=", "kwargs", ")", "data", "=", "storage_conn", ".", "list_containers", "(", ")", "ret", "=", "{", "}", "for", "item", "in", "data", ".", "containers", ":", "ret", "[", "item", ".", "name", "]", "=", "object_to_dict", "(", "item", ")", "return", "ret" ]
.. versionadded:: 2015.8.0 List containers associated with the storage account CLI Example: .. code-block:: bash salt-cloud -f list_storage_containers my-azure
[ "..", "versionadded", "::", "2015", ".", "8", ".", "0" ]
python
train
25.92
pyBookshelf/bookshelf
bookshelf/api_v2/os_helpers.py
https://github.com/pyBookshelf/bookshelf/blob/a6770678e735de95b194f6e6989223970db5f654/bookshelf/api_v2/os_helpers.py#L246-L257
def restart_service(service, log=False):
    """ restarts a service """
    with settings():
        if log:
            bookshelf2.logging_helpers.log_yellow(
                'stopping service %s' % service)
        sudo('service %s stop' % service)

        if log:
            bookshelf2.logging_helpers.log_yellow(
                'starting service %s' % service)
        sudo('service %s start' % service)
        return True
[ "def", "restart_service", "(", "service", ",", "log", "=", "False", ")", ":", "with", "settings", "(", ")", ":", "if", "log", ":", "bookshelf2", ".", "logging_helpers", ".", "log_yellow", "(", "'stoping service %s'", "%", "service", ")", "sudo", "(", "'service %s stop'", "%", "service", ")", "if", "log", ":", "bookshelf2", ".", "logging_helpers", ".", "log_yellow", "(", "'starting service %s'", "%", "service", ")", "sudo", "(", "'service %s start'", "%", "service", ")", "return", "True" ]
restarts a service
[ "restarts", "a", "service" ]
python
train
34.5
kensho-technologies/graphql-compiler
graphql_compiler/schema_generation/schema_graph.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/schema_generation/schema_graph.py#L297-L311
def get_default_property_values(self, classname):
    """Return a dict with default values for all properties declared on this class."""
    schema_element = self.get_element_by_class_name(classname)
    result = {
        property_name: property_descriptor.default
        for property_name, property_descriptor in six.iteritems(schema_element.properties)
    }

    if schema_element.is_edge:
        # Remove the source/destination properties for edges, if they exist.
        result.pop(EDGE_SOURCE_PROPERTY_NAME, None)
        result.pop(EDGE_DESTINATION_PROPERTY_NAME, None)

    return result
[ "def", "get_default_property_values", "(", "self", ",", "classname", ")", ":", "schema_element", "=", "self", ".", "get_element_by_class_name", "(", "classname", ")", "result", "=", "{", "property_name", ":", "property_descriptor", ".", "default", "for", "property_name", ",", "property_descriptor", "in", "six", ".", "iteritems", "(", "schema_element", ".", "properties", ")", "}", "if", "schema_element", ".", "is_edge", ":", "# Remove the source/destination properties for edges, if they exist.", "result", ".", "pop", "(", "EDGE_SOURCE_PROPERTY_NAME", ",", "None", ")", "result", ".", "pop", "(", "EDGE_DESTINATION_PROPERTY_NAME", ",", "None", ")", "return", "result" ]
Return a dict with default values for all properties declared on this class.
[ "Return", "a", "dict", "with", "default", "values", "for", "all", "properties", "declared", "on", "this", "class", "." ]
python
train
42
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Executor.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Executor.py#L487-L507
def scan(self, scanner, node_list):
    """Scan a list of this Executor's files (targets or sources) for
    implicit dependencies and update all of the targets with them.
    This essentially short-circuits an N*M scan of the sources for
    each individual target, which is a hell of a lot more efficient.
    """
    env = self.get_build_env()
    path = self.get_build_scanner_path
    kw = self.get_kw()

    # TODO(batch): scan by batches)
    deps = []

    for node in node_list:
        node.disambiguate()
        deps.extend(node.get_implicit_deps(env, scanner, path, kw))

    deps.extend(self.get_implicit_deps())

    for tgt in self.get_all_targets():
        tgt.add_to_implicit(deps)
[ "def", "scan", "(", "self", ",", "scanner", ",", "node_list", ")", ":", "env", "=", "self", ".", "get_build_env", "(", ")", "path", "=", "self", ".", "get_build_scanner_path", "kw", "=", "self", ".", "get_kw", "(", ")", "# TODO(batch): scan by batches)", "deps", "=", "[", "]", "for", "node", "in", "node_list", ":", "node", ".", "disambiguate", "(", ")", "deps", ".", "extend", "(", "node", ".", "get_implicit_deps", "(", "env", ",", "scanner", ",", "path", ",", "kw", ")", ")", "deps", ".", "extend", "(", "self", ".", "get_implicit_deps", "(", ")", ")", "for", "tgt", "in", "self", ".", "get_all_targets", "(", ")", ":", "tgt", ".", "add_to_implicit", "(", "deps", ")" ]
Scan a list of this Executor's files (targets or sources) for implicit dependencies and update all of the targets with them. This essentially short-circuits an N*M scan of the sources for each individual target, which is a hell of a lot more efficient.
[ "Scan", "a", "list", "of", "this", "Executor", "s", "files", "(", "targets", "or", "sources", ")", "for", "implicit", "dependencies", "and", "update", "all", "of", "the", "targets", "with", "them", ".", "This", "essentially", "short", "-", "circuits", "an", "N", "*", "M", "scan", "of", "the", "sources", "for", "each", "individual", "target", "which", "is", "a", "hell", "of", "a", "lot", "more", "efficient", "." ]
python
train
35.47619
dnanexus/dx-toolkit
src/python/dxpy/api.py
https://github.com/dnanexus/dx-toolkit/blob/74befb53ad90fcf902d8983ae6d74580f402d619/src/python/dxpy/api.py#L587-L593
def file_rename(object_id, input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /file-xxxx/rename API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Name#API-method%3A-%2Fclass-xxxx%2Frename
    """
    return DXHTTPRequest('/%s/rename' % object_id, input_params, always_retry=always_retry, **kwargs)
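A hedged call sketch; the IDs are placeholders and the input fields follow the linked API spec:

# file_rename('file-xxxx',
#             input_params={'project': 'project-xxxx', 'name': 'renamed.txt'})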
[ "def", "file_rename", "(", "object_id", ",", "input_params", "=", "{", "}", ",", "always_retry", "=", "True", ",", "*", "*", "kwargs", ")", ":", "return", "DXHTTPRequest", "(", "'/%s/rename'", "%", "object_id", ",", "input_params", ",", "always_retry", "=", "always_retry", ",", "*", "*", "kwargs", ")" ]
Invokes the /file-xxxx/rename API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Name#API-method%3A-%2Fclass-xxxx%2Frename
[ "Invokes", "the", "/", "file", "-", "xxxx", "/", "rename", "API", "method", "." ]
python
train
49.857143
fhcrc/seqmagick
seqmagick/subcommands/quality_filter.py
https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/subcommands/quality_filter.py#L374-L396
def filter_records(self, records):
    """
    Apply the filter to records
    """
    for record in records:
        try:
            filtered = self.filter_record(record)
            assert (filtered)
            # Quick tracking whether the sequence was modified
            if filtered.seq == record.seq:
                self.passed_unchanged += 1
            else:
                self.passed_changed += 1

            yield filtered
        except FailedFilter as e:
            self.failed += 1
            v = e.value
            if self.listener:
                self.listener(
                    'failed_filter', record, filter_name=self.name, value=v)
[ "def", "filter_records", "(", "self", ",", "records", ")", ":", "for", "record", "in", "records", ":", "try", ":", "filtered", "=", "self", ".", "filter_record", "(", "record", ")", "assert", "(", "filtered", ")", "# Quick tracking whether the sequence was modified", "if", "filtered", ".", "seq", "==", "record", ".", "seq", ":", "self", ".", "passed_unchanged", "+=", "1", "else", ":", "self", ".", "passed_changed", "+=", "1", "yield", "filtered", "except", "FailedFilter", "as", "e", ":", "self", ".", "failed", "+=", "1", "v", "=", "e", ".", "value", "if", "self", ".", "listener", ":", "self", ".", "listener", "(", "'failed_filter'", ",", "record", ",", "filter_name", "=", "self", ".", "name", ",", "value", "=", "v", ")" ]
Apply the filter to records
[ "Apply", "the", "filter", "to", "records" ]
python
train
34.26087
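Because filter_records is a lazy generator, filters of this shape compose by re-binding the record stream. A self-contained sketch of that chaining pattern (chain_filters is hypothetical, not part of seqmagick):

def chain_filters(records, filters):
    # Each stage wraps the previous one; nothing is consumed until the
    # final stream is iterated.
    for f in filters:
        records = f.filter_records(records)
    return records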
dpkp/kafka-python
kafka/metrics/metrics.py
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/metrics/metrics.py#L256-L261
def close(self):
    """Close this metrics repository."""
    for reporter in self._reporters:
        reporter.close()
    self._metrics.clear()
[ "def", "close", "(", "self", ")", ":", "for", "reporter", "in", "self", ".", "_reporters", ":", "reporter", ".", "close", "(", ")", "self", ".", "_metrics", ".", "clear", "(", ")" ]
Close this metrics repository.
[ "Close", "this", "metrics", "repository", "." ]
python
train
26.166667
morngrar/ui
ui/ui.py
https://github.com/morngrar/ui/blob/93e160b55ff7d486a53dba7a8c0f2d46e6f95ed9/ui/ui.py#L55-L71
def yn_prompt(text):
    '''
    Presents the given text prompt, accepts only "y" or "n" as answers,
    and returns True or False. Repeats on bad input.
    '''
    text = "\n" + text + "\n('y' or 'n'): "
    while True:
        answer = input(text).strip()
        if answer != 'y' and answer != 'n':
            continue
        elif answer == 'y':
            return True
        elif answer == 'n':
            return False
[ "def", "yn_prompt", "(", "text", ")", ":", "text", "=", "\"\\n\"", "+", "text", "+", "\"\\n('y' or 'n'): \"", "while", "True", ":", "answer", "=", "input", "(", "text", ")", ".", "strip", "(", ")", "if", "answer", "!=", "'y'", "and", "answer", "!=", "'n'", ":", "continue", "elif", "answer", "==", "'y'", ":", "return", "True", "elif", "answer", "==", "'n'", ":", "return", "False" ]
Presents the given text prompt, accepts only "y" or "n" as answers, and returns True or False. Repeats on bad input.
[ "Presents", "the", "given", "text", "prompt", "accepts", "only", "y", "or", "n", "as", "answers", "and", "returns", "True", "or", "False", ".", "Repeats", "on", "bad", "input", "." ]
python
train
25.647059
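A short usage sketch for the prompt helper above:

if yn_prompt("Overwrite the existing file?"):
    print("overwriting")
else:
    print("skipped")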
jxtech/wechatpy
wechatpy/pay/api/transfer.py
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/pay/api/transfer.py#L67-L95
def transfer_bankcard(self, true_name, bank_card_no, bank_code, amount, desc=None, out_trade_no=None):
    """
    Corporate payment to bank card API

    :param true_name: name of the account holder
    :param bank_card_no: bank card number
    :param bank_code: bank code
    :param amount: payment amount, in fen (cents)
    :param desc: payment description
    :param out_trade_no: optional, merchant order number; must be unique, auto-generated by default
    :return: the returned result information
    """
    if not out_trade_no:
        now = datetime.now()
        out_trade_no = '{0}{1}{2}'.format(
            self.mch_id, now.strftime('%Y%m%d%H%M%S'), random.randint(1000, 10000)
        )
    data = {
        'mch_id': self.mch_id,
        'partner_trade_no': out_trade_no,
        'amount': amount,
        'desc': desc,
        'enc_bank_no': self._rsa_encrypt(bank_card_no),
        'enc_true_name': self._rsa_encrypt(true_name),
        'bank_code': bank_code,
    }
    return self._post('mmpaysptrans/pay_bank', data=data)
[ "def", "transfer_bankcard", "(", "self", ",", "true_name", ",", "bank_card_no", ",", "bank_code", ",", "amount", ",", "desc", "=", "None", ",", "out_trade_no", "=", "None", ")", ":", "if", "not", "out_trade_no", ":", "now", "=", "datetime", ".", "now", "(", ")", "out_trade_no", "=", "'{0}{1}{2}'", ".", "format", "(", "self", ".", "mch_id", ",", "now", ".", "strftime", "(", "'%Y%m%d%H%M%S'", ")", ",", "random", ".", "randint", "(", "1000", ",", "10000", ")", ")", "data", "=", "{", "'mch_id'", ":", "self", ".", "mch_id", ",", "'partner_trade_no'", ":", "out_trade_no", ",", "'amount'", ":", "amount", ",", "'desc'", ":", "desc", ",", "'enc_bank_no'", ":", "self", ".", "_rsa_encrypt", "(", "bank_card_no", ")", ",", "'enc_true_name'", ":", "self", ".", "_rsa_encrypt", "(", "true_name", ")", ",", "'bank_code'", ":", "bank_code", ",", "}", "return", "self", ".", "_post", "(", "'mmpaysptrans/pay_bank'", ",", "data", "=", "data", ")" ]
Corporate payment to bank card API

:param true_name: name of the account holder
:param bank_card_no: bank card number
:param bank_code: bank code
:param amount: payment amount, in fen (cents)
:param desc: payment description
:param out_trade_no: optional, merchant order number; must be unique, auto-generated by default
:return: the returned result information
[ "Corporate", "payment", "to", "bank", "card", "API" ]
python
train
33.551724
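A hedged usage sketch; it assumes the method is exposed on a configured WeChatPay client as pay.transfer, and every credential and value below is a placeholder, so treat this as illustrative only:

from wechatpy.pay import WeChatPay

pay = WeChatPay(appid='wx0000000000000000', api_key='x' * 32, mch_id='1230000109')
# amount is in fen (cents); the bank_code value is a made-up placeholder
result = pay.transfer.transfer_bankcard(true_name='ZHANG SAN',
                                        bank_card_no='6222000000000000',
                                        bank_code='1002',
                                        amount=100,
                                        desc='refund')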
wbond/asn1crypto
asn1crypto/x509.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/x509.py#L2647-L2657
def delta_crl_distribution_points(self):
    """
    Returns delta CRL URLs - does not include complete CRLs

    :return:
        A list of zero or more DistributionPoint objects
    """
    if self._delta_crl_distribution_points is None:
        self._delta_crl_distribution_points = self._get_http_crl_distribution_points(self.freshest_crl_value)
    return self._delta_crl_distribution_points
[ "def", "delta_crl_distribution_points", "(", "self", ")", ":", "if", "self", ".", "_delta_crl_distribution_points", "is", "None", ":", "self", ".", "_delta_crl_distribution_points", "=", "self", ".", "_get_http_crl_distribution_points", "(", "self", ".", "freshest_crl_value", ")", "return", "self", ".", "_delta_crl_distribution_points" ]
Returns delta CRL URLs - does not include complete CRLs

:return:
    A list of zero or more DistributionPoint objects
[ "Returns", "delta", "CRL", "URLs", "-", "does", "not", "include", "complete", "CRLs" ]
python
train
38.090909
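A usage sketch for the property above; the file path is a placeholder, and the point.url convenience attribute is an assumption based on how asn1crypto's sibling crl_distribution_points accessor is typically consumed:

from asn1crypto import x509

with open('cert.der', 'rb') as f:
    cert = x509.Certificate.load(f.read())
for point in cert.delta_crl_distribution_points:
    print(point.url)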
codelv/enaml-native-maps
src/googlemaps/android/android_map_view.py
https://github.com/codelv/enaml-native-maps/blob/5b6dda745cede05755dd40d29775cc0544226c29/src/googlemaps/android/android_map_view.py#L629-L635
def on_map_clicked(self, pos):
    """ Called when the map is clicked """
    d = self.declaration
    d.clicked({
        'click': 'short',
        'position': tuple(pos)
    })
[ "def", "on_map_clicked", "(", "self", ",", "pos", ")", ":", "d", "=", "self", ".", "declaration", "d", ".", "clicked", "(", "{", "'click'", ":", "'short'", ",", "'position'", ":", "tuple", "(", "pos", ")", "}", ")" ]
Called when the map is clicked
[ "Called", "when", "the", "map", "is", "clicked" ]
python
valid
28
lowandrew/OLCTools
metagenomefilter/filtermetagenome.py
https://github.com/lowandrew/OLCTools/blob/88aa90ac85f84d0bbeb03e43c29b0a9d36e4ce2a/metagenomefilter/filtermetagenome.py#L125-L140
def fastqfilter(self):
    """Filter the reads into separate files based on taxonomic assignment"""
    printtime('Creating filtered .fastq files', self.start)
    # Create and start threads
    for i in range(self.cpus):
        # Send the threads to the appropriate destination function
        threads = Thread(target=self.filterfastq, args=())
        # Daemonize the threads so they exit when the main thread finishes
        threads.setDaemon(True)
        # Start the threading
        threads.start()
    for sample in self.runmetadata.samples:
        self.filterqueue.put(sample)
    self.filterqueue.join()
    # Print the metadata to file
    metadataprinter.MetadataPrinter(self)
[ "def", "fastqfilter", "(", "self", ")", ":", "printtime", "(", "'Creating filtered .fastq files'", ",", "self", ".", "start", ")", "# Create and start threads", "for", "i", "in", "range", "(", "self", ".", "cpus", ")", ":", "# Send the threads to the appropriate destination function", "threads", "=", "Thread", "(", "target", "=", "self", ".", "filterfastq", ",", "args", "=", "(", ")", ")", "# Daemonize the threads so they exit when the main thread finishes", "threads", ".", "setDaemon", "(", "True", ")", "# Start the threading", "threads", ".", "start", "(", ")", "for", "sample", "in", "self", ".", "runmetadata", ".", "samples", ":", "self", ".", "filterqueue", ".", "put", "(", "sample", ")", "self", ".", "filterqueue", ".", "join", "(", ")", "# Print the metadata to file", "metadataprinter", ".", "MetadataPrinter", "(", "self", ")" ]
Filter the reads into separate files based on taxonomic assignment
[ "Filter", "the", "reads", "into", "separate", "files", "based", "on", "taxonomic", "assignment" ]
python
train
45.9375
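A self-contained sketch of the daemon-thread/queue pattern used above; queue.join() only returns because each worker calls task_done() (in the class above that call would live inside filterfastq):

from queue import Queue
from threading import Thread

def worker(q):
    while True:
        item = q.get()
        # ... process item ...
        q.task_done()   # required for q.join() to unblock

q = Queue()
for _ in range(4):
    t = Thread(target=worker, args=(q,))
    t.daemon = True     # equivalent to setDaemon(True) above
    t.start()
for item in range(10):
    q.put(item)
q.join()                # blocks until every item has been task_done()'d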
agoragames/haigha
haigha/transports/socket_transport.py
https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/transports/socket_transport.py#L130-L156
def write(self, data):
    '''
    Write some bytes to the transport.
    '''
    if not hasattr(self, '_sock'):
        return None

    try:
        self._sock.sendall(data)

        if self.connection.debug > 1:
            self.connection.logger.debug(
                'sent %d bytes to %s' % (len(data), self._host))

        return

    except EnvironmentError:
        # sockets raise this type of error, and since if sendall() fails
        # we're left in an indeterminate state, assume that any error we
        # catch means that the connection is dead. Note that this
        # assumption requires this to be a blocking socket; if we ever
        # support non-blocking in this class then this whole method has
        # to change a lot.
        self.connection.logger.exception(
            'error writing to %s' % (self._host))
        self.connection.transport_closed(
            msg='error writing to %s' % (self._host))
[ "def", "write", "(", "self", ",", "data", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_sock'", ")", ":", "return", "None", "try", ":", "self", ".", "_sock", ".", "sendall", "(", "data", ")", "if", "self", ".", "connection", ".", "debug", ">", "1", ":", "self", ".", "connection", ".", "logger", ".", "debug", "(", "'sent %d bytes to %s'", "%", "(", "len", "(", "data", ")", ",", "self", ".", "_host", ")", ")", "return", "except", "EnvironmentError", ":", "# sockets raise this type of error, and since if sendall() fails", "# we're left in an indeterminate state, assume that any error we", "# catch means that the connection is dead. Note that this", "# assumption requires this to be a blocking socket; if we ever", "# support non-blocking in this class then this whole method has", "# to change a lot.", "self", ".", "connection", ".", "logger", ".", "exception", "(", "'error writing to %s'", "%", "(", "self", ".", "_host", ")", ")", "self", ".", "connection", ".", "transport_closed", "(", "msg", "=", "'error writing to %s'", "%", "(", "self", ".", "_host", ")", ")" ]
Write some bytes to the transport.
[ "Write", "some", "bytes", "to", "the", "transport", "." ]
python
train
36.703704
tensorflow/tensor2tensor
tensor2tensor/layers/discretization.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/discretization.py#L360-L381
def vae(x, z_size, name=None):
  """Simple variational autoencoder without discretization.

  Args:
    x: Input to the discretization bottleneck.
    z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
    name: Name for the bottleneck scope.

  Returns:
    Latent codes, KL loss, mu and log_sigma.
  """
  with tf.variable_scope(name, default_name="vae"):
    mu = tf.layers.dense(x, z_size, name="mu")
    log_sigma = tf.layers.dense(x, z_size, name="log_sigma")
    shape = common_layers.shape_list(x)
    epsilon = tf.random_normal([shape[0], shape[1], 1, z_size])
    z = mu + tf.exp(log_sigma / 2) * epsilon
    kl = 0.5 * tf.reduce_mean(
        tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1)
    free_bits = z_size // 4
    kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0))
  return z, kl_loss, mu, log_sigma
[ "def", "vae", "(", "x", ",", "z_size", ",", "name", "=", "None", ")", ":", "with", "tf", ".", "variable_scope", "(", "name", ",", "default_name", "=", "\"vae\"", ")", ":", "mu", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "z_size", ",", "name", "=", "\"mu\"", ")", "log_sigma", "=", "tf", ".", "layers", ".", "dense", "(", "x", ",", "z_size", ",", "name", "=", "\"log_sigma\"", ")", "shape", "=", "common_layers", ".", "shape_list", "(", "x", ")", "epsilon", "=", "tf", ".", "random_normal", "(", "[", "shape", "[", "0", "]", ",", "shape", "[", "1", "]", ",", "1", ",", "z_size", "]", ")", "z", "=", "mu", "+", "tf", ".", "exp", "(", "log_sigma", "/", "2", ")", "*", "epsilon", "kl", "=", "0.5", "*", "tf", ".", "reduce_mean", "(", "tf", ".", "expm1", "(", "log_sigma", ")", "+", "tf", ".", "square", "(", "mu", ")", "-", "log_sigma", ",", "axis", "=", "-", "1", ")", "free_bits", "=", "z_size", "//", "4", "kl_loss", "=", "tf", ".", "reduce_mean", "(", "tf", ".", "maximum", "(", "kl", "-", "free_bits", ",", "0.0", ")", ")", "return", "z", ",", "kl_loss", ",", "mu", ",", "log_sigma" ]
Simple variational autoencoder without discretization.

Args:
  x: Input to the discretization bottleneck.
  z_size: Number of bits, where discrete codes range from 1 to 2**z_size.
  name: Name for the bottleneck scope.

Returns:
  Latent codes, KL loss, mu and log_sigma.
[ "Simple", "variational", "autoencoder", "without", "discretization", "." ]
python
train
38.590909
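A self-contained numpy restatement of the sampling and KL terms above (shapes and values are illustrative only):

import numpy as np

mu = np.zeros((2, 4))
log_sigma = np.zeros((2, 4))
epsilon = np.random.randn(2, 4)
z = mu + np.exp(log_sigma / 2) * epsilon   # reparameterization trick
# expm1(v) = exp(v) - 1, so this matches the usual 0.5*(exp(v) + mu^2 - 1 - v)
kl = 0.5 * np.mean(np.expm1(log_sigma) + mu**2 - log_sigma, axis=-1)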
numenta/nupic
src/nupic/regions/tm_region.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/regions/tm_region.py#L407-L427
def initialize(self):
    """
    Overrides :meth:`~nupic.bindings.regions.PyRegion.initialize`.
    """
    # Allocate appropriate temporal memory object
    # Retrieve the necessary extra arguments that were handled automatically
    autoArgs = dict((name, getattr(self, name)) for name in self._temporalArgNames)

    if self._tfdr is None:
        tpClass = _getTPClass(self.temporalImp)

        if self.temporalImp in ['py', 'cpp', 'r', 'tm_py', 'tm_cpp', 'monitored_tm_py',]:
            self._tfdr = tpClass(
                numberOfCols=self.columnCount,
                cellsPerColumn=self.cellsPerColumn,
                **autoArgs)
        else:
            raise RuntimeError("Invalid temporalImp")
[ "def", "initialize", "(", "self", ")", ":", "# Allocate appropriate temporal memory object", "# Retrieve the necessary extra arguments that were handled automatically", "autoArgs", "=", "dict", "(", "(", "name", ",", "getattr", "(", "self", ",", "name", ")", ")", "for", "name", "in", "self", ".", "_temporalArgNames", ")", "if", "self", ".", "_tfdr", "is", "None", ":", "tpClass", "=", "_getTPClass", "(", "self", ".", "temporalImp", ")", "if", "self", ".", "temporalImp", "in", "[", "'py'", ",", "'cpp'", ",", "'r'", ",", "'tm_py'", ",", "'tm_cpp'", ",", "'monitored_tm_py'", ",", "]", ":", "self", ".", "_tfdr", "=", "tpClass", "(", "numberOfCols", "=", "self", ".", "columnCount", ",", "cellsPerColumn", "=", "self", ".", "cellsPerColumn", ",", "*", "*", "autoArgs", ")", "else", ":", "raise", "RuntimeError", "(", "\"Invalid temporalImp\"", ")" ]
Overrides :meth:`~nupic.bindings.regions.PyRegion.initialize`.
[ "Overrides", ":", "meth", ":", "~nupic", ".", "bindings", ".", "regions", ".", "PyRegion", ".", "initialize", "." ]
python
valid
35.619048
ClimateImpactLab/DataFS
datafs/managers/manager.py
https://github.com/ClimateImpactLab/DataFS/blob/0d32c2b4e18d300a11b748a552f6adbc3dd8f59d/datafs/managers/manager.py#L223-L264
def create_archive(
        self,
        archive_name,
        authority_name,
        archive_path,
        versioned,
        raise_on_err=True,
        metadata=None,
        user_config=None,
        tags=None,
        helper=False):
    '''
    Create a new data archive

    Returns
    -------
    archive : object
        new :py:class:`~datafs.core.data_archive.DataArchive` object
    '''
    archive_metadata = self._create_archive_metadata(
        archive_name=archive_name,
        authority_name=authority_name,
        archive_path=archive_path,
        versioned=versioned,
        raise_on_err=raise_on_err,
        metadata=metadata,
        user_config=user_config,
        tags=tags,
        helper=helper)

    if raise_on_err:
        self._create_archive(
            archive_name,
            archive_metadata)
    else:
        self._create_if_not_exists(
            archive_name,
            archive_metadata)

    return self.get_archive(archive_name)
[ "def", "create_archive", "(", "self", ",", "archive_name", ",", "authority_name", ",", "archive_path", ",", "versioned", ",", "raise_on_err", "=", "True", ",", "metadata", "=", "None", ",", "user_config", "=", "None", ",", "tags", "=", "None", ",", "helper", "=", "False", ")", ":", "archive_metadata", "=", "self", ".", "_create_archive_metadata", "(", "archive_name", "=", "archive_name", ",", "authority_name", "=", "authority_name", ",", "archive_path", "=", "archive_path", ",", "versioned", "=", "versioned", ",", "raise_on_err", "=", "raise_on_err", ",", "metadata", "=", "metadata", ",", "user_config", "=", "user_config", ",", "tags", "=", "tags", ",", "helper", "=", "helper", ")", "if", "raise_on_err", ":", "self", ".", "_create_archive", "(", "archive_name", ",", "archive_metadata", ")", "else", ":", "self", ".", "_create_if_not_exists", "(", "archive_name", ",", "archive_metadata", ")", "return", "self", ".", "get_archive", "(", "archive_name", ")" ]
Create a new data archive

Returns
-------
archive : object
    new :py:class:`~datafs.core.data_archive.DataArchive` object
[ "Create", "a", "new", "data", "archive" ]
python
train
25.857143
fprimex/zdesk
zdesk/zdesk_api.py
https://github.com/fprimex/zdesk/blob/851611c13b4d530e9df31390b3ec709baf0a0188/zdesk/zdesk_api.py#L142-L146
def agents_email_show(self, email_id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/chat/agents#get-agent-by-email-id"
    api_path = "/api/v2/agents/email/{email_id}"
    api_path = api_path.format(email_id=email_id)
    return self.call(api_path, **kwargs)
[ "def", "agents_email_show", "(", "self", ",", "email_id", ",", "*", "*", "kwargs", ")", ":", "api_path", "=", "\"/api/v2/agents/email/{email_id}\"", "api_path", "=", "api_path", ".", "format", "(", "email_id", "=", "email_id", ")", "return", "self", ".", "call", "(", "api_path", ",", "*", "*", "kwargs", ")" ]
https://developer.zendesk.com/rest_api/docs/chat/agents#get-agent-by-email-id
[ "https", ":", "//", "developer", ".", "zendesk", ".", "com", "/", "rest_api", "/", "docs", "/", "chat", "/", "agents#get", "-", "agent", "-", "by", "-", "email", "-", "id" ]
python
train
56.8
mozilla/treeherder
treeherder/model/models.py
https://github.com/mozilla/treeherder/blob/cc47bdec872e5c668d0f01df89517390a164cda3/treeherder/model/models.py#L1185-L1210
def update_matches(self, other):
    """
    Update this instance's Matches to point to the given other's Matches.

    Find Matches with the same TextLogError as our Matches, updating their
    score if less than ours and mark our matches for deletion.

    If there are no other matches, update ours to point to the other
    ClassifiedFailure.
    """
    for match in self.error_matches.all():
        other_matches = TextLogErrorMatch.objects.filter(
            classified_failure=other,
            text_log_error=match.text_log_error,
        )

        if not other_matches:
            match.classified_failure = other
            match.save(update_fields=['classified_failure'])
            continue

        # if any of our matches have higher scores than other's matches,
        # overwrite with our score.
        other_matches.filter(score__lt=match.score).update(score=match.score)

        yield match.id
[ "def", "update_matches", "(", "self", ",", "other", ")", ":", "for", "match", "in", "self", ".", "error_matches", ".", "all", "(", ")", ":", "other_matches", "=", "TextLogErrorMatch", ".", "objects", ".", "filter", "(", "classified_failure", "=", "other", ",", "text_log_error", "=", "match", ".", "text_log_error", ",", ")", "if", "not", "other_matches", ":", "match", ".", "classified_failure", "=", "other", "match", ".", "save", "(", "update_fields", "=", "[", "'classified_failure'", "]", ")", "continue", "# if any of our matches have higher scores than other's matches,", "# overwrite with our score.", "other_matches", ".", "filter", "(", "score__lt", "=", "match", ".", "score", ")", ".", "update", "(", "score", "=", "match", ".", "score", ")", "yield", "match", ".", "id" ]
Update this instance's Matches to point to the given other's Matches. Find Matches with the same TextLogError as our Matches, updating their score if less than ours and mark our matches for deletion. If there are no other matches, update ours to point to the other ClassifiedFailure.
[ "Update", "this", "instance", "s", "Matches", "to", "point", "to", "the", "given", "other", "s", "Matches", "." ]
python
train
37.576923
geophysics-ubonn/crtomo_tools
lib/crtomo/configManager.py
https://github.com/geophysics-ubonn/crtomo_tools/blob/27c3e21a557f8df1c12455b96c4c2e00e08a5b4a/lib/crtomo/configManager.py#L56-L63
def clear_measurements(self):
    """Remove all measurements from self.measurements. Reset the
    measurement counter. All IDs are invalidated.
    """
    keys = list(self.measurements.keys())
    for key in keys:
        del(self.measurements[key])
    self.meas_counter = -1
[ "def", "clear_measurements", "(", "self", ")", ":", "keys", "=", "list", "(", "self", ".", "measurements", ".", "keys", "(", ")", ")", "for", "key", "in", "keys", ":", "del", "(", "self", ".", "measurements", "[", "key", "]", ")", "self", ".", "meas_counter", "=", "-", "1" ]
Remove all measurements from self.measurements. Reset the measurement counter. All IDs are invalidated.
[ "Remove", "all", "measurements", "from", "self", ".", "measurements", ".", "Reset", "the", "measurement", "counter", ".", "All", "IDs", "are", "invalidated", "." ]
python
train
37.25
cga-harvard/Hypermap-Registry
hypermap/aggregator/models.py
https://github.com/cga-harvard/Hypermap-Registry/blob/899a5385b15af7fba190ab4fae1d41e47d155a1b/hypermap/aggregator/models.py#L1238-L1390
def update_layers_geonode_wm(service, num_layers=None):
    """
    Update layers for a WorldMap instance.
    Sample endpoint: http://localhost:8000/
    """
    wm_api_url = urlparse.urljoin(service.url, 'worldmap/api/2.8/layer/?format=json')
    if num_layers:
        total = num_layers
    else:
        response = requests.get(wm_api_url)
        data = json.loads(response.content)
        total = data['meta']['total_count']

    # set srs
    # WorldMap supports only 4326, 900913, 3857
    for crs_code in ['EPSG:4326', 'EPSG:900913', 'EPSG:3857']:
        srs, created = SpatialReferenceSystem.objects.get_or_create(code=crs_code)
        service.srs.add(srs)
    service.update_validity()

    layer_n = 0
    limit = 10
    for i in range(0, total, limit):
        try:
            url = (
                '%s&order_by=-date&offset=%s&limit=%s' % (wm_api_url, i, limit)
            )
            LOGGER.debug('Fetching %s' % url)
            response = requests.get(url)
            data = json.loads(response.content)
            for row in data['objects']:
                typename = row['typename']
                # name = typename.split(':')[1]
                name = typename
                uuid = row['uuid']
                LOGGER.debug('Updating layer %s' % name)
                title = row['title']
                abstract = row['abstract']
                bbox = row['bbox']
                page_url = urlparse.urljoin(service.url, 'data/%s' % name)
                category = ''
                if 'topic_category' in row:
                    category = row['topic_category']
                username = ''
                if 'owner_username' in row:
                    username = row['owner_username']
                temporal_extent_start = ''
                if 'temporal_extent_start' in row:
                    temporal_extent_start = row['temporal_extent_start']
                temporal_extent_end = ''
                if 'temporal_extent_end' in row:
                    temporal_extent_end = row['temporal_extent_end']
                # we use the geoserver virtual layer getcapabilities for wm endpoint
                # TODO we should make the GeoServer port configurable somehow...
                # endpoint = urlparse.urljoin(service.url, 'geoserver/geonode/%s/wms?' % name)
                endpoint = urlparse.urljoin(service.url, 'geoserver/wms?')
                endpoint = endpoint.replace('8000', '8080')
                print endpoint
                if 'is_public' in row:
                    is_public = row['is_public']
                layer, created = Layer.objects.get_or_create(
                    service=service, catalog=service.catalog, name=name, uuid=uuid)
                if created:
                    LOGGER.debug('Added a new layer in registry: %s, %s' % (name, uuid))
                if layer.active:
                    links = [['Hypermap:WorldMap', endpoint]]
                    # update fields
                    layer.type = 'Hypermap:WorldMap'
                    layer.title = title
                    layer.abstract = abstract
                    layer.is_public = is_public
                    layer.url = endpoint
                    layer.page_url = page_url
                    # category and owner username
                    layer_wm, created = LayerWM.objects.get_or_create(layer=layer)
                    layer_wm.category = category
                    layer_wm.username = username
                    layer_wm.temporal_extent_start = temporal_extent_start
                    layer_wm.temporal_extent_end = temporal_extent_end
                    layer_wm.save()
                    # bbox [x0, y0, x1, y1]
                    # check if it is a valid bbox (TODO improve this check)
                    # bbox = bbox.replace('-inf', 'None')
                    # bbox = bbox.replace('inf', 'None')
                    # if bbox.count(',') == 3:
                    #     bbox_list = bbox[1:-1].split(',')
                    # else:
                    #     bbox_list = [None, None, None, None]
                    x0 = format_float(bbox[0])
                    x1 = format_float(bbox[1])
                    y0 = format_float(bbox[2])
                    y1 = format_float(bbox[3])
                    # In many cases for some reason to be fixed GeoServer has x coordinates flipped in WM.
                    x0, x1 = flip_coordinates(x0, x1)
                    y0, y1 = flip_coordinates(y0, y1)
                    layer.bbox_x0 = x0
                    layer.bbox_y0 = y0
                    layer.bbox_x1 = x1
                    layer.bbox_y1 = y1
                    # keywords
                    keywords = []
                    for keyword in row['keywords']:
                        keywords.append(keyword['name'])
                    layer.keywords.all().delete()
                    for keyword in keywords:
                        layer.keywords.add(keyword)
                    layer.wkt_geometry = bbox2wktpolygon([x0, y0, x1, y1])
                    layer.xml = create_metadata_record(
                        identifier=str(layer.uuid),
                        source=endpoint,
                        links=links,
                        format='Hypermap:WorldMap',
                        type=layer.csw_type,
                        relation=service.id_string,
                        title=layer.title,
                        alternative=name,
                        abstract=layer.abstract,
                        keywords=keywords,
                        wkt_geometry=layer.wkt_geometry
                    )
                    layer.anytext = gen_anytext(layer.title, layer.abstract, keywords)
                    layer.save()
                    # dates
                    add_mined_dates(layer)
                    add_metadata_dates_to_layer([layer_wm.temporal_extent_start, layer_wm.temporal_extent_end], layer)
                    layer_n = layer_n + 1
                    # exits if DEBUG_SERVICES
                    LOGGER.debug("Updated layer n. %s/%s" % (layer_n, total))
                    if DEBUG_SERVICES and layer_n == DEBUG_LAYER_NUMBER:
                        return
        except Exception as err:
            LOGGER.error('Error! %s' % err)

    # update deleted layers. For now we check the whole set of deleted layers
    # we should optimize it if the list will grow
    # TODO implement the actions application
    url = urlparse.urljoin(service.url, 'worldmap/api/2.8/actionlayerdelete/?format=json')
    LOGGER.debug('Fetching %s for detecting deleted layers' % url)
    try:
        response = requests.get(url)
        data = json.loads(response.content)
        for deleted_layer in data['objects']:
            if Layer.objects.filter(uuid=deleted_layer['args']).count() > 0:
                layer = Layer.objects.get(uuid=deleted_layer['args'])
                layer.was_deleted = True
                layer.save()
                LOGGER.debug('Layer %s marked as deleted' % layer.uuid)
    except Exception as err:
        LOGGER.error('Error! %s' % err)
[ "def", "update_layers_geonode_wm", "(", "service", ",", "num_layers", "=", "None", ")", ":", "wm_api_url", "=", "urlparse", ".", "urljoin", "(", "service", ".", "url", ",", "'worldmap/api/2.8/layer/?format=json'", ")", "if", "num_layers", ":", "total", "=", "num_layers", "else", ":", "response", "=", "requests", ".", "get", "(", "wm_api_url", ")", "data", "=", "json", ".", "loads", "(", "response", ".", "content", ")", "total", "=", "data", "[", "'meta'", "]", "[", "'total_count'", "]", "# set srs", "# WorldMap supports only 4326, 900913, 3857", "for", "crs_code", "in", "[", "'EPSG:4326'", ",", "'EPSG:900913'", ",", "'EPSG:3857'", "]", ":", "srs", ",", "created", "=", "SpatialReferenceSystem", ".", "objects", ".", "get_or_create", "(", "code", "=", "crs_code", ")", "service", ".", "srs", ".", "add", "(", "srs", ")", "service", ".", "update_validity", "(", ")", "layer_n", "=", "0", "limit", "=", "10", "for", "i", "in", "range", "(", "0", ",", "total", ",", "limit", ")", ":", "try", ":", "url", "=", "(", "'%s&order_by=-date&offset=%s&limit=%s'", "%", "(", "wm_api_url", ",", "i", ",", "limit", ")", ")", "LOGGER", ".", "debug", "(", "'Fetching %s'", "%", "url", ")", "response", "=", "requests", ".", "get", "(", "url", ")", "data", "=", "json", ".", "loads", "(", "response", ".", "content", ")", "for", "row", "in", "data", "[", "'objects'", "]", ":", "typename", "=", "row", "[", "'typename'", "]", "# name = typename.split(':')[1]", "name", "=", "typename", "uuid", "=", "row", "[", "'uuid'", "]", "LOGGER", ".", "debug", "(", "'Updating layer %s'", "%", "name", ")", "title", "=", "row", "[", "'title'", "]", "abstract", "=", "row", "[", "'abstract'", "]", "bbox", "=", "row", "[", "'bbox'", "]", "page_url", "=", "urlparse", ".", "urljoin", "(", "service", ".", "url", ",", "'data/%s'", "%", "name", ")", "category", "=", "''", "if", "'topic_category'", "in", "row", ":", "category", "=", "row", "[", "'topic_category'", "]", "username", "=", "''", "if", "'owner_username'", "in", "row", ":", "username", "=", "row", "[", "'owner_username'", "]", "temporal_extent_start", "=", "''", "if", "'temporal_extent_start'", "in", "row", ":", "temporal_extent_start", "=", "row", "[", "'temporal_extent_start'", "]", "temporal_extent_end", "=", "''", "if", "'temporal_extent_end'", "in", "row", ":", "temporal_extent_end", "=", "row", "[", "'temporal_extent_end'", "]", "# we use the geoserver virtual layer getcapabilities for wm endpoint", "# TODO we should make the GeoServer port configurable somehow...", "# endpoint = urlparse.urljoin(service.url, 'geoserver/geonode/%s/wms?' % name)", "endpoint", "=", "urlparse", ".", "urljoin", "(", "service", ".", "url", ",", "'geoserver/wms?'", ")", "endpoint", "=", "endpoint", ".", "replace", "(", "'8000'", ",", "'8080'", ")", "print", "endpoint", "if", "'is_public'", "in", "row", ":", "is_public", "=", "row", "[", "'is_public'", "]", "layer", ",", "created", "=", "Layer", ".", "objects", ".", "get_or_create", "(", "service", "=", "service", ",", "catalog", "=", "service", ".", "catalog", ",", "name", "=", "name", ",", "uuid", "=", "uuid", ")", "if", "created", ":", "LOGGER", ".", "debug", "(", "'Added a new layer in registry: %s, %s'", "%", "(", "name", ",", "uuid", ")", ")", "if", "layer", ".", "active", ":", "links", "=", "[", "[", "'Hypermap:WorldMap'", ",", "endpoint", "]", "]", "# update fields", "layer", ".", "type", "=", "'Hypermap:WorldMap'", "layer", ".", "title", "=", "title", "layer", ".", "abstract", "=", "abstract", "layer", ".", "is_public", "=", "is_public", "layer", ".", "url", "=", "endpoint", "layer", ".", "page_url", "=", "page_url", "# category and owner username", "layer_wm", ",", "created", "=", "LayerWM", ".", "objects", ".", "get_or_create", "(", "layer", "=", "layer", ")", "layer_wm", ".", "category", "=", "category", "layer_wm", ".", "username", "=", "username", "layer_wm", ".", "temporal_extent_start", "=", "temporal_extent_start", "layer_wm", ".", "temporal_extent_end", "=", "temporal_extent_end", "layer_wm", ".", "save", "(", ")", "# bbox [x0, y0, x1, y1]", "# check if it is a valid bbox (TODO improve this check)", "# bbox = bbox.replace('-inf', 'None')", "# bbox = bbox.replace('inf', 'None')", "# if bbox.count(',') == 3:", "# bbox_list = bbox[1:-1].split(',')", "# else:", "# bbox_list = [None, None, None, None]", "x0", "=", "format_float", "(", "bbox", "[", "0", "]", ")", "x1", "=", "format_float", "(", "bbox", "[", "1", "]", ")", "y0", "=", "format_float", "(", "bbox", "[", "2", "]", ")", "y1", "=", "format_float", "(", "bbox", "[", "3", "]", ")", "# In many cases for some reason to be fixed GeoServer has x coordinates flipped in WM.", "x0", ",", "x1", "=", "flip_coordinates", "(", "x0", ",", "x1", ")", "y0", ",", "y1", "=", "flip_coordinates", "(", "y0", ",", "y1", ")", "layer", ".", "bbox_x0", "=", "x0", "layer", ".", "bbox_y0", "=", "y0", "layer", ".", "bbox_x1", "=", "x1", "layer", ".", "bbox_y1", "=", "y1", "# keywords", "keywords", "=", "[", "]", "for", "keyword", "in", "row", "[", "'keywords'", "]", ":", "keywords", ".", "append", "(", "keyword", "[", "'name'", "]", ")", "layer", ".", "keywords", ".", "all", "(", ")", ".", "delete", "(", ")", "for", "keyword", "in", "keywords", ":", "layer", ".", "keywords", ".", "add", "(", "keyword", ")", "layer", ".", "wkt_geometry", "=", "bbox2wktpolygon", "(", "[", "x0", ",", "y0", ",", "x1", ",", "y1", "]", ")", "layer", ".", "xml", "=", "create_metadata_record", "(", "identifier", "=", "str", "(", "layer", ".", "uuid", ")", ",", "source", "=", "endpoint", ",", "links", "=", "links", ",", "format", "=", "'Hypermap:WorldMap'", ",", "type", "=", "layer", ".", "csw_type", ",", "relation", "=", "service", ".", "id_string", ",", "title", "=", "layer", ".", "title", ",", "alternative", "=", "name", ",", "abstract", "=", "layer", ".", "abstract", ",", "keywords", "=", "keywords", ",", "wkt_geometry", "=", "layer", ".", "wkt_geometry", ")", "layer", ".", "anytext", "=", "gen_anytext", "(", "layer", ".", "title", ",", "layer", ".", "abstract", ",", "keywords", ")", "layer", ".", "save", "(", ")", "# dates", "add_mined_dates", "(", "layer", ")", "add_metadata_dates_to_layer", "(", "[", "layer_wm", ".", "temporal_extent_start", ",", "layer_wm", ".", "temporal_extent_end", "]", ",", "layer", ")", "layer_n", "=", "layer_n", "+", "1", "# exits if DEBUG_SERVICES", "LOGGER", ".", "debug", "(", "\"Updated layer n. %s/%s\"", "%", "(", "layer_n", ",", "total", ")", ")", "if", "DEBUG_SERVICES", "and", "layer_n", "==", "DEBUG_LAYER_NUMBER", ":", "return", "except", "Exception", "as", "err", ":", "LOGGER", ".", "error", "(", "'Error! %s'", "%", "err", ")", "# update deleted layers. For now we check the whole set of deleted layers", "# we should optimize it if the list will grow", "# TODO implement the actions application", "url", "=", "urlparse", ".", "urljoin", "(", "service", ".", "url", ",", "'worldmap/api/2.8/actionlayerdelete/?format=json'", ")", "LOGGER", ".", "debug", "(", "'Fetching %s for detecting deleted layers'", "%", "url", ")", "try", ":", "response", "=", "requests", ".", "get", "(", "url", ")", "data", "=", "json", ".", "loads", "(", "response", ".", "content", ")", "for", "deleted_layer", "in", "data", "[", "'objects'", "]", ":", "if", "Layer", ".", "objects", ".", "filter", "(", "uuid", "=", "deleted_layer", "[", "'args'", "]", ")", ".", "count", "(", ")", ">", "0", ":", "layer", "=", "Layer", ".", "objects", ".", "get", "(", "uuid", "=", "deleted_layer", "[", "'args'", "]", ")", "layer", ".", "was_deleted", "=", "True", "layer", ".", "save", "(", ")", "LOGGER", ".", "debug", "(", "'Layer %s marked as deleted'", "%", "layer", ".", "uuid", ")", "except", "Exception", "as", "err", ":", "LOGGER", ".", "error", "(", "'Error! %s'", "%", "err", ")" ]
Update layers for a WorldMap instance. Sample endpoint: http://localhost:8000/
[ "Update", "layers", "for", "a", "WorldMap", "instance", ".", "Sample", "endpoint", ":", "http", ":", "//", "localhost", ":", "8000", "/" ]
python
train
45.54902
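The harvesting loop above is plain offset/limit pagination over a JSON API; a generic, self-contained sketch of that pattern (iter_api_objects is hypothetical, not part of Hypermap):

import requests

def iter_api_objects(base_url, limit=10):
    # Walk an 'objects' list page by page until the API returns an empty page.
    offset = 0
    while True:
        resp = requests.get('%s&offset=%s&limit=%s' % (base_url, offset, limit))
        objects = resp.json().get('objects', [])
        if not objects:
            return
        for obj in objects:
            yield obj
        offset += limit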
pawelad/pymonzo
src/pymonzo/monzo_api.py
https://github.com/pawelad/pymonzo/blob/b5c8d4f46dcb3a2f475797a8b8ef1c15f6493fb9/src/pymonzo/monzo_api.py#L276-L303
def balance(self, account_id=None):
    """
    Returns balance information for a specific account.

    Official docs:
        https://monzo.com/docs/#read-balance

    :param account_id: Monzo account ID
    :type account_id: str
    :raises: ValueError
    :returns: Monzo balance instance
    :rtype: MonzoBalance
    """
    if not account_id:
        if len(self.accounts()) == 1:
            account_id = self.accounts()[0].id
        else:
            raise ValueError("You need to pass account ID")

    endpoint = '/balance'
    response = self._get_response(
        method='get', endpoint=endpoint,
        params={
            'account_id': account_id,
        },
    )

    return MonzoBalance(data=response.json())
[ "def", "balance", "(", "self", ",", "account_id", "=", "None", ")", ":", "if", "not", "account_id", ":", "if", "len", "(", "self", ".", "accounts", "(", ")", ")", "==", "1", ":", "account_id", "=", "self", ".", "accounts", "(", ")", "[", "0", "]", ".", "id", "else", ":", "raise", "ValueError", "(", "\"You need to pass account ID\"", ")", "endpoint", "=", "'/balance'", "response", "=", "self", ".", "_get_response", "(", "method", "=", "'get'", ",", "endpoint", "=", "endpoint", ",", "params", "=", "{", "'account_id'", ":", "account_id", ",", "}", ",", ")", "return", "MonzoBalance", "(", "data", "=", "response", ".", "json", "(", ")", ")" ]
Returns balance information for a specific account.

Official docs:
    https://monzo.com/docs/#read-balance

:param account_id: Monzo account ID
:type account_id: str
:raises: ValueError
:returns: Monzo balance instance
:rtype: MonzoBalance
[ "Returns", "balance", "information", "for", "a", "specific", "account", "." ]
python
train
28.357143
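A usage sketch, assuming credentials are already configured for pymonzo and that the returned MonzoBalance exposes the raw balance field as an attribute (both assumptions, not verified against the source):

from pymonzo import MonzoAPI

monzo = MonzoAPI()
print(monzo.balance().balance)   # balance of the single linked account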
pytroll/trollimage
trollimage/image.py
https://github.com/pytroll/trollimage/blob/d35a7665ad475ff230e457085523e21f2cd3f454/trollimage/image.py#L479-L537
def _to_p(self, mode):
    """Convert the image to P or PA mode.
    """
    if self.mode.endswith("A"):
        chans = self.channels[:-1]
        alpha = self.channels[-1]
        self._secondary_mode = self.mode[:-1]
    else:
        chans = self.channels
        alpha = None
        self._secondary_mode = self.mode

    palette = []
    selfmask = chans[0].mask
    for chn in chans[1:]:
        selfmask = np.ma.mask_or(selfmask, chn.mask)

    new_chn = np.ma.zeros(self.shape, dtype=int)
    color_nb = 0

    for i in range(self.height):
        for j in range(self.width):
            current_col = tuple([chn[i, j] for chn in chans])
            try:
                # the generator variable does not leak, so the result of
                # next() must be assigned for found colors to be reused
                idx = next(idx for idx in range(len(palette))
                           if palette[idx] == current_col)
            except StopIteration:
                idx = color_nb
                palette.append(current_col)
                color_nb = color_nb + 1

            new_chn[i, j] = idx

    if self.fill_value is not None:
        if self.mode.endswith("A"):
            current_col = tuple(self.fill_value[:-1])
            fill_alpha = [self.fill_value[-1]]
        else:
            current_col = tuple(self.fill_value)
            fill_alpha = []
        try:
            idx = next(idx for idx in range(len(palette))
                       if palette[idx] == current_col)
        except StopIteration:
            idx = color_nb
            palette.append(current_col)
            color_nb = color_nb + 1

        self.fill_value = [idx] + fill_alpha

    new_chn.mask = selfmask
    self.palette = palette
    if alpha is None:
        self.channels = [new_chn]
    else:
        self.channels = [new_chn, alpha]
    self.mode = mode
[ "def", "_to_p", "(", "self", ",", "mode", ")", ":", "if", "self", ".", "mode", ".", "endswith", "(", "\"A\"", ")", ":", "chans", "=", "self", ".", "channels", "[", ":", "-", "1", "]", "alpha", "=", "self", ".", "channels", "[", "-", "1", "]", "self", ".", "_secondary_mode", "=", "self", ".", "mode", "[", ":", "-", "1", "]", "else", ":", "chans", "=", "self", ".", "channels", "alpha", "=", "None", "self", ".", "_secondary_mode", "=", "self", ".", "mode", "palette", "=", "[", "]", "selfmask", "=", "chans", "[", "0", "]", ".", "mask", "for", "chn", "in", "chans", "[", "1", ":", "]", ":", "selfmask", "=", "np", ".", "ma", ".", "mask_or", "(", "selfmask", ",", "chn", ".", "mask", ")", "new_chn", "=", "np", ".", "ma", ".", "zeros", "(", "self", ".", "shape", ",", "dtype", "=", "int", ")", "color_nb", "=", "0", "for", "i", "in", "range", "(", "self", ".", "height", ")", ":", "for", "j", "in", "range", "(", "self", ".", "width", ")", ":", "current_col", "=", "tuple", "(", "[", "chn", "[", "i", ",", "j", "]", "for", "chn", "in", "chans", "]", ")", "try", ":", "idx", "=", "next", "(", "idx", "for", "idx", "in", "range", "(", "len", "(", "palette", ")", ")", "if", "palette", "[", "idx", "]", "==", "current_col", ")", "except", "StopIteration", ":", "idx", "=", "color_nb", "palette", ".", "append", "(", "current_col", ")", "color_nb", "=", "color_nb", "+", "1", "new_chn", "[", "i", ",", "j", "]", "=", "idx", "if", "self", ".", "fill_value", "is", "not", "None", ":", "if", "self", ".", "mode", ".", "endswith", "(", "\"A\"", ")", ":", "current_col", "=", "tuple", "(", "self", ".", "fill_value", "[", ":", "-", "1", "]", ")", "fill_alpha", "=", "[", "self", ".", "fill_value", "[", "-", "1", "]", "]", "else", ":", "current_col", "=", "tuple", "(", "self", ".", "fill_value", ")", "fill_alpha", "=", "[", "]", "try", ":", "idx", "=", "next", "(", "idx", "for", "idx", "in", "range", "(", "len", "(", "palette", ")", ")", "if", "palette", "[", "idx", "]", "==", "current_col", ")", "except", "StopIteration", ":", "idx", "=", "color_nb", "palette", ".", "append", "(", "current_col", ")", "color_nb", "=", "color_nb", "+", "1", "self", ".", "fill_value", "=", "[", "idx", "]", "+", "fill_alpha", "new_chn", ".", "mask", "=", "selfmask", "self", ".", "palette", "=", "palette", "if", "alpha", "is", "None", ":", "self", ".", "channels", "=", "[", "new_chn", "]", "else", ":", "self", ".", "channels", "=", "[", "new_chn", ",", "alpha", "]", "self", ".", "mode", "=", "mode" ]
Convert the image to P or PA mode.
[ "Convert", "the", "image", "to", "P", "or", "PA", "mode", "." ]
python
train
31.864407
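The nested loops above do a linear palette scan per pixel; a self-contained sketch of the same indexing idea with a dict lookup instead (illustrative only, it ignores masking, alpha and fill values):

def index_colors(pixels):
    # pixels: iterable of hashable color tuples
    palette, index, out = [], {}, []
    for color in pixels:
        if color not in index:
            index[color] = len(palette)   # first occurrence gets a new slot
            palette.append(color)
        out.append(index[color])
    return palette, out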
PythonCharmers/python-future
src/future/backports/email/_header_value_parser.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/_header_value_parser.py#L2259-L2287
def get_group(value):
    """ group = display-name ":" [group-list] ";" [CFWS]

    """
    group = Group()
    token, value = get_display_name(value)
    if not value or value[0] != ':':
        raise errors.HeaderParseError("expected ':' at end of group "
                                      "display name but found '{}'".format(value))
    group.append(token)
    group.append(ValueTerminal(':', 'group-display-name-terminator'))
    value = value[1:]
    if value and value[0] == ';':
        group.append(ValueTerminal(';', 'group-terminator'))
        return group, value[1:]
    token, value = get_group_list(value)
    group.append(token)
    if not value:
        group.defects.append(errors.InvalidHeaderDefect(
            "end of header in group"))
    if value[0] != ';':
        raise errors.HeaderParseError(
            "expected ';' at end of group but found {}".format(value))
    group.append(ValueTerminal(';', 'group-terminator'))
    value = value[1:]
    if value and value[0] in CFWS_LEADER:
        token, value = get_cfws(value)
        group.append(token)
    return group, value
[ "def", "get_group", "(", "value", ")", ":", "group", "=", "Group", "(", ")", "token", ",", "value", "=", "get_display_name", "(", "value", ")", "if", "not", "value", "or", "value", "[", "0", "]", "!=", "':'", ":", "raise", "errors", ".", "HeaderParseError", "(", "\"expected ':' at end of group \"", "\"display name but found '{}'\"", ".", "format", "(", "value", ")", ")", "group", ".", "append", "(", "token", ")", "group", ".", "append", "(", "ValueTerminal", "(", "':'", ",", "'group-display-name-terminator'", ")", ")", "value", "=", "value", "[", "1", ":", "]", "if", "value", "and", "value", "[", "0", "]", "==", "';'", ":", "group", ".", "append", "(", "ValueTerminal", "(", "';'", ",", "'group-terminator'", ")", ")", "return", "group", ",", "value", "[", "1", ":", "]", "token", ",", "value", "=", "get_group_list", "(", "value", ")", "group", ".", "append", "(", "token", ")", "if", "not", "value", ":", "group", ".", "defects", ".", "append", "(", "errors", ".", "InvalidHeaderDefect", "(", "\"end of header in group\"", ")", ")", "if", "value", "[", "0", "]", "!=", "';'", ":", "raise", "errors", ".", "HeaderParseError", "(", "\"expected ';' at end of group but found {}\"", ".", "format", "(", "value", ")", ")", "group", ".", "append", "(", "ValueTerminal", "(", "';'", ",", "'group-terminator'", ")", ")", "value", "=", "value", "[", "1", ":", "]", "if", "value", "and", "value", "[", "0", "]", "in", "CFWS_LEADER", ":", "token", ",", "value", "=", "get_cfws", "(", "value", ")", "group", ".", "append", "(", "token", ")", "return", "group", ",", "value" ]
group = display-name ":" [group-list] ";" [CFWS]
[ "group", "=", "display", "-", "name", ":", "[", "group", "-", "list", "]", ";", "[", "CFWS", "]" ]
python
train
36.344828
fhamborg/news-please
newsplease/crawler/commoncrawl_extractor.py
https://github.com/fhamborg/news-please/blob/731837c2a6c223cfb3e1d7f5fdc4f4eced2310f9/newsplease/crawler/commoncrawl_extractor.py#L288-L328
def extract_from_commoncrawl(self, warc_download_url, callback_on_article_extracted,
                             valid_hosts=None,
                             start_date=None, end_date=None,
                             strict_date=True, reuse_previously_downloaded_files=True,
                             local_download_dir_warc=None,
                             continue_after_error=True, show_download_progress=False,
                             log_level=logging.ERROR,
                             delete_warc_after_extraction=True,
                             log_pathname_fully_extracted_warcs=None):
    """
    Crawl and extract articles from the news crawl provided by commoncrawl.org. For each article that was extracted
    successfully the callback function callback_on_article_extracted is invoked where the first parameter is the
    article object.
    :param log_pathname_fully_extracted_warcs:
    :param delete_warc_after_extraction:
    :param warc_download_url:
    :param callback_on_article_extracted:
    :param valid_hosts:
    :param start_date:
    :param end_date:
    :param strict_date:
    :param reuse_previously_downloaded_files:
    :param local_download_dir_warc:
    :param continue_after_error:
    :param show_download_progress:
    :param log_level:
    :return:
    """
    self.__warc_download_url = warc_download_url
    self.__filter_valid_hosts = valid_hosts
    self.__filter_start_date = start_date
    self.__filter_end_date = end_date
    self.__filter_strict_date = strict_date
    if local_download_dir_warc:
        self.__local_download_dir_warc = local_download_dir_warc
    self.__reuse_previously_downloaded_files = reuse_previously_downloaded_files
    self.__continue_after_error = continue_after_error
    self.__callback_on_article_extracted = callback_on_article_extracted
    self.__show_download_progress = show_download_progress
    self.__log_level = log_level
    self.__delete_warc_after_extraction = delete_warc_after_extraction
    self.__log_pathname_fully_extracted_warcs = log_pathname_fully_extracted_warcs

    self.__run()
[ "def", "extract_from_commoncrawl", "(", "self", ",", "warc_download_url", ",", "callback_on_article_extracted", ",", "valid_hosts", "=", "None", ",", "start_date", "=", "None", ",", "end_date", "=", "None", ",", "strict_date", "=", "True", ",", "reuse_previously_downloaded_files", "=", "True", ",", "local_download_dir_warc", "=", "None", ",", "continue_after_error", "=", "True", ",", "show_download_progress", "=", "False", ",", "log_level", "=", "logging", ".", "ERROR", ",", "delete_warc_after_extraction", "=", "True", ",", "log_pathname_fully_extracted_warcs", "=", "None", ")", ":", "self", ".", "__warc_download_url", "=", "warc_download_url", "self", ".", "__filter_valid_hosts", "=", "valid_hosts", "self", ".", "__filter_start_date", "=", "start_date", "self", ".", "__filter_end_date", "=", "end_date", "self", ".", "__filter_strict_date", "=", "strict_date", "if", "local_download_dir_warc", ":", "self", ".", "__local_download_dir_warc", "=", "local_download_dir_warc", "self", ".", "__reuse_previously_downloaded_files", "=", "reuse_previously_downloaded_files", "self", ".", "__continue_after_error", "=", "continue_after_error", "self", ".", "__callback_on_article_extracted", "=", "callback_on_article_extracted", "self", ".", "__show_download_progress", "=", "show_download_progress", "self", ".", "__log_level", "=", "log_level", "self", ".", "__delete_warc_after_extraction", "=", "delete_warc_after_extraction", "self", ".", "__log_pathname_fully_extracted_warcs", "=", "log_pathname_fully_extracted_warcs", "self", ".", "__run", "(", ")" ]
Crawl and extract articles from the news crawl provided by commoncrawl.org. For each article that was extracted
successfully the callback function callback_on_article_extracted is invoked where the first parameter is the
article object.
:param log_pathname_fully_extracted_warcs:
:param delete_warc_after_extraction:
:param warc_download_url:
:param callback_on_article_extracted:
:param valid_hosts:
:param start_date:
:param end_date:
:param strict_date:
:param reuse_previously_downloaded_files:
:param local_download_dir_warc:
:param continue_after_error:
:param show_download_progress:
:param log_level:
:return:
[ "Crawl", "and", "extract", "articles", "from", "the", "news", "crawl", "provided", "by", "commoncrawl", ".", "org", ".", "For", "each", "article", "that", "was", "extracted", "successfully", "the", "callback", "function", "callback_on_article_extracted", "is", "invoked", "where", "the", "first", "parameter", "is", "the", "article", "object", ".", ":", "param", "log_pathname_fully_extracted_warcs", ":", ":", "param", "delete_warc_after_extraction", ":", ":", "param", "warc_download_url", ":", ":", "param", "callback_on_article_extracted", ":", ":", "param", "valid_hosts", ":", ":", "param", "start_date", ":", ":", "param", "end_date", ":", ":", "param", "strict_date", ":", ":", "param", "reuse_previously_downloaded_files", ":", ":", "param", "local_download_dir_warc", ":", ":", "param", "continue_after_error", ":", ":", "param", "show_download_progress", ":", ":", "param", "log_level", ":", ":", "return", ":" ]
python
train
51.97561
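A hypothetical driver for the entry point above; the class name and the WARC URL are placeholders (real download paths come from commoncrawl.org listings), so this is a sketch, not the package's documented interface:

def on_article(article):
    print(article.title)

# extractor = CommonCrawlExtractor()   # assumed class in this module
# extractor.extract_from_commoncrawl(
#     'https://commoncrawl.s3.amazonaws.com/crawl-data/CC-NEWS/2019/01/example.warc.gz',
#     on_article)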
gtalarico/airtable-python-wrapper
airtable/airtable.py
https://github.com/gtalarico/airtable-python-wrapper/blob/48b2d806178085b52a31817571e5a1fc3dce4045/airtable/airtable.py#L322-L352
def search(self, field_name, field_value, record=None, **options):
    """
    Returns all matching records found in :any:`get_all`

    >>> airtable.search('Gender', 'Male')
    [{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]

    Args:
        field_name (``str``): Name of field to match (column name).
        field_value (``str``): Value of field to match.

    Keyword Args:
        max_records (``int``, optional): The maximum total number of
            records that will be returned. See :any:`MaxRecordsParam`
        view (``str``, optional): The name or ID of a view.
            See :any:`ViewParam`.
        fields (``str``, ``list``, optional): Name of field or fields to
            be retrieved. Default is all fields. See :any:`FieldsParam`.
        sort (``list``, optional): List of fields to sort by.
            Default order is ascending. See :any:`SortParam`.

    Returns:
        records (``list``): All records that matched ``field_value``
    """
    records = []
    from_name_and_value = AirtableParams.FormulaParam.from_name_and_value
    formula = from_name_and_value(field_name, field_value)
    options['formula'] = formula
    records = self.get_all(**options)
    return records
[ "def", "search", "(", "self", ",", "field_name", ",", "field_value", ",", "record", "=", "None", ",", "*", "*", "options", ")", ":", "records", "=", "[", "]", "from_name_and_value", "=", "AirtableParams", ".", "FormulaParam", ".", "from_name_and_value", "formula", "=", "from_name_and_value", "(", "field_name", ",", "field_value", ")", "options", "[", "'formula'", "]", "=", "formula", "records", "=", "self", ".", "get_all", "(", "*", "*", "options", ")", "return", "records" ]
Returns all matching records found in :any:`get_all`

>>> airtable.search('Gender', 'Male')
[{'fields': {'Name': 'John', 'Gender': 'Male'}, ... ]

Args:
    field_name (``str``): Name of field to match (column name).
    field_value (``str``): Value of field to match.

Keyword Args:
    max_records (``int``, optional): The maximum total number of
        records that will be returned. See :any:`MaxRecordsParam`
    view (``str``, optional): The name or ID of a view.
        See :any:`ViewParam`.
    fields (``str``, ``list``, optional): Name of field or fields to
        be retrieved. Default is all fields. See :any:`FieldsParam`.
    sort (``list``, optional): List of fields to sort by.
        Default order is ascending. See :any:`SortParam`.

Returns:
    records (``list``): All records that matched ``field_value``
[ "Returns", "all", "matching", "records", "found", "in", ":", "any", ":", "get_all", ">>>", "airtable", ".", "search", "(", "Gender", "Male", ")", "[", "{", "fields", ":", "{", "Name", ":", "John", "Gender", ":", "Male", "}", "...", "]", "Args", ":", "field_name", "(", "str", ")", ":", "Name", "of", "field", "to", "match", "(", "column", "name", ")", ".", "field_value", "(", "str", ")", ":", "Value", "of", "field", "to", "match", ".", "Keyword", "Args", ":", "max_records", "(", "int", "optional", ")", ":", "The", "maximum", "total", "number", "of", "records", "that", "will", "be", "returned", ".", "See", ":", "any", ":", "MaxRecordsParam", "view", "(", "str", "optional", ")", ":", "The", "name", "or", "ID", "of", "a", "view", ".", "See", ":", "any", ":", "ViewParam", ".", "fields", "(", "str", "list", "optional", ")", ":", "Name", "of", "field", "or", "fields", "to", "be", "retrieved", ".", "Default", "is", "all", "fields", ".", "See", ":", "any", ":", "FieldsParam", ".", "sort", "(", "list", "optional", ")", ":", "List", "of", "fields", "to", "sort", "by", ".", "Default", "order", "is", "ascending", ".", "See", ":", "any", ":", "SortParam", ".", "Returns", ":", "records", "(", "list", ")", ":", "All", "records", "that", "matched", "field_value" ]
python
train
42.612903
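Given the docstring's own example, the call above should behave like get_all with an explicit formula; the exact formula string built by FormulaParam is an assumption based on Airtable's standard syntax:

records = airtable.search('Gender', 'Male')
# expected to be equivalent to:
records = airtable.get_all(formula="{Gender}='Male'")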
PolyJIT/benchbuild
benchbuild/utils/schema.py
https://github.com/PolyJIT/benchbuild/blob/9ad2ec54d96e97b642b1f06eddcbad9ba7aeaf58/benchbuild/utils/schema.py#L351-L361
def enforce_versioning(force=False):
    """Install versioning on the db."""
    connect_str, repo_url = get_version_data()
    LOG.warning("Your database uses an unversioned benchbuild schema.")
    if not force and not ui.ask(
            "Should I enforce version control on your schema?"):
        LOG.error("User declined schema versioning.")
        return None
    repo_version = migrate.version(repo_url, url=connect_str)
    migrate.version_control(connect_str, repo_url, version=repo_version)
    return repo_version
[ "def", "enforce_versioning", "(", "force", "=", "False", ")", ":", "connect_str", ",", "repo_url", "=", "get_version_data", "(", ")", "LOG", ".", "warning", "(", "\"Your database uses an unversioned benchbuild schema.\"", ")", "if", "not", "force", "and", "not", "ui", ".", "ask", "(", "\"Should I enforce version control on your schema?\"", ")", ":", "LOG", ".", "error", "(", "\"User declined schema versioning.\"", ")", "return", "None", "repo_version", "=", "migrate", ".", "version", "(", "repo_url", ",", "url", "=", "connect_str", ")", "migrate", ".", "version_control", "(", "connect_str", ",", "repo_url", ",", "version", "=", "repo_version", ")", "return", "repo_version" ]
Install versioning on the db.
[ "Install", "versioning", "on", "the", "db", "." ]
python
train
46.909091
rgs1/zk_shell
zk_shell/keys.py
https://github.com/rgs1/zk_shell/blob/bbf34fdfcf1f81100e2a5816fad8af6afc782a54/zk_shell/keys.py#L147-L207
def set(cls, obj, keys, value, fill_list_value=None):
    """
    sets the value for the given keys on obj. if any of the given
    keys does not exist, create the intermediate containers.
    """
    current = obj
    keys_list = keys.split(".")

    for idx, key in enumerate(keys_list, 1):
        if type(current) == list:
            # Validate this key works with a list.
            try:
                key = int(key)
            except ValueError:
                raise cls.Missing(key)

        try:
            # This is the last key, so set the value.
            if idx == len(keys_list):
                if type(current) == list:
                    safe_list_set(
                        current,
                        key,
                        lambda: copy.copy(fill_list_value),
                        value
                    )
                else:
                    current[key] = value

                # done.
                return

            # More keys left, ensure we have a container for this key.
            if type(key) == int:
                try:
                    current[key]
                except IndexError:
                    # Create a list for this key.
                    cnext = container_for_key(keys_list[idx])
                    if type(cnext) == list:
                        def fill_with():
                            return []
                    else:
                        def fill_with():
                            return {}

                    safe_list_set(
                        current,
                        key,
                        fill_with,
                        [] if type(cnext) == list else {}
                    )
            else:
                if key not in current:
                    # Create a list for this key.
                    current[key] = container_for_key(keys_list[idx])

            # Move on to the next key.
            current = current[key]
        except (IndexError, KeyError, TypeError):
            raise cls.Missing(key)
[ "def", "set", "(", "cls", ",", "obj", ",", "keys", ",", "value", ",", "fill_list_value", "=", "None", ")", ":", "current", "=", "obj", "keys_list", "=", "keys", ".", "split", "(", "\".\"", ")", "for", "idx", ",", "key", "in", "enumerate", "(", "keys_list", ",", "1", ")", ":", "if", "type", "(", "current", ")", "==", "list", ":", "# Validate this key works with a list.", "try", ":", "key", "=", "int", "(", "key", ")", "except", "ValueError", ":", "raise", "cls", ".", "Missing", "(", "key", ")", "try", ":", "# This is the last key, so set the value.", "if", "idx", "==", "len", "(", "keys_list", ")", ":", "if", "type", "(", "current", ")", "==", "list", ":", "safe_list_set", "(", "current", ",", "key", ",", "lambda", ":", "copy", ".", "copy", "(", "fill_list_value", ")", ",", "value", ")", "else", ":", "current", "[", "key", "]", "=", "value", "# done.", "return", "# More keys left, ensure we have a container for this key.", "if", "type", "(", "key", ")", "==", "int", ":", "try", ":", "current", "[", "key", "]", "except", "IndexError", ":", "# Create a list for this key.", "cnext", "=", "container_for_key", "(", "keys_list", "[", "idx", "]", ")", "if", "type", "(", "cnext", ")", "==", "list", ":", "def", "fill_with", "(", ")", ":", "return", "[", "]", "else", ":", "def", "fill_with", "(", ")", ":", "return", "{", "}", "safe_list_set", "(", "current", ",", "key", ",", "fill_with", ",", "[", "]", "if", "type", "(", "cnext", ")", "==", "list", "else", "{", "}", ")", "else", ":", "if", "key", "not", "in", "current", ":", "# Create a list for this key.", "current", "[", "key", "]", "=", "container_for_key", "(", "keys_list", "[", "idx", "]", ")", "# Move on to the next key.", "current", "=", "current", "[", "key", "]", "except", "(", "IndexError", ",", "KeyError", ",", "TypeError", ")", ":", "raise", "cls", ".", "Missing", "(", "key", ")" ]
sets the value for the given keys on obj. if any of the given keys does not exist, create the intermediate containers.
[ "sets", "the", "value", "for", "the", "given", "keys", "on", "obj", ".", "if", "any", "of", "the", "given", "keys", "does", "not", "exist", "create", "the", "intermediate", "containers", "." ]
python
train
36.098361
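A usage sketch for the nested setter above, assuming the enclosing class is exposed as Keys (as the module path zk_shell/keys.py suggests) and that dict containers are created for non-numeric intermediate keys:

obj = {}
Keys.set(obj, 'a.b.c', 1)
# obj is expected to become {'a': {'b': {'c': 1}}};
# intermediate containers were created on the way down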
odlgroup/odl
odl/contrib/solvers/spdhg/misc.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/solvers/spdhg/misc.py#L239-L308
def proximal(self, sigma):
    """Prox operator of TV. It allows the proximal step length to be a
    vector of positive elements.

    Examples
    --------
    Check that the proximal operator is the identity for sigma=0

    >>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
    >>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
    >>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
    >>> x = -space.one()
    >>> y = tvnn.proximal(0)(x)
    >>> (y-x).norm() < 1e-10
    True

    Check that negative functions are mapped to 0

    >>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
    >>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
    >>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
    >>> x = -space.one()
    >>> y = tvnn.proximal(0.1)(x)
    >>> y.norm() < 1e-10
    True
    """
    if sigma == 0:
        return odl.IdentityOperator(self.domain)

    else:
        def tv_prox(z, out=None):

            if out is None:
                out = z.space.zero()

            opts = self.prox_options

            sigma_ = np.copy(sigma)
            z_ = z.copy()

            if self.strong_convexity > 0:
                sigma_ /= (1 + sigma * self.strong_convexity)
                z_ /= (1 + sigma * self.strong_convexity)

            if opts['name'] == 'FGP':
                if opts['warmstart']:
                    if opts['p'] is None:
                        opts['p'] = self.grad.range.zero()

                    p = opts['p']
                else:
                    p = self.grad.range.zero()

                sigma_sqrt = np.sqrt(sigma_)

                z_ /= sigma_sqrt
                grad = sigma_sqrt * self.grad
                grad.norm = sigma_sqrt * self.grad.norm
                niter = opts['niter']
                alpha = self.alpha
                out[:] = fgp_dual(p, z_, alpha, niter, grad, self.proj_C,
                                  self.proj_P, tol=opts['tol'])

                out *= sigma_sqrt

                return out

            else:
                raise NotImplementedError('Not yet implemented')

        return tv_prox
[ "def", "proximal", "(", "self", ",", "sigma", ")", ":", "if", "sigma", "==", "0", ":", "return", "odl", ".", "IdentityOperator", "(", "self", ".", "domain", ")", "else", ":", "def", "tv_prox", "(", "z", ",", "out", "=", "None", ")", ":", "if", "out", "is", "None", ":", "out", "=", "z", ".", "space", ".", "zero", "(", ")", "opts", "=", "self", ".", "prox_options", "sigma_", "=", "np", ".", "copy", "(", "sigma", ")", "z_", "=", "z", ".", "copy", "(", ")", "if", "self", ".", "strong_convexity", ">", "0", ":", "sigma_", "/=", "(", "1", "+", "sigma", "*", "self", ".", "strong_convexity", ")", "z_", "/=", "(", "1", "+", "sigma", "*", "self", ".", "strong_convexity", ")", "if", "opts", "[", "'name'", "]", "==", "'FGP'", ":", "if", "opts", "[", "'warmstart'", "]", ":", "if", "opts", "[", "'p'", "]", "is", "None", ":", "opts", "[", "'p'", "]", "=", "self", ".", "grad", ".", "range", ".", "zero", "(", ")", "p", "=", "opts", "[", "'p'", "]", "else", ":", "p", "=", "self", ".", "grad", ".", "range", ".", "zero", "(", ")", "sigma_sqrt", "=", "np", ".", "sqrt", "(", "sigma_", ")", "z_", "/=", "sigma_sqrt", "grad", "=", "sigma_sqrt", "*", "self", ".", "grad", "grad", ".", "norm", "=", "sigma_sqrt", "*", "self", ".", "grad", ".", "norm", "niter", "=", "opts", "[", "'niter'", "]", "alpha", "=", "self", ".", "alpha", "out", "[", ":", "]", "=", "fgp_dual", "(", "p", ",", "z_", ",", "alpha", ",", "niter", ",", "grad", ",", "self", ".", "proj_C", ",", "self", ".", "proj_P", ",", "tol", "=", "opts", "[", "'tol'", "]", ")", "out", "*=", "sigma_sqrt", "return", "out", "else", ":", "raise", "NotImplementedError", "(", "'Not yet implemented'", ")", "return", "tv_prox" ]
Prox operator of TV. It allows the proximal step length to be a vector of positive elements.

Examples
--------
Check that the proximal operator is the identity for sigma=0

>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0)(x)
>>> (y-x).norm() < 1e-10
True

Check that negative functions are mapped to 0

>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0.1)(x)
>>> y.norm() < 1e-10
True
[ "Prox", "operator", "of", "TV", ".", "It", "allows", "the", "proximal", "step", "length", "to", "be", "a", "vector", "of", "positive", "elements", "." ]
python
train
32.228571
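For reference, a sketch in LaTeX of the operator the method above evaluates, with notation assumed from the docstring and body (alpha the TV weight, C the non-negativity constraint, and the strong-convexity term absorbed by the rescaling of sigma and z at the top of tv_prox):

\operatorname{prox}_{\sigma f}(z) = \operatorname*{arg\,min}_{x \in C} \Big\{ \alpha\,\mathrm{TV}(x) + \frac{1}{2\sigma}\,\lVert x - z\rVert_2^2 \Big\}

For sigma = 0 this reduces to the identity, matching the early return of odl.IdentityOperator; for sigma > 0 the FGP branch approximates the minimizer through its dual problem.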
stuaxo/vext
setup.py
https://github.com/stuaxo/vext/blob/fa98a21ecfbbc1c3d1b84085d69ec42defdd2f69/setup.py#L199-L205
def depends_on(self, dependency): """ List of packages that depend on dependency :param dependency: package name, e.g. 'vext' or 'Pillow' """ packages = self.package_info() return [package for package in packages if dependency in package.get("requires", "")]
[ "def", "depends_on", "(", "self", ",", "dependency", ")", ":", "packages", "=", "self", ".", "package_info", "(", ")", "return", "[", "package", "for", "package", "in", "packages", "if", "dependency", "in", "package", ".", "get", "(", "\"requires\"", ",", "\"\"", ")", "]" ]
List of packages that depend on dependency :param dependency: package name, e.g. 'vext' or 'Pillow'
[ "List", "of", "packages", "that", "depend", "on", "dependency", ":", "param", "dependency", ":", "package", "name", "e", ".", "g", ".", "vext", "or", "Pillow" ]
python
train
43
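A minimal usage sketch, assuming the enclosing object's package_info() returns pip-style metadata dicts whose "requires" value is a string of dependency names (which is what the membership test in the comprehension implies); the instance name is hypothetical.

checker = SetupChecker()  # hypothetical instance of the class defining depends_on
for package in checker.depends_on("vext"):
    # each entry is the full metadata dict of a package whose
    # "requires" string mentions "vext"
    print(package.get("name"), package.get("requires"))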
chaoss/grimoirelab-perceval
perceval/backends/core/gerrit.py
https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/gerrit.py#L368-L388
def next_retrieve_group_item(self, last_item=None, entry=None): """Return the item to start from in next reviews group.""" next_item = None gerrit_version = self.version if gerrit_version[0] == 2 and gerrit_version[1] > 9: if last_item is None: next_item = 0 else: next_item = last_item elif gerrit_version[0] == 2 and gerrit_version[1] == 9: # https://groups.google.com/forum/#!topic/repo-discuss/yQgRR5hlS3E cause = "Gerrit 2.9.0 does not support pagination" raise BackendError(cause=cause) else: if entry is not None: next_item = entry['sortKey'] return next_item
[ "def", "next_retrieve_group_item", "(", "self", ",", "last_item", "=", "None", ",", "entry", "=", "None", ")", ":", "next_item", "=", "None", "gerrit_version", "=", "self", ".", "version", "if", "gerrit_version", "[", "0", "]", "==", "2", "and", "gerrit_version", "[", "1", "]", ">", "9", ":", "if", "last_item", "is", "None", ":", "next_item", "=", "0", "else", ":", "next_item", "=", "last_item", "elif", "gerrit_version", "[", "0", "]", "==", "2", "and", "gerrit_version", "[", "1", "]", "==", "9", ":", "# https://groups.google.com/forum/#!topic/repo-discuss/yQgRR5hlS3E", "cause", "=", "\"Gerrit 2.9.0 does not support pagination\"", "raise", "BackendError", "(", "cause", "=", "cause", ")", "else", ":", "if", "entry", "is", "not", "None", ":", "next_item", "=", "entry", "[", "'sortKey'", "]", "return", "next_item" ]
Return the item to start from in next reviews group.
[ "Return", "the", "item", "to", "start", "from", "in", "next", "reviews", "group", "." ]
python
test
34.571429
nerdvegas/rez
src/rez/packages_.py
https://github.com/nerdvegas/rez/blob/1d3b846d53b5b5404edfe8ddb9083f9ceec8c5e7/src/rez/packages_.py#L305-L312
def qualified_name(self): """Get the qualified name of the variant. Returns: str: Name of the variant with version and index, eg "maya-2016.1[1]". """ idxstr = '' if self.index is None else str(self.index) return "%s[%s]" % (self.qualified_package_name, idxstr)
[ "def", "qualified_name", "(", "self", ")", ":", "idxstr", "=", "''", "if", "self", ".", "index", "is", "None", "else", "str", "(", "self", ".", "index", ")", "return", "\"%s[%s]\"", "%", "(", "self", ".", "qualified_package_name", ",", "idxstr", ")" ]
Get the qualified name of the variant. Returns: str: Name of the variant with version and index, eg "maya-2016.1[1]".
[ "Get", "the", "qualified", "name", "of", "the", "variant", "." ]
python
train
38.375
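A short usage sketch matching the docstring's own example; the variant object is hypothetical.

# variant with qualified_package_name "maya-2016.1" and index 1:
variant.qualified_name      # -> "maya-2016.1[1]"
# when index is None the brackets are left empty:
# -> "maya-2016.1[]"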
Autodesk/aomi
aomi/model/resource.py
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/model/resource.py#L158-L188
def diff(self, obj=None): """Determine if something has changed or not""" if self.no_resource: return NOOP if not self.present: if self.existing: return DEL return NOOP if not obj: obj = self.obj() is_diff = NOOP if self.present and self.existing: if isinstance(self.existing, dict): current = dict(self.existing) if 'refresh_interval' in current: del current['refresh_interval'] if diff_dict(current, obj): is_diff = CHANGED elif is_unicode(self.existing): if self.existing != obj: is_diff = CHANGED elif self.present and not self.existing: is_diff = ADD return is_diff
[ "def", "diff", "(", "self", ",", "obj", "=", "None", ")", ":", "if", "self", ".", "no_resource", ":", "return", "NOOP", "if", "not", "self", ".", "present", ":", "if", "self", ".", "existing", ":", "return", "DEL", "return", "NOOP", "if", "not", "obj", ":", "obj", "=", "self", ".", "obj", "(", ")", "is_diff", "=", "NOOP", "if", "self", ".", "present", "and", "self", ".", "existing", ":", "if", "isinstance", "(", "self", ".", "existing", ",", "dict", ")", ":", "current", "=", "dict", "(", "self", ".", "existing", ")", "if", "'refresh_interval'", "in", "current", ":", "del", "current", "[", "'refresh_interval'", "]", "if", "diff_dict", "(", "current", ",", "obj", ")", ":", "is_diff", "=", "CHANGED", "elif", "is_unicode", "(", "self", ".", "existing", ")", ":", "if", "self", ".", "existing", "!=", "obj", ":", "is_diff", "=", "CHANGED", "elif", "self", ".", "present", "and", "not", "self", ".", "existing", ":", "is_diff", "=", "ADD", "return", "is_diff" ]
Determine if something has changed or not
[ "Determine", "if", "something", "has", "changed", "or", "not" ]
python
train
26.935484
MisterY/asset-allocation
asset_allocation/stocks.py
https://github.com/MisterY/asset-allocation/blob/72239aa20762cda67c091f27b86e65d61bf3b613/asset_allocation/stocks.py#L125-L131
def __get_pricedb_session(self): """ Provides initialization and access to module-level session """ from pricedb import dal if not self.pricedb_session: self.pricedb_session = dal.get_default_session() return self.pricedb_session
[ "def", "__get_pricedb_session", "(", "self", ")", ":", "from", "pricedb", "import", "dal", "if", "not", "self", ".", "pricedb_session", ":", "self", ".", "pricedb_session", "=", "dal", ".", "get_default_session", "(", ")", "return", "self", ".", "pricedb_session" ]
Provides initialization and access to module-level session
[ "Provides", "initialization", "and", "access", "to", "module", "-", "level", "session" ]
python
train
38.285714
rosenbrockc/fortpy
fortpy/scripts/analyze.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/scripts/analyze.py#L532-L555
def do_dep(self, args): """Adds the name and attribute of a dependent variable to the list for plotting/tabulating functions.""" vals = args.split() twin = None if len(vals) > 1: if len(vals) == 2: var, plot = vals elif len(vals) == 3: var, plot, twin = vals else: var = vals[0] plot = None if not self._validate_var(var): msg.err("Variable {} is not a valid file name and property combination.".format(var)) else: if var in self.curargs["dependents"]: self.curargs["dependents"].remove(var) self.curargs["dependents"].append(var) if plot is not None: self.curargs["plottypes"][var] = plot if twin is not None: self.curargs["twinplots"][var] = twin
[ "def", "do_dep", "(", "self", ",", "args", ")", ":", "vals", "=", "args", ".", "split", "(", ")", "twin", "=", "None", "if", "len", "(", "vals", ")", ">", "1", ":", "if", "len", "(", "vals", ")", "==", "2", ":", "var", ",", "plot", "=", "vals", "elif", "len", "(", "vals", ")", "==", "3", ":", "var", ",", "plot", ",", "twin", "=", "vals", "else", ":", "var", "=", "vals", "[", "0", "]", "plot", "=", "None", "if", "not", "self", ".", "_validate_var", "(", "var", ")", ":", "msg", ".", "err", "(", "\"Variable {} is not a valid file name and property combination.\"", ".", "format", "(", "var", ")", ")", "else", ":", "if", "var", "in", "self", ".", "curargs", "[", "\"dependents\"", "]", ":", "self", ".", "curargs", "[", "\"dependents\"", "]", ".", "remove", "(", "var", ")", "self", ".", "curargs", "[", "\"dependents\"", "]", ".", "append", "(", "var", ")", "if", "plot", "is", "not", "None", ":", "self", ".", "curargs", "[", "\"plottypes\"", "]", "[", "var", "]", "=", "plot", "if", "twin", "is", "not", "None", ":", "self", ".", "curargs", "[", "\"twinplots\"", "]", "[", "var", "]", "=", "twin" ]
Adds the name and attribute of a dependent variable to the list for plotting/tabulating functions.
[ "Adds", "the", "name", "and", "attribute", "of", "a", "dependent", "variable", "to", "the", "list", "for", "plotting", "/", "tabulating", "functions", "." ]
python
train
36.625
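Since do_dep follows the cmd.Cmd naming convention, it backs a `dep` command in the analyzer shell. A hedged sketch of both invocation styles; the variable, plot, and twin names are invented, and _validate_var is assumed to accept them.

# In the interactive shell (hypothetical names):
#   dep tests.out.energy line twin
# Programmatic equivalent on the shell instance:
shell.do_dep("tests.out.energy line twin")
# -> "tests.out.energy" is (re)appended to curargs["dependents"],
#    curargs["plottypes"]["tests.out.energy"] = "line"
#    curargs["twinplots"]["tests.out.energy"] = "twin"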
sdonk/django-admin-ip-restrictor
admin_ip_restrictor/middleware.py
https://github.com/sdonk/django-admin-ip-restrictor/blob/29c948677e52bc416d44fff0f013d1f4ba2cb782/admin_ip_restrictor/middleware.py#L60-L71
def is_blocked(self, ip): """Determine if an IP address should be considered blocked.""" blocked = True if ip in self.allowed_admin_ips: blocked = False for allowed_range in self.allowed_admin_ip_ranges: if ipaddress.ip_address(ip) in ipaddress.ip_network(allowed_range): blocked = False return blocked
[ "def", "is_blocked", "(", "self", ",", "ip", ")", ":", "blocked", "=", "True", "if", "ip", "in", "self", ".", "allowed_admin_ips", ":", "blocked", "=", "False", "for", "allowed_range", "in", "self", ".", "allowed_admin_ip_ranges", ":", "if", "ipaddress", ".", "ip_address", "(", "ip", ")", "in", "ipaddress", ".", "ip_network", "(", "allowed_range", ")", ":", "blocked", "=", "False", "return", "blocked" ]
Determine if an IP address should be considered blocked.
[ "Determine", "if", "an", "IP", "address", "should", "be", "considered", "blocked", "." ]
python
train
31.166667
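A usage sketch under assumed middleware configuration; the attribute values below are illustrative documentation-range addresses, and on Python 2 ipaddress expects unicode strings.

mw.allowed_admin_ips = [u"203.0.113.10"]
mw.allowed_admin_ip_ranges = [u"10.0.0.0/8"]
mw.is_blocked(u"203.0.113.10")   # False: exact allow-list hit
mw.is_blocked(u"10.1.2.3")       # False: inside an allowed network
mw.is_blocked(u"198.51.100.7")   # True:  matches neither list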
bitesofcode/projexui
projexui/widgets/xsplitbutton.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xsplitbutton.py#L149-L185
def addAction(self, action, checked=None, autoBuild=True): """ Adds the inputed action to this widget's action group. This will auto-\ create a new group if no group is already defined. :param action | <QAction> || <str> :return <QAction> """ # clear the holder actions = self._actionGroup.actions() if actions and actions[0].objectName() == 'place_holder': self._actionGroup.removeAction(actions[0]) actions[0].deleteLater() # create an action from the name if not isinstance(action, QAction): action_name = nativestring(action) action = QAction(action_name, self) action.setObjectName(action_name) action.setCheckable(self.isCheckable()) # auto-check the first option if checked or (not self._actionGroup.actions() and checked is None): action.setChecked(True) elif self.isCheckable(): action.setCheckable(True) if not self.currentAction(): action.setChecked(True) self._actionGroup.addAction(action) if autoBuild: self.rebuild() return action
[ "def", "addAction", "(", "self", ",", "action", ",", "checked", "=", "None", ",", "autoBuild", "=", "True", ")", ":", "# clear the holder\r", "actions", "=", "self", ".", "_actionGroup", ".", "actions", "(", ")", "if", "actions", "and", "actions", "[", "0", "]", ".", "objectName", "(", ")", "==", "'place_holder'", ":", "self", ".", "_actionGroup", ".", "removeAction", "(", "actions", "[", "0", "]", ")", "actions", "[", "0", "]", ".", "deleteLater", "(", ")", "# create an action from the name\r", "if", "not", "isinstance", "(", "action", ",", "QAction", ")", ":", "action_name", "=", "nativestring", "(", "action", ")", "action", "=", "QAction", "(", "action_name", ",", "self", ")", "action", ".", "setObjectName", "(", "action_name", ")", "action", ".", "setCheckable", "(", "self", ".", "isCheckable", "(", ")", ")", "# auto-check the first option\r", "if", "checked", "or", "(", "not", "self", ".", "_actionGroup", ".", "actions", "(", ")", "and", "checked", "is", "None", ")", ":", "action", ".", "setChecked", "(", "True", ")", "elif", "self", ".", "isCheckable", "(", ")", ":", "action", ".", "setCheckable", "(", "True", ")", "if", "not", "self", ".", "currentAction", "(", ")", ":", "action", ".", "setChecked", "(", "True", ")", "self", ".", "_actionGroup", ".", "addAction", "(", "action", ")", "if", "autoBuild", ":", "self", ".", "rebuild", "(", ")", "return", "action" ]
Adds the input action to this widget's action group. This will auto-create a new group if no group is already defined.

:param action | <QAction> || <str>

:return <QAction>
[ "Adds", "the", "input", "action", "to", "this", "widget", "s", "action", "group", ".", "This", "will", "auto", "-", "create", "a", "new", "group", "if", "no", "group", "is", "already", "defined", ".", ":", "param", "action", "|", "<QAction", ">", "||", "<str", ">", ":", "return", "<QAction", ">" ]
python
train
35.540541
pyblish/pyblish-lite
pyblish_lite/delegate.py
https://github.com/pyblish/pyblish-lite/blob/9172b81c7ae19a36e99c89dd16e102201992dc20/pyblish_lite/delegate.py#L50-L143
def paint(self, painter, option, index): """Paint checkbox and text _ |_| My label """ body_rect = QtCore.QRectF(option.rect) check_rect = QtCore.QRectF(body_rect) check_rect.setWidth(check_rect.height()) check_rect.adjust(6, 6, -6, -6) check_color = colors["idle"] if index.data(model.IsProcessing) is True: check_color = colors["active"] elif index.data(model.HasFailed) is True: check_color = colors["warning"] elif index.data(model.HasSucceeded) is True: check_color = colors["ok"] elif index.data(model.HasProcessed) is True: check_color = colors["ok"] metrics = painter.fontMetrics() label_rect = QtCore.QRectF(option.rect.adjusted( check_rect.width() + 12, 2, 0, -2)) assert label_rect.width() > 0 label = index.data(model.Label) label = metrics.elidedText(label, QtCore.Qt.ElideRight, label_rect.width() - 20) font_color = colors["idle"] if not index.data(model.IsChecked): font_color = colors["inactive"] # Maintain reference to state, so we can restore it once we're done painter.save() # Draw label painter.setFont(fonts["h4"]) painter.setPen(QtGui.QPen(font_color)) painter.drawText(label_rect, label) # Draw action icon if index.data(model.ActionIconVisible): painter.save() if index.data(model.ActionIdle): color = colors["idle"] elif index.data(model.IsProcessing): color = colors["active"] elif index.data(model.ActionFailed): color = colors["warning"] else: color = colors["ok"] painter.setFont(fonts["smallAwesome"]) painter.setPen(QtGui.QPen(color)) icon_rect = QtCore.QRectF(option.rect.adjusted( label_rect.width() + 1, label_rect.height() / 3, 0, 0)) painter.drawText(icon_rect, icons["action"]) painter.restore() # Draw checkbox pen = QtGui.QPen(check_color, 1) painter.setPen(pen) if index.data(model.IsOptional): painter.drawRect(check_rect) if index.data(model.IsChecked): painter.fillRect(check_rect, check_color) elif not index.data(model.IsIdle) and index.data(model.IsChecked): painter.fillRect(check_rect, check_color) if option.state & QtWidgets.QStyle.State_MouseOver: painter.fillRect(body_rect, colors["hover"]) if option.state & QtWidgets.QStyle.State_Selected: painter.fillRect(body_rect, colors["selected"]) # Ok, we're done, tidy up. painter.restore()
[ "def", "paint", "(", "self", ",", "painter", ",", "option", ",", "index", ")", ":", "body_rect", "=", "QtCore", ".", "QRectF", "(", "option", ".", "rect", ")", "check_rect", "=", "QtCore", ".", "QRectF", "(", "body_rect", ")", "check_rect", ".", "setWidth", "(", "check_rect", ".", "height", "(", ")", ")", "check_rect", ".", "adjust", "(", "6", ",", "6", ",", "-", "6", ",", "-", "6", ")", "check_color", "=", "colors", "[", "\"idle\"", "]", "if", "index", ".", "data", "(", "model", ".", "IsProcessing", ")", "is", "True", ":", "check_color", "=", "colors", "[", "\"active\"", "]", "elif", "index", ".", "data", "(", "model", ".", "HasFailed", ")", "is", "True", ":", "check_color", "=", "colors", "[", "\"warning\"", "]", "elif", "index", ".", "data", "(", "model", ".", "HasSucceeded", ")", "is", "True", ":", "check_color", "=", "colors", "[", "\"ok\"", "]", "elif", "index", ".", "data", "(", "model", ".", "HasProcessed", ")", "is", "True", ":", "check_color", "=", "colors", "[", "\"ok\"", "]", "metrics", "=", "painter", ".", "fontMetrics", "(", ")", "label_rect", "=", "QtCore", ".", "QRectF", "(", "option", ".", "rect", ".", "adjusted", "(", "check_rect", ".", "width", "(", ")", "+", "12", ",", "2", ",", "0", ",", "-", "2", ")", ")", "assert", "label_rect", ".", "width", "(", ")", ">", "0", "label", "=", "index", ".", "data", "(", "model", ".", "Label", ")", "label", "=", "metrics", ".", "elidedText", "(", "label", ",", "QtCore", ".", "Qt", ".", "ElideRight", ",", "label_rect", ".", "width", "(", ")", "-", "20", ")", "font_color", "=", "colors", "[", "\"idle\"", "]", "if", "not", "index", ".", "data", "(", "model", ".", "IsChecked", ")", ":", "font_color", "=", "colors", "[", "\"inactive\"", "]", "# Maintain reference to state, so we can restore it once we're done", "painter", ".", "save", "(", ")", "# Draw label", "painter", ".", "setFont", "(", "fonts", "[", "\"h4\"", "]", ")", "painter", ".", "setPen", "(", "QtGui", ".", "QPen", "(", "font_color", ")", ")", "painter", ".", "drawText", "(", "label_rect", ",", "label", ")", "# Draw action icon", "if", "index", ".", "data", "(", "model", ".", "ActionIconVisible", ")", ":", "painter", ".", "save", "(", ")", "if", "index", ".", "data", "(", "model", ".", "ActionIdle", ")", ":", "color", "=", "colors", "[", "\"idle\"", "]", "elif", "index", ".", "data", "(", "model", ".", "IsProcessing", ")", ":", "color", "=", "colors", "[", "\"active\"", "]", "elif", "index", ".", "data", "(", "model", ".", "ActionFailed", ")", ":", "color", "=", "colors", "[", "\"warning\"", "]", "else", ":", "color", "=", "colors", "[", "\"ok\"", "]", "painter", ".", "setFont", "(", "fonts", "[", "\"smallAwesome\"", "]", ")", "painter", ".", "setPen", "(", "QtGui", ".", "QPen", "(", "color", ")", ")", "icon_rect", "=", "QtCore", ".", "QRectF", "(", "option", ".", "rect", ".", "adjusted", "(", "label_rect", ".", "width", "(", ")", "+", "1", ",", "label_rect", ".", "height", "(", ")", "/", "3", ",", "0", ",", "0", ")", ")", "painter", ".", "drawText", "(", "icon_rect", ",", "icons", "[", "\"action\"", "]", ")", "painter", ".", "restore", "(", ")", "# Draw checkbox", "pen", "=", "QtGui", ".", "QPen", "(", "check_color", ",", "1", ")", "painter", ".", "setPen", "(", "pen", ")", "if", "index", ".", "data", "(", "model", ".", "IsOptional", ")", ":", "painter", ".", "drawRect", "(", "check_rect", ")", "if", "index", ".", "data", "(", "model", ".", "IsChecked", ")", ":", "painter", ".", "fillRect", "(", "check_rect", ",", "check_color", ")", "elif", "not", "index", ".", "data", "(", 
"model", ".", "IsIdle", ")", "and", "index", ".", "data", "(", "model", ".", "IsChecked", ")", ":", "painter", ".", "fillRect", "(", "check_rect", ",", "check_color", ")", "if", "option", ".", "state", "&", "QtWidgets", ".", "QStyle", ".", "State_MouseOver", ":", "painter", ".", "fillRect", "(", "body_rect", ",", "colors", "[", "\"hover\"", "]", ")", "if", "option", ".", "state", "&", "QtWidgets", ".", "QStyle", ".", "State_Selected", ":", "painter", ".", "fillRect", "(", "body_rect", ",", "colors", "[", "\"selected\"", "]", ")", "# Ok, we're done, tidy up.", "painter", ".", "restore", "(", ")" ]
Paint checkbox and text _ |_| My label
[ "Paint", "checkbox", "and", "text", "_", "|_|", "My", "label" ]
python
train
30.255319
jdoda/sdl2hl
sdl2hl/renderer.py
https://github.com/jdoda/sdl2hl/blob/3b477e1e01cea5d8e15e9e5ef3a302ea460f5946/sdl2hl/renderer.py#L268-L298
def copy(self, texture, source_rect=None, dest_rect=None, rotation=0, center=None, flip=lib.SDL_FLIP_NONE): """Copy a portion of the source texture to the current rendering target, rotating it by angle around the given center. Args: texture (Texture): The source texture. source_rect (Rect): The source rectangle, or None for the entire texture. dest_rect (Rect): The destination rectangle, or None for the entire rendering target. rotation (float): An angle in degrees that indicates the rotation that will be applied to dest_rect. center (Point): The point around which dest_rect will be rotated (if None, rotation will be done around dest_rect.w/2, dest_rect.h/2). flip (int): A value stating which flipping actions should be performed on the texture. Raises: SDLError: If an error is encountered. """ if source_rect == None: source_rect_ptr = ffi.NULL else: source_rect_ptr = source_rect._ptr if dest_rect == None: dest_rect_ptr = ffi.NULL else: dest_rect_ptr = dest_rect._ptr if center == None: center_ptr = ffi.NULL else: center_ptr = center._ptr check_int_err(lib.SDL_RenderCopyEx(self._ptr, texture._ptr, source_rect_ptr, dest_rect_ptr, rotation, center_ptr, flip))
[ "def", "copy", "(", "self", ",", "texture", ",", "source_rect", "=", "None", ",", "dest_rect", "=", "None", ",", "rotation", "=", "0", ",", "center", "=", "None", ",", "flip", "=", "lib", ".", "SDL_FLIP_NONE", ")", ":", "if", "source_rect", "==", "None", ":", "source_rect_ptr", "=", "ffi", ".", "NULL", "else", ":", "source_rect_ptr", "=", "source_rect", ".", "_ptr", "if", "dest_rect", "==", "None", ":", "dest_rect_ptr", "=", "ffi", ".", "NULL", "else", ":", "dest_rect_ptr", "=", "dest_rect", ".", "_ptr", "if", "center", "==", "None", ":", "center_ptr", "=", "ffi", ".", "NULL", "else", ":", "center_ptr", "=", "center", ".", "_ptr", "check_int_err", "(", "lib", ".", "SDL_RenderCopyEx", "(", "self", ".", "_ptr", ",", "texture", ".", "_ptr", ",", "source_rect_ptr", ",", "dest_rect_ptr", ",", "rotation", ",", "center_ptr", ",", "flip", ")", ")" ]
Copy a portion of the source texture to the current rendering target, rotating it by angle around the given center. Args: texture (Texture): The source texture. source_rect (Rect): The source rectangle, or None for the entire texture. dest_rect (Rect): The destination rectangle, or None for the entire rendering target. rotation (float): An angle in degrees that indicates the rotation that will be applied to dest_rect. center (Point): The point around which dest_rect will be rotated (if None, rotation will be done around dest_rect.w/2, dest_rect.h/2). flip (int): A value stating which flipping actions should be performed on the texture. Raises: SDLError: If an error is encountered.
[ "Copy", "a", "portion", "of", "the", "source", "texture", "to", "the", "current", "rendering", "target", "rotating", "it", "by", "angle", "around", "the", "given", "center", "." ]
python
train
46.580645
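A hedged usage sketch; the Rect and Point constructors are assumed to come from the same sdl2hl package (the method only needs objects exposing a _ptr), and the renderer/texture setup is omitted.

# Blit the whole texture onto the whole rendering target:
renderer.copy(texture)
# Blit a 64x64 region, rotated 45 degrees about its own centre:
renderer.copy(texture,
              source_rect=Rect(0, 0, 64, 64),
              dest_rect=Rect(100, 100, 64, 64),
              rotation=45,
              center=Point(32, 32))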
mromanello/hucitlib
knowledge_base/__init__.py
https://github.com/mromanello/hucitlib/blob/6587d1b04eb7e5b48ad7359be845e5d3b444d6fa/knowledge_base/__init__.py#L272-L298
def get_author_label(self, urn): """Get the label corresponding to the author identified by the CTS URN. try to get an lang=en label (if multiple labels in this lang pick the shortest) try to get a lang=la label (if multiple labels in this lang exist pick the shortest) try to get a lang=None label (if multiple labels in this lang exist pick the shortest) returns None if no name is found """ author = self.get_resource_by_urn(urn) names = author.get_names() en_names = sorted([name[1] for name in names if name[0] == "en"], key=len) try: assert len(en_names) > 0 return en_names[0] except Exception as e: none_names = sorted([name[1] for name in names if name[0] == None], key=len) try: return none_names[0] except Exception as e: la_names = sorted([name[1] for name in names if name[0] == "la"], key=len) try: assert len(la_names) > 0 return la_names[0] except Exception as e: return None
[ "def", "get_author_label", "(", "self", ",", "urn", ")", ":", "author", "=", "self", ".", "get_resource_by_urn", "(", "urn", ")", "names", "=", "author", ".", "get_names", "(", ")", "en_names", "=", "sorted", "(", "[", "name", "[", "1", "]", "for", "name", "in", "names", "if", "name", "[", "0", "]", "==", "\"en\"", "]", ",", "key", "=", "len", ")", "try", ":", "assert", "len", "(", "en_names", ")", ">", "0", "return", "en_names", "[", "0", "]", "except", "Exception", "as", "e", ":", "none_names", "=", "sorted", "(", "[", "name", "[", "1", "]", "for", "name", "in", "names", "if", "name", "[", "0", "]", "==", "None", "]", ",", "key", "=", "len", ")", "try", ":", "return", "none_names", "[", "0", "]", "except", "Exception", "as", "e", ":", "la_names", "=", "sorted", "(", "[", "name", "[", "1", "]", "for", "name", "in", "names", "if", "name", "[", "0", "]", "==", "\"la\"", "]", ",", "key", "=", "len", ")", "try", ":", "assert", "len", "(", "la_names", ")", ">", "0", "return", "la_names", "[", "0", "]", "except", "Exception", "as", "e", ":", "return", "None" ]
Get the label corresponding to the author identified by the CTS URN.

try to get a lang=en label (if multiple labels in this lang exist pick the shortest)
try to get a lang=None label (if multiple labels in this lang exist pick the shortest)
try to get a lang=la label (if multiple labels in this lang exist pick the shortest)
returns None if no name is found
[ "Get", "the", "label", "corresponding", "to", "the", "author", "identified", "by", "the", "CTS", "URN", "." ]
python
train
42.222222
wbond/asn1crypto
asn1crypto/core.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/core.py#L3523-L3617
def _make_value(self, field_name, field_spec, value_spec, field_params, value): """ Contructs an appropriate Asn1Value object for a field :param field_name: A unicode string of the field name :param field_spec: An Asn1Value class that is the field spec :param value_spec: An Asn1Value class that is the vaue spec :param field_params: None or a dict of params for the field spec :param value: The value to construct an Asn1Value object from :return: An instance of a child class of Asn1Value """ if value is None and 'optional' in field_params: return VOID specs_different = field_spec != value_spec is_any = issubclass(field_spec, Any) if issubclass(value_spec, Choice): is_asn1value = isinstance(value, Asn1Value) is_tuple = isinstance(value, tuple) and len(value) == 2 is_dict = isinstance(value, dict) and len(value) == 1 if not is_asn1value and not is_tuple and not is_dict: raise ValueError(unwrap( ''' Can not set a native python value to %s, which has the choice type of %s - value must be an instance of Asn1Value ''', field_name, type_name(value_spec) )) if is_tuple or is_dict: value = value_spec(value) if not isinstance(value, value_spec): wrapper = value_spec() wrapper.validate(value.class_, value.tag, value.contents) wrapper._parsed = value new_value = wrapper else: new_value = value elif isinstance(value, field_spec): new_value = value if specs_different: new_value.parse(value_spec) elif (not specs_different or is_any) and not isinstance(value, value_spec): if (not is_any or specs_different) and isinstance(value, Asn1Value): raise TypeError(unwrap( ''' %s value must be %s, not %s ''', field_name, type_name(value_spec), type_name(value) )) new_value = value_spec(value, **field_params) else: if isinstance(value, value_spec): new_value = value else: if isinstance(value, Asn1Value): raise TypeError(unwrap( ''' %s value must be %s, not %s ''', field_name, type_name(value_spec), type_name(value) )) new_value = value_spec(value) # For when the field is OctetString or OctetBitString with embedded # values we need to wrap the value in the field spec to get the # appropriate encoded value. if specs_different and not is_any: wrapper = field_spec(value=new_value.dump(), **field_params) wrapper._parsed = (new_value, new_value.__class__, None) new_value = wrapper new_value = _fix_tagging(new_value, field_params) return new_value
[ "def", "_make_value", "(", "self", ",", "field_name", ",", "field_spec", ",", "value_spec", ",", "field_params", ",", "value", ")", ":", "if", "value", "is", "None", "and", "'optional'", "in", "field_params", ":", "return", "VOID", "specs_different", "=", "field_spec", "!=", "value_spec", "is_any", "=", "issubclass", "(", "field_spec", ",", "Any", ")", "if", "issubclass", "(", "value_spec", ",", "Choice", ")", ":", "is_asn1value", "=", "isinstance", "(", "value", ",", "Asn1Value", ")", "is_tuple", "=", "isinstance", "(", "value", ",", "tuple", ")", "and", "len", "(", "value", ")", "==", "2", "is_dict", "=", "isinstance", "(", "value", ",", "dict", ")", "and", "len", "(", "value", ")", "==", "1", "if", "not", "is_asn1value", "and", "not", "is_tuple", "and", "not", "is_dict", ":", "raise", "ValueError", "(", "unwrap", "(", "'''\n Can not set a native python value to %s, which has the\n choice type of %s - value must be an instance of Asn1Value\n '''", ",", "field_name", ",", "type_name", "(", "value_spec", ")", ")", ")", "if", "is_tuple", "or", "is_dict", ":", "value", "=", "value_spec", "(", "value", ")", "if", "not", "isinstance", "(", "value", ",", "value_spec", ")", ":", "wrapper", "=", "value_spec", "(", ")", "wrapper", ".", "validate", "(", "value", ".", "class_", ",", "value", ".", "tag", ",", "value", ".", "contents", ")", "wrapper", ".", "_parsed", "=", "value", "new_value", "=", "wrapper", "else", ":", "new_value", "=", "value", "elif", "isinstance", "(", "value", ",", "field_spec", ")", ":", "new_value", "=", "value", "if", "specs_different", ":", "new_value", ".", "parse", "(", "value_spec", ")", "elif", "(", "not", "specs_different", "or", "is_any", ")", "and", "not", "isinstance", "(", "value", ",", "value_spec", ")", ":", "if", "(", "not", "is_any", "or", "specs_different", ")", "and", "isinstance", "(", "value", ",", "Asn1Value", ")", ":", "raise", "TypeError", "(", "unwrap", "(", "'''\n %s value must be %s, not %s\n '''", ",", "field_name", ",", "type_name", "(", "value_spec", ")", ",", "type_name", "(", "value", ")", ")", ")", "new_value", "=", "value_spec", "(", "value", ",", "*", "*", "field_params", ")", "else", ":", "if", "isinstance", "(", "value", ",", "value_spec", ")", ":", "new_value", "=", "value", "else", ":", "if", "isinstance", "(", "value", ",", "Asn1Value", ")", ":", "raise", "TypeError", "(", "unwrap", "(", "'''\n %s value must be %s, not %s\n '''", ",", "field_name", ",", "type_name", "(", "value_spec", ")", ",", "type_name", "(", "value", ")", ")", ")", "new_value", "=", "value_spec", "(", "value", ")", "# For when the field is OctetString or OctetBitString with embedded", "# values we need to wrap the value in the field spec to get the", "# appropriate encoded value.", "if", "specs_different", "and", "not", "is_any", ":", "wrapper", "=", "field_spec", "(", "value", "=", "new_value", ".", "dump", "(", ")", ",", "*", "*", "field_params", ")", "wrapper", ".", "_parsed", "=", "(", "new_value", ",", "new_value", ".", "__class__", ",", "None", ")", "new_value", "=", "wrapper", "new_value", "=", "_fix_tagging", "(", "new_value", ",", "field_params", ")", "return", "new_value" ]
Constructs an appropriate Asn1Value object for a field

:param field_name:
    A unicode string of the field name

:param field_spec:
    An Asn1Value class that is the field spec

:param value_spec:
    An Asn1Value class that is the value spec

:param field_params:
    None or a dict of params for the field spec

:param value:
    The value to construct an Asn1Value object from

:return:
    An instance of a child class of Asn1Value
[ "Constructs", "an", "appropriate", "Asn1Value", "object", "for", "a", "field" ]
python
train
35.810526
joferkington/mplstereonet
mplstereonet/analysis.py
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/analysis.py#L325-L400
def kmeans(*args, **kwargs): """ Find centers of multi-modal clusters of data using a kmeans approach modified for spherical measurements. Parameters ---------- *args : 2 or 3 sequences of measurements By default, this will be expected to be ``strike`` & ``dip``, both array-like sequences representing poles to planes. (Rake measurements require three parameters, thus the variable number of arguments.) The ``measurement`` kwarg controls how these arguments are interpreted. num : int The number of clusters to find. Defaults to 2. bidirectional : bool Whether or not the measurements are bi-directional linear/planar features or directed vectors. Defaults to True. tolerance : float Iteration will continue until the centers have not changed by more than this amount. Defaults to 1e-5. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"poles"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for analysis. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. Returns ------- centers : An Nx2 array-like Longitude and latitude in radians of the centers of each cluster. """ lon, lat = _convert_measurements(args, kwargs.get('measurement', 'poles')) num = kwargs.get('num', 2) bidirectional = kwargs.get('bidirectional', True) tolerance = kwargs.get('tolerance', 1e-5) points = lon, lat dist = lambda x: stereonet_math.angular_distance(x, points, bidirectional) center_lon = np.random.choice(lon, num) center_lat = np.random.choice(lat, num) centers = np.column_stack([center_lon, center_lat]) while True: dists = np.array([dist(item) for item in centers]).T closest = dists.argmin(axis=1) new_centers = [] for i in range(num): mask = mask = closest == i _, vecs = cov_eig(lon[mask], lat[mask], bidirectional) new_centers.append(stereonet_math.cart2sph(*vecs[:,-1])) if np.allclose(centers, new_centers, atol=tolerance): break else: centers = new_centers return centers
[ "def", "kmeans", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lon", ",", "lat", "=", "_convert_measurements", "(", "args", ",", "kwargs", ".", "get", "(", "'measurement'", ",", "'poles'", ")", ")", "num", "=", "kwargs", ".", "get", "(", "'num'", ",", "2", ")", "bidirectional", "=", "kwargs", ".", "get", "(", "'bidirectional'", ",", "True", ")", "tolerance", "=", "kwargs", ".", "get", "(", "'tolerance'", ",", "1e-5", ")", "points", "=", "lon", ",", "lat", "dist", "=", "lambda", "x", ":", "stereonet_math", ".", "angular_distance", "(", "x", ",", "points", ",", "bidirectional", ")", "center_lon", "=", "np", ".", "random", ".", "choice", "(", "lon", ",", "num", ")", "center_lat", "=", "np", ".", "random", ".", "choice", "(", "lat", ",", "num", ")", "centers", "=", "np", ".", "column_stack", "(", "[", "center_lon", ",", "center_lat", "]", ")", "while", "True", ":", "dists", "=", "np", ".", "array", "(", "[", "dist", "(", "item", ")", "for", "item", "in", "centers", "]", ")", ".", "T", "closest", "=", "dists", ".", "argmin", "(", "axis", "=", "1", ")", "new_centers", "=", "[", "]", "for", "i", "in", "range", "(", "num", ")", ":", "mask", "=", "mask", "=", "closest", "==", "i", "_", ",", "vecs", "=", "cov_eig", "(", "lon", "[", "mask", "]", ",", "lat", "[", "mask", "]", ",", "bidirectional", ")", "new_centers", ".", "append", "(", "stereonet_math", ".", "cart2sph", "(", "*", "vecs", "[", ":", ",", "-", "1", "]", ")", ")", "if", "np", ".", "allclose", "(", "centers", ",", "new_centers", ",", "atol", "=", "tolerance", ")", ":", "break", "else", ":", "centers", "=", "new_centers", "return", "centers" ]
Find centers of multi-modal clusters of data using a kmeans approach modified for spherical measurements. Parameters ---------- *args : 2 or 3 sequences of measurements By default, this will be expected to be ``strike`` & ``dip``, both array-like sequences representing poles to planes. (Rake measurements require three parameters, thus the variable number of arguments.) The ``measurement`` kwarg controls how these arguments are interpreted. num : int The number of clusters to find. Defaults to 2. bidirectional : bool Whether or not the measurements are bi-directional linear/planar features or directed vectors. Defaults to True. tolerance : float Iteration will continue until the centers have not changed by more than this amount. Defaults to 1e-5. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"poles"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for analysis. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. Returns ------- centers : An Nx2 array-like Longitude and latitude in radians of the centers of each cluster.
[ "Find", "centers", "of", "multi", "-", "modal", "clusters", "of", "data", "using", "a", "kmeans", "approach", "modified", "for", "spherical", "measurements", "." ]
python
train
36.328947
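A usage sketch built from the docstring's parameter description; the synthetic strike/dip data is illustrative and the function is assumed importable from the mplstereonet package namespace.

import numpy as np
import mplstereonet

# two artificial clusters of poles to planes
strikes = np.concatenate([np.random.normal(40, 5, 50),
                          np.random.normal(130, 5, 50)])
dips = np.random.normal(60, 5, 100)

centers = mplstereonet.kmeans(strikes, dips, num=2)  # measurement="poles" by default
# centers is a sequence of two (lon, lat) pairs in radians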
orbingol/NURBS-Python
geomdl/linalg.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/linalg.py#L396-L415
def triangle_center(tri, uv=False): """ Computes the center of mass of the input triangle. :param tri: triangle object :type tri: elements.Triangle :param uv: if True, then finds parametric position of the center of mass :type uv: bool :return: center of mass of the triangle :rtype: tuple """ if uv: data = [t.uv for t in tri] mid = [0.0, 0.0] else: data = tri.vertices mid = [0.0, 0.0, 0.0] for vert in data: mid = [m + v for m, v in zip(mid, vert)] mid = [float(m) / 3.0 for m in mid] return tuple(mid)
[ "def", "triangle_center", "(", "tri", ",", "uv", "=", "False", ")", ":", "if", "uv", ":", "data", "=", "[", "t", ".", "uv", "for", "t", "in", "tri", "]", "mid", "=", "[", "0.0", ",", "0.0", "]", "else", ":", "data", "=", "tri", ".", "vertices", "mid", "=", "[", "0.0", ",", "0.0", ",", "0.0", "]", "for", "vert", "in", "data", ":", "mid", "=", "[", "m", "+", "v", "for", "m", ",", "v", "in", "zip", "(", "mid", ",", "vert", ")", "]", "mid", "=", "[", "float", "(", "m", ")", "/", "3.0", "for", "m", "in", "mid", "]", "return", "tuple", "(", "mid", ")" ]
Computes the center of mass of the input triangle. :param tri: triangle object :type tri: elements.Triangle :param uv: if True, then finds parametric position of the center of mass :type uv: bool :return: center of mass of the triangle :rtype: tuple
[ "Computes", "the", "center", "of", "mass", "of", "the", "input", "triangle", "." ]
python
train
28.95
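A worked instance of the centroid arithmetic above; the Triangle construction is assumed from geomdl.elements and the vertex data is illustrative.

# vertices (0,0,0), (3,0,0), (0,3,0): component-wise sum, then divide by 3
# mid = ((0+3+0)/3, (0+0+3)/3, (0+0+0)/3) = (1.0, 1.0, 0.0)
triangle_center(tri)           # -> (1.0, 1.0, 0.0)
triangle_center(tri, uv=True)  # averages the 2-component t.uv values instead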
idlesign/uwsgiconf
uwsgiconf/options/queue.py
https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/options/queue.py#L15-L32
def enable(self, size, block_size=None, store=None, store_sync_interval=None): """Enables shared queue of the given size. :param int size: Queue size. :param int block_size: Block size in bytes. Default: 8 KiB. :param str|unicode store: Persist the queue into file. :param int store_sync_interval: Store sync interval in master cycles (usually seconds). """ self._set('queue', size) self._set('queue-blocksize', block_size) self._set('queue-store', store) self._set('queue-store-sync', store_sync_interval) return self._section
[ "def", "enable", "(", "self", ",", "size", ",", "block_size", "=", "None", ",", "store", "=", "None", ",", "store_sync_interval", "=", "None", ")", ":", "self", ".", "_set", "(", "'queue'", ",", "size", ")", "self", ".", "_set", "(", "'queue-blocksize'", ",", "block_size", ")", "self", ".", "_set", "(", "'queue-store'", ",", "store", ")", "self", ".", "_set", "(", "'queue-store-sync'", ",", "store_sync_interval", ")", "return", "self", ".", "_section" ]
Enables shared queue of the given size. :param int size: Queue size. :param int block_size: Block size in bytes. Default: 8 KiB. :param str|unicode store: Persist the queue into file. :param int store_sync_interval: Store sync interval in master cycles (usually seconds).
[ "Enables", "shared", "queue", "of", "the", "given", "size", "." ]
python
train
33.555556
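A hedged wiring sketch; the option-group attribute name on the uwsgiconf Section (`queue` here) is an assumption based on this module's path, and the values are illustrative.

section.queue.enable(100, block_size=65536, store='/run/uwsgi/myqueue')
# corresponds to the uWSGI options:
#   queue = 100
#   queue-blocksize = 65536
#   queue-store = /run/uwsgi/myqueue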
boriel/zxbasic
arch/zx48k/backend/__parray.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__parray.py#L132-L159
def _pastore8(ins): ''' Stores 2º operand content into address of 1st operand. 1st operand is an array element. Dimensions are pushed into the stack. Use '*' for indirect store on 1st operand (A pointer to an array) ''' output = _paddr(ins.quad[1]) value = ins.quad[2] if value[0] == '*': value = value[1:] indirect = True else: indirect = False try: value = int(ins.quad[2]) & 0xFFFF if indirect: output.append('ld a, (%i)' % value) output.append('ld (hl), a') else: value &= 0xFF output.append('ld (hl), %i' % value) except ValueError: output.append('pop af') output.append('ld (hl), a') return output
[ "def", "_pastore8", "(", "ins", ")", ":", "output", "=", "_paddr", "(", "ins", ".", "quad", "[", "1", "]", ")", "value", "=", "ins", ".", "quad", "[", "2", "]", "if", "value", "[", "0", "]", "==", "'*'", ":", "value", "=", "value", "[", "1", ":", "]", "indirect", "=", "True", "else", ":", "indirect", "=", "False", "try", ":", "value", "=", "int", "(", "ins", ".", "quad", "[", "2", "]", ")", "&", "0xFFFF", "if", "indirect", ":", "output", ".", "append", "(", "'ld a, (%i)'", "%", "value", ")", "output", ".", "append", "(", "'ld (hl), a'", ")", "else", ":", "value", "&=", "0xFF", "output", ".", "append", "(", "'ld (hl), %i'", "%", "value", ")", "except", "ValueError", ":", "output", ".", "append", "(", "'pop af'", ")", "output", ".", "append", "(", "'ld (hl), a'", ")", "return", "output" ]
Stores 2nd operand content into address of 1st operand. 1st operand is an array element. Dimensions are pushed into the stack. Use '*' for indirect store on 1st operand (A pointer to an array)
[ "Stores", "2nd", "operand", "content", "into", "address", "of", "1st", "operand", ".", "1st", "operand", "is", "an", "array", "element", ".", "Dimensions", "are", "pushed", "into", "the", "stack", ".", "Use", "*", "for", "indirect", "store", "on", "1st", "operand", "(", "A", "pointer", "to", "an", "array", ")" ]
python
train
26.392857
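An illustrative trace of the store paths above; the quad tuple and operand names are invented (the function only indexes ins.quad), and the leading address-computation instructions produced by _paddr are elided.

# constant value: masked to a byte and stored directly
#   quad = ('pastore8', 'a1', '5')      -> ..., "ld (hl), 5"
# non-numeric operand: the value arrives on the stack
#   quad = ('pastore8', 'a1', 't0')     -> ..., "pop af", "ld (hl), a"
# a leading '*' marks an indirect store; note that int() is applied to the
# unstripped ins.quad[2], so a '*'-prefixed constant also raises ValueError
# and falls back to the pop af path as written.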
honzamach/pynspect
pynspect/traversers.py
https://github.com/honzamach/pynspect/blob/0582dcc1f7aafe50e25a21c792ea1b3367ea5881/pynspect/traversers.py#L710-L730
def evaluate_binop_math(self, operation, left, right, **kwargs): """ Evaluate given mathematical binary operation with given operands. """ if not operation in self.binops_math: raise ValueError("Invalid math binary operation '{}'".format(operation)) if left is None or right is None: return None if not isinstance(left, (list, ListIP)): left = [left] if not isinstance(right, (list, ListIP)): right = [right] if not left or not right: return None try: vect = self._calculate_vector(operation, left, right) if len(vect) > 1: return vect return vect[0] except: return None
[ "def", "evaluate_binop_math", "(", "self", ",", "operation", ",", "left", ",", "right", ",", "*", "*", "kwargs", ")", ":", "if", "not", "operation", "in", "self", ".", "binops_math", ":", "raise", "ValueError", "(", "\"Invalid math binary operation '{}'\"", ".", "format", "(", "operation", ")", ")", "if", "left", "is", "None", "or", "right", "is", "None", ":", "return", "None", "if", "not", "isinstance", "(", "left", ",", "(", "list", ",", "ListIP", ")", ")", ":", "left", "=", "[", "left", "]", "if", "not", "isinstance", "(", "right", ",", "(", "list", ",", "ListIP", ")", ")", ":", "right", "=", "[", "right", "]", "if", "not", "left", "or", "not", "right", ":", "return", "None", "try", ":", "vect", "=", "self", ".", "_calculate_vector", "(", "operation", ",", "left", ",", "right", ")", "if", "len", "(", "vect", ")", ">", "1", ":", "return", "vect", "return", "vect", "[", "0", "]", "except", ":", "return", "None" ]
Evaluate given mathematical binary operation with given operands.
[ "Evaluate", "given", "mathematical", "binary", "operation", "with", "given", "operands", "." ]
python
train
35.904762
WebarchivCZ/WA-KAT
src/wa_kat/analyzers/keyword_detector.py
https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/analyzers/keyword_detector.py#L54-L73
def get_html_keywords(index_page):
    """
    Return list of `keywords` parsed from HTML ``<meta>`` tags.

    Args:
        index_page (str): Content of the page as UTF-8 string

    Returns:
        list: List of :class:`.SourceString` objects.
    """
    keyword_lists = (
        keyword_list.split(",")
        for keyword_list in parse_meta(index_page, "keywords", "HTML")
    )

    # create SourceStrings from the list of keywords
    return [
        SourceString(keyword.strip(), source="HTML")
        for keyword in sum(keyword_lists, [])  # flatten the list
    ]
[ "def", "get_html_keywords", "(", "index_page", ")", ":", "keyword_lists", "=", "(", "keyword_list", ".", "split", "(", "\",\"", ")", "for", "keyword_list", "in", "parse_meta", "(", "index_page", ",", "\"keywords\"", ",", "\"HTML\"", ")", ")", "# create SourceStrings from the list of keywords", "return", "[", "SourceString", "(", "keyword", ".", "strip", "(", ")", ",", "source", "=", "\"HTML\"", ")", "for", "keyword", "in", "sum", "(", "keyword_lists", ",", "[", "]", ")", "# flatten the list", "]" ]
Return list of `keywords` parsed from HTML ``<meta>`` tags. Args: index_page (str): Content of the page as UTF-8 string Returns: list: List of :class:`.SourceString` objects.
[ "Return", "list", "of", "keywords", "parsed", "from", "HTML", "<meta", ">", "tags", "." ]
python
train
28
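A hedged usage sketch, assuming parse_meta returns the content strings of the matching keywords <meta> tags (which is what the comma split implies); the HTML is illustrative.

html = '<html><head><meta name="keywords" content="python, parsing , web"></head></html>'
get_html_keywords(html)
# -> [SourceString("python", source="HTML"),
#     SourceString("parsing", source="HTML"),
#     SourceString("web", source="HTML")]
# whitespace around each keyword is stripped; multiple keyword tags are flattened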
giffels/CloudStackAIO
CloudStackAIO/CloudStack.py
https://github.com/giffels/CloudStackAIO/blob/f9df856622eb8966a6816f8008cc16d75b0067e5/CloudStackAIO/CloudStack.py#L157-L201
async def _handle_response(self, response: aiohttp.client_reqrep.ClientResponse, await_final_result: bool) -> dict: """ Handles the response returned from the CloudStack API. Some CloudStack API are implemented asynchronous, which means that the API call returns just a job id. The actually expected API response is postponed and a specific asyncJobResults API has to be polled using the job id to get the final result once the API call has been processed. :param response: The response returned by the aiohttp call. :type response: aiohttp.client_reqrep.ClientResponse :param await_final_result: Specifier that indicates whether the function should poll the asyncJobResult API until the asynchronous API call has been processed :type await_final_result: bool :return: Dictionary containing the JSON response of the API call :rtype: dict """ try: data = await response.json() except aiohttp.client_exceptions.ContentTypeError: text = await response.text() logging.debug('Content returned by server not of type "application/json"\n Content: {}'.format(text)) raise CloudStackClientException(message="Could not decode content. Server did not return json content!") else: data = self._transform_data(data) if response.status != 200: raise CloudStackClientException(message="Async CloudStack call failed!", error_code=data.get("errorcode", response.status), error_text=data.get("errortext"), response=data) while await_final_result and ('jobid' in data): await asyncio.sleep(self.async_poll_latency) data = await self.queryAsyncJobResult(jobid=data['jobid']) if data['jobstatus']: # jobstatus is 0 for pending async CloudStack calls if not data['jobresultcode']: # exit code is zero try: return data['jobresult'] except KeyError: pass logging.debug("Async CloudStack call returned {}".format(str(data))) raise CloudStackClientException(message="Async CloudStack call failed!", error_code=data.get("errorcode"), error_text=data.get("errortext"), response=data) return data
[ "async", "def", "_handle_response", "(", "self", ",", "response", ":", "aiohttp", ".", "client_reqrep", ".", "ClientResponse", ",", "await_final_result", ":", "bool", ")", "->", "dict", ":", "try", ":", "data", "=", "await", "response", ".", "json", "(", ")", "except", "aiohttp", ".", "client_exceptions", ".", "ContentTypeError", ":", "text", "=", "await", "response", ".", "text", "(", ")", "logging", ".", "debug", "(", "'Content returned by server not of type \"application/json\"\\n Content: {}'", ".", "format", "(", "text", ")", ")", "raise", "CloudStackClientException", "(", "message", "=", "\"Could not decode content. Server did not return json content!\"", ")", "else", ":", "data", "=", "self", ".", "_transform_data", "(", "data", ")", "if", "response", ".", "status", "!=", "200", ":", "raise", "CloudStackClientException", "(", "message", "=", "\"Async CloudStack call failed!\"", ",", "error_code", "=", "data", ".", "get", "(", "\"errorcode\"", ",", "response", ".", "status", ")", ",", "error_text", "=", "data", ".", "get", "(", "\"errortext\"", ")", ",", "response", "=", "data", ")", "while", "await_final_result", "and", "(", "'jobid'", "in", "data", ")", ":", "await", "asyncio", ".", "sleep", "(", "self", ".", "async_poll_latency", ")", "data", "=", "await", "self", ".", "queryAsyncJobResult", "(", "jobid", "=", "data", "[", "'jobid'", "]", ")", "if", "data", "[", "'jobstatus'", "]", ":", "# jobstatus is 0 for pending async CloudStack calls", "if", "not", "data", "[", "'jobresultcode'", "]", ":", "# exit code is zero", "try", ":", "return", "data", "[", "'jobresult'", "]", "except", "KeyError", ":", "pass", "logging", ".", "debug", "(", "\"Async CloudStack call returned {}\"", ".", "format", "(", "str", "(", "data", ")", ")", ")", "raise", "CloudStackClientException", "(", "message", "=", "\"Async CloudStack call failed!\"", ",", "error_code", "=", "data", ".", "get", "(", "\"errorcode\"", ")", ",", "error_text", "=", "data", ".", "get", "(", "\"errortext\"", ")", ",", "response", "=", "data", ")", "return", "data" ]
Handles the response returned from the CloudStack API. Some CloudStack API are implemented asynchronous, which means that the API call returns just a job id. The actually expected API response is postponed and a specific asyncJobResults API has to be polled using the job id to get the final result once the API call has been processed. :param response: The response returned by the aiohttp call. :type response: aiohttp.client_reqrep.ClientResponse :param await_final_result: Specifier that indicates whether the function should poll the asyncJobResult API until the asynchronous API call has been processed :type await_final_result: bool :return: Dictionary containing the JSON response of the API call :rtype: dict
[ "Handles", "the", "response", "returned", "from", "the", "CloudStack", "API", ".", "Some", "CloudStack", "API", "are", "implemented", "asynchronous", "which", "means", "that", "the", "API", "call", "returns", "just", "a", "job", "id", ".", "The", "actually", "expected", "API", "response", "is", "postponed", "and", "a", "specific", "asyncJobResults", "API", "has", "to", "be", "polled", "using", "the", "job", "id", "to", "get", "the", "final", "result", "once", "the", "API", "call", "has", "been", "processed", "." ]
python
test
58.822222
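A heavily hedged sketch of the call pattern this method supports; the CloudStack client constructor arguments and the attribute-style listVirtualMachines call are assumptions inferred from the dynamic queryAsyncJobResult call in the body, not confirmed API.

import asyncio

async def main():
    # hypothetical construction; see the package for the real signature
    client = CloudStack(end_point="https://host/client/api",
                        api_key="KEY", api_secret="SECRET")
    # if the API returns a jobid, _handle_response keeps polling
    # queryAsyncJobResult until jobstatus becomes non-zero
    result = await client.listVirtualMachines()
    print(result)

asyncio.get_event_loop().run_until_complete(main())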
ultradns/python_rest_api_client
ultra_rest_client/ultra_rest_client.py
https://github.com/ultradns/python_rest_api_client/blob/e4095f28f5cb5e258b768c06ef7cf8b1915aa5ec/ultra_rest_client/ultra_rest_client.py#L333-L356
def edit_rrset(self, zone_name, rtype, owner_name, ttl, rdata, profile=None): """Updates an existing RRSet in the specified zone. Arguments: zone_name -- The zone that contains the RRSet. The trailing dot is optional. rtype -- The type of the RRSet. This can be numeric (1) or if a well-known name is defined for the type (A), you can use it instead. owner_name -- The owner name for the RRSet. If no trailing dot is supplied, the owner_name is assumed to be relative (foo). If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.) ttl -- The updated TTL value for the RRSet. rdata -- The updated BIND data for the RRSet as a string. If there is a single resource record in the RRSet, you can pass in the single string. If there are multiple resource records in this RRSet, pass in a list of strings. profile -- The profile info if this is updating a resource pool """ if type(rdata) is not list: rdata = [rdata] rrset = {"ttl": ttl, "rdata": rdata} if profile: rrset["profile"] = profile uri = "/v1/zones/" + zone_name + "/rrsets/" + rtype + "/" + owner_name return self.rest_api_connection.put(uri, json.dumps(rrset))
[ "def", "edit_rrset", "(", "self", ",", "zone_name", ",", "rtype", ",", "owner_name", ",", "ttl", ",", "rdata", ",", "profile", "=", "None", ")", ":", "if", "type", "(", "rdata", ")", "is", "not", "list", ":", "rdata", "=", "[", "rdata", "]", "rrset", "=", "{", "\"ttl\"", ":", "ttl", ",", "\"rdata\"", ":", "rdata", "}", "if", "profile", ":", "rrset", "[", "\"profile\"", "]", "=", "profile", "uri", "=", "\"/v1/zones/\"", "+", "zone_name", "+", "\"/rrsets/\"", "+", "rtype", "+", "\"/\"", "+", "owner_name", "return", "self", ".", "rest_api_connection", ".", "put", "(", "uri", ",", "json", ".", "dumps", "(", "rrset", ")", ")" ]
Updates an existing RRSet in the specified zone. Arguments: zone_name -- The zone that contains the RRSet. The trailing dot is optional. rtype -- The type of the RRSet. This can be numeric (1) or if a well-known name is defined for the type (A), you can use it instead. owner_name -- The owner name for the RRSet. If no trailing dot is supplied, the owner_name is assumed to be relative (foo). If a trailing dot is supplied, the owner name is assumed to be absolute (foo.zonename.com.) ttl -- The updated TTL value for the RRSet. rdata -- The updated BIND data for the RRSet as a string. If there is a single resource record in the RRSet, you can pass in the single string. If there are multiple resource records in this RRSet, pass in a list of strings. profile -- The profile info if this is updating a resource pool
[ "Updates", "an", "existing", "RRSet", "in", "the", "specified", "zone", "." ]
python
train
56.958333
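Usage sketches built directly from the docstring; the zone, owner, and rdata values are illustrative, and the profile payload is a placeholder rather than a documented schema.

# single resource record
client.edit_rrset("example.com.", "A", "www", 300, "198.51.100.1")

# several records in one RRSet, with an (illustrative) pool profile
client.edit_rrset("example.com.", "A", "www", 300,
                  ["198.51.100.1", "198.51.100.2"],
                  profile={"order": "ROUND_ROBIN"})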
trombastic/PyScada
pyscada/utils/scheduler.py
https://github.com/trombastic/PyScada/blob/c5fc348a25f0df1340336f694ee9bc1aea62516a/pyscada/utils/scheduler.py#L709-L719
def stop(self, signum=None, frame=None):
        """
        handles a termination signal
        """
        BackgroundProcess.objects.filter(pk=self.process_id
                                         ).update(pid=0, last_update=now(), message='stopping..')

        # run the cleanup
        self.cleanup()

        BackgroundProcess.objects.filter(pk=self.process_id).update(pid=0, last_update=now(), message='stopped')
[ "def", "stop", "(", "self", ",", "signum", "=", "None", ",", "frame", "=", "None", ")", ":", "BackgroundProcess", ".", "objects", ".", "filter", "(", "pk", "=", "self", ".", "process_id", ")", ".", "update", "(", "pid", "=", "0", ",", "last_update", "=", "now", "(", ")", ",", "message", "=", "'stopping..'", ")", "# run the cleanup", "self", ".", "cleanup", "(", ")", "BackgroundProcess", ".", "objects", ".", "filter", "(", "pk", "=", "self", ".", "process_id", ")", ".", "update", "(", "pid", "=", "0", ",", "last_update", "=", "now", "(", ")", ",", "message", "=", "'stopped'", ")" ]
handles a termination signal
[ "handel", "s", "a", "termination", "signal" ]
python
train
49.818182
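stop is written as a POSIX signal handler (it accepts signum and frame). A sketch of how such a handler is typically registered, assuming a running process object that exposes the method above:

import signal

# 'process' is a hypothetical instance exposing stop(signum, frame)
signal.signal(signal.SIGTERM, process.stop)  # mark 'stopping..', run cleanup, mark 'stopped'
signal.signal(signal.SIGINT, process.stop)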
nickmilon/Hellas
Hellas/Sparta.py
https://github.com/nickmilon/Hellas/blob/542e4778692fbec90753942946f20100412ec9ee/Hellas/Sparta.py#L264-L288
def relations_dict(rel_lst): """constructs a relations dictionary from a list that describes amphidromous relations between objects :param list rel_lst: a relationships list of the form [[a,b],[c, a, b]] # can include duplicates :returns: a dictionary :Example: >>> rl = [('a', 'b', 'c'), ('a', 'x', 'y'), ('x', 'y', 'z')] >>> relations_dict(rl) {'a': ['x', 'c', 'b', 'y'], 'c': ['a', 'b'], 'b': ['a', 'c'], 'y': ['a', 'x', 'z'], 'x': ['a', 'z', 'y'], 'z': ['y', 'x']} """ dc = {} for c in rel_lst: for i in c: for k in c: dc.setdefault(i, []) dc[i].append(k) do = {} for k in list(dc.keys()): if dc[k]: vl = list(set(dc[k])) # remove duplicates vl.remove(k) do[k] = vl return do
[ "def", "relations_dict", "(", "rel_lst", ")", ":", "dc", "=", "{", "}", "for", "c", "in", "rel_lst", ":", "for", "i", "in", "c", ":", "for", "k", "in", "c", ":", "dc", ".", "setdefault", "(", "i", ",", "[", "]", ")", "dc", "[", "i", "]", ".", "append", "(", "k", ")", "do", "=", "{", "}", "for", "k", "in", "list", "(", "dc", ".", "keys", "(", ")", ")", ":", "if", "dc", "[", "k", "]", ":", "vl", "=", "list", "(", "set", "(", "dc", "[", "k", "]", ")", ")", "# remove duplicates", "vl", ".", "remove", "(", "k", ")", "do", "[", "k", "]", "=", "vl", "return", "do" ]
constructs a relations dictionary from a list that describes amphidromous relations between objects :param list rel_lst: a relationships list of the form [[a,b],[c, a, b]] # can include duplicates :returns: a dictionary :Example: >>> rl = [('a', 'b', 'c'), ('a', 'x', 'y'), ('x', 'y', 'z')] >>> relations_dict(rl) {'a': ['x', 'c', 'b', 'y'], 'c': ['a', 'b'], 'b': ['a', 'c'], 'y': ['a', 'x', 'z'], 'x': ['a', 'z', 'y'], 'z': ['y', 'x']}
[ "constructs", "a", "relation", "s", "dictionary", "from", "a", "list", "that", "describes", "amphidromus", "relations", "between", "objects" ]
python
train
33.16
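A runnable check of the doctest above; relations_dict is pure Python, so this works standalone once the function is defined:

rl = [('a', 'b', 'c'), ('a', 'x', 'y'), ('x', 'y', 'z')]
rd = relations_dict(rl)
# every relation is mutual: if v is related to k, then k is related to v
assert all(k in rd[v] for k, vals in rd.items() for v in vals)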
bitesofcode/projexui
projexui/widgets/xviewwidget/xviewpanel.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xviewwidget/xviewpanel.py#L407-L418
def clear(self): """ Clears out all the items from this tab bar. """ self.blockSignals(True) items = list(self.items()) for item in items: item.close() self.blockSignals(False) self._currentIndex = -1 self.currentIndexChanged.emit(self._currentIndex)
[ "def", "clear", "(", "self", ")", ":", "self", ".", "blockSignals", "(", "True", ")", "items", "=", "list", "(", "self", ".", "items", "(", ")", ")", "for", "item", "in", "items", ":", "item", ".", "close", "(", ")", "self", ".", "blockSignals", "(", "False", ")", "self", ".", "_currentIndex", "=", "-", "1", "self", ".", "currentIndexChanged", ".", "emit", "(", "self", ".", "_currentIndex", ")" ]
Clears out all the items from this tab bar.
[ "Clears", "out", "all", "the", "items", "from", "this", "tab", "bar", "." ]
python
train
27
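The block-then-emit pattern above (suppress per-item signals during a bulk change, then emit one summary signal) is a common Qt idiom. A self-contained sketch of the same idea as a reusable context manager for any QObject:

from contextlib import contextmanager

@contextmanager
def signals_blocked(obj):
    # silence notifications during bulk changes, then always restore them
    obj.blockSignals(True)
    try:
        yield obj
    finally:
        obj.blockSignals(False)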
Telefonica/toolium
toolium/driver_wrapper.py
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/driver_wrapper.py#L108-L131
def configure_properties(self, tc_config_prop_filenames=None, behave_properties=None): """Configure selenium instance properties :param tc_config_prop_filenames: test case specific properties filenames :param behave_properties: dict with behave user data properties """ prop_filenames = DriverWrappersPool.get_configured_value('Config_prop_filenames', tc_config_prop_filenames, 'properties.cfg;local-properties.cfg') prop_filenames = [os.path.join(DriverWrappersPool.config_directory, filename) for filename in prop_filenames.split(';')] prop_filenames = ';'.join(prop_filenames) # Configure config only if properties filename has changed if self.config_properties_filenames != prop_filenames: # Initialize the config object self.config = ExtendedConfigParser.get_config_from_file(prop_filenames) self.config_properties_filenames = prop_filenames # Override properties with system properties self.config.update_properties(os.environ) # Override properties with behave userdata properties if behave_properties: self.config.update_properties(behave_properties)
[ "def", "configure_properties", "(", "self", ",", "tc_config_prop_filenames", "=", "None", ",", "behave_properties", "=", "None", ")", ":", "prop_filenames", "=", "DriverWrappersPool", ".", "get_configured_value", "(", "'Config_prop_filenames'", ",", "tc_config_prop_filenames", ",", "'properties.cfg;local-properties.cfg'", ")", "prop_filenames", "=", "[", "os", ".", "path", ".", "join", "(", "DriverWrappersPool", ".", "config_directory", ",", "filename", ")", "for", "filename", "in", "prop_filenames", ".", "split", "(", "';'", ")", "]", "prop_filenames", "=", "';'", ".", "join", "(", "prop_filenames", ")", "# Configure config only if properties filename has changed", "if", "self", ".", "config_properties_filenames", "!=", "prop_filenames", ":", "# Initialize the config object", "self", ".", "config", "=", "ExtendedConfigParser", ".", "get_config_from_file", "(", "prop_filenames", ")", "self", ".", "config_properties_filenames", "=", "prop_filenames", "# Override properties with system properties", "self", ".", "config", ".", "update_properties", "(", "os", ".", "environ", ")", "# Override properties with behave userdata properties", "if", "behave_properties", ":", "self", ".", "config", ".", "update_properties", "(", "behave_properties", ")" ]
Configure selenium instance properties :param tc_config_prop_filenames: test case specific properties filenames :param behave_properties: dict with behave user data properties
[ "Configure", "selenium", "instance", "properties" ]
python
train
53.416667
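The precedence implemented above is: properties files first, then OS environment variables, then behave userdata, with each later layer overriding the earlier ones. A hedged calling sketch, assuming a toolium DriverWrapper instance (the userdata key is only an example):

# filenames are resolved relative to DriverWrappersPool.config_directory
wrapper.configure_properties(
    tc_config_prop_filenames='properties.cfg;local-properties.cfg',
    behave_properties={'Driver_type': 'firefox'},  # key name assumed
)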
src-d/modelforge
modelforge/model.py
https://github.com/src-d/modelforge/blob/4f73c2bf0318261ac01bc8b6c0d4250a5d303418/modelforge/model.py#L504-L524
def split_strings(subtree: dict) -> List[str]: """ Produce the list of strings from the dictionary with concatenated chars \ and lengths. Opposite to :func:`merge_strings()`. :param subtree: The dict with "strings" and "lengths". :return: :class:`list` of :class:`str`-s or :class:`bytes`. """ strings = subtree["strings"] lengths = subtree["lengths"] if lengths.shape[0] == 0 and strings.shape[0] == 0: return [] strings = strings[0] if subtree.get("str", True): strings = strings.decode("utf-8") result = [None] * lengths.shape[0] offset = 0 for i, l in enumerate(lengths): result[i] = strings[offset:offset + l] offset += l return result
[ "def", "split_strings", "(", "subtree", ":", "dict", ")", "->", "List", "[", "str", "]", ":", "strings", "=", "subtree", "[", "\"strings\"", "]", "lengths", "=", "subtree", "[", "\"lengths\"", "]", "if", "lengths", ".", "shape", "[", "0", "]", "==", "0", "and", "strings", ".", "shape", "[", "0", "]", "==", "0", ":", "return", "[", "]", "strings", "=", "strings", "[", "0", "]", "if", "subtree", ".", "get", "(", "\"str\"", ",", "True", ")", ":", "strings", "=", "strings", ".", "decode", "(", "\"utf-8\"", ")", "result", "=", "[", "None", "]", "*", "lengths", ".", "shape", "[", "0", "]", "offset", "=", "0", "for", "i", ",", "l", "in", "enumerate", "(", "lengths", ")", ":", "result", "[", "i", "]", "=", "strings", "[", "offset", ":", "offset", "+", "l", "]", "offset", "+=", "l", "return", "result" ]
Produce the list of strings from the dictionary with concatenated chars \ and lengths. Opposite to :func:`merge_strings()`. :param subtree: The dict with "strings" and "lengths". :return: :class:`list` of :class:`str`-s or :class:`bytes`.
[ "Produce", "the", "list", "of", "strings", "from", "the", "dictionary", "with", "concatenated", "chars", "\\", "and", "lengths", ".", "Opposite", "to", ":", "func", ":", "merge_strings", "()", "." ]
python
train
33.952381
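A self-contained round trip for split_strings, with the input layout inferred from the code itself (a single concatenated byte string plus per-item lengths, both as numpy arrays), not from modelforge documentation:

import numpy as np

subtree = {"strings": np.array([b"foobarbaz"]),
           "lengths": np.array([3, 3, 3]),
           "str": True}            # decode utf-8 into str objects
print(split_strings(subtree))      # ['foo', 'bar', 'baz']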
coleifer/peewee
playhouse/sqlite_ext.py
https://github.com/coleifer/peewee/blob/ea9403b01acb039adb3a2472186d795c796b77a0/playhouse/sqlite_ext.py#L391-L400
def search(cls, term, weights=None, with_score=False, score_alias='score', explicit_ordering=False): """Full-text search using selected `term`.""" return cls._search( term, weights, with_score, score_alias, cls.rank, explicit_ordering)
[ "def", "search", "(", "cls", ",", "term", ",", "weights", "=", "None", ",", "with_score", "=", "False", ",", "score_alias", "=", "'score'", ",", "explicit_ordering", "=", "False", ")", ":", "return", "cls", ".", "_search", "(", "term", ",", "weights", ",", "with_score", ",", "score_alias", ",", "cls", ".", "rank", ",", "explicit_ordering", ")" ]
Full-text search using selected `term`.
[ "Full", "-", "text", "search", "using", "selected", "term", "." ]
python
train
32.9
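A usage sketch against peewee's sqlite_ext full-text support; the model follows the documented FTSModel pattern, though the schema itself is made up:

from playhouse.sqlite_ext import FTSModel, SearchField, SqliteExtDatabase

db = SqliteExtDatabase(':memory:')

class DocumentIndex(FTSModel):
    content = SearchField()

    class Meta:
        database = db

db.create_tables([DocumentIndex])
DocumentIndex.insert({DocumentIndex.content: 'full-text search with peewee'}).execute()
for doc in DocumentIndex.search('peewee', with_score=True):
    print(doc.content, doc.score)  # 'score' is the default score_alias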
osrg/ryu
ryu/services/protocols/bgp/api/rtconf.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/api/rtconf.py#L45-L56
def _get_neighbor_conf(neigh_ip_address): """Returns neighbor configuration for given neighbor ip address. Raises exception if no neighbor with `neigh_ip_address` exists. """ neigh_conf = \ CORE_MANAGER.neighbors_conf.get_neighbor_conf(neigh_ip_address) if not neigh_conf: raise RuntimeConfigError(desc='No Neighbor configuration with IP' ' address %s' % neigh_ip_address) assert isinstance(neigh_conf, NeighborConf) return neigh_conf
[ "def", "_get_neighbor_conf", "(", "neigh_ip_address", ")", ":", "neigh_conf", "=", "CORE_MANAGER", ".", "neighbors_conf", ".", "get_neighbor_conf", "(", "neigh_ip_address", ")", "if", "not", "neigh_conf", ":", "raise", "RuntimeConfigError", "(", "desc", "=", "'No Neighbor configuration with IP'", "' address %s'", "%", "neigh_ip_address", ")", "assert", "isinstance", "(", "neigh_conf", ",", "NeighborConf", ")", "return", "neigh_conf" ]
Returns neighbor configuration for given neighbor ip address. Raises exception if no neighbor with `neigh_ip_address` exists.
[ "Returns", "neighbor", "configuration", "for", "given", "neighbor", "ip", "address", "." ]
python
train
41.75
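_get_neighbor_conf is a lookup-or-raise guard used by the BGP API layer. A hedged sketch of a caller handling the failure case (the import path for RuntimeConfigError is an assumption):

from ryu.services.protocols.bgp.api.base import RuntimeConfigError  # path assumed

try:
    conf = _get_neighbor_conf('192.0.2.10')
except RuntimeConfigError as e:
    print('no such neighbor:', e)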
pvlib/pvlib-python
pvlib/iotools/tmy.py
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/iotools/tmy.py#L19-L209
def read_tmy3(filename=None, coerce_year=None, recolumn=True): ''' Read a TMY3 file in to a pandas dataframe. Note that values contained in the metadata dictionary are unchanged from the TMY3 file (i.e. units are retained). In the case of any discrepencies between this documentation and the TMY3 User's Manual [1], the TMY3 User's Manual takes precedence. The TMY3 files were updated in Jan. 2015. This function requires the use of the updated files. Parameters ---------- filename : None or string, default None If None, attempts to use a Tkinter file browser. A string can be a relative file path, absolute file path, or url. coerce_year : None or int, default None If supplied, the year of the data will be set to this value. recolumn : bool, default True If True, apply standard names to TMY3 columns. Typically this results in stripping the units from the column name. Returns ------- Tuple of the form (data, metadata). data : DataFrame A pandas dataframe with the columns described in the table below. For more detailed descriptions of each component, please consult the TMY3 User's Manual ([1]), especially tables 1-1 through 1-6. metadata : dict The site metadata available in the file. Notes ----- The returned structures have the following fields. =============== ====== =================== key format description =============== ====== =================== altitude Float site elevation latitude Float site latitudeitude longitude Float site longitudeitude Name String site name State String state TZ Float UTC offset USAF Int USAF identifier =============== ====== =================== ============================= ====================================================================================================================================================== TMYData field description ============================= ====================================================================================================================================================== TMYData.Index A pandas datetime index. NOTE, the index is currently timezone unaware, and times are set to local standard time (daylight savings is not included) TMYData.ETR Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 TMYData.ETRN Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 TMYData.GHI Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 TMYData.GHISource See [1], Table 1-4 TMYData.GHIUncertainty Uncertainty based on random and bias error estimates see [2] TMYData.DNI Amount of direct normal radiation (modeled) recv'd during 60 mintues prior to timestamp, Wh/m^2 TMYData.DNISource See [1], Table 1-4 TMYData.DNIUncertainty Uncertainty based on random and bias error estimates see [2] TMYData.DHI Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 TMYData.DHISource See [1], Table 1-4 TMYData.DHIUncertainty Uncertainty based on random and bias error estimates see [2] TMYData.GHillum Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, lx TMYData.GHillumSource See [1], Table 1-4 TMYData.GHillumUncertainty Uncertainty based on random and bias error estimates see [2] TMYData.DNillum Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, lx TMYData.DNillumSource See [1], Table 1-4 TMYData.DNillumUncertainty Uncertainty based on random and bias error estimates see [2] TMYData.DHillum Avg. 
horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, lx TMYData.DHillumSource See [1], Table 1-4 TMYData.DHillumUncertainty Uncertainty based on random and bias error estimates see [2] TMYData.Zenithlum Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, cd/m^2 TMYData.ZenithlumSource See [1], Table 1-4 TMYData.ZenithlumUncertainty Uncertainty based on random and bias error estimates see [1] section 2.10 TMYData.TotCld Amount of sky dome covered by clouds or obscuring phenonema at time stamp, tenths of sky TMYData.TotCldSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.TotCldUnertainty See [1], Table 1-6 TMYData.OpqCld Amount of sky dome covered by clouds or obscuring phenonema that prevent observing the sky at time stamp, tenths of sky TMYData.OpqCldSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.OpqCldUncertainty See [1], Table 1-6 TMYData.DryBulb Dry bulb temperature at the time indicated, deg C TMYData.DryBulbSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.DryBulbUncertainty See [1], Table 1-6 TMYData.DewPoint Dew-point temperature at the time indicated, deg C TMYData.DewPointSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.DewPointUncertainty See [1], Table 1-6 TMYData.RHum Relatitudeive humidity at the time indicated, percent TMYData.RHumSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.RHumUncertainty See [1], Table 1-6 TMYData.Pressure Station pressure at the time indicated, 1 mbar TMYData.PressureSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.PressureUncertainty See [1], Table 1-6 TMYData.Wdir Wind direction at time indicated, degrees from north (360 = north; 0 = undefined,calm) TMYData.WdirSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.WdirUncertainty See [1], Table 1-6 TMYData.Wspd Wind speed at the time indicated, meter/second TMYData.WspdSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.WspdUncertainty See [1], Table 1-6 TMYData.Hvis Distance to discernable remote objects at time indicated (7777=unlimited), meter TMYData.HvisSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.HvisUncertainty See [1], Table 1-6 TMYData.CeilHgt Height of cloud base above local terrain (7777=unlimited), meter TMYData.CeilHgtSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.CeilHgtUncertainty See [1], Table 1-6 TMYData.Pwat Total precipitable water contained in a column of unit cross section from earth to top of atmosphere, cm TMYData.PwatSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.PwatUncertainty See [1], Table 1-6 TMYData.AOD The broadband aerosol optical depth per unit of air mass due to extinction by aerosol component of atmosphere, unitless TMYData.AODSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.AODUncertainty See [1], Table 1-6 TMYData.Alb The ratio of reflected solar irradiance to global horizontal irradiance, unitless TMYData.AlbSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.AlbUncertainty See [1], Table 1-6 TMYData.Lprecipdepth The amount of liquid precipitation observed at indicated time for the period indicated in the liquid precipitation quantity field, millimeter TMYData.Lprecipquantity The period of accumulatitudeion for the liquid precipitation depth field, hour TMYData.LprecipSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.LprecipUncertainty See [1], Table 1-6 TMYData.PresWth Present weather code, see [2]. 
TMYData.PresWthSource Present weather code source, see [2]. TMYData.PresWthUncertainty Present weather code uncertainty, see [2]. ============================= ====================================================================================================================================================== References ---------- [1] Wilcox, S and Marion, W. "Users Manual for TMY3 Data Sets". NREL/TP-581-43156, Revised May 2008. [2] Wilcox, S. (2007). National Solar Radiation Database 1991 2005 Update: Users Manual. 472 pp.; NREL Report No. TP-581-41364. ''' if filename is None: try: filename = _interactive_load() except ImportError: raise ImportError('Interactive load failed. Tkinter not supported ' 'on this system. Try installing X-Quartz and ' 'reloading') head = ['USAF', 'Name', 'State', 'TZ', 'latitude', 'longitude', 'altitude'] if filename.startswith('http'): request = Request(filename, headers={'User-Agent': ( 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) ' 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 ' 'Safari/537.36')}) response = urlopen(request) csvdata = io.StringIO(response.read().decode(errors='ignore')) else: # assume it's accessible via the file system csvdata = open(filename, 'r') # read in file metadata, advance buffer to second line firstline = csvdata.readline() if 'Request Rejected' in firstline: raise IOError('Remote server rejected TMY file request') meta = dict(zip(head, firstline.rstrip('\n').split(","))) # convert metadata strings to numeric types meta['altitude'] = float(meta['altitude']) meta['latitude'] = float(meta['latitude']) meta['longitude'] = float(meta['longitude']) meta['TZ'] = float(meta['TZ']) meta['USAF'] = int(meta['USAF']) # use pandas to read the csv file/stringio buffer # header is actually the second line in file, but tell pandas to look for # header information on the 1st line (0 indexing) because we've already # advanced past the true first line with the readline call above. data = pd.read_csv( csvdata, header=0, parse_dates={'datetime': ['Date (MM/DD/YYYY)', 'Time (HH:MM)']}, date_parser=lambda *x: _parsedate(*x, year=coerce_year), index_col='datetime') if recolumn: data = _recolumn(data) # rename to standard column names data = data.tz_localize(int(meta['TZ'] * 3600)) return data, meta
[ "def", "read_tmy3", "(", "filename", "=", "None", ",", "coerce_year", "=", "None", ",", "recolumn", "=", "True", ")", ":", "if", "filename", "is", "None", ":", "try", ":", "filename", "=", "_interactive_load", "(", ")", "except", "ImportError", ":", "raise", "ImportError", "(", "'Interactive load failed. Tkinter not supported '", "'on this system. Try installing X-Quartz and '", "'reloading'", ")", "head", "=", "[", "'USAF'", ",", "'Name'", ",", "'State'", ",", "'TZ'", ",", "'latitude'", ",", "'longitude'", ",", "'altitude'", "]", "if", "filename", ".", "startswith", "(", "'http'", ")", ":", "request", "=", "Request", "(", "filename", ",", "headers", "=", "{", "'User-Agent'", ":", "(", "'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) '", "'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 '", "'Safari/537.36'", ")", "}", ")", "response", "=", "urlopen", "(", "request", ")", "csvdata", "=", "io", ".", "StringIO", "(", "response", ".", "read", "(", ")", ".", "decode", "(", "errors", "=", "'ignore'", ")", ")", "else", ":", "# assume it's accessible via the file system", "csvdata", "=", "open", "(", "filename", ",", "'r'", ")", "# read in file metadata, advance buffer to second line", "firstline", "=", "csvdata", ".", "readline", "(", ")", "if", "'Request Rejected'", "in", "firstline", ":", "raise", "IOError", "(", "'Remote server rejected TMY file request'", ")", "meta", "=", "dict", "(", "zip", "(", "head", ",", "firstline", ".", "rstrip", "(", "'\\n'", ")", ".", "split", "(", "\",\"", ")", ")", ")", "# convert metadata strings to numeric types", "meta", "[", "'altitude'", "]", "=", "float", "(", "meta", "[", "'altitude'", "]", ")", "meta", "[", "'latitude'", "]", "=", "float", "(", "meta", "[", "'latitude'", "]", ")", "meta", "[", "'longitude'", "]", "=", "float", "(", "meta", "[", "'longitude'", "]", ")", "meta", "[", "'TZ'", "]", "=", "float", "(", "meta", "[", "'TZ'", "]", ")", "meta", "[", "'USAF'", "]", "=", "int", "(", "meta", "[", "'USAF'", "]", ")", "# use pandas to read the csv file/stringio buffer", "# header is actually the second line in file, but tell pandas to look for", "# header information on the 1st line (0 indexing) because we've already", "# advanced past the true first line with the readline call above.", "data", "=", "pd", ".", "read_csv", "(", "csvdata", ",", "header", "=", "0", ",", "parse_dates", "=", "{", "'datetime'", ":", "[", "'Date (MM/DD/YYYY)'", ",", "'Time (HH:MM)'", "]", "}", ",", "date_parser", "=", "lambda", "*", "x", ":", "_parsedate", "(", "*", "x", ",", "year", "=", "coerce_year", ")", ",", "index_col", "=", "'datetime'", ")", "if", "recolumn", ":", "data", "=", "_recolumn", "(", "data", ")", "# rename to standard column names", "data", "=", "data", ".", "tz_localize", "(", "int", "(", "meta", "[", "'TZ'", "]", "*", "3600", ")", ")", "return", "data", ",", "meta" ]
Read a TMY3 file in to a pandas dataframe. Note that values contained in the metadata dictionary are unchanged from the TMY3 file (i.e. units are retained). In the case of any discrepencies between this documentation and the TMY3 User's Manual [1], the TMY3 User's Manual takes precedence. The TMY3 files were updated in Jan. 2015. This function requires the use of the updated files. Parameters ---------- filename : None or string, default None If None, attempts to use a Tkinter file browser. A string can be a relative file path, absolute file path, or url. coerce_year : None or int, default None If supplied, the year of the data will be set to this value. recolumn : bool, default True If True, apply standard names to TMY3 columns. Typically this results in stripping the units from the column name. Returns ------- Tuple of the form (data, metadata). data : DataFrame A pandas dataframe with the columns described in the table below. For more detailed descriptions of each component, please consult the TMY3 User's Manual ([1]), especially tables 1-1 through 1-6. metadata : dict The site metadata available in the file. Notes ----- The returned structures have the following fields. =============== ====== =================== key format description =============== ====== =================== altitude Float site elevation latitude Float site latitudeitude longitude Float site longitudeitude Name String site name State String state TZ Float UTC offset USAF Int USAF identifier =============== ====== =================== ============================= ====================================================================================================================================================== TMYData field description ============================= ====================================================================================================================================================== TMYData.Index A pandas datetime index. NOTE, the index is currently timezone unaware, and times are set to local standard time (daylight savings is not included) TMYData.ETR Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 TMYData.ETRN Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 TMYData.GHI Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 TMYData.GHISource See [1], Table 1-4 TMYData.GHIUncertainty Uncertainty based on random and bias error estimates see [2] TMYData.DNI Amount of direct normal radiation (modeled) recv'd during 60 mintues prior to timestamp, Wh/m^2 TMYData.DNISource See [1], Table 1-4 TMYData.DNIUncertainty Uncertainty based on random and bias error estimates see [2] TMYData.DHI Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2 TMYData.DHISource See [1], Table 1-4 TMYData.DHIUncertainty Uncertainty based on random and bias error estimates see [2] TMYData.GHillum Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, lx TMYData.GHillumSource See [1], Table 1-4 TMYData.GHillumUncertainty Uncertainty based on random and bias error estimates see [2] TMYData.DNillum Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, lx TMYData.DNillumSource See [1], Table 1-4 TMYData.DNillumUncertainty Uncertainty based on random and bias error estimates see [2] TMYData.DHillum Avg. 
horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, lx TMYData.DHillumSource See [1], Table 1-4 TMYData.DHillumUncertainty Uncertainty based on random and bias error estimates see [2] TMYData.Zenithlum Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, cd/m^2 TMYData.ZenithlumSource See [1], Table 1-4 TMYData.ZenithlumUncertainty Uncertainty based on random and bias error estimates see [1] section 2.10 TMYData.TotCld Amount of sky dome covered by clouds or obscuring phenonema at time stamp, tenths of sky TMYData.TotCldSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.TotCldUnertainty See [1], Table 1-6 TMYData.OpqCld Amount of sky dome covered by clouds or obscuring phenonema that prevent observing the sky at time stamp, tenths of sky TMYData.OpqCldSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.OpqCldUncertainty See [1], Table 1-6 TMYData.DryBulb Dry bulb temperature at the time indicated, deg C TMYData.DryBulbSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.DryBulbUncertainty See [1], Table 1-6 TMYData.DewPoint Dew-point temperature at the time indicated, deg C TMYData.DewPointSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.DewPointUncertainty See [1], Table 1-6 TMYData.RHum Relatitudeive humidity at the time indicated, percent TMYData.RHumSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.RHumUncertainty See [1], Table 1-6 TMYData.Pressure Station pressure at the time indicated, 1 mbar TMYData.PressureSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.PressureUncertainty See [1], Table 1-6 TMYData.Wdir Wind direction at time indicated, degrees from north (360 = north; 0 = undefined,calm) TMYData.WdirSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.WdirUncertainty See [1], Table 1-6 TMYData.Wspd Wind speed at the time indicated, meter/second TMYData.WspdSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.WspdUncertainty See [1], Table 1-6 TMYData.Hvis Distance to discernable remote objects at time indicated (7777=unlimited), meter TMYData.HvisSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.HvisUncertainty See [1], Table 1-6 TMYData.CeilHgt Height of cloud base above local terrain (7777=unlimited), meter TMYData.CeilHgtSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.CeilHgtUncertainty See [1], Table 1-6 TMYData.Pwat Total precipitable water contained in a column of unit cross section from earth to top of atmosphere, cm TMYData.PwatSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.PwatUncertainty See [1], Table 1-6 TMYData.AOD The broadband aerosol optical depth per unit of air mass due to extinction by aerosol component of atmosphere, unitless TMYData.AODSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.AODUncertainty See [1], Table 1-6 TMYData.Alb The ratio of reflected solar irradiance to global horizontal irradiance, unitless TMYData.AlbSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.AlbUncertainty See [1], Table 1-6 TMYData.Lprecipdepth The amount of liquid precipitation observed at indicated time for the period indicated in the liquid precipitation quantity field, millimeter TMYData.Lprecipquantity The period of accumulatitudeion for the liquid precipitation depth field, hour TMYData.LprecipSource See [1], Table 1-5, 8760x1 cell array of strings TMYData.LprecipUncertainty See [1], Table 1-6 TMYData.PresWth Present weather code, see [2]. 
TMYData.PresWthSource Present weather code source, see [2]. TMYData.PresWthUncertainty Present weather code uncertainty, see [2]. ============================= ====================================================================================================================================================== References ---------- [1] Wilcox, S and Marion, W. "Users Manual for TMY3 Data Sets". NREL/TP-581-43156, Revised May 2008. [2] Wilcox, S. (2007). National Solar Radiation Database 1991 2005 Update: Users Manual. 472 pp.; NREL Report No. TP-581-41364.
[ "Read", "a", "TMY3", "file", "in", "to", "a", "pandas", "dataframe", "." ]
python
train
60.031414
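A short usage sketch for read_tmy3; the filename is hypothetical, while the signature and the (data, metadata) return shape come from the docstring above:

from pvlib.iotools import read_tmy3

# coerce all timestamps into one year so the index is contiguous
data, meta = read_tmy3('703165TYA.CSV', coerce_year=1990)
print(meta['Name'], meta['latitude'], meta['longitude'])
print(data['GHI'].head())  # standard column name after recolumn=True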
tensorflow/mesh
mesh_tensorflow/transformer/metric_utils.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/transformer/metric_utils.py#L28-L50
def get_metric_fns(metric_names, labels, outputs): """Generate a dictionary of metric name to metric function. Args: metric_names: list of strings in the format "prefix/metric_function_name". metric_function_name should refer to a function name in metrics.py. The prefix will be included in the key in the returned dict. labels: a tensor where batch is the first dimension. outputs: a tensor of model predictions, same dimensionality as labels. Returns: metric_fns: dict of metric functions keyed by their name. """ metric_fns = {} for metric_name in metric_names: metric_fn_name = metric_name.split("/")[-1] if hasattr(metrics, metric_fn_name): metric_fn = getattr(metrics, metric_fn_name) metric_fns[metric_name] = metric_fn(labels, outputs) else: raise ValueError("Metric {} is not implemented".format(metric_fn_name)) return metric_fns
[ "def", "get_metric_fns", "(", "metric_names", ",", "labels", ",", "outputs", ")", ":", "metric_fns", "=", "{", "}", "for", "metric_name", "in", "metric_names", ":", "metric_fn_name", "=", "metric_name", ".", "split", "(", "\"/\"", ")", "[", "-", "1", "]", "if", "hasattr", "(", "metrics", ",", "metric_fn_name", ")", ":", "metric_fn", "=", "getattr", "(", "metrics", ",", "metric_fn_name", ")", "metric_fns", "[", "metric_name", "]", "=", "metric_fn", "(", "labels", ",", "outputs", ")", "else", ":", "raise", "ValueError", "(", "\"Metric {} is not implemented\"", ".", "format", "(", "metric_fn_name", ")", ")", "return", "metric_fns" ]
Generate a dictionary of metric name to metric function. Args: metric_names: list of strings in the format "prefix/metric_function_name". metric_function_name should refer to a function name in metrics.py. The prefix will be included in the key in the returned dict. labels: a tensor where batch is the first dimension. outputs: a tensor of model predictions, same dimensionality as labels. Returns: metric_fns: dict of metric functions keyed by their name.
[ "Generate", "a", "dictionary", "of", "metric", "name", "to", "metric", "function", "." ]
python
train
38.782609
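get_metric_fns dispatches on the suffix of each metric name via getattr on the metrics module. A standalone sketch of the same dispatch-by-name pattern, using a stand-in namespace instead of mesh_tensorflow's real metrics module:

import types

# stand-in: real metric functions take (labels, outputs)
metrics = types.SimpleNamespace(
    accuracy=lambda labels, outputs: sum(l == o for l, o in zip(labels, outputs)) / len(labels))

metric_name = 'eval/accuracy'
fn_name = metric_name.split('/')[-1]
if hasattr(metrics, fn_name):
    print(getattr(metrics, fn_name)([1, 0, 1], [1, 1, 1]))  # 0.666...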
dcrosta/sendlib
sendlib.py
https://github.com/dcrosta/sendlib/blob/51ea5412a70cf83a62d51d5c515c0eeac725aea0/sendlib.py#L628-L680
def parse(schema): """ Parse `schema`, either a string or a file-like object, and return a :class:`MessageRegistry` with the loaded messages. """ if not isinstance(schema, basestring): # assume it is file-like schema = schema.read() message = re.compile(r'^\(([^,]+),\s*(\d+)\):\s*$') field = re.compile(r'^-\s*([^:]+):\s+(.+?)\s*$') registry = MessageRegistry({}) messages = registry.messages curr = None names = None for lineno, line in enumerate(schema.split('\n')): line = line.strip() if '#' in line: line = line[:line.index('#')] if line == '': continue f = field.match(line) if f: if curr is None: raise ParseError( 'field definition outside of message at line %d' % lineno) name = f.group(1) type = f.group(2) if name not in names: f = Field(curr, name, type) curr.fields.append(f) names.add(name) continue else: raise ParseError( 'duplicate field name "%s" at line %d' % (name, lineno)) m = message.match(line) if m: # new message definition name, vers = m.group(1), int(m.group(2)) if (name, vers) in messages: raise ParseError('Duplicate message (%s, %d)' % (name, vers)) curr = messages[(name, vers)] = Message(registry, name, vers, []) names = set() continue for message in registry.messages.values(): message.fields = tuple(message.fields) return registry
[ "def", "parse", "(", "schema", ")", ":", "if", "not", "isinstance", "(", "schema", ",", "basestring", ")", ":", "# assume it is file-like", "schema", "=", "schema", ".", "read", "(", ")", "message", "=", "re", ".", "compile", "(", "r'^\\(([^,]+),\\s*(\\d+)\\):\\s*$'", ")", "field", "=", "re", ".", "compile", "(", "r'^-\\s*([^:]+):\\s+(.+?)\\s*$'", ")", "registry", "=", "MessageRegistry", "(", "{", "}", ")", "messages", "=", "registry", ".", "messages", "curr", "=", "None", "names", "=", "None", "for", "lineno", ",", "line", "in", "enumerate", "(", "schema", ".", "split", "(", "'\\n'", ")", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "'#'", "in", "line", ":", "line", "=", "line", "[", ":", "line", ".", "index", "(", "'#'", ")", "]", "if", "line", "==", "''", ":", "continue", "f", "=", "field", ".", "match", "(", "line", ")", "if", "f", ":", "if", "curr", "is", "None", ":", "raise", "ParseError", "(", "'field definition outside of message at line %d'", "%", "lineno", ")", "name", "=", "f", ".", "group", "(", "1", ")", "type", "=", "f", ".", "group", "(", "2", ")", "if", "name", "not", "in", "names", ":", "f", "=", "Field", "(", "curr", ",", "name", ",", "type", ")", "curr", ".", "fields", ".", "append", "(", "f", ")", "names", ".", "add", "(", "name", ")", "continue", "else", ":", "raise", "ParseError", "(", "'duplicate field name \"%s\" at line %d'", "%", "(", "name", ",", "lineno", ")", ")", "m", "=", "message", ".", "match", "(", "line", ")", "if", "m", ":", "# new message definition", "name", ",", "vers", "=", "m", ".", "group", "(", "1", ")", ",", "int", "(", "m", ".", "group", "(", "2", ")", ")", "if", "(", "name", ",", "vers", ")", "in", "messages", ":", "raise", "ParseError", "(", "'Duplicate message (%s, %d)'", "%", "(", "name", ",", "vers", ")", ")", "curr", "=", "messages", "[", "(", "name", ",", "vers", ")", "]", "=", "Message", "(", "registry", ",", "name", ",", "vers", ",", "[", "]", ")", "names", "=", "set", "(", ")", "continue", "for", "message", "in", "registry", ".", "messages", ".", "values", "(", ")", ":", "message", ".", "fields", "=", "tuple", "(", "message", ".", "fields", ")", "return", "registry" ]
Parse `schema`, either a string or a file-like object, and return a :class:`MessageRegistry` with the loaded messages.
[ "Parse", "schema", "either", "a", "string", "or", "a", "file", "-", "like", "object", "and", "return", "a", ":", "class", ":", "MessageRegistry", "with", "the", "loaded", "messages", "." ]
python
train
31.45283
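A small schema in the grammar the regexes above accept: '(name, version):' headers, '- field: type' lines, and '#' comments. Note that sendlib targets Python 2 (parse checks isinstance(schema, basestring)); the field type names and the .name attribute on Field are assumptions:

schema = '''
# greeting message, version 1
(greeting, 1):
 - name: str
 - count: int
'''
registry = parse(schema)
msg = registry.messages[('greeting', 1)]
print([f.name for f in msg.fields])  # ['name', 'count'], assuming Field exposes .name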
Infinidat/infi.clickhouse_orm
src/infi/clickhouse_orm/system_models.py
https://github.com/Infinidat/infi.clickhouse_orm/blob/595f2023e334e3925a5c3fbfdd6083a5992a7169/src/infi/clickhouse_orm/system_models.py#L109-L116
def fetch(self, zookeeper_path, settings=None): """ Download a partition from another server. :param zookeeper_path: Path in zookeeper to fetch from :param settings: Settings for executing request to ClickHouse over db.raw() method :return: SQL Query """ return self._partition_operation_sql('FETCH', settings=settings, from_part=zookeeper_path)
[ "def", "fetch", "(", "self", ",", "zookeeper_path", ",", "settings", "=", "None", ")", ":", "return", "self", ".", "_partition_operation_sql", "(", "'FETCH'", ",", "settings", "=", "settings", ",", "from_part", "=", "zookeeper_path", ")" ]
Download a partition from another server. :param zookeeper_path: Path in zookeeper to fetch from :param settings: Settings for executing request to ClickHouse over db.raw() method :return: SQL Query
[ "Download", "a", "partition", "from", "another", "server", ".", ":", "param", "zookeeper_path", ":", "Path", "in", "zookeeper", "to", "fetch", "from", ":", "param", "settings", ":", "Settings", "for", "executing", "request", "to", "ClickHouse", "over", "db", ".", "raw", "()", "method", ":", "return", ":", "SQL", "Query" ]
python
train
50.125
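fetch issues an ALTER TABLE ... FETCH partition operation for a replicated table. A hedged sketch of driving it from a SystemPart row, assuming infi.clickhouse_orm's system-model helpers; the zookeeper path is hypothetical:

from infi.clickhouse_orm.database import Database
from infi.clickhouse_orm.system_models import SystemPart

db = Database('default')
for part in SystemPart.get_active(db):            # active parts in system.parts
    part.fetch('/clickhouse/tables/01/events')    # path in zookeeper (made up)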
materialsproject/pymatgen
pymatgen/util/convergence.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/util/convergence.py#L285-L303
def measure(function, xs, ys, popt, weights): """ measure the quality of a fit """ m = 0 n = 0 for x in xs: try: if len(popt) == 2: m += (ys[n] - function(x, popt[0], popt[1]))**2 * weights[n] elif len(popt) == 3: m += (ys[n] - function(x, popt[0], popt[1], popt[2]))**2 * weights[n] else: raise NotImplementedError n += 1 except IndexError: raise RuntimeError('y does not exist for x = ', x, ' this should not happen') return m
[ "def", "measure", "(", "function", ",", "xs", ",", "ys", ",", "popt", ",", "weights", ")", ":", "m", "=", "0", "n", "=", "0", "for", "x", "in", "xs", ":", "try", ":", "if", "len", "(", "popt", ")", "==", "2", ":", "m", "+=", "(", "ys", "[", "n", "]", "-", "function", "(", "x", ",", "popt", "[", "0", "]", ",", "popt", "[", "1", "]", ")", ")", "**", "2", "*", "weights", "[", "n", "]", "elif", "len", "(", "popt", ")", "==", "3", ":", "m", "+=", "(", "ys", "[", "n", "]", "-", "function", "(", "x", ",", "popt", "[", "0", "]", ",", "popt", "[", "1", "]", ",", "popt", "[", "2", "]", ")", ")", "**", "2", "*", "weights", "[", "n", "]", "else", ":", "raise", "NotImplementedError", "n", "+=", "1", "except", "IndexError", ":", "raise", "RuntimeError", "(", "'y does not exist for x = '", ",", "x", ",", "' this should not happen'", ")", "return", "m" ]
measure the quality of a fit
[ "measure", "the", "quality", "of", "a", "fit" ]
python
train
29.631579
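A self-contained check of measure with a two-parameter linear model; the numbers are made up for illustration:

def linear(x, a, b):
    return a * x + b

xs = [1.0, 2.0, 3.0]
ys = [2.1, 3.9, 6.2]
popt = [2.0, 0.0]                  # slope and intercept, e.g. from curve_fit
weights = [1.0, 1.0, 1.0]
print(measure(linear, xs, ys, popt, weights))  # weighted sum of squared residuals, 0.06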
assamite/creamas
creamas/logging.py
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/logging.py#L115-L118
def get_file(self, attr_name): '''Return absolute path to logging file for obj's attribute.''' return os.path.abspath(os.path.join(self.folder, "{}.log" .format(attr_name)))
[ "def", "get_file", "(", "self", ",", "attr_name", ")", ":", "return", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "self", ".", "folder", ",", "\"{}.log\"", ".", "format", "(", "attr_name", ")", ")", ")" ]
Return absolute path to logging file for obj's attribute.
[ "Return", "absolute", "path", "to", "logging", "file", "for", "obj", "s", "attribute", "." ]
python
train
57.5
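Given the folder the logger was configured with, get_file simply joins and absolutizes the path; a hedged one-liner, assuming a logger instance created with folder='logs':

logger.get_file('agent_attr')  # -> '/abs/path/to/logs/agent_attr.log' (hypothetical)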
mcs07/ChemDataExtractor
chemdataextractor/nlp/tokenize.py
https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/nlp/tokenize.py#L573-L766
def _subspan(self, s, span, nextspan): """Recursively subdivide spans based on a series of rules.""" text = s[span[0]:span[1]] lowertext = text.lower() # Skip if only a single character or a split sequence if span[1] - span[0] < 2 or text in self.SPLIT or text in self.SPLIT_END_WORD or text in self.SPLIT_START_WORD or lowertext in self.NO_SPLIT: return [span] # Skip if it looks like URL if text.startswith('http://') or text.startswith('ftp://') or text.startswith('www.'): return [span] # Split full stop at end of final token (allow certain characters to follow) unless ellipsis if self.split_last_stop and nextspan is None and text not in self.NO_SPLIT_STOP and not text[-3:] == '...': if text[-1] == '.': return self._split_span(span, -1) ind = text.rfind('.') if ind > -1 and all(t in '\'‘’"“”)]}' for t in text[ind + 1:]): return self._split_span(span, ind, 1) # Split off certain sequences at the end of a token for spl in self.SPLIT_END: if text.endswith(spl) and len(text) > len(spl): return self._split_span(span, -len(spl), 0) # Split off certain sequences at the end of a word for spl in self.SPLIT_END_WORD: if text.endswith(spl) and len(text) > len(spl) and text[-len(spl) - 1].isalpha(): return self._split_span(span, -len(spl), 0) # Split off certain sequences at the end of a word for spl in self.SPLIT_START_WORD: if text.startswith(spl) and len(text) > len(spl) and text[-len(spl) - 1].isalpha(): return self._split_span(span, len(spl), 0) # Split around certain sequences for spl in self.SPLIT: ind = text.find(spl) if ind > -1: return self._split_span(span, ind, len(spl)) # Split around certain sequences unless followed by a digit # - We skip this because of difficulty with chemical names. # for spl in self.SPLIT_NO_DIGIT: # ind = text.rfind(spl) # if ind > -1 and (len(text) <= ind + len(spl) or not text[ind + len(spl)].isdigit()): # return self._split_span(span, ind, len(spl)) # Split off certain sequences at the end of a token unless preceded by a digit for spl in self.SPLIT_END_NO_DIGIT: if text.endswith(spl) and len(text) > len(spl) and not text[-len(spl) - 1].isdigit(): return self._split_span(span, -len(spl), 0) # Regular Bracket at both start and end, break off both provided they correspond if text.startswith('(') and text.endswith(')') and self._closing_bracket_index(text) == len(text) - 1: return self._split_span(span, 1, len(text)-2) # Split things like IR(KBr) if text.startswith('IR(') and text.endswith(')'): return self._split_span(span, 2, 1) # Split things like \d+\.\d+([a-z]+) e.g. UV-vis/IR peaks with bracketed strength/shape m = re.match('^(\d+\.\d+|\d{3,})(\([a-z]+\))$', text, re.I) if m: return self._split_span(span, m.start(2), 1) # Split brackets off start and end if the corresponding bracket isn't within token for bpair in [('(', ')'), ('{', '}'), ('[', ']')]: #level = bracket_level(text, open=[bpair[0]], close=[bpair[1]]) # Bracket at start, bracketlevel > 0, break it off if text.startswith(bpair[0]) and self._closing_bracket_index(text, bpair=bpair) is None: return self._split_span(span, 1, 0) # Bracket at end, bracketlevel < 0, break it off if text.endswith(bpair[1]) and self._opening_bracket_index(text, bpair=bpair) is None: return self._split_span(span, -1, 0) # TODO: Consider splitting around comma in limited circumstances. Mainly to fix whitespace errors. 
# Characters to split around, but with exceptions for i, char in enumerate(text): before = text[:i] after = text[i+1:] if char in {':', ';'}: # Split around colon unless it looks like we're in a chemical name if not (before and after and after[0].isdigit() and before.rstrip('′\'')[-1:].isdigit() and '-' in after) and not (self.NO_SPLIT_CHEM.search(before) and self.NO_SPLIT_CHEM.search(after)): return self._split_span(span, i, 1) elif char in {'x', '+', '−'}: # Split around x, +, − (\u2212 minus) between two numbers or at start followed by numbers if (i == 0 or self._is_number(before)) and self._is_number(after): return self._split_span(span, i, 1) # Also plit around − (\u2212 minus) between two letters if char == '−' and before and before[-1].isalpha() and after and after[0].isalpha(): return self._split_span(span, i, 1) elif char == '±': # Split around ± unless surrounded by brackets if not (before and after and before[-1] == '(' and after[0] == ')'): return self._split_span(span, i, 1) elif char == '/': # Split around / unless '+/-' or '-/-' etc. if not (before and after and before[-1] in self.NO_SPLIT_SLASH and after[0] in self.NO_SPLIT_SLASH): return self._split_span(span, i, 1) elif char == '>': if not (before and before[-1] == '-'): # Split if preceding is not - return self._split_span(span, i, 1) if before and before[-1] == '-': # If preceding is -, split around -> unless in chemical name if not text == '->' and not self._is_saccharide_arrow(before[:-1], after): return self._split_span(span, i-1, 2) elif char is '→' and not self._is_saccharide_arrow(before, after): # TODO: 'is' should be '=='... this never splits!? # Split around → unless in chemical name return self._split_span(span, i, 1) elif char == '(' and self._is_number(before) and not '(' in after and not ')' in after: # Split around open bracket after a number return self._split_span(span, i, 1) elif char == '-': lowerbefore = lowertext[:i] lowerafter = lowertext[i+1:] # Always split on -of-the- -to- -in- -by- -of- -or- -and- -per- -the- if lowerafter[:7] == 'of-the-': return [(span[0], span[0] + i), (span[0] + i, span[0] + i + 1), (span[0] + i + 1, span[0] + i + 3), (span[0] + i + 3, span[0] + i + 4), (span[0] + i + 4, span[0] + i + 7), (span[0] + i + 7, span[0] + i + 8), (span[0] + i + 8, span[1])] if lowerafter[:5] in {'on-a-', 'of-a-'}: return [(span[0], span[0] + i), (span[0] + i, span[0] + i + 1), (span[0] + i + 1, span[0] + i + 3), (span[0] + i + 3, span[0] + i + 4), (span[0] + i + 4, span[0] + i + 5), (span[0] + i + 5, span[0] + i + 6), (span[0] + i + 6, span[1])] if lowerafter[:3] in {'to-', 'in-', 'by-', 'of-', 'or-', 'on-'}: return [(span[0], span[0] + i), (span[0] + i, span[0] + i + 1), (span[0] + i + 1, span[0] + i + 3), (span[0] + i + 3, span[0] + i + 4), (span[0] + i + 4, span[1])] if lowerafter[:4] in {'and-', 'per-', 'the-'}: return [(span[0], span[0] + i), (span[0] + i, span[0] + i + 1), (span[0] + i + 1, span[0] + i + 4), (span[0] + i + 4, span[0] + i + 5), (span[0] + i + 5, span[1])] # By default we split on hyphens split = True if lowerafter == 'nmr': split = True # Always split NMR off end elif bracket_level(text) == 0 and (not bracket_level(after) == 0 or not bracket_level(before) == 0): split = False # Don't split if within brackets elif after and after[0] == '>': split = False # Don't split if followed by > elif lowerbefore in self.NO_SPLIT_PREFIX or lowerafter in self.NO_SPLIT_SUFFIX: split = False # Don't split if prefix or suffix in list elif self.NO_SPLIT_PREFIX_ENDING.search(lowerbefore): split 
= False # Don't split if prefix ends with pattern elif lowerafter in self.SPLIT_SUFFIX: split = True # Do split if suffix in list elif len(before) <= 1 or len(after) <= 2: split = False # Don't split if not at least 2 char before and 3 after elif self.NO_SPLIT_CHEM.search(lowerbefore) or self.NO_SPLIT_CHEM.search(lowerafter): split = False # Don't split if prefix or suffix match chem regex if split: return self._split_span(span, i, 1) # TODO: Errors: # [³H]-choline # S,S'-... # 1,4-di-substituted # 11-β - hydroxysteroid # Spelt out greek: 11beta - hydroxysteroid # ...-N-substituted like 2,5-dimethyl-N-substituted pyrroles # 4-(2-Butyl-6,7-dichloro-2-cyclopentyl-indan-1-on-5-yl) oxobutyric acid # Adenosine - monophosphate # Consistency for amino acids: Arg-Arg and Arg-Arg-Asp... probably always split # D,L-α-peptide? # N'-formylkynurenine # poly(D,L-lactic acid )? # poly(methyl metha-acrylate )? # Poly(N - alkyl Acrylamide ) # poly(N - isopropyl acrylamide ) # R,S - lorazepam # S,S - dioxide # Split units off the end of a numeric value quantity = self.QUANTITY_RE.search(text) if quantity: return self._split_span(span, len(quantity.group(6) or quantity.group(3) or quantity.group(2)), 0) # Split pH off the start of a numeric value if text.startswith('pH') and self._is_number(text[2:]): return self._split_span(span, 2, 0) # Split contraction words for contraction in self.CONTRACTIONS: if lowertext == contraction[0]: return self._split_span(span, contraction[1]) if nextspan: nexttext = s[nextspan[0]:nextspan[1]] # Split NMR isotope whitespace errors (joined with previous sentence full stop) if nexttext == 'NMR': ind = text.rfind('.') if ind > -1 and text[ind + 1:] in {'1H', '13C', '15N', '31P', '19F', '11B', '29Si', '170', '73Ge', '195Pt', '33S', '13C{1H}'}: return self._split_span(span, ind, 1) return [span]
[ "def", "_subspan", "(", "self", ",", "s", ",", "span", ",", "nextspan", ")", ":", "text", "=", "s", "[", "span", "[", "0", "]", ":", "span", "[", "1", "]", "]", "lowertext", "=", "text", ".", "lower", "(", ")", "# Skip if only a single character or a split sequence", "if", "span", "[", "1", "]", "-", "span", "[", "0", "]", "<", "2", "or", "text", "in", "self", ".", "SPLIT", "or", "text", "in", "self", ".", "SPLIT_END_WORD", "or", "text", "in", "self", ".", "SPLIT_START_WORD", "or", "lowertext", "in", "self", ".", "NO_SPLIT", ":", "return", "[", "span", "]", "# Skip if it looks like URL", "if", "text", ".", "startswith", "(", "'http://'", ")", "or", "text", ".", "startswith", "(", "'ftp://'", ")", "or", "text", ".", "startswith", "(", "'www.'", ")", ":", "return", "[", "span", "]", "# Split full stop at end of final token (allow certain characters to follow) unless ellipsis", "if", "self", ".", "split_last_stop", "and", "nextspan", "is", "None", "and", "text", "not", "in", "self", ".", "NO_SPLIT_STOP", "and", "not", "text", "[", "-", "3", ":", "]", "==", "'...'", ":", "if", "text", "[", "-", "1", "]", "==", "'.'", ":", "return", "self", ".", "_split_span", "(", "span", ",", "-", "1", ")", "ind", "=", "text", ".", "rfind", "(", "'.'", ")", "if", "ind", ">", "-", "1", "and", "all", "(", "t", "in", "'\\'‘’\"“”)]}' for t i", " te", "t", "in", " + 1", ":", "]):", "", "", "", "", "", "", "return", "self", ".", "_split_span", "(", "span", ",", "ind", ",", "1", ")", "# Split off certain sequences at the end of a token", "for", "spl", "in", "self", ".", "SPLIT_END", ":", "if", "text", ".", "endswith", "(", "spl", ")", "and", "len", "(", "text", ")", ">", "len", "(", "spl", ")", ":", "return", "self", ".", "_split_span", "(", "span", ",", "-", "len", "(", "spl", ")", ",", "0", ")", "# Split off certain sequences at the end of a word", "for", "spl", "in", "self", ".", "SPLIT_END_WORD", ":", "if", "text", ".", "endswith", "(", "spl", ")", "and", "len", "(", "text", ")", ">", "len", "(", "spl", ")", "and", "text", "[", "-", "len", "(", "spl", ")", "-", "1", "]", ".", "isalpha", "(", ")", ":", "return", "self", ".", "_split_span", "(", "span", ",", "-", "len", "(", "spl", ")", ",", "0", ")", "# Split off certain sequences at the end of a word", "for", "spl", "in", "self", ".", "SPLIT_START_WORD", ":", "if", "text", ".", "startswith", "(", "spl", ")", "and", "len", "(", "text", ")", ">", "len", "(", "spl", ")", "and", "text", "[", "-", "len", "(", "spl", ")", "-", "1", "]", ".", "isalpha", "(", ")", ":", "return", "self", ".", "_split_span", "(", "span", ",", "len", "(", "spl", ")", ",", "0", ")", "# Split around certain sequences", "for", "spl", "in", "self", ".", "SPLIT", ":", "ind", "=", "text", ".", "find", "(", "spl", ")", "if", "ind", ">", "-", "1", ":", "return", "self", ".", "_split_span", "(", "span", ",", "ind", ",", "len", "(", "spl", ")", ")", "# Split around certain sequences unless followed by a digit", "# - We skip this because of difficulty with chemical names.", "# for spl in self.SPLIT_NO_DIGIT:", "# ind = text.rfind(spl)", "# if ind > -1 and (len(text) <= ind + len(spl) or not text[ind + len(spl)].isdigit()):", "# return self._split_span(span, ind, len(spl))", "# Split off certain sequences at the end of a token unless preceded by a digit", "for", "spl", "in", "self", ".", "SPLIT_END_NO_DIGIT", ":", "if", "text", ".", "endswith", "(", "spl", ")", "and", "len", "(", "text", ")", ">", "len", "(", "spl", ")", "and", "not", "text", "[", "-", "len", "(", "spl", ")", "-", "1", "]", ".", 
"isdigit", "(", ")", ":", "return", "self", ".", "_split_span", "(", "span", ",", "-", "len", "(", "spl", ")", ",", "0", ")", "# Regular Bracket at both start and end, break off both provided they correspond", "if", "text", ".", "startswith", "(", "'('", ")", "and", "text", ".", "endswith", "(", "')'", ")", "and", "self", ".", "_closing_bracket_index", "(", "text", ")", "==", "len", "(", "text", ")", "-", "1", ":", "return", "self", ".", "_split_span", "(", "span", ",", "1", ",", "len", "(", "text", ")", "-", "2", ")", "# Split things like IR(KBr)", "if", "text", ".", "startswith", "(", "'IR('", ")", "and", "text", ".", "endswith", "(", "')'", ")", ":", "return", "self", ".", "_split_span", "(", "span", ",", "2", ",", "1", ")", "# Split things like \\d+\\.\\d+([a-z]+) e.g. UV-vis/IR peaks with bracketed strength/shape", "m", "=", "re", ".", "match", "(", "'^(\\d+\\.\\d+|\\d{3,})(\\([a-z]+\\))$'", ",", "text", ",", "re", ".", "I", ")", "if", "m", ":", "return", "self", ".", "_split_span", "(", "span", ",", "m", ".", "start", "(", "2", ")", ",", "1", ")", "# Split brackets off start and end if the corresponding bracket isn't within token", "for", "bpair", "in", "[", "(", "'('", ",", "')'", ")", ",", "(", "'{'", ",", "'}'", ")", ",", "(", "'['", ",", "']'", ")", "]", ":", "#level = bracket_level(text, open=[bpair[0]], close=[bpair[1]])", "# Bracket at start, bracketlevel > 0, break it off", "if", "text", ".", "startswith", "(", "bpair", "[", "0", "]", ")", "and", "self", ".", "_closing_bracket_index", "(", "text", ",", "bpair", "=", "bpair", ")", "is", "None", ":", "return", "self", ".", "_split_span", "(", "span", ",", "1", ",", "0", ")", "# Bracket at end, bracketlevel < 0, break it off", "if", "text", ".", "endswith", "(", "bpair", "[", "1", "]", ")", "and", "self", ".", "_opening_bracket_index", "(", "text", ",", "bpair", "=", "bpair", ")", "is", "None", ":", "return", "self", ".", "_split_span", "(", "span", ",", "-", "1", ",", "0", ")", "# TODO: Consider splitting around comma in limited circumstances. 
Mainly to fix whitespace errors.", "# Characters to split around, but with exceptions", "for", "i", ",", "char", "in", "enumerate", "(", "text", ")", ":", "before", "=", "text", "[", ":", "i", "]", "after", "=", "text", "[", "i", "+", "1", ":", "]", "if", "char", "in", "{", "':'", ",", "';'", "}", ":", "# Split around colon unless it looks like we're in a chemical name", "if", "not", "(", "before", "and", "after", "and", "after", "[", "0", "]", ".", "isdigit", "(", ")", "and", "before", ".", "rstrip", "(", "'′\\''", ")", "[", "-", "1", ":", "]", ".", "isdigit", "(", ")", "and", "'′'", "in", "after", ")", "and", "not", "(", "self", ".", "NO_SPLIT_CHEM", ".", "search", "(", "before", ")", "and", "self", ".", "NO_SPLIT_CHEM", ".", "search", "(", "after", ")", ")", ":", "return", "self", ".", "_split_span", "(", "span", ",", "i", ",", "1", ")", "elif", "char", "in", "{", "'x'", ",", "'+'", ",", "'−'", "}", ":", "# Split around x, +, − (\\u2212 minus) between two numbers or at start followed by numbers", "if", "(", "i", "==", "0", "or", "self", ".", "_is_number", "(", "before", ")", ")", "and", "self", ".", "_is_number", "(", "after", ")", ":", "return", "self", ".", "_split_span", "(", "span", ",", "i", ",", "1", ")", "# Also split around − (\\u2212 minus) between two letters", "if", "char", "==", "'−'", "and", "before", "and", "before", "[", "-", "1", "]", ".", "isalpha", "(", ")", "and", "after", "and", "after", "[", "0", "]", ".", "isalpha", "(", ")", ":", "return", "self", ".", "_split_span", "(", "span", ",", "i", ",", "1", ")", "elif", "char", "==", "'±'", ":", "# Split around ± unless surrounded by brackets", "if", "not", "(", "before", "and", "after", "and", "before", "[", "-", "1", "]", "==", "'('", "and", "after", "[", "0", "]", "==", "')'", ")", ":", "return", "self", ".", "_split_span", "(", "span", ",", "i", ",", "1", ")", "elif", "char", "==", "'/'", ":", "# Split around / unless '+/-' or '-/-' etc.", "if", "not", "(", "before", "and", "after", "and", "before", "[", "-", "1", "]", "in", "self", ".", "NO_SPLIT_SLASH", "and", "after", "[", "0", "]", "in", "self", ".", "NO_SPLIT_SLASH", ")", ":", "return", "self", ".", "_split_span", "(", "span", ",", "i", ",", "1", ")", "elif", "char", "==", "'>'", ":", "if", "not", "(", "before", "and", "before", "[", "-", "1", "]", "==", "'-'", ")", ":", "# Split if preceding is not -", "return", "self", ".", "_split_span", "(", "span", ",", "i", ",", "1", ")", "if", "before", "and", "before", "[", "-", "1", "]", "==", "'-'", ":", "# If preceding is -, split around -> unless in chemical name", "if", "not", "text", "==", "'->'", "and", "not", "self", ".", "_is_saccharide_arrow", "(", "before", "[", ":", "-", "1", "]", ",", "after", ")", ":", "return", "self", ".", "_split_span", "(", "span", ",", "i", "-", "1", ",", "2", ")", "elif", "char", "==", "'→'", "and", "not", "self", ".", "_is_saccharide_arrow", "(", "before", ",", "after", ")", ":",
"# Split around → unless in chemical name", "return", "self", ".", "_split_span", "(", "span", ",", "i", ",", "1", ")", "elif", "char", "==", "'('", "and", "self", ".", "_is_number", "(", "before", ")", "and", "not", "'('", "in", "after", "and", "not", "')'", "in", "after", ":", "# Split around open bracket after a number", "return", "self", ".", "_split_span", "(", "span", ",", "i", ",", "1", ")", "elif", "char", "==", "'-'", ":", "lowerbefore", "=", "lowertext", "[", ":", "i", "]", "lowerafter", "=", "lowertext", "[", "i", "+", "1", ":", "]", "# Always split on -of-the- -to- -in- -by- -of- -or- -and- -per- -the-", "if", "lowerafter", "[", ":", "7", "]", "==", "'of-the-'", ":", "return", "[", "(", "span", "[", "0", "]", ",", "span", "[", "0", "]", "+", "i", ")", ",", "(", "span", "[", "0", "]", "+", "i", ",", "span", "[", "0", "]", "+", "i", "+", "1", ")", ",", "(", "span", "[", "0", "]", "+", "i", "+", "1", ",", "span", "[", "0", "]", "+", "i", "+", "3", ")", ",", "(", "span", "[", "0", "]", "+", "i", "+", "3", ",", "span", "[", "0", "]", "+", "i", "+", "4", ")", ",", "(", "span", "[", "0", "]", "+", "i", "+", "4", ",", "span", "[", "0", "]", "+", "i", "+", "7", ")", ",", "(", "span", "[", "0", "]", "+", "i", "+", "7", ",", "span", "[", "0", "]", "+", "i", "+", "8", ")", ",", "(", "span", "[", "0", "]", "+", "i", "+", "8", ",", "span", "[", "1", "]", ")", "]", "if", "lowerafter", "[", ":", "5", "]", "in", "{", "'on-a-'", ",", "'of-a-'", "}", ":", "return", "[", "(", "span", "[", "0", "]", ",", "span", "[", "0", "]", "+", "i", ")", ",", "(", "span", "[", "0", "]", "+", "i", ",", "span", "[", "0", "]", "+", "i", "+", "1", ")", ",", "(", "span", "[", "0", "]", "+", "i", "+", "1", ",", "span", "[", "0", "]", "+", "i", "+", "3", ")", ",", "(", "span", "[", "0", "]", "+", "i", "+", "3", ",", "span", "[", "0", "]", "+", "i", "+", "4", ")", ",", "(", "span", "[", "0", "]", "+", "i", "+", "4", ",", "span", "[", "0", "]", "+", "i", "+", "5", ")", ",", "(", "span", "[", "0", "]", "+", "i", "+", "5", ",", "span", "[", "0", "]", "+", "i", "+", "6", ")", ",", "(", "span", "[", "0", "]", "+", "i", "+", "6", ",", "span", "[", "1", "]", ")", "]", "if", "lowerafter", "[", ":", "3", "]", "in", "{", "'to-'", ",", "'in-'", ",", "'by-'", ",", "'of-'", ",", "'or-'", ",", "'on-'", "}", ":", "return", "[", "(", "span", "[", "0", "]", ",", "span", "[", "0", "]", "+", "i", ")", ",", "(", "span", "[", "0", "]", "+", "i", ",", "span", "[", "0", "]", "+", "i", "+", "1", ")", ",", "(", "span", "[", "0", "]", "+", "i", "+", "1", ",", "span", "[", "0", "]", "+", "i", "+", "3", ")", ",", "(", "span", "[", "0", "]", "+", "i", "+", "3", ",", "span", "[", "0", "]", "+", "i", "+", "4", ")", ",", "(", "span", "[", "0", "]", "+", "i", "+", "4", ",", "span", "[", "1", "]", ")", "]", "if", "lowerafter", "[", ":", "4", "]", "in", "{", "'and-'", ",", "'per-'", ",", "'the-'", "}", ":", "return", "[", "(", "span", "[", "0", "]", ",", "span", "[", "0", "]", "+", "i", ")", ",", "(", "span", "[", "0", "]", "+", "i", ",", "span", "[", "0", "]", "+", "i", "+", "1", ")", ",", "(", "span", "[", "0", "]", "+", "i", "+", "1", ",", "span", "[", "0", "]", "+", "i", "+", "4", ")", ",", "(", "span", "[", "0", "]", "+", "i", "+", "4", ",", "span", "[", "0", "]", "+", "i", "+", "5", ")", ",", "(", "span", "[", "0", "]", "+", "i", "+", "5", ",", "span", "[", "1", "]", ")", "]", "# By default we split on hyphens", "split", "=", "True", "if", "lowerafter", "==", "'nmr'", ":", "split", "=", "True", "# Always split NMR off end",
"elif", "bracket_level", "(", "text", ")", "==", "0", "and", "(", "not", "bracket_level", "(", "after", ")", "==", "0", "or", "not", "bracket_level", "(", "before", ")", "==", "0", ")", ":", "split", "=", "False", "# Don't split if within brackets", "elif", "after", "and", "after", "[", "0", "]", "==", "'>'", ":", "split", "=", "False", "# Don't split if followed by >", "elif", "lowerbefore", "in", "self", ".", "NO_SPLIT_PREFIX", "or", "lowerafter", "in", "self", ".", "NO_SPLIT_SUFFIX", ":", "split", "=", "False", "# Don't split if prefix or suffix in list", "elif", "self", ".", "NO_SPLIT_PREFIX_ENDING", ".", "search", "(", "lowerbefore", ")", ":", "split", "=", "False", "# Don't split if prefix ends with pattern", "elif", "lowerafter", "in", "self", ".", "SPLIT_SUFFIX", ":", "split", "=", "True", "# Do split if suffix in list", "elif", "len", "(", "before", ")", "<=", "1", "or", "len", "(", "after", ")", "<=", "2", ":", "split", "=", "False", "# Don't split if not at least 2 char before and 3 after", "elif", "self", ".", "NO_SPLIT_CHEM", ".", "search", "(", "lowerbefore", ")", "or", "self", ".", "NO_SPLIT_CHEM", ".", "search", "(", "lowerafter", ")", ":", "split", "=", "False", "# Don't split if prefix or suffix match chem regex", "if", "split", ":", "return", "self", ".", "_split_span", "(", "span", ",", "i", ",", "1", ")", "# TODO: Errors:", "# [³H]-choline", "# S,S'-...", "# 1,4-di-substituted", "# 11-β - hydroxysteroid", "# Spelt out greek: 11beta - hydroxysteroid", "# ...-N-substituted like 2,5-dimethyl-N-substituted pyrroles", "# 4-(2-Butyl-6,7-dichloro-2-cyclopentyl-indan-1-on-5-yl) oxobutyric acid", "# Adenosine - monophosphate", "# Consistency for amino acids: Arg-Arg and Arg-Arg-Asp... probably always split", "# D,L-α-peptide?", "# N'-formylkynurenine", "# poly(D,L-lactic acid )?", "# poly(methyl metha-acrylate )?", "# Poly(N - alkyl Acrylamide )", "# poly(N - isopropyl acrylamide )", "# R,S - lorazepam", "# S,S - dioxide", "# Split units off the end of a numeric value", "quantity", "=", "self", ".", "QUANTITY_RE", ".", "search", "(", "text", ")", "if", "quantity", ":", "return", "self", ".", "_split_span", "(", "span", ",", "len", "(", "quantity", ".", "group", "(", "6", ")", "or", "quantity", ".", "group", "(", "3", ")", "or", "quantity", ".", "group", "(", "2", ")", ")", ",", "0", ")", "# Split pH off the start of a numeric value", "if", "text", ".", "startswith", "(", "'pH'", ")", "and", "self", ".", "_is_number", "(", "text", "[", "2", ":", "]", ")", ":", "return", "self", ".", "_split_span", "(", "span", ",", "2", ",", "0", ")", "# Split contraction words", "for", "contraction", "in", "self", ".", "CONTRACTIONS", ":", "if", "lowertext", "==", "contraction", "[", "0", "]", ":", "return", "self", ".", "_split_span", "(", "span", ",", "contraction", "[", "1", "]", ")", "if", "nextspan", ":", "nexttext", "=", "s", "[", "nextspan", "[", "0", "]", ":", "nextspan", "[", "1", "]", "]", "# Split NMR isotope whitespace errors (joined with previous sentence full stop)", "if", "nexttext", "==", "'NMR'", ":", "ind", "=", "text", ".", "rfind", "(", "'.'", ")", "if", "ind", ">", "-", "1", "and", "text", "[", "ind", "+", "1", ":", "]", "in", "{", "'1H'", ",", "'13C'", ",", "'15N'", ",", "'31P'", ",", "'19F'", ",", "'11B'", ",", "'29Si'", ",", "'170'", ",", "'73Ge'", ",", "'195Pt'", ",", "'33S'", ",", "'13C{1H}'", "}", ":", "return", "self", ".", "_split_span", "(", "span", ",", "ind", ",", "1", ")", "return", "[", "span", "]" ]
Recursively subdivide spans based on a series of rules.
[ "Recursively", "subdivide", "spans", "based", "on", "a", "series", "of", "rules", "." ]
python
train
56.180412
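A minimal standalone sketch of the span-splitting primitive the rules above keep calling. The real _split_span lives on ChemDataExtractor's tokenizer class; the (index, length) semantics below are inferred from the calls in the tokens above and are an assumption, not the library's implementation.

# Sketch only: approximates _split_span(span, index, length) as used above.
# `index` is relative to the span start; a separator of `length` characters
# starting there becomes its own span, and zero-length splits just cut in two.
def split_span(span, index, length=0):
    start, end = span
    if length:
        return [(start, start + index),
                (start + index, start + index + length),
                (start + index + length, end)]
    return [(start, start + index), (start + index, end)]

# e.g. the "split pH off the start of a numeric value" rule above:
print(split_span((0, 5), 2))  # [(0, 2), (2, 5)] for the text 'pH7.4'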
MolSSI-BSE/basis_set_exchange
basis_set_exchange/cli/bsecurate_handlers.py
https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/cli/bsecurate_handlers.py#L28-L41
def _bsecurate_cli_component_file_refs(args): '''Handles the component-file-refs subcommand''' data = curate.component_file_refs(args.files) s = '' for cfile, cdata in data.items(): s += cfile + '\n' rows = [] for el, refs in cdata: rows.append((' ' + el, ' '.join(refs))) s += '\n'.join(format_columns(rows)) + '\n\n' return s
[ "def", "_bsecurate_cli_component_file_refs", "(", "args", ")", ":", "data", "=", "curate", ".", "component_file_refs", "(", "args", ".", "files", ")", "s", "=", "''", "for", "cfile", ",", "cdata", "in", "data", ".", "items", "(", ")", ":", "s", "+=", "cfile", "+", "'\\n'", "rows", "=", "[", "]", "for", "el", ",", "refs", "in", "cdata", ":", "rows", ".", "append", "(", "(", "' '", "+", "el", ",", "' '", ".", "join", "(", "refs", ")", ")", ")", "s", "+=", "'\\n'", ".", "join", "(", "format_columns", "(", "rows", ")", ")", "+", "'\\n\\n'", "return", "s" ]
Handles the component-file-refs subcommand
[ "Handles", "the", "component", "-", "file", "-", "refs", "subcommand" ]
python
train
27.428571
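The handler above leans on a format_columns helper from the package's own utilities; a hypothetical stand-in with the behaviour the call site implies (pad each column to its widest cell) could look like this.

# Hypothetical stand-in for format_columns; the real helper belongs to
# basis_set_exchange and may differ in details such as separators.
def format_columns(rows, sep='  '):
    if not rows:
        return []
    widths = [max(len(str(cell)) for cell in col) for col in zip(*rows)]
    return [sep.join(str(cell).ljust(w) for cell, w in zip(row, widths)).rstrip()
            for row in rows]

print('\n'.join(format_columns([('  H', 'ref1 ref2'), ('  He', 'ref3')])))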
indico/indico-plugins
livesync/indico_livesync/cli.py
https://github.com/indico/indico-plugins/blob/fe50085cc63be9b8161b09539e662e7b04e4b38e/livesync/indico_livesync/cli.py#L37-L41
def available_backends(): """Lists the currently available backend types""" print 'The following LiveSync agents are available:' for name, backend in current_plugin.backend_classes.iteritems(): print cformat(' - %{white!}{}%{reset}: {} ({})').format(name, backend.title, backend.description)
[ "def", "available_backends", "(", ")", ":", "print", "'The following LiveSync agents are available:'", "for", "name", ",", "backend", "in", "current_plugin", ".", "backend_classes", ".", "iteritems", "(", ")", ":", "print", "cformat", "(", "' - %{white!}{}%{reset}: {} ({})'", ")", ".", "format", "(", "name", ",", "backend", ".", "title", ",", "backend", ".", "description", ")" ]
Lists the currently available backend types
[ "Lists", "the", "currently", "available", "backend", "types" ]
python
train
61.6
UCL-INGI/INGInious
inginious/frontend/plugins/simple_grader/__init__.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/plugins/simple_grader/__init__.py#L17-L116
def init(plugin_manager, course_factory, client, config): """ Init the external grader plugin. This simple grader allows only anonymous requests, and submissions are not stored in database. Available configuration: :: plugins: - plugin_module: inginious.frontend.plugins.simple_grader courseid : "external" page_pattern: "/external" return_fields: "^(result|text|problems)$" The grader will only return fields that are in the job return dict if their key match return_fields. Different types of request are available : see documentation """ courseid = config.get('courseid', 'external') course = course_factory.get_course(courseid) page_pattern = config.get('page_pattern', '/external') return_fields = re.compile(config.get('return_fields', '^(result|text|problems)$')) client_buffer = ClientBuffer(client) client_sync = ClientSync(client) class ExternalGrader(INGIniousPage): """ Manages job from outside, using the default input """ def GET(self): """ GET request """ return """ <!DOCTYPE html> <html> <head> <title>External grade POST test</title> </head> <body> <form method="post"> <textarea style="width:100%; height:400px;" name="input">{"question1":"print 'Hello World!'"}</textarea><br/> <input type="text" name="taskid" value="helloworld"/> (taskid)<br/> <input type="checkbox" name="async"/> async?<br/> <input type="submit"/> </form> </body> </html>""" def keep_only_config_return_values(self, job_return): """ Keep only some useful return values """ return {key: value for key, value in job_return.items() if return_fields.match(key)} def POST(self): """ POST request """ web.header('Access-Control-Allow-Origin', '*') web.header('Content-Type', 'application/json') post_input = web.input() if "input" in post_input and "taskid" in post_input: # New job try: task_input = json.loads(post_input.input) except: return json.dumps({"status": "error", "status_message": "Cannot decode input"}) try: task = course.get_task(post_input.taskid) except: return json.dumps({"status": "error", "status_message": "Cannot open task"}) if not task.input_is_consistent(task_input, self.default_allowed_file_extensions, self.default_max_file_size): return json.dumps({"status": "error", "status_message": "Input is not consistent with the task"}) if post_input.get("async") is None: # New sync job try: result, grade, problems, tests, custom, state, archive, stdout, stderr = client_sync.new_job(task, task_input, "Plugin - Simple Grader") job_return = {"result":result, "grade": grade, "problems": problems, "tests": tests, "custom": custom, "state": state, "archive": archive, "stdout": stdout, "stderr": stderr} except: return json.dumps({"status": "error", "status_message": "An internal error occurred"}) return json.dumps(dict(list({"status": "done"}.items()) + list(self.keep_only_config_return_values(job_return).items()))) else: # New async job jobid = client_buffer.new_job(task, task_input, "Plugin - Simple Grader") return json.dumps({"status": "done", "jobid": str(jobid)}) elif "jobid" in post_input: # Get status of async job if client_buffer.is_waiting(post_input["jobid"]): return json.dumps({"status": "waiting"}) elif client_buffer.is_done(post_input["jobid"]): result, grade, problems, tests, custom, state, archive, stdout, stderr = client_buffer.get_result(post_input["jobid"]) job_return = {"result": result, "grade": grade, "problems": problems, "tests": tests, "custom": custom, "archive": archive, "stdout": stdout, "stderr": stderr} return json.dumps(dict(list({"status": "done"}.items()) + 
list(self.keep_only_config_return_values(job_return).items()))) else: return json.dumps({"status": "error", "status_message": "There is no job with jobid {}".format(post_input["jobid"])}) else: return json.dumps({"status": "error", "status_message": "Unknown request type"}) plugin_manager.add_page(page_pattern, ExternalGrader)
[ "def", "init", "(", "plugin_manager", ",", "course_factory", ",", "client", ",", "config", ")", ":", "courseid", "=", "config", ".", "get", "(", "'courseid'", ",", "'external'", ")", "course", "=", "course_factory", ".", "get_course", "(", "courseid", ")", "page_pattern", "=", "config", ".", "get", "(", "'page_pattern'", ",", "'/external'", ")", "return_fields", "=", "re", ".", "compile", "(", "config", ".", "get", "(", "'return_fields'", ",", "'^(result|text|problems)$'", ")", ")", "client_buffer", "=", "ClientBuffer", "(", "client", ")", "client_sync", "=", "ClientSync", "(", "client", ")", "class", "ExternalGrader", "(", "INGIniousPage", ")", ":", "\"\"\" Manages job from outside, using the default input \"\"\"", "def", "GET", "(", "self", ")", ":", "\"\"\" GET request \"\"\"", "return", "\"\"\"\n <!DOCTYPE html>\n <html>\n <head>\n <title>External grade POST test</title>\n </head>\n <body>\n <form method=\"post\">\n <textarea style=\"width:100%; height:400px;\" name=\"input\">{\"question1\":\"print 'Hello World!'\"}</textarea><br/>\n <input type=\"text\" name=\"taskid\" value=\"helloworld\"/> (taskid)<br/>\n <input type=\"checkbox\" name=\"async\"/> async?<br/>\n <input type=\"submit\"/>\n </form>\n </body>\n </html>\"\"\"", "def", "keep_only_config_return_values", "(", "self", ",", "job_return", ")", ":", "\"\"\" Keep only some useful return values \"\"\"", "return", "{", "key", ":", "value", "for", "key", ",", "value", "in", "job_return", ".", "items", "(", ")", "if", "return_fields", ".", "match", "(", "key", ")", "}", "def", "POST", "(", "self", ")", ":", "\"\"\" POST request \"\"\"", "web", ".", "header", "(", "'Access-Control-Allow-Origin'", ",", "'*'", ")", "web", ".", "header", "(", "'Content-Type'", ",", "'application/json'", ")", "post_input", "=", "web", ".", "input", "(", ")", "if", "\"input\"", "in", "post_input", "and", "\"taskid\"", "in", "post_input", ":", "# New job", "try", ":", "task_input", "=", "json", ".", "loads", "(", "post_input", ".", "input", ")", "except", ":", "return", "json", ".", "dumps", "(", "{", "\"status\"", ":", "\"error\"", ",", "\"status_message\"", ":", "\"Cannot decode input\"", "}", ")", "try", ":", "task", "=", "course", ".", "get_task", "(", "post_input", ".", "taskid", ")", "except", ":", "return", "json", ".", "dumps", "(", "{", "\"status\"", ":", "\"error\"", ",", "\"status_message\"", ":", "\"Cannot open task\"", "}", ")", "if", "not", "task", ".", "input_is_consistent", "(", "task_input", ",", "self", ".", "default_allowed_file_extensions", ",", "self", ".", "default_max_file_size", ")", ":", "return", "json", ".", "dumps", "(", "{", "\"status\"", ":", "\"error\"", ",", "\"status_message\"", ":", "\"Input is not consistent with the task\"", "}", ")", "if", "post_input", ".", "get", "(", "\"async\"", ")", "is", "None", ":", "# New sync job", "try", ":", "result", ",", "grade", ",", "problems", ",", "tests", ",", "custom", ",", "state", ",", "archive", ",", "stdout", ",", "stderr", "=", "client_sync", ".", "new_job", "(", "task", ",", "task_input", ",", "\"Plugin - Simple Grader\"", ")", "job_return", "=", "{", "\"result\"", ":", "result", ",", "\"grade\"", ":", "grade", ",", "\"problems\"", ":", "problems", ",", "\"tests\"", ":", "tests", ",", "\"custom\"", ":", "custom", ",", "\"state\"", ":", "state", ",", "\"archive\"", ":", "archive", ",", "\"stdout\"", ":", "stdout", ",", "\"stderr\"", ":", "stderr", "}", "except", ":", "return", "json", ".", "dumps", "(", "{", "\"status\"", ":", "\"error\"", ",", "\"status_message\"", ":", "\"An 
internal error occurred\"", "}", ")", "return", "json", ".", "dumps", "(", "dict", "(", "list", "(", "{", "\"status\"", ":", "\"done\"", "}", ".", "items", "(", ")", ")", "+", "list", "(", "self", ".", "keep_only_config_return_values", "(", "job_return", ")", ".", "items", "(", ")", ")", ")", ")", "else", ":", "# New async job", "jobid", "=", "client_buffer", ".", "new_job", "(", "task", ",", "task_input", ",", "\"Plugin - Simple Grader\"", ")", "return", "json", ".", "dumps", "(", "{", "\"status\"", ":", "\"done\"", ",", "\"jobid\"", ":", "str", "(", "jobid", ")", "}", ")", "elif", "\"jobid\"", "in", "post_input", ":", "# Get status of async job", "if", "client_buffer", ".", "is_waiting", "(", "post_input", "[", "\"jobid\"", "]", ")", ":", "return", "json", ".", "dumps", "(", "{", "\"status\"", ":", "\"waiting\"", "}", ")", "elif", "client_buffer", ".", "is_done", "(", "post_input", "[", "\"jobid\"", "]", ")", ":", "result", ",", "grade", ",", "problems", ",", "tests", ",", "custom", ",", "state", ",", "archive", ",", "stdout", ",", "stderr", "=", "client_buffer", ".", "get_result", "(", "post_input", "[", "\"jobid\"", "]", ")", "job_return", "=", "{", "\"result\"", ":", "result", ",", "\"grade\"", ":", "grade", ",", "\"problems\"", ":", "problems", ",", "\"tests\"", ":", "tests", ",", "\"custom\"", ":", "custom", ",", "\"archive\"", ":", "archive", ",", "\"stdout\"", ":", "stdout", ",", "\"stderr\"", ":", "stderr", "}", "return", "json", ".", "dumps", "(", "dict", "(", "list", "(", "{", "\"status\"", ":", "\"done\"", "}", ".", "items", "(", ")", ")", "+", "list", "(", "self", ".", "keep_only_config_return_values", "(", "job_return", ")", ".", "items", "(", ")", ")", ")", ")", "else", ":", "return", "json", ".", "dumps", "(", "{", "\"status\"", ":", "\"error\"", ",", "\"status_message\"", ":", "\"There is no job with jobid {}\"", ".", "format", "(", "post_input", "[", "\"jobid\"", "]", ")", "}", ")", "else", ":", "return", "json", ".", "dumps", "(", "{", "\"status\"", ":", "\"error\"", ",", "\"status_message\"", ":", "\"Unknown request type\"", "}", ")", "plugin_manager", ".", "add_page", "(", "page_pattern", ",", "ExternalGrader", ")" ]
Init the external grader plugin. This simple grader allows only anonymous requests, and submissions are not stored in database. Available configuration: :: plugins: - plugin_module: inginious.frontend.plugins.simple_grader courseid : "external" page_pattern: "/external" return_fields: "^(result|text|problems)$" The grader will only return fields that are in the job return dict if their key match return_fields. Different types of request are available : see documentation
[ "Init", "the", "external", "grader", "plugin", ".", "This", "simple", "grader", "allows", "only", "anonymous", "requests", "and", "submissions", "are", "not", "stored", "in", "database", "." ]
python
train
49.96
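A sketch of a client for the endpoint registered above. The path and form fields come straight from the plugin code; the base URL, course, and task are placeholder assumptions.

import json
import time

import requests

BASE = 'http://localhost:8080/external'  # hypothetical INGInious instance

# Synchronous grading: omit the 'async' field entirely.
payload = {'taskid': 'helloworld',
           'input': json.dumps({'question1': "print 'Hello World!'"})}
print(requests.post(BASE, data=payload).json())

# Asynchronous grading: submit with 'async', then poll using the jobid.
payload['async'] = 'on'
jobid = requests.post(BASE, data=payload).json()['jobid']
status = {'status': 'waiting'}
while status['status'] == 'waiting':
    time.sleep(1)
    status = requests.post(BASE, data={'jobid': jobid}).json()
print(status)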
apache/airflow
airflow/contrib/hooks/gcp_vision_hook.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L271-L294
def update_product( self, product, location=None, product_id=None, update_mask=None, project_id=None, retry=None, timeout=None, metadata=None, ): """ For the documentation see: :class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductUpdateOperator` """ client = self.get_conn() product = self.product_name_determiner.get_entity_with_name(product, product_id, location, project_id) self.log.info('Updating ProductSet: %s', product.name) response = client.update_product( product=product, update_mask=update_mask, retry=retry, timeout=timeout, metadata=metadata ) self.log.info('Product updated: %s', response.name if response else '') self.log.debug('Product updated:\n%s', response) return MessageToDict(response)
[ "def", "update_product", "(", "self", ",", "product", ",", "location", "=", "None", ",", "product_id", "=", "None", ",", "update_mask", "=", "None", ",", "project_id", "=", "None", ",", "retry", "=", "None", ",", "timeout", "=", "None", ",", "metadata", "=", "None", ",", ")", ":", "client", "=", "self", ".", "get_conn", "(", ")", "product", "=", "self", ".", "product_name_determiner", ".", "get_entity_with_name", "(", "product", ",", "product_id", ",", "location", ",", "project_id", ")", "self", ".", "log", ".", "info", "(", "'Updating ProductSet: %s'", ",", "product", ".", "name", ")", "response", "=", "client", ".", "update_product", "(", "product", "=", "product", ",", "update_mask", "=", "update_mask", ",", "retry", "=", "retry", ",", "timeout", "=", "timeout", ",", "metadata", "=", "metadata", ")", "self", ".", "log", ".", "info", "(", "'Product updated: %s'", ",", "response", ".", "name", "if", "response", "else", "''", ")", "self", ".", "log", ".", "debug", "(", "'Product updated:\\n%s'", ",", "response", ")", "return", "MessageToDict", "(", "response", ")" ]
For the documentation see: :class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionProductUpdateOperator`
[ "For", "the", "documentation", "see", ":", ":", "class", ":", "~airflow", ".", "contrib", ".", "operators", ".", "gcp_vision_operator", ".", "CloudVisionProductUpdateOperator" ]
python
test
37.125
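A hedged usage sketch for the hook method above. The class and import path match this module's location in Airflow's contrib tree; the connection id, project, location, and product values are placeholders, and Product is assumed to come from the pre-2.0 google-cloud-vision client.

from google.cloud.vision_v1.types import Product

from airflow.contrib.hooks.gcp_vision_hook import CloudVisionHook

hook = CloudVisionHook(gcp_conn_id='google_cloud_default')
updated = hook.update_product(
    product=Product(display_name='new-name'),  # fields to change
    location='europe-west1',                   # placeholder values
    product_id='my-product',
    project_id='my-project',
)
print(updated)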
Mindwerks/worldengine
worldengine/basic_map_operations.py
https://github.com/Mindwerks/worldengine/blob/64dff8eb7824ce46b5b6cb8006bcef21822ef144/worldengine/basic_map_operations.py#L10-L24
def index_of_nearest(p, hot_points, distance_f=distance): """Given a point and a set of hot points it finds the hot point nearest to the given point. An arbitrary distance function can be specified :return the index of the nearest hot point, or None if the list of hot points is empty """ min_dist = None nearest_hp_i = None for i, hp in enumerate(hot_points): dist = distance_f(p, hp) if min_dist is None or dist < min_dist: min_dist = dist nearest_hp_i = i return nearest_hp_i
[ "def", "index_of_nearest", "(", "p", ",", "hot_points", ",", "distance_f", "=", "distance", ")", ":", "min_dist", "=", "None", "nearest_hp_i", "=", "None", "for", "i", ",", "hp", "in", "enumerate", "(", "hot_points", ")", ":", "dist", "=", "distance_f", "(", "p", ",", "hp", ")", "if", "min_dist", "is", "None", "or", "dist", "<", "min_dist", ":", "min_dist", "=", "dist", "nearest_hp_i", "=", "i", "return", "nearest_hp_i" ]
Given a point and a set of hot points it finds the hot point nearest to the given point. An arbitrary distance function can be specified :return the index of the nearest hot point, or None if the list of hot points is empty
[ "Given", "a", "point", "and", "a", "set", "of", "hot", "points", "it", "found", "the", "hot", "point", "nearest", "to", "the", "given", "point", ".", "An", "arbitrary", "distance", "function", "can", "be", "specified", ":", "return", "the", "index", "of", "the", "nearest", "hot", "points", "or", "None", "if", "the", "list", "of", "hot", "points", "is", "empty" ]
python
train
36.866667
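A usage sketch for index_of_nearest as defined above, substituting a plain Euclidean metric for worldengine's default distance function.

import math

def euclidean(a, b):
    return math.hypot(a[0] - b[0], a[1] - b[1])

hot_points = [(0, 0), (10, 10), (3, 4)]
i = index_of_nearest((2, 3), hot_points, distance_f=euclidean)
print(i, hot_points[i])  # 2 (3, 4)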
kislyuk/aegea
aegea/packages/github3/github.py
https://github.com/kislyuk/aegea/blob/94957e9dba036eae3052e2662c208b259c08399a/aegea/packages/github3/github.py#L896-L932
def markdown(self, text, mode='', context='', raw=False): """Render an arbitrary markdown document. :param str text: (required), the text of the document to render :param str mode: (optional), 'markdown' or 'gfm' :param str context: (optional), only important when using mode 'gfm', this is the repository to use as the context for the rendering :param bool raw: (optional), renders a document like a README.md, no gfm, no context :returns: str -- HTML formatted text """ data = None json = False headers = {} if raw: url = self._build_url('markdown', 'raw') data = text headers['content-type'] = 'text/plain' else: url = self._build_url('markdown') data = {} if text: data['text'] = text if mode in ('markdown', 'gfm'): data['mode'] = mode if context: data['context'] = context json = True if data: req = self._post(url, data=data, json=json, headers=headers) if req.ok: return req.content return ''
[ "def", "markdown", "(", "self", ",", "text", ",", "mode", "=", "''", ",", "context", "=", "''", ",", "raw", "=", "False", ")", ":", "data", "=", "None", "json", "=", "False", "headers", "=", "{", "}", "if", "raw", ":", "url", "=", "self", ".", "_build_url", "(", "'markdown'", ",", "'raw'", ")", "data", "=", "text", "headers", "[", "'content-type'", "]", "=", "'text/plain'", "else", ":", "url", "=", "self", ".", "_build_url", "(", "'markdown'", ")", "data", "=", "{", "}", "if", "text", ":", "data", "[", "'text'", "]", "=", "text", "if", "mode", "in", "(", "'markdown'", ",", "'gfm'", ")", ":", "data", "[", "'mode'", "]", "=", "mode", "if", "context", ":", "data", "[", "'context'", "]", "=", "context", "json", "=", "True", "if", "data", ":", "req", "=", "self", ".", "_post", "(", "url", ",", "data", "=", "data", ",", "json", "=", "json", ",", "headers", "=", "headers", ")", "if", "req", ".", "ok", ":", "return", "req", ".", "content", "return", "''" ]
Render an arbitrary markdown document. :param str text: (required), the text of the document to render :param str mode: (optional), 'markdown' or 'gfm' :param str context: (optional), only important when using mode 'gfm', this is the repository to use as the context for the rendering :param bool raw: (optional), renders a document like a README.md, no gfm, no context :returns: str -- HTML formatted text
[ "Render", "an", "arbitrary", "markdown", "document", "." ]
python
train
32.459459
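A usage sketch against this method via github3.py's top-level client; an anonymous client can render public markdown, subject to rate limits, and the repository name below is a placeholder.

from github3 import GitHub

gh = GitHub()
print(gh.markdown('# Hello **world**'))  # HTML bytes

# GitHub-flavoured markdown with a repository context:
print(gh.markdown('Fixes #42', mode='gfm', context='someuser/somerepo'))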
oanda/v20-python
src/v20/primitives.py
https://github.com/oanda/v20-python/blob/f28192f4a31bce038cf6dfa302f5878bec192fe5/src/v20/primitives.py#L205-L230
def from_dict(data, ctx): """ Instantiate a new InstrumentCommission from a dict (generally from loading a JSON response). The data used to instantiate the InstrumentCommission is a shallow copy of the dict passed in, with any complex child types instantiated appropriately. """ data = data.copy() if data.get('commission') is not None: data['commission'] = ctx.convert_decimal_number( data.get('commission') ) if data.get('unitsTraded') is not None: data['unitsTraded'] = ctx.convert_decimal_number( data.get('unitsTraded') ) if data.get('minimumCommission') is not None: data['minimumCommission'] = ctx.convert_decimal_number( data.get('minimumCommission') ) return InstrumentCommission(**data)
[ "def", "from_dict", "(", "data", ",", "ctx", ")", ":", "data", "=", "data", ".", "copy", "(", ")", "if", "data", ".", "get", "(", "'commission'", ")", "is", "not", "None", ":", "data", "[", "'commission'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'commission'", ")", ")", "if", "data", ".", "get", "(", "'unitsTraded'", ")", "is", "not", "None", ":", "data", "[", "'unitsTraded'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'unitsTraded'", ")", ")", "if", "data", ".", "get", "(", "'minimumCommission'", ")", "is", "not", "None", ":", "data", "[", "'minimumCommission'", "]", "=", "ctx", ".", "convert_decimal_number", "(", "data", ".", "get", "(", "'minimumCommission'", ")", ")", "return", "InstrumentCommission", "(", "*", "*", "data", ")" ]
Instantiate a new InstrumentCommission from a dict (generally from loading a JSON response). The data used to instantiate the InstrumentCommission is a shallow copy of the dict passed in, with any complex child types instantiated appropriately.
[ "Instantiate", "a", "new", "InstrumentCommission", "from", "a", "dict", "(", "generally", "from", "loading", "a", "JSON", "response", ")", ".", "The", "data", "used", "to", "instantiate", "the", "InstrumentCommission", "is", "a", "shallow", "copy", "of", "the", "dict", "passed", "in", "with", "any", "complex", "child", "types", "instantiated", "appropriately", "." ]
python
train
34
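An illustrative call with a stub context; the real ctx is a v20 Context whose convert_decimal_number may return Decimal, float, or str depending on configuration, so the stub below is an assumption.

from decimal import Decimal

from v20.primitives import InstrumentCommission

class StubCtx:
    @staticmethod
    def convert_decimal_number(value):
        return Decimal(str(value))  # one plausible conversion policy

data = {'commission': '0.5', 'unitsTraded': '100000', 'minimumCommission': '1.25'}
ic = InstrumentCommission.from_dict(data, StubCtx())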
mozilla/python_moztelemetry
moztelemetry/parse_scalars.py
https://github.com/mozilla/python_moztelemetry/blob/09ddf1ec7d953a4308dfdcb0ed968f27bd5921bb/moztelemetry/parse_scalars.py#L79-L156
def validate_types(self, definition): """This function performs some basic sanity checks on the scalar definition: - Checks that all the required fields are available. - Checks that all the fields have the expected types. :param definition: the dictionary containing the scalar properties. :raises ParserError: if a scalar definition field is of the wrong type. :raises ParserError: if a required field is missing or unknown fields are present. """ if not self._strict_type_checks: return # The required and optional fields in a scalar type definition. REQUIRED_FIELDS = { 'bug_numbers': list, # This contains ints. See LIST_FIELDS_CONTENT. 'description': string_types, 'expires': string_types, 'kind': string_types, 'notification_emails': list, # This contains strings. See LIST_FIELDS_CONTENT. 'record_in_processes': list, } OPTIONAL_FIELDS = { 'cpp_guard': string_types, 'release_channel_collection': string_types, 'keyed': bool, } # The types for the data within the fields that hold lists. LIST_FIELDS_CONTENT = { 'bug_numbers': int, 'notification_emails': string_types, 'record_in_processes': string_types, } # Concatenate the required and optional field definitions. ALL_FIELDS = REQUIRED_FIELDS.copy() ALL_FIELDS.update(OPTIONAL_FIELDS) # Checks that all the required fields are available. missing_fields = [f for f in REQUIRED_FIELDS.keys() if f not in definition] if len(missing_fields) > 0: raise ParserError(self._name + ' - missing required fields: ' + ', '.join(missing_fields) + '.\nSee: {}#required-fields'.format(BASE_DOC_URL)) # Do we have any unknown field? unknown_fields = [f for f in definition.keys() if f not in ALL_FIELDS] if len(unknown_fields) > 0: raise ParserError(self._name + ' - unknown fields: ' + ', '.join(unknown_fields) + '.\nSee: {}#required-fields'.format(BASE_DOC_URL)) # Checks the type for all the fields. wrong_type_names = ['{} must be {}'.format(f, utils.nice_type_name(ALL_FIELDS[f])) for f in definition.keys() if not isinstance(definition[f], ALL_FIELDS[f])] if len(wrong_type_names) > 0: raise ParserError(self._name + ' - ' + ', '.join(wrong_type_names) + '.\nSee: {}#required-fields'.format(BASE_DOC_URL)) # Check that the lists are not empty and that data in the lists # have the correct types. list_fields = [f for f in definition if isinstance(definition[f], list)] for field in list_fields: # Check for empty lists. if len(definition[field]) == 0: raise ParserError(("Field '{}' for probe '{}' must not be empty" + ".\nSee: {}#required-fields)") .format(field, self._name, BASE_DOC_URL)) # Check the type of the list content. broken_types =\ [not isinstance(v, LIST_FIELDS_CONTENT[field]) for v in definition[field]] if any(broken_types): raise ParserError(("Field '{}' for probe '{}' must only contain values of type {}" ".\nSee: {}#the-yaml-definition-file)") .format(field, self._name, utils.nice_type_name(LIST_FIELDS_CONTENT[field]), BASE_DOC_URL))
[ "def", "validate_types", "(", "self", ",", "definition", ")", ":", "if", "not", "self", ".", "_strict_type_checks", ":", "return", "# The required and optional fields in a scalar type definition.", "REQUIRED_FIELDS", "=", "{", "'bug_numbers'", ":", "list", ",", "# This contains ints. See LIST_FIELDS_CONTENT.", "'description'", ":", "string_types", ",", "'expires'", ":", "string_types", ",", "'kind'", ":", "string_types", ",", "'notification_emails'", ":", "list", ",", "# This contains strings. See LIST_FIELDS_CONTENT.", "'record_in_processes'", ":", "list", ",", "}", "OPTIONAL_FIELDS", "=", "{", "'cpp_guard'", ":", "string_types", ",", "'release_channel_collection'", ":", "string_types", ",", "'keyed'", ":", "bool", ",", "}", "# The types for the data within the fields that hold lists.", "LIST_FIELDS_CONTENT", "=", "{", "'bug_numbers'", ":", "int", ",", "'notification_emails'", ":", "string_types", ",", "'record_in_processes'", ":", "string_types", ",", "}", "# Concatenate the required and optional field definitions.", "ALL_FIELDS", "=", "REQUIRED_FIELDS", ".", "copy", "(", ")", "ALL_FIELDS", ".", "update", "(", "OPTIONAL_FIELDS", ")", "# Checks that all the required fields are available.", "missing_fields", "=", "[", "f", "for", "f", "in", "REQUIRED_FIELDS", ".", "keys", "(", ")", "if", "f", "not", "in", "definition", "]", "if", "len", "(", "missing_fields", ")", ">", "0", ":", "raise", "ParserError", "(", "self", ".", "_name", "+", "' - missing required fields: '", "+", "', '", ".", "join", "(", "missing_fields", ")", "+", "'.\\nSee: {}#required-fields'", ".", "format", "(", "BASE_DOC_URL", ")", ")", "# Do we have any unknown field?", "unknown_fields", "=", "[", "f", "for", "f", "in", "definition", ".", "keys", "(", ")", "if", "f", "not", "in", "ALL_FIELDS", "]", "if", "len", "(", "unknown_fields", ")", ">", "0", ":", "raise", "ParserError", "(", "self", ".", "_name", "+", "' - unknown fields: '", "+", "', '", ".", "join", "(", "unknown_fields", ")", "+", "'.\\nSee: {}#required-fields'", ".", "format", "(", "BASE_DOC_URL", ")", ")", "# Checks the type for all the fields.", "wrong_type_names", "=", "[", "'{} must be {}'", ".", "format", "(", "f", ",", "utils", ".", "nice_type_name", "(", "ALL_FIELDS", "[", "f", "]", ")", ")", "for", "f", "in", "definition", ".", "keys", "(", ")", "if", "not", "isinstance", "(", "definition", "[", "f", "]", ",", "ALL_FIELDS", "[", "f", "]", ")", "]", "if", "len", "(", "wrong_type_names", ")", ">", "0", ":", "raise", "ParserError", "(", "self", ".", "_name", "+", "' - '", "+", "', '", ".", "join", "(", "wrong_type_names", ")", "+", "'.\\nSee: {}#required-fields'", ".", "format", "(", "BASE_DOC_URL", ")", ")", "# Check that the lists are not empty and that data in the lists", "# have the correct types.", "list_fields", "=", "[", "f", "for", "f", "in", "definition", "if", "isinstance", "(", "definition", "[", "f", "]", ",", "list", ")", "]", "for", "field", "in", "list_fields", ":", "# Check for empty lists.", "if", "len", "(", "definition", "[", "field", "]", ")", "==", "0", ":", "raise", "ParserError", "(", "(", "\"Field '{}' for probe '{}' must not be empty\"", "+", "\".\\nSee: {}#required-fields)\"", ")", ".", "format", "(", "field", ",", "self", ".", "_name", ",", "BASE_DOC_URL", ")", ")", "# Check the type of the list content.", "broken_types", "=", "[", "not", "isinstance", "(", "v", ",", "LIST_FIELDS_CONTENT", "[", "field", "]", ")", "for", "v", "in", "definition", "[", "field", "]", "]", "if", "any", "(", "broken_types", ")", ":", "raise", 
"ParserError", "(", "(", "\"Field '{}' for probe '{}' must only contain values of type {}\"", "\".\\nSee: {}#the-yaml-definition-file)\"", ")", ".", "format", "(", "field", ",", "self", ".", "_name", ",", "utils", ".", "nice_type_name", "(", "LIST_FIELDS_CONTENT", "[", "field", "]", ")", ",", "BASE_DOC_URL", ")", ")" ]
This function performs some basic sanity checks on the scalar definition: - Checks that all the required fields are available. - Checks that all the fields have the expected types. :param definition: the dictionary containing the scalar properties. :raises ParserError: if a scalar definition field is of the wrong type. :raises ParserError: if a required field is missing or unknown fields are present.
[ "This", "function", "performs", "some", "basic", "sanity", "checks", "on", "the", "scalar", "definition", ":", "-", "Checks", "that", "all", "the", "required", "fields", "are", "available", ".", "-", "Checks", "that", "all", "the", "fields", "have", "the", "expected", "types", "." ]
python
train
48.24359
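For reference, a scalar definition that satisfies the checks above: every required field present, typed as expected, and every list non-empty (values are illustrative only).

definition = {
    'bug_numbers': [1234567],
    'description': 'Number of times the thing happened.',
    'expires': 'never',
    'kind': 'uint',
    'notification_emails': ['[email protected]'],
    'record_in_processes': ['main', 'content'],
    'keyed': False,  # optional field
}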
rsc-dev/pbd
pbd/__init__.py
https://github.com/rsc-dev/pbd/blob/16c2eed1e35df238a76a7a469c056ff9ea8ec2a2/pbd/__init__.py#L158-L174
def find_imports(self, pbds): """Find all missing imports in list of Pbd instances. """ # List of types used, but not defined imports = list(set(self.uses).difference(set(self.defines))) # Clumsy, but enough for now for imp in imports: for p in pbds: if imp in p.defines: self.imports.append(p.name) break self.imports = list(set(self.imports)) for import_file in self.imports: self.lines.insert(2, 'import "{}";'.format(import_file))
[ "def", "find_imports", "(", "self", ",", "pbds", ")", ":", "# List of types used, but not defined", "imports", "=", "list", "(", "set", "(", "self", ".", "uses", ")", ".", "difference", "(", "set", "(", "self", ".", "defines", ")", ")", ")", "# Clumpsy, but enought for now ", "for", "imp", "in", "imports", ":", "for", "p", "in", "pbds", ":", "if", "imp", "in", "p", ".", "defines", ":", "self", ".", "imports", ".", "append", "(", "p", ".", "name", ")", "break", "self", ".", "imports", "=", "list", "(", "set", "(", "self", ".", "imports", ")", ")", "for", "import_file", "in", "self", ".", "imports", ":", "self", ".", "lines", ".", "insert", "(", "2", ",", "'import \"{}\";'", ".", "format", "(", "import_file", ")", ")" ]
Find all missing imports in list of Pbd instances.
[ "Find", "all", "missing", "imports", "in", "list", "of", "Pbd", "instances", "." ]
python
valid
34.823529
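The heart of the method above is a set difference; stripped of the Pbd bookkeeping it reduces to:

defines = {'Foo', 'Bar'}
uses = {'Foo', 'Baz', 'Qux'}
missing = list(set(uses).difference(defines))  # types used but never defined
print(sorted(missing))  # ['Baz', 'Qux']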
apache/airflow
airflow/plugins_manager.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/plugins_manager.py#L79-L98
def load_entrypoint_plugins(entry_points, airflow_plugins): """ Load AirflowPlugin subclasses from the entrypoints provided. The entry_point group should be 'airflow.plugins'. :param entry_points: A collection of entrypoints to search for plugins :type entry_points: Generator[setuptools.EntryPoint, None, None] :param airflow_plugins: A collection of existing airflow plugins to ensure we don't load duplicates :type airflow_plugins: list[type[airflow.plugins_manager.AirflowPlugin]] :rtype: list[airflow.plugins_manager.AirflowPlugin] """ for entry_point in entry_points: log.debug('Importing entry_point plugin %s', entry_point.name) plugin_obj = entry_point.load() if is_valid_plugin(plugin_obj, airflow_plugins): if callable(getattr(plugin_obj, 'on_load', None)): plugin_obj.on_load() airflow_plugins.append(plugin_obj) return airflow_plugins
[ "def", "load_entrypoint_plugins", "(", "entry_points", ",", "airflow_plugins", ")", ":", "for", "entry_point", "in", "entry_points", ":", "log", ".", "debug", "(", "'Importing entry_point plugin %s'", ",", "entry_point", ".", "name", ")", "plugin_obj", "=", "entry_point", ".", "load", "(", ")", "if", "is_valid_plugin", "(", "plugin_obj", ",", "airflow_plugins", ")", ":", "if", "callable", "(", "getattr", "(", "plugin_obj", ",", "'on_load'", ",", "None", ")", ")", ":", "plugin_obj", ".", "on_load", "(", ")", "airflow_plugins", ".", "append", "(", "plugin_obj", ")", "return", "airflow_plugins" ]
Load AirflowPlugin subclasses from the entrypoints provided. The entry_point group should be 'airflow.plugins'. :param entry_points: A collection of entrypoints to search for plugins :type entry_points: Generator[setuptools.EntryPoint, None, None] :param airflow_plugins: A collection of existing airflow plugins to ensure we don't load duplicates :type airflow_plugins: list[type[airflow.plugins_manager.AirflowPlugin]] :rtype: list[airflow.plugins_manager.AirflowPlugin]
[ "Load", "AirflowPlugin", "subclasses", "from", "the", "entrypoints", "provided", ".", "The", "entry_point", "group", "should", "be", "airflow", ".", "plugins", "." ]
python
test
47.45
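A hedged usage sketch: feeding setuptools entry points from the 'airflow.plugins' group, the group named in the docstring, into the loader above.

import pkg_resources

plugins = []
load_entrypoint_plugins(
    pkg_resources.iter_entry_points('airflow.plugins'),
    plugins,
)
print([p.name for p in plugins])  # names of the loaded AirflowPlugin classes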
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_fabric_service.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_fabric_service.py#L905-L919
def show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_nbr_interface_type(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_fabric_trunk_info = ET.Element("show_fabric_trunk_info") config = show_fabric_trunk_info output = ET.SubElement(show_fabric_trunk_info, "output") show_trunk_list = ET.SubElement(output, "show-trunk-list") trunk_list_groups = ET.SubElement(show_trunk_list, "trunk-list-groups") trunk_list_member = ET.SubElement(trunk_list_groups, "trunk-list-member") trunk_list_nbr_interface_type = ET.SubElement(trunk_list_member, "trunk-list-nbr-interface-type") trunk_list_nbr_interface_type.text = kwargs.pop('trunk_list_nbr_interface_type') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "show_fabric_trunk_info_output_show_trunk_list_trunk_list_groups_trunk_list_member_trunk_list_nbr_interface_type", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "show_fabric_trunk_info", "=", "ET", ".", "Element", "(", "\"show_fabric_trunk_info\"", ")", "config", "=", "show_fabric_trunk_info", "output", "=", "ET", ".", "SubElement", "(", "show_fabric_trunk_info", ",", "\"output\"", ")", "show_trunk_list", "=", "ET", ".", "SubElement", "(", "output", ",", "\"show-trunk-list\"", ")", "trunk_list_groups", "=", "ET", ".", "SubElement", "(", "show_trunk_list", ",", "\"trunk-list-groups\"", ")", "trunk_list_member", "=", "ET", ".", "SubElement", "(", "trunk_list_groups", ",", "\"trunk-list-member\"", ")", "trunk_list_nbr_interface_type", "=", "ET", ".", "SubElement", "(", "trunk_list_member", ",", "\"trunk-list-nbr-interface-type\"", ")", "trunk_list_nbr_interface_type", ".", "text", "=", "kwargs", ".", "pop", "(", "'trunk_list_nbr_interface_type'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
59.266667
gem/oq-engine
openquake/baselib/parallel.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/parallel.py#L272-L291
def get_pickled_sizes(obj): """ Return the pickled sizes of an object and its direct attributes, ordered by decreasing size. Here is an example: >> total_size, partial_sizes = get_pickled_sizes(Monitor('')) >> total_size 345 >> partial_sizes [('_procs', 214), ('exc', 4), ('mem', 4), ('start_time', 4), ('_start_time', 4), ('duration', 4)] Notice that the sizes depend on the operating system and the machine. """ sizes = [] attrs = getattr(obj, '__dict__', {}) for name, value in attrs.items(): sizes.append((name, len(Pickled(value)))) return len(Pickled(obj)), sorted( sizes, key=lambda pair: pair[1], reverse=True)
[ "def", "get_pickled_sizes", "(", "obj", ")", ":", "sizes", "=", "[", "]", "attrs", "=", "getattr", "(", "obj", ",", "'__dict__'", ",", "{", "}", ")", "for", "name", ",", "value", "in", "attrs", ".", "items", "(", ")", ":", "sizes", ".", "append", "(", "(", "name", ",", "len", "(", "Pickled", "(", "value", ")", ")", ")", ")", "return", "len", "(", "Pickled", "(", "obj", ")", ")", ",", "sorted", "(", "sizes", ",", "key", "=", "lambda", "pair", ":", "pair", "[", "1", "]", ",", "reverse", "=", "True", ")" ]
Return the pickled sizes of an object and its direct attributes, ordered by decreasing size. Here is an example: >> total_size, partial_sizes = get_pickled_sizes(Monitor('')) >> total_size 345 >> partial_sizes [('_procs', 214), ('exc', 4), ('mem', 4), ('start_time', 4), ('_start_time', 4), ('duration', 4)] Notice that the sizes depend on the operating system and the machine.
[ "Return", "the", "pickled", "sizes", "of", "an", "object", "and", "its", "direct", "attributes", "ordered", "by", "decreasing", "size", ".", "Here", "is", "an", "example", ":" ]
python
train
33.9
jmurty/xml4h
xml4h/nodes.py
https://github.com/jmurty/xml4h/blob/adbb45e27a01a869a505aee7bc16bad2f517b511/xml4h/nodes.py#L857-L867
def attributes(self): """ Get or set this element's attributes as name/value pairs. .. note:: Setting element attributes via this accessor will **remove** any existing attributes, as opposed to the :meth:`set_attributes` method which only updates and replaces them. """ attr_impl_nodes = self.adapter.get_node_attributes(self.impl_node) return AttributeDict(attr_impl_nodes, self.impl_node, self.adapter)
[ "def", "attributes", "(", "self", ")", ":", "attr_impl_nodes", "=", "self", ".", "adapter", ".", "get_node_attributes", "(", "self", ".", "impl_node", ")", "return", "AttributeDict", "(", "attr_impl_nodes", ",", "self", ".", "impl_node", ",", "self", ".", "adapter", ")" ]
Get or set this element's attributes as name/value pairs. .. note:: Setting element attributes via this accessor will **remove** any existing attributes, as opposed to the :meth:`set_attributes` method which only updates and replaces them.
[ "Get", "or", "set", "this", "element", "s", "attributes", "as", "name", "/", "value", "pairs", "." ]
python
train
43.545455
jldantas/libmft
libmft/attribute.py
https://github.com/jldantas/libmft/blob/65a988605fe7663b788bd81dcb52c0a4eaad1549/libmft/attribute.py#L2190-L2212
def _from_binary_acl(cls, binary_stream): """See base class.""" ''' Revision number - 1 Padding - 1 Size - 2 ACE Count - 2 Padding - 2 ''' rev_number, size, ace_len = cls._REPR.unpack(binary_stream[:cls._REPR.size]) #content = cls._REPR.unpack(binary_stream[:cls._REPR.size]) aces = [] offset = cls._REPR.size for i in range(ace_len): ace = ACE.create_from_binary(binary_stream[offset:]) offset += len(ace) aces.append(ace) _MOD_LOGGER.debug("Next ACE offset = %d", offset) nw_obj = cls((rev_number, size, aces)) _MOD_LOGGER.debug("Attempted to unpack SID from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj) return nw_obj
[ "def", "_from_binary_acl", "(", "cls", ",", "binary_stream", ")", ":", "''' Revision number - 1\n Padding - 1\n Size - 2\n ACE Count - 2\n Padding - 2\n '''", "rev_number", ",", "size", ",", "ace_len", "=", "cls", ".", "_REPR", ".", "unpack", "(", "binary_stream", "[", ":", "cls", ".", "_REPR", ".", "size", "]", ")", "#content = cls._REPR.unpack(binary_stream[:cls._REPR.size])", "aces", "=", "[", "]", "offset", "=", "cls", ".", "_REPR", ".", "size", "for", "i", "in", "range", "(", "ace_len", ")", ":", "ace", "=", "ACE", ".", "create_from_binary", "(", "binary_stream", "[", "offset", ":", "]", ")", "offset", "+=", "len", "(", "ace", ")", "aces", ".", "append", "(", "ace", ")", "_MOD_LOGGER", ".", "debug", "(", "\"Next ACE offset = %d\"", ",", "offset", ")", "nw_obj", "=", "cls", "(", "(", "rev_number", ",", "size", ",", "aces", ")", ")", "_MOD_LOGGER", ".", "debug", "(", "\"Attempted to unpack SID from \\\"%s\\\"\\nResult: %s\"", ",", "binary_stream", ".", "tobytes", "(", ")", ",", "nw_obj", ")", "return", "nw_obj" ]
See base class.
[ "See", "base", "class", "." ]
python
train
31.173913
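A standalone guess at the _REPR layout implied by the comment above (1-byte revision, 1 pad byte, 2-byte size, 2-byte ACE count, 2 pad bytes, little-endian); the real struct lives on the class and may differ.

import struct

ACL_HEADER = struct.Struct('<BxHH2x')  # -> (revision, size, ace_count), 8 bytes

raw = bytes([2, 0, 28, 0, 1, 0, 0, 0])  # hypothetical ACL header
revision, size, ace_count = ACL_HEADER.unpack(raw)
print(revision, size, ace_count)  # 2 28 1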
hyperledger/indy-node
indy_node/server/upgrader.py
https://github.com/hyperledger/indy-node/blob/8fabd364eaf7d940a56df2911d9215b1e512a2de/indy_node/server/upgrader.py#L251-L334
def handleUpgradeTxn(self, txn) -> None: """ Handles transaction of type POOL_UPGRADE Can schedule or cancel upgrade to a newer version at specified time :param txn: """ FINALIZING_EVENT_TYPES = [UpgradeLog.Events.succeeded, UpgradeLog.Events.failed] if get_type(txn) != POOL_UPGRADE: return logger.info("Node '{}' handles upgrade txn {}".format(self.nodeName, txn)) txn_data = get_payload_data(txn) action = txn_data[ACTION] version = txn_data[VERSION] justification = txn_data.get(JUSTIFICATION) pkg_name = txn_data.get(PACKAGE, self.config.UPGRADE_ENTRY) upgrade_id = self.get_action_id(txn) # TODO test try: version = src_version_cls(pkg_name)(version) except InvalidVersionError as exc: logger.warning( "{} can't handle upgrade txn with version {} for package {}: {}" .format(self, version, pkg_name, exc) ) return if action == START: # forced txn could have partial schedule list if self.nodeId not in txn_data[SCHEDULE]: logger.info("Node '{}' disregards upgrade txn {}".format( self.nodeName, txn)) return last_event = self.lastActionEventInfo if last_event: if (last_event.data.upgrade_id == upgrade_id and last_event.ev_type in FINALIZING_EVENT_TYPES): logger.info( "Node '{}' has already performed an upgrade with upgrade_id {}. " "Last recorded event is {}" .format(self.nodeName, upgrade_id, last_event.data)) return when = txn_data[SCHEDULE][self.nodeId] failTimeout = txn_data.get(TIMEOUT, self.defaultActionTimeout) if isinstance(when, str): when = dateutil.parser.parse(when) new_ev_data = UpgradeLogData(when, version, upgrade_id, pkg_name) if self.scheduledAction: if self.scheduledAction == new_ev_data: logger.debug( "Node {} already scheduled upgrade to version '{}' " .format(self.nodeName, version)) return else: logger.info( "Node '{}' cancels previous upgrade and schedules a new one to {}" .format(self.nodeName, version)) self._cancelScheduledUpgrade(justification) logger.info("Node '{}' schedules upgrade to {}".format(self.nodeName, version)) self._scheduleUpgrade(new_ev_data, failTimeout) return if action == CANCEL: if (self.scheduledAction and self.scheduledAction.version == version): self._cancelScheduledUpgrade(justification) logger.info("Node '{}' cancels upgrade to {}".format( self.nodeName, version)) return logger.error( "Got {} transaction with unsupported action {}".format( POOL_UPGRADE, action))
[ "def", "handleUpgradeTxn", "(", "self", ",", "txn", ")", "->", "None", ":", "FINALIZING_EVENT_TYPES", "=", "[", "UpgradeLog", ".", "Events", ".", "succeeded", ",", "UpgradeLog", ".", "Events", ".", "failed", "]", "if", "get_type", "(", "txn", ")", "!=", "POOL_UPGRADE", ":", "return", "logger", ".", "info", "(", "\"Node '{}' handles upgrade txn {}\"", ".", "format", "(", "self", ".", "nodeName", ",", "txn", ")", ")", "txn_data", "=", "get_payload_data", "(", "txn", ")", "action", "=", "txn_data", "[", "ACTION", "]", "version", "=", "txn_data", "[", "VERSION", "]", "justification", "=", "txn_data", ".", "get", "(", "JUSTIFICATION", ")", "pkg_name", "=", "txn_data", ".", "get", "(", "PACKAGE", ",", "self", ".", "config", ".", "UPGRADE_ENTRY", ")", "upgrade_id", "=", "self", ".", "get_action_id", "(", "txn", ")", "# TODO test", "try", ":", "version", "=", "src_version_cls", "(", "pkg_name", ")", "(", "version", ")", "except", "InvalidVersionError", "as", "exc", ":", "logger", ".", "warning", "(", "\"{} can't handle upgrade txn with version {} for package {}: {}\"", ".", "format", "(", "self", ",", "version", ",", "pkg_name", ",", "exc", ")", ")", "return", "if", "action", "==", "START", ":", "# forced txn could have partial schedule list", "if", "self", ".", "nodeId", "not", "in", "txn_data", "[", "SCHEDULE", "]", ":", "logger", ".", "info", "(", "\"Node '{}' disregards upgrade txn {}\"", ".", "format", "(", "self", ".", "nodeName", ",", "txn", ")", ")", "return", "last_event", "=", "self", ".", "lastActionEventInfo", "if", "last_event", ":", "if", "(", "last_event", ".", "data", ".", "upgrade_id", "==", "upgrade_id", "and", "last_event", ".", "ev_type", "in", "FINALIZING_EVENT_TYPES", ")", ":", "logger", ".", "info", "(", "\"Node '{}' has already performed an upgrade with upgrade_id {}. 
\"", "\"Last recorded event is {}\"", ".", "format", "(", "self", ".", "nodeName", ",", "upgrade_id", ",", "last_event", ".", "data", ")", ")", "return", "when", "=", "txn_data", "[", "SCHEDULE", "]", "[", "self", ".", "nodeId", "]", "failTimeout", "=", "txn_data", ".", "get", "(", "TIMEOUT", ",", "self", ".", "defaultActionTimeout", ")", "if", "isinstance", "(", "when", ",", "str", ")", ":", "when", "=", "dateutil", ".", "parser", ".", "parse", "(", "when", ")", "new_ev_data", "=", "UpgradeLogData", "(", "when", ",", "version", ",", "upgrade_id", ",", "pkg_name", ")", "if", "self", ".", "scheduledAction", ":", "if", "self", ".", "scheduledAction", "==", "new_ev_data", ":", "logger", ".", "debug", "(", "\"Node {} already scheduled upgrade to version '{}' \"", ".", "format", "(", "self", ".", "nodeName", ",", "version", ")", ")", "return", "else", ":", "logger", ".", "info", "(", "\"Node '{}' cancels previous upgrade and schedules a new one to {}\"", ".", "format", "(", "self", ".", "nodeName", ",", "version", ")", ")", "self", ".", "_cancelScheduledUpgrade", "(", "justification", ")", "logger", ".", "info", "(", "\"Node '{}' schedules upgrade to {}\"", ".", "format", "(", "self", ".", "nodeName", ",", "version", ")", ")", "self", ".", "_scheduleUpgrade", "(", "new_ev_data", ",", "failTimeout", ")", "return", "if", "action", "==", "CANCEL", ":", "if", "(", "self", ".", "scheduledAction", "and", "self", ".", "scheduledAction", ".", "version", "==", "version", ")", ":", "self", ".", "_cancelScheduledUpgrade", "(", "justification", ")", "logger", ".", "info", "(", "\"Node '{}' cancels upgrade to {}\"", ".", "format", "(", "self", ".", "nodeName", ",", "version", ")", ")", "return", "logger", ".", "error", "(", "\"Got {} transaction with unsupported action {}\"", ".", "format", "(", "POOL_UPGRADE", ",", "action", ")", ")" ]
Handles transaction of type POOL_UPGRADE Can schedule or cancel upgrade to a newer version at specified time :param txn:
[ "Handles", "transaction", "of", "type", "POOL_UPGRADE", "Can", "schedule", "or", "cancel", "upgrade", "to", "a", "newer", "version", "at", "specified", "time" ]
python
train
38.452381
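For orientation, the shape of the POOL_UPGRADE payload fields the handler above reads, assuming the indy/plenum constants (ACTION, VERSION, SCHEDULE, ...) resolve to the lowercase keys shown; every value is illustrative.

txn_data = {
    'action': 'start',            # START or CANCEL
    'version': '1.1.50',
    'package': 'indy-node',       # optional, falls back to config.UPGRADE_ENTRY
    'justification': None,
    'timeout': 5,                 # optional, falls back to defaultActionTimeout
    'schedule': {                 # node id -> ISO-8601 datetime string
        'Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv': '2020-01-25T12:49:05+00:00',
    },
}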
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/mwld.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/mwld.py#L38-L59
def generate(env): """Add Builders and construction variables for lib to an Environment.""" SCons.Tool.createStaticLibBuilder(env) SCons.Tool.createSharedLibBuilder(env) SCons.Tool.createProgBuilder(env) env['AR'] = 'mwld' env['ARCOM'] = '$AR $ARFLAGS -library -o $TARGET $SOURCES' env['LIBDIRPREFIX'] = '-L' env['LIBDIRSUFFIX'] = '' env['LIBLINKPREFIX'] = '-l' env['LIBLINKSUFFIX'] = '.lib' env['LINK'] = 'mwld' env['LINKCOM'] = '$LINK $LINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS' env['SHLINK'] = '$LINK' env['SHLINKFLAGS'] = '$LINKFLAGS' env['SHLINKCOM'] = shlib_action env['SHLIBEMITTER']= shlib_emitter env['LDMODULEEMITTER']= shlib_emitter
[ "def", "generate", "(", "env", ")", ":", "SCons", ".", "Tool", ".", "createStaticLibBuilder", "(", "env", ")", "SCons", ".", "Tool", ".", "createSharedLibBuilder", "(", "env", ")", "SCons", ".", "Tool", ".", "createProgBuilder", "(", "env", ")", "env", "[", "'AR'", "]", "=", "'mwld'", "env", "[", "'ARCOM'", "]", "=", "'$AR $ARFLAGS -library -o $TARGET $SOURCES'", "env", "[", "'LIBDIRPREFIX'", "]", "=", "'-L'", "env", "[", "'LIBDIRSUFFIX'", "]", "=", "''", "env", "[", "'LIBLINKPREFIX'", "]", "=", "'-l'", "env", "[", "'LIBLINKSUFFIX'", "]", "=", "'.lib'", "env", "[", "'LINK'", "]", "=", "'mwld'", "env", "[", "'LINKCOM'", "]", "=", "'$LINK $LINKFLAGS -o $TARGET $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'", "env", "[", "'SHLINK'", "]", "=", "'$LINK'", "env", "[", "'SHLINKFLAGS'", "]", "=", "'$LINKFLAGS'", "env", "[", "'SHLINKCOM'", "]", "=", "shlib_action", "env", "[", "'SHLIBEMITTER'", "]", "=", "shlib_emitter", "env", "[", "'LDMODULEEMITTER'", "]", "=", "shlib_emitter" ]
Add Builders and construction variables for lib to an Environment.
[ "Add", "Builders", "and", "construction", "variables", "for", "lib", "to", "an", "Environment", "." ]
python
train
32.227273
saltstack/salt
salt/cloud/clouds/msazure.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/msazure.py#L2658-L2694
def show_storage_container_metadata(kwargs=None, storage_conn=None, call=None): ''' .. versionadded:: 2015.8.0 Show a storage container's metadata CLI Example: .. code-block:: bash salt-cloud -f show_storage_container_metadata my-azure name=myservice name: Name of container to show. lease_id: If specified, show_storage_container_metadata only succeeds if the container's lease is active and matches this ID. ''' if call != 'function': raise SaltCloudSystemExit( 'The show_storage_container function must be called with -f or --function.' ) if kwargs is None: kwargs = {} if 'name' not in kwargs: raise SaltCloudSystemExit('A storage container name must be specified as "name"') if not storage_conn: storage_conn = get_storage_conn(conn_kwargs=kwargs) data = storage_conn.get_container_metadata( container_name=kwargs['name'], x_ms_lease_id=kwargs.get('lease_id', None), ) return data
[ "def", "show_storage_container_metadata", "(", "kwargs", "=", "None", ",", "storage_conn", "=", "None", ",", "call", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The show_storage_container function must be called with -f or --function.'", ")", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "if", "'name'", "not", "in", "kwargs", ":", "raise", "SaltCloudSystemExit", "(", "'An storage container name must be specified as \"name\"'", ")", "if", "not", "storage_conn", ":", "storage_conn", "=", "get_storage_conn", "(", "conn_kwargs", "=", "kwargs", ")", "data", "=", "storage_conn", ".", "get_container_metadata", "(", "container_name", "=", "kwargs", "[", "'name'", "]", ",", "x_ms_lease_id", "=", "kwargs", ".", "get", "(", "'lease_id'", ",", "None", ")", ",", ")", "return", "data" ]
.. versionadded:: 2015.8.0 Show a storage container's metadata CLI Example: .. code-block:: bash salt-cloud -f show_storage_container_metadata my-azure name=myservice name: Name of container to show. lease_id: If specified, show_storage_container_metadata only succeeds if the container's lease is active and matches this ID.
[ "..", "versionadded", "::", "2015", ".", "8", ".", "0" ]
python
train
27.540541
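A hedged Python-side sketch of calling the function above outside the salt-cloud CLI; 'my-container' is a placeholder name, and a working Azure storage connection is assumed to come from the provider configuration since storage_conn is omitted.

# call='function' satisfies the guard at the top of the function; the
# storage connection is created from the cloud provider config on demand.
metadata = show_storage_container_metadata(
    kwargs={'name': 'my-container'},
    call='function',
)
print(metadata)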
materialsproject/pymatgen-db
matgendb/creator.py
https://github.com/materialsproject/pymatgen-db/blob/02e4351c2cea431407644f49193e8bf43ed39b9a/matgendb/creator.py#L692-L708
def contains_vasp_input(dir_name): """ Checks if a directory contains valid VASP input. Args: dir_name: Directory name to check. Returns: True if directory contains all four VASP input files (INCAR, POSCAR, KPOINTS and POTCAR). """ for f in ["INCAR", "POSCAR", "POTCAR", "KPOINTS"]: if not os.path.exists(os.path.join(dir_name, f)) and \ not os.path.exists(os.path.join(dir_name, f + ".orig")): return False return True
[ "def", "contains_vasp_input", "(", "dir_name", ")", ":", "for", "f", "in", "[", "\"INCAR\"", ",", "\"POSCAR\"", ",", "\"POTCAR\"", ",", "\"KPOINTS\"", "]", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "dir_name", ",", "f", ")", ")", "and", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "dir_name", ",", "f", "+", "\".orig\"", ")", ")", ":", "return", "False", "return", "True" ]
Checks if a directory contains valid VASP input. Args: dir_name: Directory name to check. Returns: True if directory contains all four VASP input files (INCAR, POSCAR, KPOINTS and POTCAR).
[ "Checks", "if", "a", "directory", "contains", "valid", "VASP", "input", "." ]
python
train
29.705882
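An illustrative caller for the contains_vasp_input record above; './vasp_run' is a placeholder directory chosen for the example.

run_dir = './vasp_run'
if contains_vasp_input(run_dir):
    # All four inputs (or their '.orig' backups) are present.
    print('Directory has INCAR, POSCAR, POTCAR and KPOINTS:', run_dir)
else:
    print('Incomplete VASP input in', run_dir)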
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_genobstacles.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_genobstacles.py#L180-L189
def update(self, deltat=1.0): '''fly a square circuit''' DNFZ.update(self, deltat) self.dist_flown += self.speed * deltat if self.dist_flown > self.circuit_width: self.desired_heading = self.heading + 90 self.dist_flown = 0 if self.getalt() < self.ground_height() or self.getalt() > self.ground_height() + 2000: self.randpos() self.randalt()
[ "def", "update", "(", "self", ",", "deltat", "=", "1.0", ")", ":", "DNFZ", ".", "update", "(", "self", ",", "deltat", ")", "self", ".", "dist_flown", "+=", "self", ".", "speed", "*", "deltat", "if", "self", ".", "dist_flown", ">", "self", ".", "circuit_width", ":", "self", ".", "desired_heading", "=", "self", ".", "heading", "+", "90", "self", ".", "dist_flown", "=", "0", "if", "self", ".", "getalt", "(", ")", "<", "self", ".", "ground_height", "(", ")", "or", "self", ".", "getalt", "(", ")", ">", "self", ".", "ground_height", "(", ")", "+", "2000", ":", "self", ".", "randpos", "(", ")", "self", ".", "randalt", "(", ")" ]
fly a square circuit
[ "fly", "a", "square", "circuit" ]
python
train
42
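A sketch of how the update method above would typically be driven; 'aircraft' stands in for an instance of the circuit-flying DNFZ subclass, whose constructor is not shown in this record.

# Step the simulated obstacle at 1 Hz for ten minutes. Each call advances
# the position; once a circuit leg's distance is flown, the desired heading
# turns 90 degrees, tracing the square circuit the docstring describes.
for _ in range(600):
    aircraft.update(deltat=1.0)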
DsixTools/python-smeftrunner
smeftrunner/classes.py
https://github.com/DsixTools/python-smeftrunner/blob/4c9130e53ad4f7bbb526657a82150ca9d57c4b37/smeftrunner/classes.py#L23-L29
def set_initial(self, C_in, scale_in, scale_high): r"""Set the initial values for parameters and Wilson coefficients at the scale `scale_in`, setting the new physics scale $\Lambda$ to `scale_high`.""" self.C_in = C_in self.scale_in = scale_in self.scale_high = scale_high
[ "def", "set_initial", "(", "self", ",", "C_in", ",", "scale_in", ",", "scale_high", ")", ":", "self", ".", "C_in", "=", "C_in", "self", ".", "scale_in", "=", "scale_in", "self", ".", "scale_high", "=", "scale_high" ]
r"""Set the initial values for parameters and Wilson coefficients at the scale `scale_in`, setting the new physics scale $\Lambda$ to `scale_high`.
[ "r", "Set", "the", "initial", "values", "for", "parameters", "and", "Wilson", "coefficients", "at", "the", "scale", "scale_in", "setting", "the", "new", "physics", "scale", "$", "\\", "Lambda$", "to", "scale_high", "." ]
python
train
44.857143
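An assumed usage sketch for set_initial; the class name SMEFT, the Wilson-coefficient key, and the numeric values are placeholders for illustration, not taken from the record.

smeft = SMEFT()  # hypothetical container class exposing set_initial
smeft.set_initial(
    C_in={'lq1_1111': 1e-8},  # placeholder Wilson coefficient value
    scale_in=1e3,             # input scale in GeV
    scale_high=1e3,           # new physics scale Lambda in GeV
)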
python-tap/tappy
tap/parser.py
https://github.com/python-tap/tappy/blob/79a749313c61ea94ee49d67ba6a1534974bc03aa/tap/parser.py#L145-L154
def _parse_plan(self, match): """Parse a matching plan line.""" expected_tests = int(match.group("expected")) directive = Directive(match.group("directive")) # Only SKIP directives are allowed in the plan. if directive.text and not directive.skip: return Unknown() return Plan(expected_tests, directive)
[ "def", "_parse_plan", "(", "self", ",", "match", ")", ":", "expected_tests", "=", "int", "(", "match", ".", "group", "(", "\"expected\"", ")", ")", "directive", "=", "Directive", "(", "match", ".", "group", "(", "\"directive\"", ")", ")", "# Only SKIP directives are allowed in the plan.", "if", "directive", ".", "text", "and", "not", "directive", ".", "skip", ":", "return", "Unknown", "(", ")", "return", "Plan", "(", "expected_tests", ",", "directive", ")" ]
Parse a matching plan line.
[ "Parse", "a", "matching", "plan", "line", "." ]
python
train
35.6
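To close, a small sketch of the plan lines the parser above distinguishes; the Parser class and its parse_line method are assumed from tappy's public tap.parser module shown in the record.

from tap.parser import Parser

parser = Parser()
print(type(parser.parse_line('1..4')).__name__)                     # Plan
print(type(parser.parse_line('1..0 # SKIP no backend')).__name__)   # Plan (SKIP is allowed)
print(type(parser.parse_line('1..4 # TODO later')).__name__)        # Unknown (non-SKIP directive)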