Dataset columns (name, type, observed value range):
    repo              string, length 7 to 55
    path              string, length 4 to 223
    url               string, length 87 to 315
    code              string, length 75 to 104k
    code_tokens       list
    docstring         string, length 1 to 46.9k
    docstring_tokens  list
    language          string, 1 distinct value
    partition         string, 3 distinct values
    avg_line_len      float64, 7.91 to 980
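Records with this schema can be loaded and sliced programmatically before looking at individual rows. The sketch below is a minimal, hedged example: the file name python_train.jsonl and the avg_line_len cutoff of 40 are placeholder assumptions rather than part of the dataset, and it relies only on the standard Hugging Face datasets JSON loader.

# Minimal sketch: load records matching the schema above from a local JSON Lines
# dump and filter them by partition and average line length.
# "python_train.jsonl" and the cutoff of 40 are assumptions for illustration.
from datasets import load_dataset

ds = load_dataset("json", data_files="python_train.jsonl", split="train")

# Keep training-partition records whose average line length is under the cutoff.
subset = ds.filter(lambda r: r["partition"] == "train" and r["avg_line_len"] < 40)

# Show a few (repo, path) pairs and the first docstring line of each record.
for record in subset.select(range(min(3, len(subset)))):
    first_doc_line = record["docstring"].splitlines()[0] if record["docstring"] else ""
    print(record["repo"], record["path"], "-", first_doc_line)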
bitesofcode/projexui
projexui/widgets/xchart/xchart.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xchart/xchart.py#L217-L225
def addDataset(self, dataset): """ Adds the given data set to this chart widget. :param dataSet | <XChartDataset> """ self._datasets.append(dataset) self._dataChanged = True self._addDatasetAction(dataset)
[ "def", "addDataset", "(", "self", ",", "dataset", ")", ":", "self", ".", "_datasets", ".", "append", "(", "dataset", ")", "self", ".", "_dataChanged", "=", "True", "self", ".", "_addDatasetAction", "(", "dataset", ")" ]
Adds the given data set to this chart widget. :param dataSet | <XChartDataset>
[ "Adds", "the", "given", "data", "set", "to", "this", "chart", "widget", ".", ":", "param", "dataSet", "|", "<XChartDataset", ">" ]
python
train
30.555556
log2timeline/plaso
plaso/parsers/winreg_plugins/appcompatcache.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/parsers/winreg_plugins/appcompatcache.py#L170-L222
def _GetCachedEntryDataTypeMap( self, format_type, value_data, cached_entry_offset): """Determines the cached entry data type map. Args: format_type (int): format type. value_data (bytes): value data. cached_entry_offset (int): offset of the first cached entry data relative to the start of the value data. Returns: dtfabric.DataTypeMap: data type map which contains a data type definition, such as a structure, that can be mapped onto binary data or None if the data type map is not defined. Raises: ParseError: if the cached entry data type map cannot be determined. """ if format_type not in self._SUPPORTED_FORMAT_TYPES: raise errors.ParseError('Unsupported format type: {0:d}'.format( format_type)) data_type_map_name = '' if format_type == self._FORMAT_TYPE_XP: data_type_map_name = 'appcompatcache_cached_entry_xp_32bit' elif format_type in (self._FORMAT_TYPE_8, self._FORMAT_TYPE_10): data_type_map_name = 'appcompatcache_cached_entry_header_8' else: cached_entry = self._ParseCommon2003CachedEntry( value_data, cached_entry_offset) # Assume the entry is 64-bit if the 32-bit path offset is 0 and # the 64-bit path offset is set. if (cached_entry.path_offset_32bit == 0 and cached_entry.path_offset_64bit != 0): number_of_bits = '64' else: number_of_bits = '32' if format_type == self._FORMAT_TYPE_2003: data_type_map_name = ( 'appcompatcache_cached_entry_2003_{0:s}bit'.format(number_of_bits)) elif format_type == self._FORMAT_TYPE_VISTA: data_type_map_name = ( 'appcompatcache_cached_entry_vista_{0:s}bit'.format(number_of_bits)) elif format_type == self._FORMAT_TYPE_7: data_type_map_name = ( 'appcompatcache_cached_entry_7_{0:s}bit'.format(number_of_bits)) return self._GetDataTypeMap(data_type_map_name)
[ "def", "_GetCachedEntryDataTypeMap", "(", "self", ",", "format_type", ",", "value_data", ",", "cached_entry_offset", ")", ":", "if", "format_type", "not", "in", "self", ".", "_SUPPORTED_FORMAT_TYPES", ":", "raise", "errors", ".", "ParseError", "(", "'Unsupported format type: {0:d}'", ".", "format", "(", "format_type", ")", ")", "data_type_map_name", "=", "''", "if", "format_type", "==", "self", ".", "_FORMAT_TYPE_XP", ":", "data_type_map_name", "=", "'appcompatcache_cached_entry_xp_32bit'", "elif", "format_type", "in", "(", "self", ".", "_FORMAT_TYPE_8", ",", "self", ".", "_FORMAT_TYPE_10", ")", ":", "data_type_map_name", "=", "'appcompatcache_cached_entry_header_8'", "else", ":", "cached_entry", "=", "self", ".", "_ParseCommon2003CachedEntry", "(", "value_data", ",", "cached_entry_offset", ")", "# Assume the entry is 64-bit if the 32-bit path offset is 0 and", "# the 64-bit path offset is set.", "if", "(", "cached_entry", ".", "path_offset_32bit", "==", "0", "and", "cached_entry", ".", "path_offset_64bit", "!=", "0", ")", ":", "number_of_bits", "=", "'64'", "else", ":", "number_of_bits", "=", "'32'", "if", "format_type", "==", "self", ".", "_FORMAT_TYPE_2003", ":", "data_type_map_name", "=", "(", "'appcompatcache_cached_entry_2003_{0:s}bit'", ".", "format", "(", "number_of_bits", ")", ")", "elif", "format_type", "==", "self", ".", "_FORMAT_TYPE_VISTA", ":", "data_type_map_name", "=", "(", "'appcompatcache_cached_entry_vista_{0:s}bit'", ".", "format", "(", "number_of_bits", ")", ")", "elif", "format_type", "==", "self", ".", "_FORMAT_TYPE_7", ":", "data_type_map_name", "=", "(", "'appcompatcache_cached_entry_7_{0:s}bit'", ".", "format", "(", "number_of_bits", ")", ")", "return", "self", ".", "_GetDataTypeMap", "(", "data_type_map_name", ")" ]
Determines the cached entry data type map. Args: format_type (int): format type. value_data (bytes): value data. cached_entry_offset (int): offset of the first cached entry data relative to the start of the value data. Returns: dtfabric.DataTypeMap: data type map which contains a data type definition, such as a structure, that can be mapped onto binary data or None if the data type map is not defined. Raises: ParseError: if the cached entry data type map cannot be determined.
[ "Determines", "the", "cached", "entry", "data", "type", "map", "." ]
python
train
36.90566
gem/oq-engine
openquake/commonlib/oqvalidation.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/commonlib/oqvalidation.py#L316-L344
def check_gsims(self, gsims): """ :param gsims: a sequence of GSIM instances """ imts = set(from_string(imt).name for imt in self.imtls) for gsim in gsims: restrict_imts = gsim.DEFINED_FOR_INTENSITY_MEASURE_TYPES if restrict_imts: names = set(cls.__name__ for cls in restrict_imts) invalid_imts = ', '.join(imts - names) if invalid_imts: raise ValueError( 'The IMT %s is not accepted by the GSIM %s' % (invalid_imts, gsim)) if 'site_model' not in self.inputs: # look at the required sites parameters: they must have # a valid value; the other parameters can keep a NaN # value since they are not used by the calculator for param in gsim.REQUIRES_SITES_PARAMETERS: if param in ('lon', 'lat'): # no check continue param_name = self.siteparam[param] param_value = getattr(self, param_name) if (isinstance(param_value, float) and numpy.isnan(param_value)): raise ValueError( 'Please set a value for %r, this is required by ' 'the GSIM %s' % (param_name, gsim))
[ "def", "check_gsims", "(", "self", ",", "gsims", ")", ":", "imts", "=", "set", "(", "from_string", "(", "imt", ")", ".", "name", "for", "imt", "in", "self", ".", "imtls", ")", "for", "gsim", "in", "gsims", ":", "restrict_imts", "=", "gsim", ".", "DEFINED_FOR_INTENSITY_MEASURE_TYPES", "if", "restrict_imts", ":", "names", "=", "set", "(", "cls", ".", "__name__", "for", "cls", "in", "restrict_imts", ")", "invalid_imts", "=", "', '", ".", "join", "(", "imts", "-", "names", ")", "if", "invalid_imts", ":", "raise", "ValueError", "(", "'The IMT %s is not accepted by the GSIM %s'", "%", "(", "invalid_imts", ",", "gsim", ")", ")", "if", "'site_model'", "not", "in", "self", ".", "inputs", ":", "# look at the required sites parameters: they must have", "# a valid value; the other parameters can keep a NaN", "# value since they are not used by the calculator", "for", "param", "in", "gsim", ".", "REQUIRES_SITES_PARAMETERS", ":", "if", "param", "in", "(", "'lon'", ",", "'lat'", ")", ":", "# no check", "continue", "param_name", "=", "self", ".", "siteparam", "[", "param", "]", "param_value", "=", "getattr", "(", "self", ",", "param_name", ")", "if", "(", "isinstance", "(", "param_value", ",", "float", ")", "and", "numpy", ".", "isnan", "(", "param_value", ")", ")", ":", "raise", "ValueError", "(", "'Please set a value for %r, this is required by '", "'the GSIM %s'", "%", "(", "param_name", ",", "gsim", ")", ")" ]
:param gsims: a sequence of GSIM instances
[ ":", "param", "gsims", ":", "a", "sequence", "of", "GSIM", "instances" ]
python
train
48.206897
graphql-python/graphql-core
graphql/validation/rules/overlapping_fields_can_be_merged.py
https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/validation/rules/overlapping_fields_can_be_merged.py#L504-L583
def _find_conflict( context, # type: ValidationContext cached_fields_and_fragment_names, # type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]] compared_fragments, # type: PairSet parent_fields_are_mutually_exclusive, # type: bool response_name, # type: str field1, # type: Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField] field2, # type: Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField] ): # type: (...) -> Optional[Tuple[Tuple[str, str], List[Node], List[Node]]] """Determines if there is a conflict between two particular fields.""" parent_type1, ast1, def1 = field1 parent_type2, ast2, def2 = field2 # If it is known that two fields could not possibly apply at the same # time, due to the parent types, then it is safe to permit them to diverge # in aliased field or arguments used as they will not present any ambiguity # by differing. # It is known that two parent types could never overlap if they are # different Object types. Interface or Union types might overlap - if not # in the current state of the schema, then perhaps in some future version, # thus may not safely diverge. are_mutually_exclusive = parent_fields_are_mutually_exclusive or ( parent_type1 != parent_type2 and isinstance(parent_type1, GraphQLObjectType) and isinstance(parent_type2, GraphQLObjectType) ) # The return type for each field. type1 = def1 and def1.type type2 = def2 and def2.type if not are_mutually_exclusive: # Two aliases must refer to the same field. name1 = ast1.name.value name2 = ast2.name.value if name1 != name2: return ( (response_name, "{} and {} are different fields".format(name1, name2)), [ast1], [ast2], ) # Two field calls must have the same arguments. if not _same_arguments(ast1.arguments, ast2.arguments): return ((response_name, "they have differing arguments"), [ast1], [ast2]) if type1 and type2 and do_types_conflict(type1, type2): return ( ( response_name, "they return conflicting types {} and {}".format(type1, type2), ), [ast1], [ast2], ) # Collect and compare sub-fields. Use the same "visited fragment names" list # for both collections so fields in a fragment reference are never # compared to themselves. selection_set1 = ast1.selection_set selection_set2 = ast2.selection_set if selection_set1 and selection_set2: conflicts = _find_conflicts_between_sub_selection_sets( # type: ignore context, cached_fields_and_fragment_names, compared_fragments, are_mutually_exclusive, get_named_type(type1), # type: ignore selection_set1, get_named_type(type2), # type: ignore selection_set2, ) return _subfield_conflicts(conflicts, response_name, ast1, ast2) return None
[ "def", "_find_conflict", "(", "context", ",", "# type: ValidationContext", "cached_fields_and_fragment_names", ",", "# type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]", "compared_fragments", ",", "# type: PairSet", "parent_fields_are_mutually_exclusive", ",", "# type: bool", "response_name", ",", "# type: str", "field1", ",", "# type: Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]", "field2", ",", "# type: Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]", ")", ":", "# type: (...) -> Optional[Tuple[Tuple[str, str], List[Node], List[Node]]]", "parent_type1", ",", "ast1", ",", "def1", "=", "field1", "parent_type2", ",", "ast2", ",", "def2", "=", "field2", "# If it is known that two fields could not possibly apply at the same", "# time, due to the parent types, then it is safe to permit them to diverge", "# in aliased field or arguments used as they will not present any ambiguity", "# by differing.", "# It is known that two parent types could never overlap if they are", "# different Object types. Interface or Union types might overlap - if not", "# in the current state of the schema, then perhaps in some future version,", "# thus may not safely diverge.", "are_mutually_exclusive", "=", "parent_fields_are_mutually_exclusive", "or", "(", "parent_type1", "!=", "parent_type2", "and", "isinstance", "(", "parent_type1", ",", "GraphQLObjectType", ")", "and", "isinstance", "(", "parent_type2", ",", "GraphQLObjectType", ")", ")", "# The return type for each field.", "type1", "=", "def1", "and", "def1", ".", "type", "type2", "=", "def2", "and", "def2", ".", "type", "if", "not", "are_mutually_exclusive", ":", "# Two aliases must refer to the same field.", "name1", "=", "ast1", ".", "name", ".", "value", "name2", "=", "ast2", ".", "name", ".", "value", "if", "name1", "!=", "name2", ":", "return", "(", "(", "response_name", ",", "\"{} and {} are different fields\"", ".", "format", "(", "name1", ",", "name2", ")", ")", ",", "[", "ast1", "]", ",", "[", "ast2", "]", ",", ")", "# Two field calls must have the same arguments.", "if", "not", "_same_arguments", "(", "ast1", ".", "arguments", ",", "ast2", ".", "arguments", ")", ":", "return", "(", "(", "response_name", ",", "\"they have differing arguments\"", ")", ",", "[", "ast1", "]", ",", "[", "ast2", "]", ")", "if", "type1", "and", "type2", "and", "do_types_conflict", "(", "type1", ",", "type2", ")", ":", "return", "(", "(", "response_name", ",", "\"they return conflicting types {} and {}\"", ".", "format", "(", "type1", ",", "type2", ")", ",", ")", ",", "[", "ast1", "]", ",", "[", "ast2", "]", ",", ")", "# Collect and compare sub-fields. 
Use the same \"visited fragment names\" list", "# for both collections so fields in a fragment reference are never", "# compared to themselves.", "selection_set1", "=", "ast1", ".", "selection_set", "selection_set2", "=", "ast2", ".", "selection_set", "if", "selection_set1", "and", "selection_set2", ":", "conflicts", "=", "_find_conflicts_between_sub_selection_sets", "(", "# type: ignore", "context", ",", "cached_fields_and_fragment_names", ",", "compared_fragments", ",", "are_mutually_exclusive", ",", "get_named_type", "(", "type1", ")", ",", "# type: ignore", "selection_set1", ",", "get_named_type", "(", "type2", ")", ",", "# type: ignore", "selection_set2", ",", ")", "return", "_subfield_conflicts", "(", "conflicts", ",", "response_name", ",", "ast1", ",", "ast2", ")", "return", "None" ]
Determines if there is a conflict between two particular fields.
[ "Determines", "if", "there", "is", "a", "conflict", "between", "two", "particular", "fields", "." ]
python
train
39.7625
keon/algorithms
algorithms/stack/longest_abs_path.py
https://github.com/keon/algorithms/blob/4d6569464a62a75c1357acc97e2dd32ee2f9f4a3/algorithms/stack/longest_abs_path.py#L36-L58
def length_longest_path(input): """ :type input: str :rtype: int """ curr_len, max_len = 0, 0 # running length and max length stack = [] # keep track of the name length for s in input.split('\n'): print("---------") print("<path>:", s) depth = s.count('\t') # the depth of current dir or file print("depth: ", depth) print("stack: ", stack) print("curlen: ", curr_len) while len(stack) > depth: # go back to the correct depth curr_len -= stack.pop() stack.append(len(s.strip('\t'))+1) # 1 is the length of '/' curr_len += stack[-1] # increase current length print("stack: ", stack) print("curlen: ", curr_len) if '.' in s: # update maxlen only when it is a file max_len = max(max_len, curr_len-1) # -1 is to minus one '/' return max_len
[ "def", "length_longest_path", "(", "input", ")", ":", "curr_len", ",", "max_len", "=", "0", ",", "0", "# running length and max length", "stack", "=", "[", "]", "# keep track of the name length", "for", "s", "in", "input", ".", "split", "(", "'\\n'", ")", ":", "print", "(", "\"---------\"", ")", "print", "(", "\"<path>:\"", ",", "s", ")", "depth", "=", "s", ".", "count", "(", "'\\t'", ")", "# the depth of current dir or file", "print", "(", "\"depth: \"", ",", "depth", ")", "print", "(", "\"stack: \"", ",", "stack", ")", "print", "(", "\"curlen: \"", ",", "curr_len", ")", "while", "len", "(", "stack", ")", ">", "depth", ":", "# go back to the correct depth", "curr_len", "-=", "stack", ".", "pop", "(", ")", "stack", ".", "append", "(", "len", "(", "s", ".", "strip", "(", "'\\t'", ")", ")", "+", "1", ")", "# 1 is the length of '/'", "curr_len", "+=", "stack", "[", "-", "1", "]", "# increase current length", "print", "(", "\"stack: \"", ",", "stack", ")", "print", "(", "\"curlen: \"", ",", "curr_len", ")", "if", "'.'", "in", "s", ":", "# update maxlen only when it is a file", "max_len", "=", "max", "(", "max_len", ",", "curr_len", "-", "1", ")", "# -1 is to minus one '/'", "return", "max_len" ]
:type input: str :rtype: int
[ ":", "type", "input", ":", "str", ":", "rtype", ":", "int" ]
python
train
38.652174
lorien/grab
grab/proxylist.py
https://github.com/lorien/grab/blob/8b301db2a08c830245b61c589e58af6234f4db79/grab/proxylist.py#L156-L159
def load(self): """Load proxy list from configured proxy source""" self._list = self._source.load() self._list_iter = itertools.cycle(self._list)
[ "def", "load", "(", "self", ")", ":", "self", ".", "_list", "=", "self", ".", "_source", ".", "load", "(", ")", "self", ".", "_list_iter", "=", "itertools", ".", "cycle", "(", "self", ".", "_list", ")" ]
Load proxy list from configured proxy source
[ "Load", "proxy", "list", "from", "configured", "proxy", "source" ]
python
train
41.5
cloudera/cm_api
python/src/cm_api/endpoints/services.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/services.py#L1603-L1615
def trigger_replication_schedule(self, schedule_id, dry_run=False): """ Trigger replication immediately. Start and end dates on the schedule will be ignored. @param schedule_id: The id of the schedule to trigger. @param dry_run: Whether to execute a dry run. @return: The command corresponding to the replication job. @since: API v3 """ return self._post("replications/%s/run" % schedule_id, ApiCommand, params=dict(dryRun=dry_run), api_version=3)
[ "def", "trigger_replication_schedule", "(", "self", ",", "schedule_id", ",", "dry_run", "=", "False", ")", ":", "return", "self", ".", "_post", "(", "\"replications/%s/run\"", "%", "schedule_id", ",", "ApiCommand", ",", "params", "=", "dict", "(", "dryRun", "=", "dry_run", ")", ",", "api_version", "=", "3", ")" ]
Trigger replication immediately. Start and end dates on the schedule will be ignored. @param schedule_id: The id of the schedule to trigger. @param dry_run: Whether to execute a dry run. @return: The command corresponding to the replication job. @since: API v3
[ "Trigger", "replication", "immediately", ".", "Start", "and", "end", "dates", "on", "the", "schedule", "will", "be", "ignored", "." ]
python
train
37.538462
NetEaseGame/ATX
atx/patch.py
https://github.com/NetEaseGame/ATX/blob/f4415c57b45cb0730e08899cbc92a2af1c047ffb/atx/patch.py#L36-L58
def attachmethod(target): ''' Reference: https://blog.tonyseek.com/post/open-class-in-python/ class Spam(object): pass @attach_method(Spam) def egg1(self, name): print((self, name)) spam1 = Spam() # OpenClass 加入的方法 egg1 可用 spam1.egg1("Test1") # 输出Test1 ''' if isinstance(target, type): def decorator(func): setattr(target, func.__name__, func) else: def decorator(func): setattr(target, func.__name__, partial(func, target)) return decorator
[ "def", "attachmethod", "(", "target", ")", ":", "if", "isinstance", "(", "target", ",", "type", ")", ":", "def", "decorator", "(", "func", ")", ":", "setattr", "(", "target", ",", "func", ".", "__name__", ",", "func", ")", "else", ":", "def", "decorator", "(", "func", ")", ":", "setattr", "(", "target", ",", "func", ".", "__name__", ",", "partial", "(", "func", ",", "target", ")", ")", "return", "decorator" ]
Reference: https://blog.tonyseek.com/post/open-class-in-python/ class Spam(object): pass @attach_method(Spam) def egg1(self, name): print((self, name)) spam1 = Spam() # OpenClass 加入的方法 egg1 可用 spam1.egg1("Test1") # 输出Test1
[ "Reference", ":", "https", ":", "//", "blog", ".", "tonyseek", ".", "com", "/", "post", "/", "open", "-", "class", "-", "in", "-", "python", "/" ]
python
train
23.043478
Yelp/kafka-utils
kafka_utils/kafka_rolling_restart/main.py
https://github.com/Yelp/kafka-utils/blob/cdb4d64308f3079ee0873250bf7b34d0d94eca50/kafka_utils/kafka_rolling_restart/main.py#L360-L440
def execute_rolling_restart( brokers, jolokia_port, jolokia_prefix, check_interval, check_count, unhealthy_time_limit, skip, verbose, pre_stop_task, post_stop_task, start_command, stop_command, ssh_password=None ): """Execute the rolling restart on the specified brokers. It checks the number of under replicated partitions on each broker, using Jolokia. The check is performed at constant intervals, and a broker will be restarted when all the brokers are answering and are reporting zero under replicated partitions. :param brokers: the brokers that will be restarted :type brokers: map of broker ids and host names :param jolokia_port: HTTP port for Jolokia :type jolokia_port: integer :param jolokia_prefix: HTTP prefix on the server for the Jolokia queries :type jolokia_prefix: string :param check_interval: the number of seconds it will wait between each check :type check_interval: integer :param check_count: the number of times the check should be positive before restarting the next broker :type check_count: integer :param unhealthy_time_limit: the maximum number of seconds it will wait for the cluster to become stable before exiting with error :type unhealthy_time_limit: integer :param skip: the number of brokers to skip :type skip: integer :param verbose: print commend execution information :type verbose: bool :param pre_stop_task: a list of tasks to execute before running stop :type pre_stop_task: list :param post_stop_task: a list of task to execute after running stop :type post_stop_task: list :param start_command: the start command for kafka :type start_command: string :param stop_command: the stop command for kafka :type stop_command: string :param ssh_password: The ssh password to use if needed :type ssh_password: string """ all_hosts = [b[1] for b in brokers] for n, host in enumerate(all_hosts[skip:]): with ssh(host=host, forward_agent=True, sudoable=True, max_attempts=3, max_timeout=2, ssh_password=ssh_password) as connection: execute_task(pre_stop_task, host) wait_for_stable_cluster( all_hosts, jolokia_port, jolokia_prefix, check_interval, 1 if n == 0 else check_count, unhealthy_time_limit, ) print("Stopping {0} ({1}/{2})".format(host, n + 1, len(all_hosts) - skip)) stop_broker(host, connection, stop_command, verbose) execute_task(post_stop_task, host) # we open a new SSH connection in case the hostname has a new IP with ssh(host=host, forward_agent=True, sudoable=True, max_attempts=3, max_timeout=2, ssh_password=ssh_password) as connection: print("Starting {0} ({1}/{2})".format(host, n + 1, len(all_hosts) - skip)) start_broker(host, connection, start_command, verbose) # Wait before terminating the script wait_for_stable_cluster( all_hosts, jolokia_port, jolokia_prefix, check_interval, check_count, unhealthy_time_limit, )
[ "def", "execute_rolling_restart", "(", "brokers", ",", "jolokia_port", ",", "jolokia_prefix", ",", "check_interval", ",", "check_count", ",", "unhealthy_time_limit", ",", "skip", ",", "verbose", ",", "pre_stop_task", ",", "post_stop_task", ",", "start_command", ",", "stop_command", ",", "ssh_password", "=", "None", ")", ":", "all_hosts", "=", "[", "b", "[", "1", "]", "for", "b", "in", "brokers", "]", "for", "n", ",", "host", "in", "enumerate", "(", "all_hosts", "[", "skip", ":", "]", ")", ":", "with", "ssh", "(", "host", "=", "host", ",", "forward_agent", "=", "True", ",", "sudoable", "=", "True", ",", "max_attempts", "=", "3", ",", "max_timeout", "=", "2", ",", "ssh_password", "=", "ssh_password", ")", "as", "connection", ":", "execute_task", "(", "pre_stop_task", ",", "host", ")", "wait_for_stable_cluster", "(", "all_hosts", ",", "jolokia_port", ",", "jolokia_prefix", ",", "check_interval", ",", "1", "if", "n", "==", "0", "else", "check_count", ",", "unhealthy_time_limit", ",", ")", "print", "(", "\"Stopping {0} ({1}/{2})\"", ".", "format", "(", "host", ",", "n", "+", "1", ",", "len", "(", "all_hosts", ")", "-", "skip", ")", ")", "stop_broker", "(", "host", ",", "connection", ",", "stop_command", ",", "verbose", ")", "execute_task", "(", "post_stop_task", ",", "host", ")", "# we open a new SSH connection in case the hostname has a new IP", "with", "ssh", "(", "host", "=", "host", ",", "forward_agent", "=", "True", ",", "sudoable", "=", "True", ",", "max_attempts", "=", "3", ",", "max_timeout", "=", "2", ",", "ssh_password", "=", "ssh_password", ")", "as", "connection", ":", "print", "(", "\"Starting {0} ({1}/{2})\"", ".", "format", "(", "host", ",", "n", "+", "1", ",", "len", "(", "all_hosts", ")", "-", "skip", ")", ")", "start_broker", "(", "host", ",", "connection", ",", "start_command", ",", "verbose", ")", "# Wait before terminating the script", "wait_for_stable_cluster", "(", "all_hosts", ",", "jolokia_port", ",", "jolokia_prefix", ",", "check_interval", ",", "check_count", ",", "unhealthy_time_limit", ",", ")" ]
Execute the rolling restart on the specified brokers. It checks the number of under replicated partitions on each broker, using Jolokia. The check is performed at constant intervals, and a broker will be restarted when all the brokers are answering and are reporting zero under replicated partitions. :param brokers: the brokers that will be restarted :type brokers: map of broker ids and host names :param jolokia_port: HTTP port for Jolokia :type jolokia_port: integer :param jolokia_prefix: HTTP prefix on the server for the Jolokia queries :type jolokia_prefix: string :param check_interval: the number of seconds it will wait between each check :type check_interval: integer :param check_count: the number of times the check should be positive before restarting the next broker :type check_count: integer :param unhealthy_time_limit: the maximum number of seconds it will wait for the cluster to become stable before exiting with error :type unhealthy_time_limit: integer :param skip: the number of brokers to skip :type skip: integer :param verbose: print commend execution information :type verbose: bool :param pre_stop_task: a list of tasks to execute before running stop :type pre_stop_task: list :param post_stop_task: a list of task to execute after running stop :type post_stop_task: list :param start_command: the start command for kafka :type start_command: string :param stop_command: the stop command for kafka :type stop_command: string :param ssh_password: The ssh password to use if needed :type ssh_password: string
[ "Execute", "the", "rolling", "restart", "on", "the", "specified", "brokers", ".", "It", "checks", "the", "number", "of", "under", "replicated", "partitions", "on", "each", "broker", "using", "Jolokia", "." ]
python
train
39.580247
StackStorm/pybind
pybind/nos/v6_0_2f/rbridge_id/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/rbridge_id/__init__.py#L1176-L1199
def _set_filter_change_update_delay(self, v, load=False): """ Setter method for filter_change_update_delay, mapped from YANG variable /rbridge_id/filter_change_update_delay (list) If this variable is read-only (config: false) in the source YANG file, then _set_filter_change_update_delay is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_filter_change_update_delay() directly. YANG Description: Change filter change update delay timer """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("filter_delay_value",filter_change_update_delay.filter_change_update_delay, yang_name="filter-change-update-delay", rest_name="filter-change-update-delay", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='filter-delay-value', extensions={u'tailf-common': {u'info': u'Change filter change update delay timer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'57', u'cli-suppress-key-abbreviation': None, u'callpoint': u'filterChangeUpdateDelay'}}), is_container='list', yang_name="filter-change-update-delay", rest_name="filter-change-update-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Change filter change update delay timer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'57', u'cli-suppress-key-abbreviation': None, u'callpoint': u'filterChangeUpdateDelay'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """filter_change_update_delay must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("filter_delay_value",filter_change_update_delay.filter_change_update_delay, yang_name="filter-change-update-delay", rest_name="filter-change-update-delay", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='filter-delay-value', extensions={u'tailf-common': {u'info': u'Change filter change update delay timer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'57', u'cli-suppress-key-abbreviation': None, u'callpoint': u'filterChangeUpdateDelay'}}), is_container='list', yang_name="filter-change-update-delay", rest_name="filter-change-update-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Change filter change update delay timer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'57', u'cli-suppress-key-abbreviation': None, u'callpoint': u'filterChangeUpdateDelay'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)""", }) self.__filter_change_update_delay = t if hasattr(self, '_set'): self._set()
[ "def", "_set_filter_change_update_delay", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", "=", "YANGListType", "(", "\"filter_delay_value\"", ",", "filter_change_update_delay", ".", "filter_change_update_delay", ",", "yang_name", "=", "\"filter-change-update-delay\"", ",", "rest_name", "=", "\"filter-change-update-delay\"", ",", "parent", "=", "self", ",", "is_container", "=", "'list'", ",", "user_ordered", "=", "False", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "yang_keys", "=", "'filter-delay-value'", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Change filter change update delay timer'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'sort-priority'", ":", "u'57'", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'callpoint'", ":", "u'filterChangeUpdateDelay'", "}", "}", ")", ",", "is_container", "=", "'list'", ",", "yang_name", "=", "\"filter-change-update-delay\"", ",", "rest_name", "=", "\"filter-change-update-delay\"", ",", "parent", "=", "self", ",", "path_helper", "=", "self", ".", "_path_helper", ",", "extmethods", "=", "self", ".", "_extmethods", ",", "register_paths", "=", "True", ",", "extensions", "=", "{", "u'tailf-common'", ":", "{", "u'info'", ":", "u'Change filter change update delay timer'", ",", "u'cli-no-key-completion'", ":", "None", ",", "u'cli-suppress-mode'", ":", "None", ",", "u'sort-priority'", ":", "u'57'", ",", "u'cli-suppress-key-abbreviation'", ":", "None", ",", "u'callpoint'", ":", "u'filterChangeUpdateDelay'", "}", "}", ",", "namespace", "=", "'urn:brocade.com:mgmt:brocade-ip-policy'", ",", "defining_module", "=", "'brocade-ip-policy'", ",", "yang_type", "=", "'list'", ",", "is_config", "=", "True", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "{", "'error-string'", ":", "\"\"\"filter_change_update_delay must be of a type compatible with list\"\"\"", ",", "'defined-type'", ":", "\"list\"", ",", "'generated-type'", ":", "\"\"\"YANGDynClass(base=YANGListType(\"filter_delay_value\",filter_change_update_delay.filter_change_update_delay, yang_name=\"filter-change-update-delay\", rest_name=\"filter-change-update-delay\", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='filter-delay-value', extensions={u'tailf-common': {u'info': u'Change filter change update delay timer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'57', u'cli-suppress-key-abbreviation': None, u'callpoint': u'filterChangeUpdateDelay'}}), is_container='list', yang_name=\"filter-change-update-delay\", rest_name=\"filter-change-update-delay\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Change filter change update delay timer', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'sort-priority': u'57', u'cli-suppress-key-abbreviation': None, u'callpoint': u'filterChangeUpdateDelay'}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)\"\"\"", ",", "}", ")", "self", ".", "__filter_change_update_delay", "=", "t", "if", "hasattr", "(", "self", ",", "'_set'", ")", ":", "self", ".", "_set", "(", ")" ]
Setter method for filter_change_update_delay, mapped from YANG variable /rbridge_id/filter_change_update_delay (list) If this variable is read-only (config: false) in the source YANG file, then _set_filter_change_update_delay is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_filter_change_update_delay() directly. YANG Description: Change filter change update delay timer
[ "Setter", "method", "for", "filter_change_update_delay", "mapped", "from", "YANG", "variable", "/", "rbridge_id", "/", "filter_change_update_delay", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", "then", "_set_filter_change_update_delay", "is", "considered", "as", "a", "private", "method", ".", "Backends", "looking", "to", "populate", "this", "variable", "should", "do", "so", "via", "calling", "thisObj", ".", "_set_filter_change_update_delay", "()", "directly", "." ]
python
train
132.125
worldcompany/djangoembed
oembed/providers.py
https://github.com/worldcompany/djangoembed/blob/f3f2be283441d91d1f89db780444dc75f7b51902/oembed/providers.py#L342-L376
def get_params(self, url): """ Extract the named parameters from a url regex. If the url regex does not contain named parameters, they will be keyed _0, _1, ... * Named parameters Regex: /photos/^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<object_id>\d+)/ URL: http://www2.ljworld.com/photos/2009/oct/11/12345/ Return Value: {u'day': '11', u'month': 'oct', u'object_id': '12345', u'year': '2009'} * Unnamed parameters Regex: /blah/([\w-]+)/(\d+)/ URL: http://www.example.com/blah/hello/123/ Return Value: {u'_0': 'hello', u'_1': '123'} """ match = re.match(self.regex, url) if match is not None: params = match.groupdict() if not params: params = {} for i, group in enumerate(match.groups()[1:]): params['_%s' % i] = group return params raise OEmbedException('No regex matched the url %s' % (url))
[ "def", "get_params", "(", "self", ",", "url", ")", ":", "match", "=", "re", ".", "match", "(", "self", ".", "regex", ",", "url", ")", "if", "match", "is", "not", "None", ":", "params", "=", "match", ".", "groupdict", "(", ")", "if", "not", "params", ":", "params", "=", "{", "}", "for", "i", ",", "group", "in", "enumerate", "(", "match", ".", "groups", "(", ")", "[", "1", ":", "]", ")", ":", "params", "[", "'_%s'", "%", "i", "]", "=", "group", "return", "params", "raise", "OEmbedException", "(", "'No regex matched the url %s'", "%", "(", "url", ")", ")" ]
Extract the named parameters from a url regex. If the url regex does not contain named parameters, they will be keyed _0, _1, ... * Named parameters Regex: /photos/^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<object_id>\d+)/ URL: http://www2.ljworld.com/photos/2009/oct/11/12345/ Return Value: {u'day': '11', u'month': 'oct', u'object_id': '12345', u'year': '2009'} * Unnamed parameters Regex: /blah/([\w-]+)/(\d+)/ URL: http://www.example.com/blah/hello/123/ Return Value: {u'_0': 'hello', u'_1': '123'}
[ "Extract", "the", "named", "parameters", "from", "a", "url", "regex", ".", "If", "the", "url", "regex", "does", "not", "contain", "named", "parameters", "they", "will", "be", "keyed", "_0", "_1", "...", "*", "Named", "parameters", "Regex", ":", "/", "photos", "/", "^", "(", "?P<year", ">", "\\", "d", "{", "4", "}", ")", "/", "(", "?P<month", ">", "\\", "w", "{", "3", "}", ")", "/", "(", "?P<day", ">", "\\", "d", "{", "1", "2", "}", ")", "/", "(", "?P<object_id", ">", "\\", "d", "+", ")", "/", "URL", ":", "http", ":", "//", "www2", ".", "ljworld", ".", "com", "/", "photos", "/", "2009", "/", "oct", "/", "11", "/", "12345", "/", "Return", "Value", ":", "{", "u", "day", ":", "11", "u", "month", ":", "oct", "u", "object_id", ":", "12345", "u", "year", ":", "2009", "}", "*", "Unnamed", "parameters", "Regex", ":", "/", "blah", "/", "(", "[", "\\", "w", "-", "]", "+", ")", "/", "(", "\\", "d", "+", ")", "/", "URL", ":", "http", ":", "//", "www", ".", "example", ".", "com", "/", "blah", "/", "hello", "/", "123", "/", "Return", "Value", ":", "{", "u", "_0", ":", "hello", "u", "_1", ":", "123", "}" ]
python
valid
31.228571
SALib/SALib
src/SALib/sample/morris/__init__.py
https://github.com/SALib/SALib/blob/9744d73bb17cfcffc8282c7dc4a727efdc4bea3f/src/SALib/sample/morris/__init__.py#L53-L103
def sample(problem, N, num_levels=4, optimal_trajectories=None, local_optimization=True): """Generate model inputs using the Method of Morris Returns a NumPy matrix containing the model inputs required for Method of Morris. The resulting matrix has :math:`(G+1)*T` rows and :math:`D` columns, where :math:`D` is the number of parameters, :math:`G` is the number of groups (if no groups are selected, the number of parameters). :math:`T` is the number of trajectories :math:`N`, or `optimal_trajectories` if selected. These model inputs are intended to be used with :func:`SALib.analyze.morris.analyze`. Parameters ---------- problem : dict The problem definition N : int The number of trajectories to generate num_levels : int, default=4 The number of grid levels optimal_trajectories : int The number of optimal trajectories to sample (between 2 and N) local_optimization : bool, default=True Flag whether to use local optimization according to Ruano et al. (2012) Speeds up the process tremendously for bigger N and num_levels. If set to ``False`` brute force method is used, unless ``gurobipy`` is available Returns ------- sample : numpy.ndarray Returns a numpy.ndarray containing the model inputs required for Method of Morris. The resulting matrix has :math:`(G/D+1)*N/T` rows and :math:`D` columns, where :math:`D` is the number of parameters. """ if problem.get('groups'): sample = _sample_groups(problem, N, num_levels) else: sample = _sample_oat(problem, N, num_levels) if optimal_trajectories: sample = _compute_optimised_trajectories(problem, sample, N, optimal_trajectories, local_optimization) scale_samples(sample, problem['bounds']) return sample
[ "def", "sample", "(", "problem", ",", "N", ",", "num_levels", "=", "4", ",", "optimal_trajectories", "=", "None", ",", "local_optimization", "=", "True", ")", ":", "if", "problem", ".", "get", "(", "'groups'", ")", ":", "sample", "=", "_sample_groups", "(", "problem", ",", "N", ",", "num_levels", ")", "else", ":", "sample", "=", "_sample_oat", "(", "problem", ",", "N", ",", "num_levels", ")", "if", "optimal_trajectories", ":", "sample", "=", "_compute_optimised_trajectories", "(", "problem", ",", "sample", ",", "N", ",", "optimal_trajectories", ",", "local_optimization", ")", "scale_samples", "(", "sample", ",", "problem", "[", "'bounds'", "]", ")", "return", "sample" ]
Generate model inputs using the Method of Morris Returns a NumPy matrix containing the model inputs required for Method of Morris. The resulting matrix has :math:`(G+1)*T` rows and :math:`D` columns, where :math:`D` is the number of parameters, :math:`G` is the number of groups (if no groups are selected, the number of parameters). :math:`T` is the number of trajectories :math:`N`, or `optimal_trajectories` if selected. These model inputs are intended to be used with :func:`SALib.analyze.morris.analyze`. Parameters ---------- problem : dict The problem definition N : int The number of trajectories to generate num_levels : int, default=4 The number of grid levels optimal_trajectories : int The number of optimal trajectories to sample (between 2 and N) local_optimization : bool, default=True Flag whether to use local optimization according to Ruano et al. (2012) Speeds up the process tremendously for bigger N and num_levels. If set to ``False`` brute force method is used, unless ``gurobipy`` is available Returns ------- sample : numpy.ndarray Returns a numpy.ndarray containing the model inputs required for Method of Morris. The resulting matrix has :math:`(G/D+1)*N/T` rows and :math:`D` columns, where :math:`D` is the number of parameters.
[ "Generate", "model", "inputs", "using", "the", "Method", "of", "Morris" ]
python
train
39.980392
phaethon/kamene
kamene/contrib/gsm_um.py
https://github.com/phaethon/kamene/blob/11d4064844f4f68ac5d7546f5633ac7d02082914/kamene/contrib/gsm_um.py#L750-L766
def immediateAssignmentExtended(StartingTime_presence=0): """IMMEDIATE ASSIGNMENT EXTENDED Section 9.1.19""" a = L2PseudoLength() b = TpPd(pd=0x6) c = MessageType(mesType=0x39) # 00111001 d = PageModeAndSpareHalfOctets() f = ChannelDescription() g = RequestReference() h = TimingAdvance() i = MobileAllocation() packet = a / b / c / d / f / g / h / i if StartingTime_presence is 1: j = StartingTimeHdr(ieiST=0x7C, eightBitST=0x0) packet = packet / j k = IaxRestOctets() packet = packet / k return packet
[ "def", "immediateAssignmentExtended", "(", "StartingTime_presence", "=", "0", ")", ":", "a", "=", "L2PseudoLength", "(", ")", "b", "=", "TpPd", "(", "pd", "=", "0x6", ")", "c", "=", "MessageType", "(", "mesType", "=", "0x39", ")", "# 00111001", "d", "=", "PageModeAndSpareHalfOctets", "(", ")", "f", "=", "ChannelDescription", "(", ")", "g", "=", "RequestReference", "(", ")", "h", "=", "TimingAdvance", "(", ")", "i", "=", "MobileAllocation", "(", ")", "packet", "=", "a", "/", "b", "/", "c", "/", "d", "/", "f", "/", "g", "/", "h", "/", "i", "if", "StartingTime_presence", "is", "1", ":", "j", "=", "StartingTimeHdr", "(", "ieiST", "=", "0x7C", ",", "eightBitST", "=", "0x0", ")", "packet", "=", "packet", "/", "j", "k", "=", "IaxRestOctets", "(", ")", "packet", "=", "packet", "/", "k", "return", "packet" ]
IMMEDIATE ASSIGNMENT EXTENDED Section 9.1.19
[ "IMMEDIATE", "ASSIGNMENT", "EXTENDED", "Section", "9", ".", "1", ".", "19" ]
python
train
32.941176
nickfrostatx/flask-hookserver
flask_hookserver.py
https://github.com/nickfrostatx/flask-hookserver/blob/fb5c226473f54e3469234403ec56a354374d2c41/flask_hookserver.py#L132-L156
def _load_github_hooks(github_url='https://api.github.com'): """Request GitHub's IP block from their API. Return the IP network. If we detect a rate-limit error, raise an error message stating when the rate limit will reset. If something else goes wrong, raise a generic 503. """ try: resp = requests.get(github_url + '/meta') if resp.status_code == 200: return resp.json()['hooks'] else: if resp.headers.get('X-RateLimit-Remaining') == '0': reset_ts = int(resp.headers['X-RateLimit-Reset']) reset_string = time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(reset_ts)) raise ServiceUnavailable('Rate limited from GitHub until ' + reset_string) else: raise ServiceUnavailable('Error reaching GitHub') except (KeyError, ValueError, requests.exceptions.ConnectionError): raise ServiceUnavailable('Error reaching GitHub')
[ "def", "_load_github_hooks", "(", "github_url", "=", "'https://api.github.com'", ")", ":", "try", ":", "resp", "=", "requests", ".", "get", "(", "github_url", "+", "'/meta'", ")", "if", "resp", ".", "status_code", "==", "200", ":", "return", "resp", ".", "json", "(", ")", "[", "'hooks'", "]", "else", ":", "if", "resp", ".", "headers", ".", "get", "(", "'X-RateLimit-Remaining'", ")", "==", "'0'", ":", "reset_ts", "=", "int", "(", "resp", ".", "headers", "[", "'X-RateLimit-Reset'", "]", ")", "reset_string", "=", "time", ".", "strftime", "(", "'%a, %d %b %Y %H:%M:%S GMT'", ",", "time", ".", "gmtime", "(", "reset_ts", ")", ")", "raise", "ServiceUnavailable", "(", "'Rate limited from GitHub until '", "+", "reset_string", ")", "else", ":", "raise", "ServiceUnavailable", "(", "'Error reaching GitHub'", ")", "except", "(", "KeyError", ",", "ValueError", ",", "requests", ".", "exceptions", ".", "ConnectionError", ")", ":", "raise", "ServiceUnavailable", "(", "'Error reaching GitHub'", ")" ]
Request GitHub's IP block from their API. Return the IP network. If we detect a rate-limit error, raise an error message stating when the rate limit will reset. If something else goes wrong, raise a generic 503.
[ "Request", "GitHub", "s", "IP", "block", "from", "their", "API", "." ]
python
train
42
ael-code/pyFsdb
fsdb/hashtools.py
https://github.com/ael-code/pyFsdb/blob/de33a0d41373307cb32cdd7ba1991b85ff495ee3/fsdb/hashtools.py#L17-L35
def calc_digest(origin, algorithm="sha1", block_size=None): """Calculate digest of a readable object Args: origin -- a readable object for which calculate digest algorithn -- the algorithm to use. See ``hashlib.algorithms_available`` for supported algorithms. block_size -- the size of the block to read at each iteration """ try: hashM = hashlib.new(algorithm) except ValueError: raise ValueError('hash algorithm not supported by the underlying platform: "{0}"'.format(algorithm)) while True: chunk = origin.read(block_size) if block_size else origin.read() if not chunk: break hashM.update(chunk) return hashM.hexdigest()
[ "def", "calc_digest", "(", "origin", ",", "algorithm", "=", "\"sha1\"", ",", "block_size", "=", "None", ")", ":", "try", ":", "hashM", "=", "hashlib", ".", "new", "(", "algorithm", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'hash algorithm not supported by the underlying platform: \"{0}\"'", ".", "format", "(", "algorithm", ")", ")", "while", "True", ":", "chunk", "=", "origin", ".", "read", "(", "block_size", ")", "if", "block_size", "else", "origin", ".", "read", "(", ")", "if", "not", "chunk", ":", "break", "hashM", ".", "update", "(", "chunk", ")", "return", "hashM", ".", "hexdigest", "(", ")" ]
Calculate digest of a readable object Args: origin -- a readable object for which calculate digest algorithn -- the algorithm to use. See ``hashlib.algorithms_available`` for supported algorithms. block_size -- the size of the block to read at each iteration
[ "Calculate", "digest", "of", "a", "readable", "object" ]
python
train
37.473684
chrisjsewell/jsonextended
jsonextended/edict.py
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L1452-L1508
def apply(d, leaf_key, func, new_name=None, remove_lkey=True, list_of_dicts=False, unflatten_level=0, deepcopy=True, **kwargs): """ apply a function to all values with a certain leaf (terminal) key Parameters ---------- d : dict leaf_key : str name of leaf key func : callable function to apply new_name : str if not None, rename leaf_key remove_lkey: bool whether to remove original leaf_key (if new_name is not None) list_of_dicts: bool treat list of dicts as additional branches unflatten_level : int or None the number of levels to leave unflattened before combining, for instance if you need dicts as inputs deepcopy: bool deepcopy values kwargs : dict additional keywords to parse to function Examples -------- >>> from pprint import pprint >>> d = {'a':1,'b':1} >>> func = lambda x: x+1 >>> pprint(apply(d,'a',func)) {'a': 2, 'b': 1} >>> pprint(apply(d,'a',func,new_name='c')) {'b': 1, 'c': 2} >>> pprint(apply(d,'a',func,new_name='c', remove_lkey=False)) {'a': 1, 'b': 1, 'c': 2} >>> test_dict = {"a":[{"b":[{"c":1, "d": 2}, {"e":3, "f": 4}]}, {"b":[{"c":5, "d": 6}, {"e":7, "f": 8}]}]} >>> pprint(apply(test_dict, "b", lambda x: x[-1], list_of_dicts=True, unflatten_level=2)) {'a': [{'b': {'e': 3, 'f': 4}}, {'b': {'e': 7, 'f': 8}}]} """ # noqa: E501 list_of_dicts = '__list__' if list_of_dicts else None if unflatten_level == 0: flatd = flatten(d, list_of_dicts=list_of_dicts) else: flatd = flattennd(d, unflatten_level, list_of_dicts=list_of_dicts) newd = {k: (func(v, **kwargs) if k[-1] == leaf_key else v) for k, v in flatd.items()} if new_name is not None: newd = {(tuple(list(k[:-1]) + [new_name]) if k[-1] == leaf_key else k): v for k, v in newd.items()} if not remove_lkey: newd.update(flatd) return unflatten(newd, list_of_dicts=list_of_dicts, deepcopy=deepcopy)
[ "def", "apply", "(", "d", ",", "leaf_key", ",", "func", ",", "new_name", "=", "None", ",", "remove_lkey", "=", "True", ",", "list_of_dicts", "=", "False", ",", "unflatten_level", "=", "0", ",", "deepcopy", "=", "True", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "list_of_dicts", "=", "'__list__'", "if", "list_of_dicts", "else", "None", "if", "unflatten_level", "==", "0", ":", "flatd", "=", "flatten", "(", "d", ",", "list_of_dicts", "=", "list_of_dicts", ")", "else", ":", "flatd", "=", "flattennd", "(", "d", ",", "unflatten_level", ",", "list_of_dicts", "=", "list_of_dicts", ")", "newd", "=", "{", "k", ":", "(", "func", "(", "v", ",", "*", "*", "kwargs", ")", "if", "k", "[", "-", "1", "]", "==", "leaf_key", "else", "v", ")", "for", "k", ",", "v", "in", "flatd", ".", "items", "(", ")", "}", "if", "new_name", "is", "not", "None", ":", "newd", "=", "{", "(", "tuple", "(", "list", "(", "k", "[", ":", "-", "1", "]", ")", "+", "[", "new_name", "]", ")", "if", "k", "[", "-", "1", "]", "==", "leaf_key", "else", "k", ")", ":", "v", "for", "k", ",", "v", "in", "newd", ".", "items", "(", ")", "}", "if", "not", "remove_lkey", ":", "newd", ".", "update", "(", "flatd", ")", "return", "unflatten", "(", "newd", ",", "list_of_dicts", "=", "list_of_dicts", ",", "deepcopy", "=", "deepcopy", ")" ]
apply a function to all values with a certain leaf (terminal) key Parameters ---------- d : dict leaf_key : str name of leaf key func : callable function to apply new_name : str if not None, rename leaf_key remove_lkey: bool whether to remove original leaf_key (if new_name is not None) list_of_dicts: bool treat list of dicts as additional branches unflatten_level : int or None the number of levels to leave unflattened before combining, for instance if you need dicts as inputs deepcopy: bool deepcopy values kwargs : dict additional keywords to parse to function Examples -------- >>> from pprint import pprint >>> d = {'a':1,'b':1} >>> func = lambda x: x+1 >>> pprint(apply(d,'a',func)) {'a': 2, 'b': 1} >>> pprint(apply(d,'a',func,new_name='c')) {'b': 1, 'c': 2} >>> pprint(apply(d,'a',func,new_name='c', remove_lkey=False)) {'a': 1, 'b': 1, 'c': 2} >>> test_dict = {"a":[{"b":[{"c":1, "d": 2}, {"e":3, "f": 4}]}, {"b":[{"c":5, "d": 6}, {"e":7, "f": 8}]}]} >>> pprint(apply(test_dict, "b", lambda x: x[-1], list_of_dicts=True, unflatten_level=2)) {'a': [{'b': {'e': 3, 'f': 4}}, {'b': {'e': 7, 'f': 8}}]}
[ "apply", "a", "function", "to", "all", "values", "with", "a", "certain", "leaf", "(", "terminal", ")", "key" ]
python
train
35.403509
peerplays-network/python-peerplays
peerplays/peerplays.py
https://github.com/peerplays-network/python-peerplays/blob/188f04238e7e21d5f73e9b01099eea44289ef6b7/peerplays/peerplays.py#L439-L520
def disallow( self, foreign, permission="active", account=None, threshold=None, **kwargs ): """ Remove additional access to an account by some other public key or account. :param str foreign: The foreign account that will obtain access :param str permission: (optional) The actual permission to modify (defaults to ``active``) :param str account: (optional) the account to allow access to (defaults to ``default_account``) :param int threshold: The threshold that needs to be reached by signatures to be able to interact """ if not account: if "default_account" in self.config: account = self.config["default_account"] if not account: raise ValueError("You need to provide an account") if permission not in ["owner", "active"]: raise ValueError("Permission needs to be either 'owner', or 'active") account = Account(account, blockchain_instance=self) authority = account[permission] try: pubkey = PublicKey(foreign, prefix=self.prefix) affected_items = list( filter(lambda x: x[0] == str(pubkey), authority["key_auths"]) ) authority["key_auths"] = list( filter(lambda x: x[0] != str(pubkey), authority["key_auths"]) ) except: try: foreign_account = Account(foreign, blockchain_instance=self) affected_items = list( filter( lambda x: x[0] == foreign_account["id"], authority["account_auths"], ) ) authority["account_auths"] = list( filter( lambda x: x[0] != foreign_account["id"], authority["account_auths"], ) ) except: raise ValueError("Unknown foreign account or unvalid public key") if not affected_items: raise ValueError("Changes nothing!") removed_weight = affected_items[0][1] # Define threshold if threshold: authority["weight_threshold"] = threshold # Correct threshold (at most by the amount removed from the # authority) try: self._test_weights_treshold(authority) except: log.critical( "The account's threshold will be reduced by %d" % (removed_weight) ) authority["weight_threshold"] -= removed_weight self._test_weights_treshold(authority) op = operations.Account_update( **{ "fee": {"amount": 0, "asset_id": "1.3.0"}, "account": account["id"], permission: authority, "extensions": {}, } ) if permission == "owner": return self.finalizeOp(op, account["name"], "owner", **kwargs) else: return self.finalizeOp(op, account["name"], "active", **kwargs)
[ "def", "disallow", "(", "self", ",", "foreign", ",", "permission", "=", "\"active\"", ",", "account", "=", "None", ",", "threshold", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "account", ":", "if", "\"default_account\"", "in", "self", ".", "config", ":", "account", "=", "self", ".", "config", "[", "\"default_account\"", "]", "if", "not", "account", ":", "raise", "ValueError", "(", "\"You need to provide an account\"", ")", "if", "permission", "not", "in", "[", "\"owner\"", ",", "\"active\"", "]", ":", "raise", "ValueError", "(", "\"Permission needs to be either 'owner', or 'active\"", ")", "account", "=", "Account", "(", "account", ",", "blockchain_instance", "=", "self", ")", "authority", "=", "account", "[", "permission", "]", "try", ":", "pubkey", "=", "PublicKey", "(", "foreign", ",", "prefix", "=", "self", ".", "prefix", ")", "affected_items", "=", "list", "(", "filter", "(", "lambda", "x", ":", "x", "[", "0", "]", "==", "str", "(", "pubkey", ")", ",", "authority", "[", "\"key_auths\"", "]", ")", ")", "authority", "[", "\"key_auths\"", "]", "=", "list", "(", "filter", "(", "lambda", "x", ":", "x", "[", "0", "]", "!=", "str", "(", "pubkey", ")", ",", "authority", "[", "\"key_auths\"", "]", ")", ")", "except", ":", "try", ":", "foreign_account", "=", "Account", "(", "foreign", ",", "blockchain_instance", "=", "self", ")", "affected_items", "=", "list", "(", "filter", "(", "lambda", "x", ":", "x", "[", "0", "]", "==", "foreign_account", "[", "\"id\"", "]", ",", "authority", "[", "\"account_auths\"", "]", ",", ")", ")", "authority", "[", "\"account_auths\"", "]", "=", "list", "(", "filter", "(", "lambda", "x", ":", "x", "[", "0", "]", "!=", "foreign_account", "[", "\"id\"", "]", ",", "authority", "[", "\"account_auths\"", "]", ",", ")", ")", "except", ":", "raise", "ValueError", "(", "\"Unknown foreign account or unvalid public key\"", ")", "if", "not", "affected_items", ":", "raise", "ValueError", "(", "\"Changes nothing!\"", ")", "removed_weight", "=", "affected_items", "[", "0", "]", "[", "1", "]", "# Define threshold", "if", "threshold", ":", "authority", "[", "\"weight_threshold\"", "]", "=", "threshold", "# Correct threshold (at most by the amount removed from the", "# authority)", "try", ":", "self", ".", "_test_weights_treshold", "(", "authority", ")", "except", ":", "log", ".", "critical", "(", "\"The account's threshold will be reduced by %d\"", "%", "(", "removed_weight", ")", ")", "authority", "[", "\"weight_threshold\"", "]", "-=", "removed_weight", "self", ".", "_test_weights_treshold", "(", "authority", ")", "op", "=", "operations", ".", "Account_update", "(", "*", "*", "{", "\"fee\"", ":", "{", "\"amount\"", ":", "0", ",", "\"asset_id\"", ":", "\"1.3.0\"", "}", ",", "\"account\"", ":", "account", "[", "\"id\"", "]", ",", "permission", ":", "authority", ",", "\"extensions\"", ":", "{", "}", ",", "}", ")", "if", "permission", "==", "\"owner\"", ":", "return", "self", ".", "finalizeOp", "(", "op", ",", "account", "[", "\"name\"", "]", ",", "\"owner\"", ",", "*", "*", "kwargs", ")", "else", ":", "return", "self", ".", "finalizeOp", "(", "op", ",", "account", "[", "\"name\"", "]", ",", "\"active\"", ",", "*", "*", "kwargs", ")" ]
Remove additional access to an account by some other public
            key or account.

            :param str foreign: The foreign account or public key that will
                lose access
            :param str permission: (optional) The actual permission to
                modify (defaults to ``active``)
            :param str account: (optional) the account to remove access
                from (defaults to ``default_account``)
            :param int threshold: The threshold that needs to be reached
                by signatures to be able to interact
[ "Remove", "additional", "access", "to", "an", "account", "by", "some", "other", "public", "key", "or", "account", "." ]
python
train
38.146341
koalalorenzo/python-digitalocean
digitalocean/Droplet.py
https://github.com/koalalorenzo/python-digitalocean/blob/d0221b57856fb1e131cafecf99d826f7b07a947c/digitalocean/Droplet.py#L204-L230
def _perform_action(self, params, return_dict=True): """ Perform a droplet action. Args: params (dict): parameters of the action Optional Args: return_dict (bool): Return a dict when True (default), otherwise return an Action. Returns dict or Action """ action = self.get_data( "droplets/%s/actions/" % self.id, type=POST, params=params ) if return_dict: return action else: action = action[u'action'] return_action = Action(token=self.token) # Loading attributes for attr in action.keys(): setattr(return_action, attr, action[attr]) return return_action
[ "def", "_perform_action", "(", "self", ",", "params", ",", "return_dict", "=", "True", ")", ":", "action", "=", "self", ".", "get_data", "(", "\"droplets/%s/actions/\"", "%", "self", ".", "id", ",", "type", "=", "POST", ",", "params", "=", "params", ")", "if", "return_dict", ":", "return", "action", "else", ":", "action", "=", "action", "[", "u'action'", "]", "return_action", "=", "Action", "(", "token", "=", "self", ".", "token", ")", "# Loading attributes", "for", "attr", "in", "action", ".", "keys", "(", ")", ":", "setattr", "(", "return_action", ",", "attr", ",", "action", "[", "attr", "]", ")", "return", "return_action" ]
Perform a droplet action. Args: params (dict): parameters of the action Optional Args: return_dict (bool): Return a dict when True (default), otherwise return an Action. Returns dict or Action
[ "Perform", "a", "droplet", "action", "." ]
python
valid
29.740741
pyviz/holoviews
holoviews/plotting/util.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/util.py#L909-L955
def color_intervals(colors, levels, clip=None, N=255): """ Maps the supplied colors into bins defined by the supplied levels. If a clip tuple is defined the bins are clipped to the defined range otherwise the range is computed from the levels and returned. Arguments --------- colors: list List of colors (usually hex string or named colors) levels: list or array_like Levels specifying the bins to map the colors to clip: tuple (optional) Lower and upper limits of the color range N: int Number of discrete colors to map the range onto Returns ------- cmap: list List of colors clip: tuple Lower and upper bounds of the color range """ if len(colors) != len(levels)-1: raise ValueError('The number of colors in the colormap ' 'must match the intervals defined in the ' 'color_levels, expected %d colors found %d.' % (N, len(colors))) intervals = np.diff(levels) cmin, cmax = min(levels), max(levels) interval = cmax-cmin cmap = [] for intv, c in zip(intervals, colors): cmap += [c]*int(round(N*(intv/interval))) if clip is not None: clmin, clmax = clip lidx = int(round(N*((clmin-cmin)/interval))) uidx = int(round(N*((cmax-clmax)/interval))) uidx = N-uidx if lidx == uidx: uidx = lidx+1 cmap = cmap[lidx:uidx] if clmin == clmax: idx = np.argmin(np.abs(np.array(levels)-clmin)) clip = levels[idx: idx+2] if len(levels) > idx+2 else levels[idx-1: idx+1] return cmap, clip
[ "def", "color_intervals", "(", "colors", ",", "levels", ",", "clip", "=", "None", ",", "N", "=", "255", ")", ":", "if", "len", "(", "colors", ")", "!=", "len", "(", "levels", ")", "-", "1", ":", "raise", "ValueError", "(", "'The number of colors in the colormap '", "'must match the intervals defined in the '", "'color_levels, expected %d colors found %d.'", "%", "(", "N", ",", "len", "(", "colors", ")", ")", ")", "intervals", "=", "np", ".", "diff", "(", "levels", ")", "cmin", ",", "cmax", "=", "min", "(", "levels", ")", ",", "max", "(", "levels", ")", "interval", "=", "cmax", "-", "cmin", "cmap", "=", "[", "]", "for", "intv", ",", "c", "in", "zip", "(", "intervals", ",", "colors", ")", ":", "cmap", "+=", "[", "c", "]", "*", "int", "(", "round", "(", "N", "*", "(", "intv", "/", "interval", ")", ")", ")", "if", "clip", "is", "not", "None", ":", "clmin", ",", "clmax", "=", "clip", "lidx", "=", "int", "(", "round", "(", "N", "*", "(", "(", "clmin", "-", "cmin", ")", "/", "interval", ")", ")", ")", "uidx", "=", "int", "(", "round", "(", "N", "*", "(", "(", "cmax", "-", "clmax", ")", "/", "interval", ")", ")", ")", "uidx", "=", "N", "-", "uidx", "if", "lidx", "==", "uidx", ":", "uidx", "=", "lidx", "+", "1", "cmap", "=", "cmap", "[", "lidx", ":", "uidx", "]", "if", "clmin", "==", "clmax", ":", "idx", "=", "np", ".", "argmin", "(", "np", ".", "abs", "(", "np", ".", "array", "(", "levels", ")", "-", "clmin", ")", ")", "clip", "=", "levels", "[", "idx", ":", "idx", "+", "2", "]", "if", "len", "(", "levels", ")", ">", "idx", "+", "2", "else", "levels", "[", "idx", "-", "1", ":", "idx", "+", "1", "]", "return", "cmap", ",", "clip" ]
Maps the supplied colors into bins defined by the supplied levels. If a clip tuple is defined the bins are clipped to the defined range otherwise the range is computed from the levels and returned. Arguments --------- colors: list List of colors (usually hex string or named colors) levels: list or array_like Levels specifying the bins to map the colors to clip: tuple (optional) Lower and upper limits of the color range N: int Number of discrete colors to map the range onto Returns ------- cmap: list List of colors clip: tuple Lower and upper bounds of the color range
[ "Maps", "the", "supplied", "colors", "into", "bins", "defined", "by", "the", "supplied", "levels", ".", "If", "a", "clip", "tuple", "is", "defined", "the", "bins", "are", "clipped", "to", "the", "defined", "range", "otherwise", "the", "range", "is", "computed", "from", "the", "levels", "and", "returned", "." ]
python
train
34.851064
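The record above maps colours onto bins defined by level boundaries; the following is a small, hypothetical usage sketch (the import path is taken from the record's file path, and the colour and level values are invented):

from holoviews.plotting.util import color_intervals

colors = ['#ff0000', '#00ff00', '#0000ff']   # one colour per interval
levels = [0, 10, 50, 100]                    # three intervals
cmap, clip = color_intervals(colors, levels, clip=(10, 90))
# cmap is a flat list of colour entries whose relative counts mirror the
# interval widths, trimmed to the (10, 90) clip range; clip echoes the
# bounds actually applied.
print(len(cmap), clip)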
readbeyond/aeneas
aeneas/audiofilemfcc.py
https://github.com/readbeyond/aeneas/blob/9d95535ad63eef4a98530cfdff033b8c35315ee1/aeneas/audiofilemfcc.py#L452-L460
def middle_begin(self, index): """ Set the index where MIDDLE starts. :param int index: the new index for MIDDLE begin """ if (index < 0) or (index > self.all_length): raise ValueError(u"The given index is not valid") self.__middle_begin = index
[ "def", "middle_begin", "(", "self", ",", "index", ")", ":", "if", "(", "index", "<", "0", ")", "or", "(", "index", ">", "self", ".", "all_length", ")", ":", "raise", "ValueError", "(", "u\"The given index is not valid\"", ")", "self", ".", "__middle_begin", "=", "index" ]
Set the index where MIDDLE starts. :param int index: the new index for MIDDLE begin
[ "Set", "the", "index", "where", "MIDDLE", "starts", "." ]
python
train
33.111111
usc-isi-i2/etk
etk/data_extractors/htiExtractors/misc.py
https://github.com/usc-isi-i2/etk/blob/aab077c984ea20f5e8ae33af622fe11d3c4df866/etk/data_extractors/htiExtractors/misc.py#L26-L89
def phone_text_subs(): """ Gets a dictionary of dictionaries that each contain alphabetic number manifestations mapped to their actual Number value. Returns: dictionary of dictionaries containing Strings mapped to Numbers """ Small = { 'zero': 0, 'zer0': 0, 'one': 1, 'two': 2, 'three': 3, 'four': 4, 'fuor': 4, 'five': 5, 'fith': 5, 'six': 6, 'seven': 7, 'sven': 7, 'eight': 8, 'nine': 9, 'ten': 10, 'eleven': 11, 'twelve': 12, 'thirteen': 13, 'fourteen': 14, 'fifteen': 15, 'sixteen': 16, 'seventeen': 17, 'eighteen': 18, 'nineteen': 19, 'twenty': 20, 'thirty': 30, 'forty': 40, 'fifty': 50, 'sixty': 60, 'seventy': 70, 'eighty': 80, 'ninety': 90, 'oh': 0 } Magnitude = { 'thousand': 000, 'million': 000000, } Others = { '!': 1, 'o': 0, 'l': 1, 'i': 1 } output = {} output['Small'] = Small output['Magnitude'] = Magnitude output['Others'] = Others return output
[ "def", "phone_text_subs", "(", ")", ":", "Small", "=", "{", "'zero'", ":", "0", ",", "'zer0'", ":", "0", ",", "'one'", ":", "1", ",", "'two'", ":", "2", ",", "'three'", ":", "3", ",", "'four'", ":", "4", ",", "'fuor'", ":", "4", ",", "'five'", ":", "5", ",", "'fith'", ":", "5", ",", "'six'", ":", "6", ",", "'seven'", ":", "7", ",", "'sven'", ":", "7", ",", "'eight'", ":", "8", ",", "'nine'", ":", "9", ",", "'ten'", ":", "10", ",", "'eleven'", ":", "11", ",", "'twelve'", ":", "12", ",", "'thirteen'", ":", "13", ",", "'fourteen'", ":", "14", ",", "'fifteen'", ":", "15", ",", "'sixteen'", ":", "16", ",", "'seventeen'", ":", "17", ",", "'eighteen'", ":", "18", ",", "'nineteen'", ":", "19", ",", "'twenty'", ":", "20", ",", "'thirty'", ":", "30", ",", "'forty'", ":", "40", ",", "'fifty'", ":", "50", ",", "'sixty'", ":", "60", ",", "'seventy'", ":", "70", ",", "'eighty'", ":", "80", ",", "'ninety'", ":", "90", ",", "'oh'", ":", "0", "}", "Magnitude", "=", "{", "'thousand'", ":", "000", ",", "'million'", ":", "000000", ",", "}", "Others", "=", "{", "'!'", ":", "1", ",", "'o'", ":", "0", ",", "'l'", ":", "1", ",", "'i'", ":", "1", "}", "output", "=", "{", "}", "output", "[", "'Small'", "]", "=", "Small", "output", "[", "'Magnitude'", "]", "=", "Magnitude", "output", "[", "'Others'", "]", "=", "Others", "return", "output" ]
Gets a dictionary of dictionaries that each contain alphabetic number manifestations mapped to their actual Number value. Returns: dictionary of dictionaries containing Strings mapped to Numbers
[ "Gets", "a", "dictionary", "of", "dictionaries", "that", "each", "contain", "alphabetic", "number", "manifestations", "mapped", "to", "their", "actual", "Number", "value", "." ]
python
train
15.609375
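As a hedged illustration of how the lookup tables above might be consumed, here is a hypothetical helper built on the 'Small' table (the helper itself is not part of the library, and the import path is inferred from the record's file path):

from etk.data_extractors.htiExtractors.misc import phone_text_subs

def words_to_digits(text):
    # Map spelled-out digits to their numeric characters using the 'Small' table.
    small = phone_text_subs()['Small']
    return ''.join(str(small[w]) for w in text.split() if w in small)

print(words_to_digits('three one zero five five five'))  # -> 310555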
googleapis/google-cloud-python
core/google/cloud/operation.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/core/google/cloud/operation.py#L169-L188
def from_dict(cls, operation, client, **caller_metadata): """Factory: construct an instance from a dictionary. :type operation: dict :param operation: Operation as a JSON object. :type client: :class:`~google.cloud.client.Client` :param client: The client used to poll for the status of the operation. :type caller_metadata: dict :param caller_metadata: caller-assigned metadata about the operation :rtype: :class:`Operation` :returns: new instance, with attributes based on the protobuf. """ operation_pb = json_format.ParseDict(operation, operations_pb2.Operation()) result = cls(operation_pb.name, client, **caller_metadata) result._update_state(operation_pb) result._from_grpc = False return result
[ "def", "from_dict", "(", "cls", ",", "operation", ",", "client", ",", "*", "*", "caller_metadata", ")", ":", "operation_pb", "=", "json_format", ".", "ParseDict", "(", "operation", ",", "operations_pb2", ".", "Operation", "(", ")", ")", "result", "=", "cls", "(", "operation_pb", ".", "name", ",", "client", ",", "*", "*", "caller_metadata", ")", "result", ".", "_update_state", "(", "operation_pb", ")", "result", ".", "_from_grpc", "=", "False", "return", "result" ]
Factory: construct an instance from a dictionary. :type operation: dict :param operation: Operation as a JSON object. :type client: :class:`~google.cloud.client.Client` :param client: The client used to poll for the status of the operation. :type caller_metadata: dict :param caller_metadata: caller-assigned metadata about the operation :rtype: :class:`Operation` :returns: new instance, with attributes based on the protobuf.
[ "Factory", ":", "construct", "an", "instance", "from", "a", "dictionary", "." ]
python
train
40.35
sternoru/goscalecms
goscale/models.py
https://github.com/sternoru/goscalecms/blob/7eee50357c47ebdfe3e573a8b4be3b67892d229e/goscale/models.py#L211-L235
def update(self): """This method should be called to update associated Posts It will call content-specific methods: _get_data() to obtain list of entries _store_post() to store obtained entry object _get_data_source_url() to get an URL to identify Posts from this Data Source """ #get the raw data # self.posts.all().delete() # TODO: handle in update_posts if source changes without deleting every time data = self._get_data() #iterate through them and for each item msg = [] for entry in data: link = self._get_entry_link(entry) stored_entry, is_new = Post.objects.get_or_create(link=link) self._store_post(stored_entry, entry) if is_new is True: #self._set_dates(stored_entry) # self._store_post(stored_entry, entry) msg.append('Post "%s" added.' % stored_entry.link) else: msg.append('Post "%s" already saved.' % stored_entry.link) self.updated = utils.get_datetime_now() self.save(no_signals=True) return '<br />'.join(msg)
[ "def", "update", "(", "self", ")", ":", "#get the raw data", "# self.posts.all().delete() # TODO: handle in update_posts if source changes without deleting every time", "data", "=", "self", ".", "_get_data", "(", ")", "#iterate through them and for each item", "msg", "=", "[", "]", "for", "entry", "in", "data", ":", "link", "=", "self", ".", "_get_entry_link", "(", "entry", ")", "stored_entry", ",", "is_new", "=", "Post", ".", "objects", ".", "get_or_create", "(", "link", "=", "link", ")", "self", ".", "_store_post", "(", "stored_entry", ",", "entry", ")", "if", "is_new", "is", "True", ":", "#self._set_dates(stored_entry)", "# self._store_post(stored_entry, entry)", "msg", ".", "append", "(", "'Post \"%s\" added.'", "%", "stored_entry", ".", "link", ")", "else", ":", "msg", ".", "append", "(", "'Post \"%s\" already saved.'", "%", "stored_entry", ".", "link", ")", "self", ".", "updated", "=", "utils", ".", "get_datetime_now", "(", ")", "self", ".", "save", "(", "no_signals", "=", "True", ")", "return", "'<br />'", ".", "join", "(", "msg", ")" ]
This method should be called to update associated Posts It will call content-specific methods: _get_data() to obtain list of entries _store_post() to store obtained entry object _get_data_source_url() to get an URL to identify Posts from this Data Source
[ "This", "method", "should", "be", "called", "to", "update", "associated", "Posts", "It", "will", "call", "content", "-", "specific", "methods", ":", "_get_data", "()", "to", "obtain", "list", "of", "entries", "_store_post", "()", "to", "store", "obtained", "entry", "object", "_get_data_source_url", "()", "to", "get", "an", "URL", "to", "identify", "Posts", "from", "this", "Data", "Source" ]
python
train
46.72
mozillazg/baidu-pcs-python-sdk
baidupcs/api.py
https://github.com/mozillazg/baidu-pcs-python-sdk/blob/12fe3f13b2ecda8f8bdcc5334c876e934776a5cc/baidupcs/api.py#L769-L789
def query_download_tasks(self, task_ids, operate_type=1, expires=None,
                             **kwargs):
        """Query offline download task information and progress by task ID.

        :param task_ids: list of task IDs to query
        :type task_ids: list or tuple
        :param operate_type:
                            * 0: query task information
                            * 1: query progress information, defaults to 1
        :param expires: request expiration time; if provided, it will be
                        validated.
        :type expires: int
        :return: Response object
        """

        params = {
            'task_ids': ','.join(map(str, task_ids)),
            'op_type': operate_type,
            'expires': expires,
        }
        return self._request('services/cloud_dl', 'query_task',
                             extra_params=params, **kwargs)
[ "def", "query_download_tasks", "(", "self", ",", "task_ids", ",", "operate_type", "=", "1", ",", "expires", "=", "None", ",", "*", "*", "kwargs", ")", ":", "params", "=", "{", "'task_ids'", ":", "','", ".", "join", "(", "map", "(", "str", ",", "task_ids", ")", ")", ",", "'op_type'", ":", "operate_type", ",", "'expires'", ":", "expires", ",", "}", "return", "self", ".", "_request", "(", "'services/cloud_dl'", ",", "'query_task'", ",", "extra_params", "=", "params", ",", "*", "*", "kwargs", ")" ]
Query offline download task information and progress by task ID.

        :param task_ids: list of task IDs to query
        :type task_ids: list or tuple
        :param operate_type:
                            * 0: query task information
                            * 1: query progress information, defaults to 1
        :param expires: request expiration time; if provided, it will be
                        validated.
        :type expires: int
        :return: Response object
[ "根据任务ID号,查询离线下载任务信息及进度信息。" ]
python
train
33.190476
openeemeter/eemeter
eemeter/caltrack/usage_per_day.py
https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/caltrack/usage_per_day.py#L718-L771
def get_too_few_non_zero_degree_day_warning( model_type, balance_point, degree_day_type, degree_days, minimum_non_zero ): """ Return an empty list or a single warning wrapped in a list regarding non-zero degree days for a set of degree days. Parameters ---------- model_type : :any:`str` Model type (e.g., ``'cdd_hdd'``). balance_point : :any:`float` The balance point in question. degree_day_type : :any:`str` The type of degree days (``'cdd'`` or ``'hdd'``). degree_days : :any:`pandas.Series` A series of degree day values. minimum_non_zero : :any:`int` Minimum allowable number of non-zero degree day values. Returns ------- warnings : :any:`list` of :any:`eemeter.EEMeterWarning` Empty list or list of single warning. """ warnings = [] n_non_zero = int((degree_days > 0).sum()) if n_non_zero < minimum_non_zero: warnings.append( EEMeterWarning( qualified_name=( "eemeter.caltrack_daily.{model_type}.too_few_non_zero_{degree_day_type}".format( model_type=model_type, degree_day_type=degree_day_type ) ), description=( "Number of non-zero daily {degree_day_type} values below accepted minimum." " Candidate fit not attempted.".format( degree_day_type=degree_day_type.upper() ) ), data={ "n_non_zero_{degree_day_type}".format( degree_day_type=degree_day_type ): n_non_zero, "minimum_non_zero_{degree_day_type}".format( degree_day_type=degree_day_type ): minimum_non_zero, "{degree_day_type}_balance_point".format( degree_day_type=degree_day_type ): balance_point, }, ) ) return warnings
[ "def", "get_too_few_non_zero_degree_day_warning", "(", "model_type", ",", "balance_point", ",", "degree_day_type", ",", "degree_days", ",", "minimum_non_zero", ")", ":", "warnings", "=", "[", "]", "n_non_zero", "=", "int", "(", "(", "degree_days", ">", "0", ")", ".", "sum", "(", ")", ")", "if", "n_non_zero", "<", "minimum_non_zero", ":", "warnings", ".", "append", "(", "EEMeterWarning", "(", "qualified_name", "=", "(", "\"eemeter.caltrack_daily.{model_type}.too_few_non_zero_{degree_day_type}\"", ".", "format", "(", "model_type", "=", "model_type", ",", "degree_day_type", "=", "degree_day_type", ")", ")", ",", "description", "=", "(", "\"Number of non-zero daily {degree_day_type} values below accepted minimum.\"", "\" Candidate fit not attempted.\"", ".", "format", "(", "degree_day_type", "=", "degree_day_type", ".", "upper", "(", ")", ")", ")", ",", "data", "=", "{", "\"n_non_zero_{degree_day_type}\"", ".", "format", "(", "degree_day_type", "=", "degree_day_type", ")", ":", "n_non_zero", ",", "\"minimum_non_zero_{degree_day_type}\"", ".", "format", "(", "degree_day_type", "=", "degree_day_type", ")", ":", "minimum_non_zero", ",", "\"{degree_day_type}_balance_point\"", ".", "format", "(", "degree_day_type", "=", "degree_day_type", ")", ":", "balance_point", ",", "}", ",", ")", ")", "return", "warnings" ]
Return an empty list or a single warning wrapped in a list regarding non-zero degree days for a set of degree days. Parameters ---------- model_type : :any:`str` Model type (e.g., ``'cdd_hdd'``). balance_point : :any:`float` The balance point in question. degree_day_type : :any:`str` The type of degree days (``'cdd'`` or ``'hdd'``). degree_days : :any:`pandas.Series` A series of degree day values. minimum_non_zero : :any:`int` Minimum allowable number of non-zero degree day values. Returns ------- warnings : :any:`list` of :any:`eemeter.EEMeterWarning` Empty list or list of single warning.
[ "Return", "an", "empty", "list", "or", "a", "single", "warning", "wrapped", "in", "a", "list", "regarding", "non", "-", "zero", "degree", "days", "for", "a", "set", "of", "degree", "days", "." ]
python
train
37.648148
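A hedged usage sketch for the warning helper above; the degree-day data is synthetic and the import path is inferred from the record's file path:

import pandas as pd
from eemeter.caltrack.usage_per_day import get_too_few_non_zero_degree_day_warning

degree_days = pd.Series([0.0, 0.0, 1.2, 0.0, 3.4])   # only two non-zero values
warnings = get_too_few_non_zero_degree_day_warning(
    model_type='cdd_hdd',
    balance_point=65,
    degree_day_type='cdd',
    degree_days=degree_days,
    minimum_non_zero=10,
)
# Two non-zero values < 10, so the list holds one EEMeterWarning;
# an empty list would mean the check passed.
print(len(warnings))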
HonzaKral/django-threadedcomments
threadedcomments/util.py
https://github.com/HonzaKral/django-threadedcomments/blob/7b36d9e0813fcbb634052990d1cf01bba8ef220f/threadedcomments/util.py#L37-L83
def annotate_tree_properties(comments): """ iterate through nodes and adds some magic properties to each of them representing opening list of children and closing it """ if not comments: return it = iter(comments) # get the first item, this will fail if no items ! old = next(it) # first item starts a new thread old.open = True last = set() for c in it: # if this comment has a parent, store its last child for future reference if old.last_child_id: last.add(old.last_child_id) # this is the last child, mark it if c.pk in last: c.last = True # increase the depth if c.depth > old.depth: c.open = True else: # c.depth <= old.depth # close some depths old.close = list(range(old.depth - c.depth)) # new thread if old.root_id != c.root_id: # close even the top depth old.close.append(len(old.close)) # and start a new thread c.open = True # empty the last set last = set() # iterate yield old old = c old.close = range(old.depth) yield old
[ "def", "annotate_tree_properties", "(", "comments", ")", ":", "if", "not", "comments", ":", "return", "it", "=", "iter", "(", "comments", ")", "# get the first item, this will fail if no items !", "old", "=", "next", "(", "it", ")", "# first item starts a new thread", "old", ".", "open", "=", "True", "last", "=", "set", "(", ")", "for", "c", "in", "it", ":", "# if this comment has a parent, store its last child for future reference", "if", "old", ".", "last_child_id", ":", "last", ".", "add", "(", "old", ".", "last_child_id", ")", "# this is the last child, mark it", "if", "c", ".", "pk", "in", "last", ":", "c", ".", "last", "=", "True", "# increase the depth", "if", "c", ".", "depth", ">", "old", ".", "depth", ":", "c", ".", "open", "=", "True", "else", ":", "# c.depth <= old.depth", "# close some depths", "old", ".", "close", "=", "list", "(", "range", "(", "old", ".", "depth", "-", "c", ".", "depth", ")", ")", "# new thread", "if", "old", ".", "root_id", "!=", "c", ".", "root_id", ":", "# close even the top depth", "old", ".", "close", ".", "append", "(", "len", "(", "old", ".", "close", ")", ")", "# and start a new thread", "c", ".", "open", "=", "True", "# empty the last set", "last", "=", "set", "(", ")", "# iterate", "yield", "old", "old", "=", "c", "old", ".", "close", "=", "range", "(", "old", ".", "depth", ")", "yield", "old" ]
iterate through nodes and adds some magic properties to each of them representing opening list of children and closing it
[ "iterate", "through", "nodes", "and", "adds", "some", "magic", "properties", "to", "each", "of", "them", "representing", "opening", "list", "of", "children", "and", "closing", "it" ]
python
train
26.06383
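To make the open/close/last annotations concrete, here is a small self-contained sketch; the SimpleNamespace stand-ins below only mimic the comment attributes the generator touches (pk, depth, root_id, last_child_id) and are not part of the library:

from types import SimpleNamespace
from threadedcomments.util import annotate_tree_properties

def fake_comment(pk, depth, root_id, last_child_id=None):
    return SimpleNamespace(pk=pk, depth=depth, root_id=root_id,
                           last_child_id=last_child_id)

thread = [
    fake_comment(1, depth=0, root_id=1, last_child_id=2),  # parent
    fake_comment(2, depth=1, root_id=1),                   # its last child
    fake_comment(3, depth=0, root_id=3),                   # new top-level thread
]
for c in annotate_tree_properties(thread):
    print(c.pk, getattr(c, 'open', False), list(getattr(c, 'close', [])),
          getattr(c, 'last', False))
# 1 True [] False / 2 True [0, 1] True / 3 True [] False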
google/grumpy
compiler/stmt.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/compiler/stmt.py#L496-L566
def visit_function_inline(self, node): """Returns an GeneratedExpr for a function with the given body.""" # First pass collects the names of locals used in this function. Do this in # a separate pass so that we know whether to resolve a name as a local or a # global during the second pass. func_visitor = block.FunctionBlockVisitor(node) for child in node.body: func_visitor.visit(child) func_block = block.FunctionBlock(self.block, node.name, func_visitor.vars, func_visitor.is_generator) visitor = StatementVisitor(func_block, self.future_node) # Indent so that the function body is aligned with the goto labels. with visitor.writer.indent_block(): visitor._visit_each(node.body) # pylint: disable=protected-access result = self.block.alloc_temp() with self.block.alloc_temp('[]πg.Param') as func_args: args = node.args argc = len(args.args) self.writer.write('{} = make([]πg.Param, {})'.format( func_args.expr, argc)) # The list of defaults only contains args for which a default value is # specified so pad it with None to make it the same length as args. defaults = [None] * (argc - len(args.defaults)) + args.defaults for i, (a, d) in enumerate(zip(args.args, defaults)): with self.visit_expr(d) if d else expr.nil_expr as default: tmpl = '$args[$i] = πg.Param{Name: $name, Def: $default}' self.writer.write_tmpl(tmpl, args=func_args.expr, i=i, name=util.go_str(a.arg), default=default.expr) flags = [] if args.vararg: flags.append('πg.CodeFlagVarArg') if args.kwarg: flags.append('πg.CodeFlagKWArg') # The function object gets written to a temporary writer because we need # it as an expression that we subsequently bind to some variable. self.writer.write_tmpl( '$result = πg.NewFunction(πg.NewCode($name, $filename, $args, ' '$flags, func(πF *πg.Frame, πArgs []*πg.Object) ' '(*πg.Object, *πg.BaseException) {', result=result.name, name=util.go_str(node.name), filename=util.go_str(self.block.root.filename), args=func_args.expr, flags=' | '.join(flags) if flags else 0) with self.writer.indent_block(): for var in func_block.vars.values(): if var.type != block.Var.TYPE_GLOBAL: fmt = 'var {0} *πg.Object = {1}; _ = {0}' self.writer.write(fmt.format( util.adjust_local_name(var.name), var.init_expr)) self.writer.write_temp_decls(func_block) self.writer.write('var πR *πg.Object; _ = πR') self.writer.write('var πE *πg.BaseException; _ = πE') if func_block.is_generator: self.writer.write( 'return πg.NewGenerator(πF, func(πSent *πg.Object) ' '(*πg.Object, *πg.BaseException) {') with self.writer.indent_block(): self.writer.write_block(func_block, visitor.writer.getvalue()) self.writer.write('return nil, πE') self.writer.write('}).ToObject(), nil') else: self.writer.write_block(func_block, visitor.writer.getvalue()) self.writer.write(textwrap.dedent("""\ if πE != nil { \tπR = nil } else if πR == nil { \tπR = πg.None } return πR, πE""")) self.writer.write('}), πF.Globals()).ToObject()') return result
[ "def", "visit_function_inline", "(", "self", ",", "node", ")", ":", "# First pass collects the names of locals used in this function. Do this in", "# a separate pass so that we know whether to resolve a name as a local or a", "# global during the second pass.", "func_visitor", "=", "block", ".", "FunctionBlockVisitor", "(", "node", ")", "for", "child", "in", "node", ".", "body", ":", "func_visitor", ".", "visit", "(", "child", ")", "func_block", "=", "block", ".", "FunctionBlock", "(", "self", ".", "block", ",", "node", ".", "name", ",", "func_visitor", ".", "vars", ",", "func_visitor", ".", "is_generator", ")", "visitor", "=", "StatementVisitor", "(", "func_block", ",", "self", ".", "future_node", ")", "# Indent so that the function body is aligned with the goto labels.", "with", "visitor", ".", "writer", ".", "indent_block", "(", ")", ":", "visitor", ".", "_visit_each", "(", "node", ".", "body", ")", "# pylint: disable=protected-access", "result", "=", "self", ".", "block", ".", "alloc_temp", "(", ")", "with", "self", ".", "block", ".", "alloc_temp", "(", "'[]πg.Param')", " ", "s ", "unc_args:", "", "args", "=", "node", ".", "args", "argc", "=", "len", "(", "args", ".", "args", ")", "self", ".", "writer", ".", "write", "(", "'{} = make([]πg.Param, {})'.", "f", "ormat(", "", "func_args", ".", "expr", ",", "argc", ")", ")", "# The list of defaults only contains args for which a default value is", "# specified so pad it with None to make it the same length as args.", "defaults", "=", "[", "None", "]", "*", "(", "argc", "-", "len", "(", "args", ".", "defaults", ")", ")", "+", "args", ".", "defaults", "for", "i", ",", "(", "a", ",", "d", ")", "in", "enumerate", "(", "zip", "(", "args", ".", "args", ",", "defaults", ")", ")", ":", "with", "self", ".", "visit_expr", "(", "d", ")", "if", "d", "else", "expr", ".", "nil_expr", "as", "default", ":", "tmpl", "=", "'$args[$i] = πg.Param{Name: $name, Def: $default}'", "self", ".", "writer", ".", "write_tmpl", "(", "tmpl", ",", "args", "=", "func_args", ".", "expr", ",", "i", "=", "i", ",", "name", "=", "util", ".", "go_str", "(", "a", ".", "arg", ")", ",", "default", "=", "default", ".", "expr", ")", "flags", "=", "[", "]", "if", "args", ".", "vararg", ":", "flags", ".", "append", "(", "'πg.CodeFlagVarArg')", "", "if", "args", ".", "kwarg", ":", "flags", ".", "append", "(", "'πg.CodeFlagKWArg')", "", "# The function object gets written to a temporary writer because we need", "# it as an expression that we subsequently bind to some variable.", "self", ".", "writer", ".", "write_tmpl", "(", "'$result = πg.NewFunction(πg.NewCode($name, $filename, $args, '", "'$flags, func(πF *πg.Frame, πArgs []*πg.Object) '", "'(*πg.Object, *πg.BaseException) {',", "", "result", "=", "result", ".", "name", ",", "name", "=", "util", ".", "go_str", "(", "node", ".", "name", ")", ",", "filename", "=", "util", ".", "go_str", "(", "self", ".", "block", ".", "root", ".", "filename", ")", ",", "args", "=", "func_args", ".", "expr", ",", "flags", "=", "' | '", ".", "join", "(", "flags", ")", "if", "flags", "else", "0", ")", "with", "self", ".", "writer", ".", "indent_block", "(", ")", ":", "for", "var", "in", "func_block", ".", "vars", ".", "values", "(", ")", ":", "if", "var", ".", "type", "!=", "block", ".", "Var", ".", "TYPE_GLOBAL", ":", "fmt", "=", "'var {0} *πg.Object = {1}; _ = {0}'", "self", ".", "writer", ".", "write", "(", "fmt", ".", "format", "(", "util", ".", "adjust_local_name", "(", "var", ".", "name", ")", ",", "var", ".", "init_expr", ")", ")", "self", ".", 
"writer", ".", "write_temp_decls", "(", "func_block", ")", "self", ".", "writer", ".", "write", "(", "'var πR *πg.Object; _ = πR')", "", "self", ".", "writer", ".", "write", "(", "'var πE *πg.BaseException; _ = πE')", "", "if", "func_block", ".", "is_generator", ":", "self", ".", "writer", ".", "write", "(", "'return πg.NewGenerator(πF, func(πSent *πg.Object) '", "'(*πg.Object, *πg.BaseException) {')", "", "with", "self", ".", "writer", ".", "indent_block", "(", ")", ":", "self", ".", "writer", ".", "write_block", "(", "func_block", ",", "visitor", ".", "writer", ".", "getvalue", "(", ")", ")", "self", ".", "writer", ".", "write", "(", "'return nil, πE')", "", "self", ".", "writer", ".", "write", "(", "'}).ToObject(), nil'", ")", "else", ":", "self", ".", "writer", ".", "write_block", "(", "func_block", ",", "visitor", ".", "writer", ".", "getvalue", "(", ")", ")", "self", ".", "writer", ".", "write", "(", "textwrap", ".", "dedent", "(", "\"\"\"\\\n if πE != nil {\n \\tπR = nil\n } else if πR == nil {\n \\tπR = πg.None\n }\n return πR, πE\"\"\"))", "", "", "self", ".", "writer", ".", "write", "(", "'}), πF.Globals()).ToObject()')", "", "return", "result" ]
Returns an GeneratedExpr for a function with the given body.
[ "Returns", "an", "GeneratedExpr", "for", "a", "function", "with", "the", "given", "body", "." ]
python
valid
48.887324
pandas-dev/pandas
pandas/core/dtypes/common.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/dtypes/common.py#L1600-L1663
def is_bool_dtype(arr_or_dtype): """ Check whether the provided array or dtype is of a boolean dtype. Parameters ---------- arr_or_dtype : array-like The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a boolean dtype. Notes ----- An ExtensionArray is considered boolean when the ``_is_boolean`` attribute is set to True. Examples -------- >>> is_bool_dtype(str) False >>> is_bool_dtype(int) False >>> is_bool_dtype(bool) True >>> is_bool_dtype(np.bool) True >>> is_bool_dtype(np.array(['a', 'b'])) False >>> is_bool_dtype(pd.Series([1, 2])) False >>> is_bool_dtype(np.array([True, False])) True >>> is_bool_dtype(pd.Categorical([True, False])) True >>> is_bool_dtype(pd.SparseArray([True, False])) True """ if arr_or_dtype is None: return False try: dtype = _get_dtype(arr_or_dtype) except TypeError: return False if isinstance(arr_or_dtype, CategoricalDtype): arr_or_dtype = arr_or_dtype.categories # now we use the special definition for Index if isinstance(arr_or_dtype, ABCIndexClass): # TODO(jreback) # we don't have a boolean Index class # so its object, we need to infer to # guess this return (arr_or_dtype.is_object and arr_or_dtype.inferred_type == 'boolean') elif is_extension_array_dtype(arr_or_dtype): dtype = getattr(arr_or_dtype, 'dtype', arr_or_dtype) return dtype._is_boolean return issubclass(dtype.type, np.bool_)
[ "def", "is_bool_dtype", "(", "arr_or_dtype", ")", ":", "if", "arr_or_dtype", "is", "None", ":", "return", "False", "try", ":", "dtype", "=", "_get_dtype", "(", "arr_or_dtype", ")", "except", "TypeError", ":", "return", "False", "if", "isinstance", "(", "arr_or_dtype", ",", "CategoricalDtype", ")", ":", "arr_or_dtype", "=", "arr_or_dtype", ".", "categories", "# now we use the special definition for Index", "if", "isinstance", "(", "arr_or_dtype", ",", "ABCIndexClass", ")", ":", "# TODO(jreback)", "# we don't have a boolean Index class", "# so its object, we need to infer to", "# guess this", "return", "(", "arr_or_dtype", ".", "is_object", "and", "arr_or_dtype", ".", "inferred_type", "==", "'boolean'", ")", "elif", "is_extension_array_dtype", "(", "arr_or_dtype", ")", ":", "dtype", "=", "getattr", "(", "arr_or_dtype", ",", "'dtype'", ",", "arr_or_dtype", ")", "return", "dtype", ".", "_is_boolean", "return", "issubclass", "(", "dtype", ".", "type", ",", "np", ".", "bool_", ")" ]
Check whether the provided array or dtype is of a boolean dtype. Parameters ---------- arr_or_dtype : array-like The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a boolean dtype. Notes ----- An ExtensionArray is considered boolean when the ``_is_boolean`` attribute is set to True. Examples -------- >>> is_bool_dtype(str) False >>> is_bool_dtype(int) False >>> is_bool_dtype(bool) True >>> is_bool_dtype(np.bool) True >>> is_bool_dtype(np.array(['a', 'b'])) False >>> is_bool_dtype(pd.Series([1, 2])) False >>> is_bool_dtype(np.array([True, False])) True >>> is_bool_dtype(pd.Categorical([True, False])) True >>> is_bool_dtype(pd.SparseArray([True, False])) True
[ "Check", "whether", "the", "provided", "array", "or", "dtype", "is", "of", "a", "boolean", "dtype", "." ]
python
train
25.109375
zhmcclient/python-zhmcclient
zhmcclient/_partition.py
https://github.com/zhmcclient/python-zhmcclient/blob/9657563e5d9184c51d3c903442a58b9725fdf335/zhmcclient/_partition.py#L254-L270
def hbas(self): """ :class:`~zhmcclient.HbaManager`: Access to the :term:`HBAs <HBA>` in this Partition. If the "dpm-storage-management" feature is enabled, this property is `None`. """ # We do here some lazy loading. if not self._hbas: try: dpm_sm = self.feature_enabled('dpm-storage-management') except ValueError: dpm_sm = False if not dpm_sm: self._hbas = HbaManager(self) return self._hbas
[ "def", "hbas", "(", "self", ")", ":", "# We do here some lazy loading.", "if", "not", "self", ".", "_hbas", ":", "try", ":", "dpm_sm", "=", "self", ".", "feature_enabled", "(", "'dpm-storage-management'", ")", "except", "ValueError", ":", "dpm_sm", "=", "False", "if", "not", "dpm_sm", ":", "self", ".", "_hbas", "=", "HbaManager", "(", "self", ")", "return", "self", ".", "_hbas" ]
:class:`~zhmcclient.HbaManager`: Access to the :term:`HBAs <HBA>` in this Partition. If the "dpm-storage-management" feature is enabled, this property is `None`.
[ ":", "class", ":", "~zhmcclient", ".", "HbaManager", ":", "Access", "to", "the", ":", "term", ":", "HBAs", "<HBA", ">", "in", "this", "Partition", "." ]
python
train
31.470588
proycon/pynlpl
pynlpl/formats/folia.py
https://github.com/proycon/pynlpl/blob/7707f69a91caaa6cde037f0d0379f1d42500a68b/pynlpl/formats/folia.py#L3052-L3078
def annotation(self, type, set=None): """Obtain a single annotation element. A further restriction can be made based on set. Arguments: Class (class): The class to select; any python class (not instance) subclassed off :class:`AbstractElement` Set (str): The set to match against, only elements pertaining to this set will be returned. If set to None (default), all elements regardless of set will be returned. Returns: An element (instance derived from :class:`AbstractElement`) Example:: sense = word.annotation(folia.Sense, 'http://some/path/cornetto').cls See also: :meth:`AllowTokenAnnotation.annotations` :meth:`AbstractElement.select` Raises: :class:`NoSuchAnnotation` if no such annotation exists """ """Will return a **single** annotation (even if there are multiple). Raises a ``NoSuchAnnotation`` exception if none was found""" for e in self.select(type,set,True,default_ignore_annotations): return e raise NoSuchAnnotation()
[ "def", "annotation", "(", "self", ",", "type", ",", "set", "=", "None", ")", ":", "\"\"\"Will return a **single** annotation (even if there are multiple). Raises a ``NoSuchAnnotation`` exception if none was found\"\"\"", "for", "e", "in", "self", ".", "select", "(", "type", ",", "set", ",", "True", ",", "default_ignore_annotations", ")", ":", "return", "e", "raise", "NoSuchAnnotation", "(", ")" ]
Obtain a single annotation element. A further restriction can be made based on set. Arguments: Class (class): The class to select; any python class (not instance) subclassed off :class:`AbstractElement` Set (str): The set to match against, only elements pertaining to this set will be returned. If set to None (default), all elements regardless of set will be returned. Returns: An element (instance derived from :class:`AbstractElement`) Example:: sense = word.annotation(folia.Sense, 'http://some/path/cornetto').cls See also: :meth:`AllowTokenAnnotation.annotations` :meth:`AbstractElement.select` Raises: :class:`NoSuchAnnotation` if no such annotation exists
[ "Obtain", "a", "single", "annotation", "element", "." ]
python
train
40.777778
happyleavesaoc/python-limitlessled
limitlessled/group/commands/legacy.py
https://github.com/happyleavesaoc/python-limitlessled/blob/70307c2bf8c91430a99579d2ad18b228ec7a8488/limitlessled/group/commands/legacy.py#L210-L217
def brightness(self, brightness): """ Build command for setting the brightness of the led. :param brightness: Value to set (0.0-1.0). :return: The command. """ return self._build_command(0x4E, self.convert_brightness(brightness), select=True, select_command=self.on())
[ "def", "brightness", "(", "self", ",", "brightness", ")", ":", "return", "self", ".", "_build_command", "(", "0x4E", ",", "self", ".", "convert_brightness", "(", "brightness", ")", ",", "select", "=", "True", ",", "select_command", "=", "self", ".", "on", "(", ")", ")" ]
Build command for setting the brightness of the led. :param brightness: Value to set (0.0-1.0). :return: The command.
[ "Build", "command", "for", "setting", "the", "brightness", "of", "the", "led", ".", ":", "param", "brightness", ":", "Value", "to", "set", "(", "0", ".", "0", "-", "1", ".", "0", ")", ".", ":", "return", ":", "The", "command", "." ]
python
train
43
IdentityPython/SATOSA
src/satosa/plugin_loader.py
https://github.com/IdentityPython/SATOSA/blob/49da5d4c0ac1a5ebf1a71b4f7aaf04f0e52d8fdb/src/satosa/plugin_loader.py#L30-L47
def load_backends(config, callback, internal_attributes): """ Load all backend modules specified in the config :type config: satosa.satosa_config.SATOSAConfig :type callback: (satosa.context.Context, satosa.internal.InternalData) -> satosa.response.Response :type internal_attributes: dict[string, dict[str, str | list[str]]] :rtype: Sequence[satosa.backends.base.BackendModule] :param config: The configuration of the satosa proxy :param callback: Function that will be called by the backend after the authentication is done. :return: A list of backend modules """ backend_modules = _load_plugins(config.get("CUSTOM_PLUGIN_MODULE_PATHS"), config["BACKEND_MODULES"], backend_filter, config["BASE"], internal_attributes, callback) logger.info("Setup backends: %s" % [backend.name for backend in backend_modules]) return backend_modules
[ "def", "load_backends", "(", "config", ",", "callback", ",", "internal_attributes", ")", ":", "backend_modules", "=", "_load_plugins", "(", "config", ".", "get", "(", "\"CUSTOM_PLUGIN_MODULE_PATHS\"", ")", ",", "config", "[", "\"BACKEND_MODULES\"", "]", ",", "backend_filter", ",", "config", "[", "\"BASE\"", "]", ",", "internal_attributes", ",", "callback", ")", "logger", ".", "info", "(", "\"Setup backends: %s\"", "%", "[", "backend", ".", "name", "for", "backend", "in", "backend_modules", "]", ")", "return", "backend_modules" ]
Load all backend modules specified in the config :type config: satosa.satosa_config.SATOSAConfig :type callback: (satosa.context.Context, satosa.internal.InternalData) -> satosa.response.Response :type internal_attributes: dict[string, dict[str, str | list[str]]] :rtype: Sequence[satosa.backends.base.BackendModule] :param config: The configuration of the satosa proxy :param callback: Function that will be called by the backend after the authentication is done. :return: A list of backend modules
[ "Load", "all", "backend", "modules", "specified", "in", "the", "config" ]
python
train
50.611111
aio-libs/aioredis
aioredis/commands/hash.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/hash.py#L40-L43
def hincrbyfloat(self, key, field, increment=1.0): """Increment the float value of a hash field by the given number.""" fut = self.execute(b'HINCRBYFLOAT', key, field, increment) return wait_convert(fut, float)
[ "def", "hincrbyfloat", "(", "self", ",", "key", ",", "field", ",", "increment", "=", "1.0", ")", ":", "fut", "=", "self", ".", "execute", "(", "b'HINCRBYFLOAT'", ",", "key", ",", "field", ",", "increment", ")", "return", "wait_convert", "(", "fut", ",", "float", ")" ]
Increment the float value of a hash field by the given number.
[ "Increment", "the", "float", "value", "of", "a", "hash", "field", "by", "the", "given", "number", "." ]
python
train
57.75
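A brief usage sketch for the command above, assuming the aioredis 1.x connection API that this module belongs to (the Redis URL and key names are placeholders):

import asyncio
import aioredis

async def main():
    redis = await aioredis.create_redis_pool('redis://localhost')
    await redis.hset('product:1', 'price', 10.0)
    new_price = await redis.hincrbyfloat('product:1', 'price', 2.5)
    print(new_price)  # 12.5 -- wait_convert hands back a float, not bytes
    redis.close()
    await redis.wait_closed()

asyncio.run(main())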
florianpaquet/mease
mease/server.py
https://github.com/florianpaquet/mease/blob/b9fbd08bbe162c8890c2a2124674371170c319ef/mease/server.py#L129-L140
def run_server(self): """ Runs the WebSocket server """ self.protocol = MeaseWebSocketServerProtocol reactor.listenTCP(port=self.port, factory=self, interface=self.host) logger.info("Websocket server listening on {address}".format( address=self.address)) reactor.run()
[ "def", "run_server", "(", "self", ")", ":", "self", ".", "protocol", "=", "MeaseWebSocketServerProtocol", "reactor", ".", "listenTCP", "(", "port", "=", "self", ".", "port", ",", "factory", "=", "self", ",", "interface", "=", "self", ".", "host", ")", "logger", ".", "info", "(", "\"Websocket server listening on {address}\"", ".", "format", "(", "address", "=", "self", ".", "address", ")", ")", "reactor", ".", "run", "(", ")" ]
Runs the WebSocket server
[ "Runs", "the", "WebSocket", "server" ]
python
train
27.333333
avihad/twistes
twistes/parser.py
https://github.com/avihad/twistes/blob/9ab8f5aa088b8886aefe3dec85a400e5035e034a/twistes/parser.py#L70-L91
def _parse_string_host(host_str): """ Parse host string into a dictionary host :param host_str: :return: """ host_str = EsParser._fix_host_prefix(host_str) parsed_url = urlparse(host_str) host = {HostParsing.HOST: parsed_url.hostname} if parsed_url.port: host[HostParsing.PORT] = parsed_url.port if parsed_url.scheme == HostParsing.HTTPS: host[HostParsing.PORT] = parsed_url.port or EsParser.SSL_DEFAULT_PORT host[HostParsing.USE_SSL] = True host[HostParsing.SCHEME] = HostParsing.HTTPS elif parsed_url.scheme: host[HostParsing.SCHEME] = parsed_url.scheme if parsed_url.username or parsed_url.password: host[HostParsing.HTTP_AUTH] = '%s:%s' % (parsed_url.username, parsed_url.password) if parsed_url.path and parsed_url.path != '/': host[HostParsing.URL_PREFIX] = parsed_url.path return host
[ "def", "_parse_string_host", "(", "host_str", ")", ":", "host_str", "=", "EsParser", ".", "_fix_host_prefix", "(", "host_str", ")", "parsed_url", "=", "urlparse", "(", "host_str", ")", "host", "=", "{", "HostParsing", ".", "HOST", ":", "parsed_url", ".", "hostname", "}", "if", "parsed_url", ".", "port", ":", "host", "[", "HostParsing", ".", "PORT", "]", "=", "parsed_url", ".", "port", "if", "parsed_url", ".", "scheme", "==", "HostParsing", ".", "HTTPS", ":", "host", "[", "HostParsing", ".", "PORT", "]", "=", "parsed_url", ".", "port", "or", "EsParser", ".", "SSL_DEFAULT_PORT", "host", "[", "HostParsing", ".", "USE_SSL", "]", "=", "True", "host", "[", "HostParsing", ".", "SCHEME", "]", "=", "HostParsing", ".", "HTTPS", "elif", "parsed_url", ".", "scheme", ":", "host", "[", "HostParsing", ".", "SCHEME", "]", "=", "parsed_url", ".", "scheme", "if", "parsed_url", ".", "username", "or", "parsed_url", ".", "password", ":", "host", "[", "HostParsing", ".", "HTTP_AUTH", "]", "=", "'%s:%s'", "%", "(", "parsed_url", ".", "username", ",", "parsed_url", ".", "password", ")", "if", "parsed_url", ".", "path", "and", "parsed_url", ".", "path", "!=", "'/'", ":", "host", "[", "HostParsing", ".", "URL_PREFIX", "]", "=", "parsed_url", ".", "path", "return", "host" ]
Parse host string into a dictionary host :param host_str: :return:
[ "Parse", "host", "string", "into", "a", "dictionary", "host", ":", "param", "host_str", ":", ":", "return", ":" ]
python
train
43.909091
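An illustrative call to the (normally internal) parser above; the URL is made up, and the exact dictionary keys come from the HostParsing constants rather than being spelled out here:

from twistes.parser import EsParser

host = EsParser._parse_string_host('https://user:secret@es.example.com:9200/prefix')
# Expect entries for the hostname, port 9200, an SSL/scheme flag for https,
# the 'user:secret' auth pair and the '/prefix' url prefix.
print(host)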
tensorflow/datasets
tensorflow_datasets/core/features/text/text_encoder.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/text/text_encoder.py#L463-L470
def _make_reserved_tokens_re(reserved_tokens): """Constructs compiled regex to parse out reserved tokens.""" if not reserved_tokens: return None escaped_tokens = [_re_escape(rt) for rt in reserved_tokens] pattern = "(%s)" % "|".join(escaped_tokens) reserved_tokens_re = _re_compile(pattern) return reserved_tokens_re
[ "def", "_make_reserved_tokens_re", "(", "reserved_tokens", ")", ":", "if", "not", "reserved_tokens", ":", "return", "None", "escaped_tokens", "=", "[", "_re_escape", "(", "rt", ")", "for", "rt", "in", "reserved_tokens", "]", "pattern", "=", "\"(%s)\"", "%", "\"|\"", ".", "join", "(", "escaped_tokens", ")", "reserved_tokens_re", "=", "_re_compile", "(", "pattern", ")", "return", "reserved_tokens_re" ]
Constructs compiled regex to parse out reserved tokens.
[ "Constructs", "compiled", "regex", "to", "parse", "out", "reserved", "tokens", "." ]
python
train
40.625
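The helper above builds a single alternation pattern with a capturing group so that reserved tokens survive a split; a plain-re sketch of the same idea (the token strings are invented):

import re

reserved_tokens = ['<EOS>', '<UNK>']
pattern = '(%s)' % '|'.join(re.escape(t) for t in reserved_tokens)
print(re.split(pattern, 'hello <EOS> world'))
# ['hello ', '<EOS>', ' world'] -- the capturing group keeps the reserved
# token as its own piece instead of discarding it.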
sibirrer/lenstronomy
lenstronomy/ImSim/image_model.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/ImSim/image_model.py#L296-L304
def reduced_chi2(self, model, error_map=0): """ returns reduced chi2 :param model: :param error_map: :return: """ chi2 = self.reduced_residuals(model, error_map) return np.sum(chi2**2) / self.num_data_evaluate()
[ "def", "reduced_chi2", "(", "self", ",", "model", ",", "error_map", "=", "0", ")", ":", "chi2", "=", "self", ".", "reduced_residuals", "(", "model", ",", "error_map", ")", "return", "np", ".", "sum", "(", "chi2", "**", "2", ")", "/", "self", ".", "num_data_evaluate", "(", ")" ]
returns reduced chi2 :param model: :param error_map: :return:
[ "returns", "reduced", "chi2", ":", "param", "model", ":", ":", "param", "error_map", ":", ":", "return", ":" ]
python
train
29.666667
Locu/chronology
kronos/kronos/core/validator.py
https://github.com/Locu/chronology/blob/0edf3ee3286c76e242cbf92436ffa9c836b428e2/kronos/kronos/core/validator.py#L18-L43
def _validate_and_get_value(options, options_name, key, _type): """ Check that `options` has a value for `key` with type `_type`. Return that value. `options_name` is a string representing a human-readable name for `options` to be used when printing errors. """ if isinstance(options, dict): has = lambda k: k in options get = lambda k: options[k] elif isinstance(options, object): has = lambda k: hasattr(options, k) get = lambda k: getattr(options, k) else: raise ImproperlyConfigured( '`{}` must be a dictionary-like object.'.format(options_name)) if not has(key): raise ImproperlyConfigured( '`{}` must be specified in `{}`'.format(key, options_name)) value = get(key) if not isinstance(value, _type): raise ImproperlyConfigured( '`{}` in `{}` must be a {}'.format(key, options_name, repr(_type))) return value
[ "def", "_validate_and_get_value", "(", "options", ",", "options_name", ",", "key", ",", "_type", ")", ":", "if", "isinstance", "(", "options", ",", "dict", ")", ":", "has", "=", "lambda", "k", ":", "k", "in", "options", "get", "=", "lambda", "k", ":", "options", "[", "k", "]", "elif", "isinstance", "(", "options", ",", "object", ")", ":", "has", "=", "lambda", "k", ":", "hasattr", "(", "options", ",", "k", ")", "get", "=", "lambda", "k", ":", "getattr", "(", "options", ",", "k", ")", "else", ":", "raise", "ImproperlyConfigured", "(", "'`{}` must be a dictionary-like object.'", ".", "format", "(", "options_name", ")", ")", "if", "not", "has", "(", "key", ")", ":", "raise", "ImproperlyConfigured", "(", "'`{}` must be specified in `{}`'", ".", "format", "(", "key", ",", "options_name", ")", ")", "value", "=", "get", "(", "key", ")", "if", "not", "isinstance", "(", "value", ",", "_type", ")", ":", "raise", "ImproperlyConfigured", "(", "'`{}` in `{}` must be a {}'", ".", "format", "(", "key", ",", "options_name", ",", "repr", "(", "_type", ")", ")", ")", "return", "value" ]
Check that `options` has a value for `key` with type `_type`. Return that value. `options_name` is a string representing a human-readable name for `options` to be used when printing errors.
[ "Check", "that", "options", "has", "a", "value", "for", "key", "with", "type", "_type", ".", "Return", "that", "value", ".", "options_name", "is", "a", "string", "representing", "a", "human", "-", "readable", "name", "for", "options", "to", "be", "used", "when", "printing", "errors", "." ]
python
train
33.384615
tzutalin/labelImg
libs/canvas.py
https://github.com/tzutalin/labelImg/blob/6afd15aa88f89f41254e0004ed219b3965eb2c0d/libs/canvas.py#L104-L208
def mouseMoveEvent(self, ev): """Update line with last point and current coordinates.""" pos = self.transformPos(ev.pos()) # Update coordinates in status bar if image is opened window = self.parent().window() if window.filePath is not None: self.parent().window().labelCoordinates.setText( 'X: %d; Y: %d' % (pos.x(), pos.y())) # Polygon drawing. if self.drawing(): self.overrideCursor(CURSOR_DRAW) if self.current: color = self.drawingLineColor if self.outOfPixmap(pos): # Don't allow the user to draw outside the pixmap. # Project the point to the pixmap's edges. pos = self.intersectionPoint(self.current[-1], pos) elif len(self.current) > 1 and self.closeEnough(pos, self.current[0]): # Attract line to starting point and colorise to alert the # user: pos = self.current[0] color = self.current.line_color self.overrideCursor(CURSOR_POINT) self.current.highlightVertex(0, Shape.NEAR_VERTEX) if self.drawSquare: initPos = self.current[0] minX = initPos.x() minY = initPos.y() min_size = min(abs(pos.x() - minX), abs(pos.y() - minY)) directionX = -1 if pos.x() - minX < 0 else 1 directionY = -1 if pos.y() - minY < 0 else 1 self.line[1] = QPointF(minX + directionX * min_size, minY + directionY * min_size) else: self.line[1] = pos self.line.line_color = color self.prevPoint = QPointF() self.current.highlightClear() else: self.prevPoint = pos self.repaint() return # Polygon copy moving. if Qt.RightButton & ev.buttons(): if self.selectedShapeCopy and self.prevPoint: self.overrideCursor(CURSOR_MOVE) self.boundedMoveShape(self.selectedShapeCopy, pos) self.repaint() elif self.selectedShape: self.selectedShapeCopy = self.selectedShape.copy() self.repaint() return # Polygon/Vertex moving. if Qt.LeftButton & ev.buttons(): if self.selectedVertex(): self.boundedMoveVertex(pos) self.shapeMoved.emit() self.repaint() elif self.selectedShape and self.prevPoint: self.overrideCursor(CURSOR_MOVE) self.boundedMoveShape(self.selectedShape, pos) self.shapeMoved.emit() self.repaint() return # Just hovering over the canvas, 2 posibilities: # - Highlight shapes # - Highlight vertex # Update shape/vertex fill and tooltip value accordingly. self.setToolTip("Image") for shape in reversed([s for s in self.shapes if self.isVisible(s)]): # Look for a nearby vertex to highlight. If that fails, # check if we happen to be inside a shape. index = shape.nearestVertex(pos, self.epsilon) if index is not None: if self.selectedVertex(): self.hShape.highlightClear() self.hVertex, self.hShape = index, shape shape.highlightVertex(index, shape.MOVE_VERTEX) self.overrideCursor(CURSOR_POINT) self.setToolTip("Click & drag to move point") self.setStatusTip(self.toolTip()) self.update() break elif shape.containsPoint(pos): if self.selectedVertex(): self.hShape.highlightClear() self.hVertex, self.hShape = None, shape self.setToolTip( "Click & drag to move shape '%s'" % shape.label) self.setStatusTip(self.toolTip()) self.overrideCursor(CURSOR_GRAB) self.update() break else: # Nothing found, clear highlights, reset state. if self.hShape: self.hShape.highlightClear() self.update() self.hVertex, self.hShape = None, None self.overrideCursor(CURSOR_DEFAULT)
[ "def", "mouseMoveEvent", "(", "self", ",", "ev", ")", ":", "pos", "=", "self", ".", "transformPos", "(", "ev", ".", "pos", "(", ")", ")", "# Update coordinates in status bar if image is opened", "window", "=", "self", ".", "parent", "(", ")", ".", "window", "(", ")", "if", "window", ".", "filePath", "is", "not", "None", ":", "self", ".", "parent", "(", ")", ".", "window", "(", ")", ".", "labelCoordinates", ".", "setText", "(", "'X: %d; Y: %d'", "%", "(", "pos", ".", "x", "(", ")", ",", "pos", ".", "y", "(", ")", ")", ")", "# Polygon drawing.", "if", "self", ".", "drawing", "(", ")", ":", "self", ".", "overrideCursor", "(", "CURSOR_DRAW", ")", "if", "self", ".", "current", ":", "color", "=", "self", ".", "drawingLineColor", "if", "self", ".", "outOfPixmap", "(", "pos", ")", ":", "# Don't allow the user to draw outside the pixmap.", "# Project the point to the pixmap's edges.", "pos", "=", "self", ".", "intersectionPoint", "(", "self", ".", "current", "[", "-", "1", "]", ",", "pos", ")", "elif", "len", "(", "self", ".", "current", ")", ">", "1", "and", "self", ".", "closeEnough", "(", "pos", ",", "self", ".", "current", "[", "0", "]", ")", ":", "# Attract line to starting point and colorise to alert the", "# user:", "pos", "=", "self", ".", "current", "[", "0", "]", "color", "=", "self", ".", "current", ".", "line_color", "self", ".", "overrideCursor", "(", "CURSOR_POINT", ")", "self", ".", "current", ".", "highlightVertex", "(", "0", ",", "Shape", ".", "NEAR_VERTEX", ")", "if", "self", ".", "drawSquare", ":", "initPos", "=", "self", ".", "current", "[", "0", "]", "minX", "=", "initPos", ".", "x", "(", ")", "minY", "=", "initPos", ".", "y", "(", ")", "min_size", "=", "min", "(", "abs", "(", "pos", ".", "x", "(", ")", "-", "minX", ")", ",", "abs", "(", "pos", ".", "y", "(", ")", "-", "minY", ")", ")", "directionX", "=", "-", "1", "if", "pos", ".", "x", "(", ")", "-", "minX", "<", "0", "else", "1", "directionY", "=", "-", "1", "if", "pos", ".", "y", "(", ")", "-", "minY", "<", "0", "else", "1", "self", ".", "line", "[", "1", "]", "=", "QPointF", "(", "minX", "+", "directionX", "*", "min_size", ",", "minY", "+", "directionY", "*", "min_size", ")", "else", ":", "self", ".", "line", "[", "1", "]", "=", "pos", "self", ".", "line", ".", "line_color", "=", "color", "self", ".", "prevPoint", "=", "QPointF", "(", ")", "self", ".", "current", ".", "highlightClear", "(", ")", "else", ":", "self", ".", "prevPoint", "=", "pos", "self", ".", "repaint", "(", ")", "return", "# Polygon copy moving.", "if", "Qt", ".", "RightButton", "&", "ev", ".", "buttons", "(", ")", ":", "if", "self", ".", "selectedShapeCopy", "and", "self", ".", "prevPoint", ":", "self", ".", "overrideCursor", "(", "CURSOR_MOVE", ")", "self", ".", "boundedMoveShape", "(", "self", ".", "selectedShapeCopy", ",", "pos", ")", "self", ".", "repaint", "(", ")", "elif", "self", ".", "selectedShape", ":", "self", ".", "selectedShapeCopy", "=", "self", ".", "selectedShape", ".", "copy", "(", ")", "self", ".", "repaint", "(", ")", "return", "# Polygon/Vertex moving.", "if", "Qt", ".", "LeftButton", "&", "ev", ".", "buttons", "(", ")", ":", "if", "self", ".", "selectedVertex", "(", ")", ":", "self", ".", "boundedMoveVertex", "(", "pos", ")", "self", ".", "shapeMoved", ".", "emit", "(", ")", "self", ".", "repaint", "(", ")", "elif", "self", ".", "selectedShape", "and", "self", ".", "prevPoint", ":", "self", ".", "overrideCursor", "(", "CURSOR_MOVE", ")", "self", ".", "boundedMoveShape", "(", "self", ".", "selectedShape", ",", "pos", ")", "self", 
".", "shapeMoved", ".", "emit", "(", ")", "self", ".", "repaint", "(", ")", "return", "# Just hovering over the canvas, 2 posibilities:", "# - Highlight shapes", "# - Highlight vertex", "# Update shape/vertex fill and tooltip value accordingly.", "self", ".", "setToolTip", "(", "\"Image\"", ")", "for", "shape", "in", "reversed", "(", "[", "s", "for", "s", "in", "self", ".", "shapes", "if", "self", ".", "isVisible", "(", "s", ")", "]", ")", ":", "# Look for a nearby vertex to highlight. If that fails,", "# check if we happen to be inside a shape.", "index", "=", "shape", ".", "nearestVertex", "(", "pos", ",", "self", ".", "epsilon", ")", "if", "index", "is", "not", "None", ":", "if", "self", ".", "selectedVertex", "(", ")", ":", "self", ".", "hShape", ".", "highlightClear", "(", ")", "self", ".", "hVertex", ",", "self", ".", "hShape", "=", "index", ",", "shape", "shape", ".", "highlightVertex", "(", "index", ",", "shape", ".", "MOVE_VERTEX", ")", "self", ".", "overrideCursor", "(", "CURSOR_POINT", ")", "self", ".", "setToolTip", "(", "\"Click & drag to move point\"", ")", "self", ".", "setStatusTip", "(", "self", ".", "toolTip", "(", ")", ")", "self", ".", "update", "(", ")", "break", "elif", "shape", ".", "containsPoint", "(", "pos", ")", ":", "if", "self", ".", "selectedVertex", "(", ")", ":", "self", ".", "hShape", ".", "highlightClear", "(", ")", "self", ".", "hVertex", ",", "self", ".", "hShape", "=", "None", ",", "shape", "self", ".", "setToolTip", "(", "\"Click & drag to move shape '%s'\"", "%", "shape", ".", "label", ")", "self", ".", "setStatusTip", "(", "self", ".", "toolTip", "(", ")", ")", "self", ".", "overrideCursor", "(", "CURSOR_GRAB", ")", "self", ".", "update", "(", ")", "break", "else", ":", "# Nothing found, clear highlights, reset state.", "if", "self", ".", "hShape", ":", "self", ".", "hShape", ".", "highlightClear", "(", ")", "self", ".", "update", "(", ")", "self", ".", "hVertex", ",", "self", ".", "hShape", "=", "None", ",", "None", "self", ".", "overrideCursor", "(", "CURSOR_DEFAULT", ")" ]
Update line with last point and current coordinates.
[ "Update", "line", "with", "last", "point", "and", "current", "coordinates", "." ]
python
train
42.428571
jtambasco/modesolverpy
modesolverpy/design.py
https://github.com/jtambasco/modesolverpy/blob/85254a13b5aed2404187c52ac93b9b3ce99ee3a3/modesolverpy/design.py#L29-L60
def grating_coupler_period(wavelength, n_eff, n_clad, incidence_angle_deg,
                           diffration_order=1):
    '''
    Calculate the period needed for a grating coupler.

    Args:
        wavelength (float): The target wavelength for the grating coupler.
        n_eff (float): The effective index of the mode of a waveguide
            with the width of the grating coupler.
        n_clad (float): The refractive index of the cladding.
        incidence_angle_deg (float): The incidence angle the grating
            coupler should operate at [degrees].
        diffration_order (int): The grating order the coupler should work
            at.  Default is 1st order (1).

    Returns:
        float: The period needed for the grating coupler in the same units
        as the wavelength was given at.
    '''
    k0 = 2. * np.pi / wavelength
    beta = n_eff.real * k0
    n_inc = n_clad

    grating_period = (2.*np.pi*diffration_order) \
        / (beta - k0*n_inc*np.sin(np.radians(incidence_angle_deg)))

    return grating_period
[ "def", "grating_coupler_period", "(", "wavelength", ",", "n_eff", ",", "n_clad", ",", "incidence_angle_deg", ",", "diffration_order", "=", "1", ")", ":", "k0", "=", "2.", "*", "np", ".", "pi", "/", "wavelength", "beta", "=", "n_eff", ".", "real", "*", "k0", "n_inc", "=", "n_clad", "grating_period", "=", "(", "2.", "*", "np", ".", "pi", "*", "diffration_order", ")", "/", "(", "beta", "-", "k0", "*", "n_inc", "*", "np", ".", "sin", "(", "np", ".", "radians", "(", "incidence_angle_deg", ")", ")", ")", "return", "grating_period" ]
Calculate the period needed for a grating coupler.

Args:
    wavelength (float): The target wavelength for the grating coupler.
    n_eff (float): The effective index of the mode of a waveguide with
        the width of the grating coupler.
    n_clad (float): The refractive index of the cladding.
    incidence_angle_deg (float): The incidence angle the grating coupler
        should operate at [degrees].
    diffration_order (int): The grating order the coupler should work at.
        Default is 1st order (1).

Returns:
    float: The period needed for the grating coupler in the same units
    as the wavelength was given at.
[ "Calculate", "the", "period", "needed", "for", "a", "grating", "coupler", "." ]
python
train
35.1875
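A minimal standalone sketch of the grating-coupler relation used above, assuming numpy; the wavelength, indices and angle are illustrative values, not ones taken from the repository:

import numpy as np

# Same relation as grating_coupler_period above:
#   Lambda = 2*pi*m / (beta - k0 * n_clad * sin(theta))
wavelength = 1.55            # um, illustrative telecom wavelength
n_eff = 2.85                 # illustrative modal effective index
n_clad = 1.44                # SiO2-like cladding index
theta_deg = 10.0             # fibre incidence angle
m = 1                        # first diffraction order

k0 = 2.0 * np.pi / wavelength
beta = n_eff * k0
period = (2.0 * np.pi * m) / (beta - k0 * n_clad * np.sin(np.radians(theta_deg)))
print(round(period, 3))      # roughly 0.60 um grating period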
semente/django-smuggler
smuggler/views.py
https://github.com/semente/django-smuggler/blob/3be76f4e94e50e927a55a60741fac1a793df83de/smuggler/views.py#L63-L71
def dump_data(request):
    """Exports data from whole project.
    """
    # Try to grab app_label data
    app_label = request.GET.get('app_label', [])
    if app_label:
        app_label = app_label.split(',')
    return dump_to_response(request, app_label=app_label,
                            exclude=settings.SMUGGLER_EXCLUDE_LIST)
[ "def", "dump_data", "(", "request", ")", ":", "# Try to grab app_label data", "app_label", "=", "request", ".", "GET", ".", "get", "(", "'app_label'", ",", "[", "]", ")", "if", "app_label", ":", "app_label", "=", "app_label", ".", "split", "(", "','", ")", "return", "dump_to_response", "(", "request", ",", "app_label", "=", "app_label", ",", "exclude", "=", "settings", ".", "SMUGGLER_EXCLUDE_LIST", ")" ]
Exports data from whole project.
[ "Exports", "data", "from", "whole", "project", "." ]
python
train
36.666667
python-hyper/h11
h11/_connection.py
https://github.com/python-hyper/h11/blob/836d95d1c2af2f9153c86dbc8d9784341d73c6a6/h11/_connection.py#L443-L468
def send(self, event):
    """Convert a high-level event into bytes that can be sent to the peer,
    while updating our internal state machine.

    Args:
        event: The :ref:`event <events>` to send.

    Returns:
        If ``type(event) is ConnectionClosed``, then returns
        ``None``. Otherwise, returns a :term:`bytes-like object`.

    Raises:
        LocalProtocolError:
            Sending this event at this time would violate our
            understanding of the HTTP/1.1 protocol.

    If this method raises any exception then it also sets
    :attr:`Connection.our_state` to :data:`ERROR` -- see
    :ref:`error-handling` for discussion.

    """
    data_list = self.send_with_data_passthrough(event)
    if data_list is None:
        return None
    else:
        return b"".join(data_list)
[ "def", "send", "(", "self", ",", "event", ")", ":", "data_list", "=", "self", ".", "send_with_data_passthrough", "(", "event", ")", "if", "data_list", "is", "None", ":", "return", "None", "else", ":", "return", "b\"\"", ".", "join", "(", "data_list", ")" ]
Convert a high-level event into bytes that can be sent to the peer,
while updating our internal state machine.

Args:
    event: The :ref:`event <events>` to send.

Returns:
    If ``type(event) is ConnectionClosed``, then returns
    ``None``. Otherwise, returns a :term:`bytes-like object`.

Raises:
    LocalProtocolError:
        Sending this event at this time would violate our
        understanding of the HTTP/1.1 protocol.

If this method raises any exception then it also sets
:attr:`Connection.our_state` to :data:`ERROR` -- see
:ref:`error-handling` for discussion.
[ "Convert", "a", "high", "-", "level", "event", "into", "bytes", "that", "can", "be", "sent", "to", "the", "peer", "while", "updating", "our", "internal", "state", "machine", "." ]
python
train
33.423077
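A short usage sketch of send() from the client side; it assumes the public h11 API (Connection, Request, EndOfMessage) and a plain TCP socket, neither of which appears in the row above:

import socket
import h11

conn = h11.Connection(our_role=h11.CLIENT)
sock = socket.create_connection(("example.com", 80))

# Each event is turned into wire bytes by send(); the state machine checks
# that the request is followed by EndOfMessage before more can be sent.
for event in [
    h11.Request(method="GET", target="/",
                headers=[("Host", "example.com")]),
    h11.EndOfMessage(),
]:
    data = conn.send(event)
    if data is not None:        # a ConnectionClosed event would yield None
        sock.sendall(data)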
hyperledger/indy-plenum
plenum/server/node.py
https://github.com/hyperledger/indy-plenum/blob/dcd144e238af7f17a869ffc9412f13dc488b7020/plenum/server/node.py#L1455-L1510
def onConnsChanged(self, joined: Set[str], left: Set[str]):
    """
    A series of operations to perform once a connection count has changed.

    - Set f to max number of failures this system can handle.
    - Set status to one of started, started_hungry or starting depending on
        the number of protocol instances.
    - Check protocol instances. See `checkInstances()`
    """
    _prev_status = self.status
    if self.isGoing():
        if self.connectedNodeCount == self.totalNodes:
            self.status = Status.started
        elif self.connectedNodeCount >= self.minimumNodes:
            self.status = Status.started_hungry
        else:
            self.status = Status.starting
    self.elector.nodeCount = self.connectedNodeCount

    if self.master_primary_name in joined:
        self.primaries_disconnection_times[self.master_replica.instId] = None
    if self.master_primary_name in left:
        logger.display('{} lost connection to primary of master'.format(self))
        self.lost_master_primary()
    elif _prev_status == Status.starting and self.status == Status.started_hungry \
            and self.primaries_disconnection_times[self.master_replica.instId] is not None \
            and self.master_primary_name is not None:
        """
        Such situation may occur if the pool has come back to reachable consensus but
        primary is still disconnected, so view change proposal makes sense now.
        """
        self._schedule_view_change()

    for inst_id, replica in self.replicas.items():
        if not replica.isMaster and replica.primaryName is not None:
            primary_node_name = replica.primaryName.split(':')[0]
            if primary_node_name in joined:
                self.primaries_disconnection_times[inst_id] = None
            elif primary_node_name in left:
                self.primaries_disconnection_times[inst_id] = time.perf_counter()
                self._schedule_replica_removal(inst_id)

    if self.isReady():
        self.checkInstances()
    else:
        logger.info("{} joined nodes {} but status is {}".format(self, joined, self.status))
    # Send ledger status whether ready (connected to enough nodes) or not
    for node in joined:
        self.send_ledger_status_to_newly_connected_node(node)

    for node in left:
        self.network_i3pc_watcher.disconnect(node)

    for node in joined:
        self.network_i3pc_watcher.connect(node)
[ "def", "onConnsChanged", "(", "self", ",", "joined", ":", "Set", "[", "str", "]", ",", "left", ":", "Set", "[", "str", "]", ")", ":", "_prev_status", "=", "self", ".", "status", "if", "self", ".", "isGoing", "(", ")", ":", "if", "self", ".", "connectedNodeCount", "==", "self", ".", "totalNodes", ":", "self", ".", "status", "=", "Status", ".", "started", "elif", "self", ".", "connectedNodeCount", ">=", "self", ".", "minimumNodes", ":", "self", ".", "status", "=", "Status", ".", "started_hungry", "else", ":", "self", ".", "status", "=", "Status", ".", "starting", "self", ".", "elector", ".", "nodeCount", "=", "self", ".", "connectedNodeCount", "if", "self", ".", "master_primary_name", "in", "joined", ":", "self", ".", "primaries_disconnection_times", "[", "self", ".", "master_replica", ".", "instId", "]", "=", "None", "if", "self", ".", "master_primary_name", "in", "left", ":", "logger", ".", "display", "(", "'{} lost connection to primary of master'", ".", "format", "(", "self", ")", ")", "self", ".", "lost_master_primary", "(", ")", "elif", "_prev_status", "==", "Status", ".", "starting", "and", "self", ".", "status", "==", "Status", ".", "started_hungry", "and", "self", ".", "primaries_disconnection_times", "[", "self", ".", "master_replica", ".", "instId", "]", "is", "not", "None", "and", "self", ".", "master_primary_name", "is", "not", "None", ":", "\"\"\"\n Such situation may occur if the pool has come back to reachable consensus but\n primary is still disconnected, so view change proposal makes sense now.\n \"\"\"", "self", ".", "_schedule_view_change", "(", ")", "for", "inst_id", ",", "replica", "in", "self", ".", "replicas", ".", "items", "(", ")", ":", "if", "not", "replica", ".", "isMaster", "and", "replica", ".", "primaryName", "is", "not", "None", ":", "primary_node_name", "=", "replica", ".", "primaryName", ".", "split", "(", "':'", ")", "[", "0", "]", "if", "primary_node_name", "in", "joined", ":", "self", ".", "primaries_disconnection_times", "[", "inst_id", "]", "=", "None", "elif", "primary_node_name", "in", "left", ":", "self", ".", "primaries_disconnection_times", "[", "inst_id", "]", "=", "time", ".", "perf_counter", "(", ")", "self", ".", "_schedule_replica_removal", "(", "inst_id", ")", "if", "self", ".", "isReady", "(", ")", ":", "self", ".", "checkInstances", "(", ")", "else", ":", "logger", ".", "info", "(", "\"{} joined nodes {} but status is {}\"", ".", "format", "(", "self", ",", "joined", ",", "self", ".", "status", ")", ")", "# Send ledger status whether ready (connected to enough nodes) or not", "for", "node", "in", "joined", ":", "self", ".", "send_ledger_status_to_newly_connected_node", "(", "node", ")", "for", "node", "in", "left", ":", "self", ".", "network_i3pc_watcher", ".", "disconnect", "(", "node", ")", "for", "node", "in", "joined", ":", "self", ".", "network_i3pc_watcher", ".", "connect", "(", "node", ")" ]
A series of operations to perform once a connection count has changed.

- Set f to max number of failures this system can handle.
- Set status to one of started, started_hungry or starting depending on
  the number of protocol instances.
- Check protocol instances. See `checkInstances()`
[ "A", "series", "of", "operations", "to", "perform", "once", "a", "connection", "count", "has", "changed", "." ]
python
train
45.875
CodyKochmann/generators
generators/Generator.py
https://github.com/CodyKochmann/generators/blob/e4ca4dd25d5023a94b0349c69d6224070cc2526f/generators/Generator.py#L145-L151
def add_methods(methods_to_add):
    ''' use this to bulk add new methods to Generator '''
    for i in methods_to_add:
        try:
            Generator.add_method(*i)
        except Exception as ex:
            raise Exception('issue adding {} - {}'.format(repr(i), ex))
[ "def", "add_methods", "(", "methods_to_add", ")", ":", "for", "i", "in", "methods_to_add", ":", "try", ":", "Generator", ".", "add_method", "(", "*", "i", ")", "except", "Exception", "as", "ex", ":", "raise", "Exception", "(", "'issue adding {} - {}'", ".", "format", "(", "repr", "(", "i", ")", ",", "ex", ")", ")" ]
use this to bulk add new methods to Generator
[ "use", "this", "to", "bulk", "add", "new", "methods", "to", "Generator" ]
python
train
41.571429
ff0000/scarlet
scarlet/cms/actions.py
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/cms/actions.py#L315-L348
def process_action(self, request, queryset):
    """
    Publishes the selected objects by passing the value of \
    'when' to the object's publish method. The object's \
    `purge_archives` method is also called to limit the number \
    of old items that we keep around. The action is logged as \
    either 'published' or 'scheduled' depending on the value of \
    'when', and the user is notified with a message.

    Returns a 'render redirect' to the result of the \
    `get_done_url` method.
    """
    form = self.form(request.POST)
    if form.is_valid():
        when = form.cleaned_data.get('when')
        count = 0
        for obj in queryset:
            count += 1
            obj.publish(user=request.user, when=when)
            obj.purge_archives()
            object_url = self.get_object_url(obj)
            if obj.state == obj.PUBLISHED:
                self.log_action(
                    obj, CMSLog.PUBLISH, url=object_url)
            else:
                self.log_action(
                    obj, CMSLog.SCHEDULE, url=object_url)
        message = "%s objects published." % count
        self.write_message(message=message)
        return self.render(request, redirect_url=self.get_done_url(),
                           message=message,
                           collect_render_data=False)
    return self.render(request, queryset=queryset, publish_form=form,
                       action='Publish')
[ "def", "process_action", "(", "self", ",", "request", ",", "queryset", ")", ":", "form", "=", "self", ".", "form", "(", "request", ".", "POST", ")", "if", "form", ".", "is_valid", "(", ")", ":", "when", "=", "form", ".", "cleaned_data", ".", "get", "(", "'when'", ")", "count", "=", "0", "for", "obj", "in", "queryset", ":", "count", "+=", "1", "obj", ".", "publish", "(", "user", "=", "request", ".", "user", ",", "when", "=", "when", ")", "obj", ".", "purge_archives", "(", ")", "object_url", "=", "self", ".", "get_object_url", "(", "obj", ")", "if", "obj", ".", "state", "==", "obj", ".", "PUBLISHED", ":", "self", ".", "log_action", "(", "obj", ",", "CMSLog", ".", "PUBLISH", ",", "url", "=", "object_url", ")", "else", ":", "self", ".", "log_action", "(", "obj", ",", "CMSLog", ".", "SCHEDULE", ",", "url", "=", "object_url", ")", "message", "=", "\"%s objects published.\"", "%", "count", "self", ".", "write_message", "(", "message", "=", "message", ")", "return", "self", ".", "render", "(", "request", ",", "redirect_url", "=", "self", ".", "get_done_url", "(", ")", ",", "message", "=", "message", ",", "collect_render_data", "=", "False", ")", "return", "self", ".", "render", "(", "request", ",", "queryset", "=", "queryset", ",", "publish_form", "=", "form", ",", "action", "=", "'Publish'", ")" ]
Publishes the selected objects by passing the value of \
'when' to the object's publish method. The object's \
`purge_archives` method is also called to limit the number \
of old items that we keep around. The action is logged as \
either 'published' or 'scheduled' depending on the value of \
'when', and the user is notified with a message.

Returns a 'render redirect' to the result of the \
`get_done_url` method.
[ "Publishes", "the", "selected", "objects", "by", "passing", "the", "value", "of", "\\", "when", "to", "the", "object", "s", "publish", "method", ".", "The", "object", "s", "\\", "purge_archives", "method", "is", "also", "called", "to", "limit", "the", "number", "\\", "of", "old", "items", "that", "we", "keep", "around", ".", "The", "action", "is", "logged", "as", "\\", "either", "published", "or", "scheduled", "depending", "on", "the", "value", "of", "\\", "when", "and", "the", "user", "is", "notified", "with", "a", "message", "." ]
python
train
44.323529
aliyun/aliyun-odps-python-sdk
odps/ml/expr/mixin.py
https://github.com/aliyun/aliyun-odps-python-sdk/blob/4b0de18f5864386df6068f26f026e62f932c41e4/odps/ml/expr/mixin.py#L389-L405
def discrete(self):
    """
    Set sequence to be discrete.

    :rtype: Column

    :Example:

    >>> # Table schema is create table test(f1 double, f2 string)
    >>> # Original continuity: f1=CONTINUOUS, f2=CONTINUOUS
    >>> # Now we want to set ``f1`` and ``f2`` into continuous
    >>> new_ds = df.discrete('f1 f2')
    """
    field_name = self.name
    new_df = copy_df(self)
    new_df._perform_operation(op.FieldContinuityOperation({field_name: False}))
    return new_df
[ "def", "discrete", "(", "self", ")", ":", "field_name", "=", "self", ".", "name", "new_df", "=", "copy_df", "(", "self", ")", "new_df", ".", "_perform_operation", "(", "op", ".", "FieldContinuityOperation", "(", "{", "field_name", ":", "False", "}", ")", ")", "return", "new_df" ]
Set sequence to be discrete.

:rtype: Column

:Example:

>>> # Table schema is create table test(f1 double, f2 string)
>>> # Original continuity: f1=CONTINUOUS, f2=CONTINUOUS
>>> # Now we want to set ``f1`` and ``f2`` into continuous
>>> new_ds = df.discrete('f1 f2')
[ "Set", "sequence", "to", "be", "discrete", "." ]
python
train
30.529412
twilio/twilio-python
twilio/rest/preview/bulk_exports/export_configuration.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/preview/bulk_exports/export_configuration.py#L215-L228
def _proxy(self):
    """
    Generate an instance context for the instance, the context is capable of
    performing various actions.  All instance actions are proxied to the context

    :returns: ExportConfigurationContext for this ExportConfigurationInstance
    :rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationContext
    """
    if self._context is None:
        self._context = ExportConfigurationContext(
            self._version,
            resource_type=self._solution['resource_type'],
        )
    return self._context
[ "def", "_proxy", "(", "self", ")", ":", "if", "self", ".", "_context", "is", "None", ":", "self", ".", "_context", "=", "ExportConfigurationContext", "(", "self", ".", "_version", ",", "resource_type", "=", "self", ".", "_solution", "[", "'resource_type'", "]", ",", ")", "return", "self", ".", "_context" ]
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: ExportConfigurationContext for this ExportConfigurationInstance :rtype: twilio.rest.preview.bulk_exports.export_configuration.ExportConfigurationContext
[ "Generate", "an", "instance", "context", "for", "the", "instance", "the", "context", "is", "capable", "of", "performing", "various", "actions", ".", "All", "instance", "actions", "are", "proxied", "to", "the", "context" ]
python
train
42.928571
kata198/AdvancedHTMLParser
AdvancedHTMLParser/Tags.py
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Tags.py#L1951-L1964
def getPeersWithAttrValues(self, attrName, attrValues):
    '''
        getPeersWithAttrValues - Gets peers (elements on same level) whose attribute
            given by #attrName are in the list of possible vaues #attrValues

        @param attrName - Name of attribute
        @param attrValues - List of possible values which will match

        @return - None if no parent element (error condition), otherwise a
            TagCollection of peers that matched.
    '''
    peers = self.peers
    if peers is None:
        return None

    return TagCollection([peer for peer in peers if peer.getAttribute(attrName) in attrValues])
[ "def", "getPeersWithAttrValues", "(", "self", ",", "attrName", ",", "attrValues", ")", ":", "peers", "=", "self", ".", "peers", "if", "peers", "is", "None", ":", "return", "None", "return", "TagCollection", "(", "[", "peer", "for", "peer", "in", "peers", "if", "peer", ".", "getAttribute", "(", "attrName", ")", "in", "attrValues", "]", ")" ]
getPeersWithAttrValues - Gets peers (elements on same level) whose attribute given by #attrName are in the list of possible vaues #attrValues @param attrName - Name of attribute @param attrValues - List of possible values which will match @return - None if no parent element (error condition), otherwise a TagCollection of peers that matched.
[ "getPeersWithAttrValues", "-", "Gets", "peers", "(", "elements", "on", "same", "level", ")", "whose", "attribute", "given", "by", "#attrName", "are", "in", "the", "list", "of", "possible", "vaues", "#attrValues" ]
python
train
46.642857
tonybaloney/wily
wily/archivers/git.py
https://github.com/tonybaloney/wily/blob/bae259354a91b57d56603f0ca7403186f086a84c/wily/archivers/git.py#L87-L98
def checkout(self, revision, options):
    """
    Checkout a specific revision.

    :param revision: The revision identifier.
    :type  revision: :class:`Revision`

    :param options: Any additional options.
    :type  options: ``dict``
    """
    rev = revision.key
    self.repo.git.checkout(rev)
[ "def", "checkout", "(", "self", ",", "revision", ",", "options", ")", ":", "rev", "=", "revision", ".", "key", "self", ".", "repo", ".", "git", ".", "checkout", "(", "rev", ")" ]
Checkout a specific revision. :param revision: The revision identifier. :type revision: :class:`Revision` :param options: Any additional options. :type options: ``dict``
[ "Checkout", "a", "specific", "revision", "." ]
python
train
27.333333
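A hedged GitPython sketch of the same checkout call outside of wily; the repository path and revision below are placeholders:

from git import Repo

repo = Repo("/path/to/repository")
previous_branch = repo.active_branch.name

# Same mechanism the archiver relies on: delegate to the git CLI via repo.git.
repo.git.checkout("HEAD~3")
# ... inspect or analyse the older revision here ...
repo.git.checkout(previous_branch)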
jkawamoto/dsargparse
dsargparse.py
https://github.com/jkawamoto/dsargparse/blob/dbbcea11ff1ae7b84bdfccb9f97d1947574e4126/dsargparse.py#L183-L208
def add_argument(self, *args, **kwargs):
    """Add an argument.

    This method adds a new argument to the current parser. The function is
    same as ``argparse.ArgumentParser.add_argument``. However, this method
    tries to determine help messages for the adding argument from some
    docstrings.

    If the new arguments belong to some sub commands, the docstring
    of a function implements behavior of the sub command has ``Args:``
    section, and defines same name variable, this function sets such
    definition to the help message.

    Positional Args:
        same positional arguments as argparse.ArgumentParser.add_argument.

    Keyword Args:
        same keywards arguments as argparse.ArgumentParser.add_argument.
    """
    if _HELP not in kwargs:
        for name in args:
            name = name.replace("-", "")
            if name in self.__argmap:
                kwargs[_HELP] = self.__argmap[name]
                break
    return super(ArgumentParser, self).add_argument(*args, **kwargs)
[ "def", "add_argument", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "_HELP", "not", "in", "kwargs", ":", "for", "name", "in", "args", ":", "name", "=", "name", ".", "replace", "(", "\"-\"", ",", "\"\"", ")", "if", "name", "in", "self", ".", "__argmap", ":", "kwargs", "[", "_HELP", "]", "=", "self", ".", "__argmap", "[", "name", "]", "break", "return", "super", "(", "ArgumentParser", ",", "self", ")", ".", "add_argument", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Add an argument.

This method adds a new argument to the current parser. The function is
the same as ``argparse.ArgumentParser.add_argument``. However, this method
tries to determine a help message for the added argument from some
docstrings.

If the new argument belongs to a sub command, and the docstring of the
function implementing that sub command has an ``Args:`` section defining
a variable of the same name, this method uses that definition as the
help message.

Positional Args:
    same positional arguments as argparse.ArgumentParser.add_argument.

Keyword Args:
    same keyword arguments as argparse.ArgumentParser.add_argument.
[ "Add", "an", "argument", "." ]
python
train
41.346154
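A hedged sketch of the docstring-to-help mechanism described above; the Args:-section parsing shown here is a simplified stand-in rather than the library's actual parser, and the function and argument names are invented for illustration:

import argparse
import re

def parse_args_section(docstring):
    """Map 'name (type): help' lines under an Args: section to {name: help}."""
    argmap, in_args = {}, False
    for line in (docstring or "").splitlines():
        stripped = line.strip()
        if stripped.lower().startswith("args:"):
            in_args = True
            continue
        if in_args:
            match = re.match(r"(\w+)\s*(?:\([^)]*\))?:\s*(.+)", stripped)
            if match:
                argmap[match.group(1)] = match.group(2)
            elif not stripped:
                break
    return argmap

def greet(name, shout=False):
    """Say hello.

    Args:
        name: who to greet.
        shout: print the greeting in upper case.
    """

argmap = parse_args_section(greet.__doc__)
parser = argparse.ArgumentParser()
parser.add_argument("name", help=argmap.get("name"))
parser.add_argument("--shout", action="store_true", help=argmap.get("shout"))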
eqcorrscan/EQcorrscan
eqcorrscan/utils/mag_calc.py
https://github.com/eqcorrscan/EQcorrscan/blob/3121b4aca801ee5d38f56ca297ce1c0f9515d9ff/eqcorrscan/utils/mag_calc.py#L825-L1027
def svd_moments(u, s, v, stachans, event_list, n_svs=2): """ Calculate relative moments/amplitudes using singular-value decomposition. Convert basis vectors calculated by singular value \ decomposition (see the SVD functions in clustering) into relative \ moments. For more information see the paper by \ `Rubinstein & Ellsworth (2010). <http://www.bssaonline.org/content/100/5A/1952.short>`_ :type u: list :param u: List of the :class:`numpy.ndarray` input basis vectors from the SVD, one array for each channel used. :type s: list :param s: List of the :class:`numpy.ndarray` of singular values, one array for each channel. :type v: list :param v: List of :class:`numpy.ndarray` of output basis vectors from SVD, one array per channel. :type stachans: list :param stachans: List of station.channel input :type event_list: list :param event_list: List of events for which you have data, such that \ event_list[i] corresponds to stachans[i], U[i] etc. and \ event_list[i][j] corresponds to event j in U[i]. These are a series \ of indexes that map the basis vectors to their relative events and \ channels - if you have every channel for every event generating these \ is trivial (see example). :type n_svs: int :param n_svs: Number of singular values to use, defaults to 4. :returns: M, array of relative moments :rtype: :class:`numpy.ndarray` :returns: events_out, list of events that relate to M (in order), \ does not include the magnitude information in the events, see note. :rtype: :class:`obspy.core.event.event.Event` .. note:: M is an array of relative moments (or amplitudes), these cannot be directly compared to true moments without calibration. .. note:: When comparing this method with the method used for creation of subspace detectors (Harris 2006) it is important to note that the input `design set` matrix in Harris contains waveforms as columns, whereas in Rubinstein & Ellsworth it contains waveforms as rows (i.e. the transpose of the Harris data matrix). The U and V matrices are therefore swapped between the two approaches. This is accounted for in EQcorrscan but may lead to confusion when reviewing the code. Here we use the Harris approach. .. rubric:: Example >>> from eqcorrscan.utils.mag_calc import svd_moments >>> from obspy import read >>> import glob >>> import os >>> from eqcorrscan.utils.clustering import svd >>> import numpy as np >>> # Do the set-up >>> testing_path = 'eqcorrscan/tests/test_data/similar_events' >>> stream_files = glob.glob(os.path.join(testing_path, '*')) >>> stream_list = [read(stream_file) for stream_file in stream_files] >>> event_list = [] >>> remove_list = [('WHAT2', 'SH1'), ('WV04', 'SHZ'), ('GCSZ', 'EHZ')] >>> for i, stream in enumerate(stream_list): ... st_list = [] ... for tr in stream: ... if (tr.stats.station, tr.stats.channel) not in remove_list: ... stream.remove(tr) ... continue ... tr.detrend('simple') ... tr.filter('bandpass', freqmin=5.0, freqmax=15.0) ... tr.trim(tr.stats.starttime + 40, tr.stats.endtime - 45) ... st_list.append(i) ... event_list.append(st_list) # doctest: +SKIP >>> event_list = np.asarray(event_list).T.tolist() >>> SVec, SVal, U, stachans = svd(stream_list=stream_list) # doctest: +SKIP ['GCSZ.EHZ', 'WV04.SHZ', 'WHAT2.SH1'] >>> M, events_out = svd_moments(u=U, s=SVal, v=SVec, stachans=stachans, ... 
event_list=event_list) # doctest: +SKIP """ # Define maximum number of events, will be the width of K K_width = max([max(ev_list) for ev_list in event_list]) + 1 # Sometimes the randomisation generates a singular matrix - rather than # attempting to regulerize this matrix I propose undertaking the # randomisation step a further time if len(stachans) == 1: print('Only provided data from one station-channel - ' 'will not try to invert') return u[0][:, 0], event_list[0] for i, stachan in enumerate(stachans): k = [] # Small kernel matrix for one station - channel # Copy the relevant vectors so as not to destroy them # Here we'll swap into the Rubinstein U and V matrices U_working = copy.deepcopy(v[i].T) V_working = copy.deepcopy(u[i]) s_working = copy.deepcopy(s[i].T) ev_list = event_list[i] if len(ev_list) > len(U_working): print('U is : ' + str(U_working.shape)) print('ev_list is len %s' % str(len(ev_list))) f_dump = open('mag_calc_U_working.pkl', 'wb') pickle.dump(U_working, f_dump) f_dump.close() raise IOError('More events than represented in U') # Set all non-important singular values to zero s_working[n_svs:len(s_working)] = 0 s_working = np.diag(s_working) # Convert to numpy matrices U_working = np.matrix(U_working) V_working = np.matrix(V_working) s_working = np.matrix(s_working) SVD_weights = U_working[:, 0] # If all the weights are negative take the abs if np.all(SVD_weights < 0): warnings.warn('All weights are negative - flipping them') SVD_weights = np.abs(SVD_weights) SVD_weights = np.array(SVD_weights).reshape(-1).tolist() # Shuffle the SVD_weights prior to pairing - will give one of multiple # pairwise options - see p1956 of Rubinstein & Ellsworth 2010 # We need to keep the real indexes though, otherwise, if there are # multiple events with the same weight we will end up with multiple # -1 values random_SVD_weights = np.copy(SVD_weights) # Tack on the indexes random_SVD_weights = random_SVD_weights.tolist() random_SVD_weights = [(random_SVD_weights[_i], _i) for _i in range(len(random_SVD_weights))] random.shuffle(random_SVD_weights) # Add the first element to the end so all elements will be paired twice random_SVD_weights.append(random_SVD_weights[0]) # Take pairs of all the SVD_weights (each weight appears in 2 pairs) pairs = [] for pair in _pairwise(random_SVD_weights): pairs.append(pair) # Deciding values for each place in kernel matrix using the pairs for pairsIndex in range(len(pairs)): # We will normalize by the minimum weight _weights = list(zip(*list(pairs[pairsIndex])))[0] _indeces = list(zip(*list(pairs[pairsIndex])))[1] min_weight = min(np.abs(_weights)) max_weight = max(np.abs(_weights)) min_index = _indeces[np.argmin(np.abs(_weights))] max_index = _indeces[np.argmax(np.abs(_weights))] row = [] # Working out values for each row of kernel matrix for j in range(len(SVD_weights)): if j == max_index: result = -1 elif j == min_index: normalised = max_weight / min_weight result = float(normalised) else: result = 0 row.append(result) # Add each row to the K matrix k.append(row) # k is now a square matrix, we need to flesh it out to be K_width k_filled = np.zeros([len(k), K_width]) for j in range(len(k)): for l, ev in enumerate(ev_list): k_filled[j, ev] = k[j][l] if 'K' not in locals(): K = k_filled else: K = np.concatenate([K, k_filled]) # Remove any empty rows K_nonempty = [] events_out = [] for i in range(0, K_width): if not np.all(K[:, i] == 0): K_nonempty.append(K[:, i]) events_out.append(i) K = np.array(K_nonempty).T K = K.tolist() K_width = len(K[0]) # Add an extra 
row to K, so average moment = 1 K.append(np.ones(K_width) * (1. / K_width)) print("Created Kernel matrix: ") del row print('\n'.join([''.join([str(round(float(item), 3)).ljust(6) for item in row]) for row in K])) Krounded = np.around(K, decimals=4) # Create a weighting matrix to put emphasis on the final row. W = np.matrix(np.identity(len(K))) # the final element of W = the number of stations*number of events W[-1, -1] = len(K) - 1 # Make K into a matrix K = np.matrix(K) ############ # Solve using the weighted least squares equation, K.T is K transpose Kinv = np.array(np.linalg.inv(K.T * W * K) * K.T * W) # M are the relative moments of the events M = Kinv[:, -1] # XXX TODO This still needs an outlier removal step return M, events_out
[ "def", "svd_moments", "(", "u", ",", "s", ",", "v", ",", "stachans", ",", "event_list", ",", "n_svs", "=", "2", ")", ":", "# Define maximum number of events, will be the width of K", "K_width", "=", "max", "(", "[", "max", "(", "ev_list", ")", "for", "ev_list", "in", "event_list", "]", ")", "+", "1", "# Sometimes the randomisation generates a singular matrix - rather than", "# attempting to regulerize this matrix I propose undertaking the", "# randomisation step a further time", "if", "len", "(", "stachans", ")", "==", "1", ":", "print", "(", "'Only provided data from one station-channel - '", "'will not try to invert'", ")", "return", "u", "[", "0", "]", "[", ":", ",", "0", "]", ",", "event_list", "[", "0", "]", "for", "i", ",", "stachan", "in", "enumerate", "(", "stachans", ")", ":", "k", "=", "[", "]", "# Small kernel matrix for one station - channel", "# Copy the relevant vectors so as not to destroy them", "# Here we'll swap into the Rubinstein U and V matrices", "U_working", "=", "copy", ".", "deepcopy", "(", "v", "[", "i", "]", ".", "T", ")", "V_working", "=", "copy", ".", "deepcopy", "(", "u", "[", "i", "]", ")", "s_working", "=", "copy", ".", "deepcopy", "(", "s", "[", "i", "]", ".", "T", ")", "ev_list", "=", "event_list", "[", "i", "]", "if", "len", "(", "ev_list", ")", ">", "len", "(", "U_working", ")", ":", "print", "(", "'U is : '", "+", "str", "(", "U_working", ".", "shape", ")", ")", "print", "(", "'ev_list is len %s'", "%", "str", "(", "len", "(", "ev_list", ")", ")", ")", "f_dump", "=", "open", "(", "'mag_calc_U_working.pkl'", ",", "'wb'", ")", "pickle", ".", "dump", "(", "U_working", ",", "f_dump", ")", "f_dump", ".", "close", "(", ")", "raise", "IOError", "(", "'More events than represented in U'", ")", "# Set all non-important singular values to zero", "s_working", "[", "n_svs", ":", "len", "(", "s_working", ")", "]", "=", "0", "s_working", "=", "np", ".", "diag", "(", "s_working", ")", "# Convert to numpy matrices", "U_working", "=", "np", ".", "matrix", "(", "U_working", ")", "V_working", "=", "np", ".", "matrix", "(", "V_working", ")", "s_working", "=", "np", ".", "matrix", "(", "s_working", ")", "SVD_weights", "=", "U_working", "[", ":", ",", "0", "]", "# If all the weights are negative take the abs", "if", "np", ".", "all", "(", "SVD_weights", "<", "0", ")", ":", "warnings", ".", "warn", "(", "'All weights are negative - flipping them'", ")", "SVD_weights", "=", "np", ".", "abs", "(", "SVD_weights", ")", "SVD_weights", "=", "np", ".", "array", "(", "SVD_weights", ")", ".", "reshape", "(", "-", "1", ")", ".", "tolist", "(", ")", "# Shuffle the SVD_weights prior to pairing - will give one of multiple", "# pairwise options - see p1956 of Rubinstein & Ellsworth 2010", "# We need to keep the real indexes though, otherwise, if there are", "# multiple events with the same weight we will end up with multiple", "# -1 values", "random_SVD_weights", "=", "np", ".", "copy", "(", "SVD_weights", ")", "# Tack on the indexes", "random_SVD_weights", "=", "random_SVD_weights", ".", "tolist", "(", ")", "random_SVD_weights", "=", "[", "(", "random_SVD_weights", "[", "_i", "]", ",", "_i", ")", "for", "_i", "in", "range", "(", "len", "(", "random_SVD_weights", ")", ")", "]", "random", ".", "shuffle", "(", "random_SVD_weights", ")", "# Add the first element to the end so all elements will be paired twice", "random_SVD_weights", ".", "append", "(", "random_SVD_weights", "[", "0", "]", ")", "# Take pairs of all the SVD_weights (each weight appears in 2 pairs)", "pairs", "=", "[", "]", 
"for", "pair", "in", "_pairwise", "(", "random_SVD_weights", ")", ":", "pairs", ".", "append", "(", "pair", ")", "# Deciding values for each place in kernel matrix using the pairs", "for", "pairsIndex", "in", "range", "(", "len", "(", "pairs", ")", ")", ":", "# We will normalize by the minimum weight", "_weights", "=", "list", "(", "zip", "(", "*", "list", "(", "pairs", "[", "pairsIndex", "]", ")", ")", ")", "[", "0", "]", "_indeces", "=", "list", "(", "zip", "(", "*", "list", "(", "pairs", "[", "pairsIndex", "]", ")", ")", ")", "[", "1", "]", "min_weight", "=", "min", "(", "np", ".", "abs", "(", "_weights", ")", ")", "max_weight", "=", "max", "(", "np", ".", "abs", "(", "_weights", ")", ")", "min_index", "=", "_indeces", "[", "np", ".", "argmin", "(", "np", ".", "abs", "(", "_weights", ")", ")", "]", "max_index", "=", "_indeces", "[", "np", ".", "argmax", "(", "np", ".", "abs", "(", "_weights", ")", ")", "]", "row", "=", "[", "]", "# Working out values for each row of kernel matrix", "for", "j", "in", "range", "(", "len", "(", "SVD_weights", ")", ")", ":", "if", "j", "==", "max_index", ":", "result", "=", "-", "1", "elif", "j", "==", "min_index", ":", "normalised", "=", "max_weight", "/", "min_weight", "result", "=", "float", "(", "normalised", ")", "else", ":", "result", "=", "0", "row", ".", "append", "(", "result", ")", "# Add each row to the K matrix", "k", ".", "append", "(", "row", ")", "# k is now a square matrix, we need to flesh it out to be K_width", "k_filled", "=", "np", ".", "zeros", "(", "[", "len", "(", "k", ")", ",", "K_width", "]", ")", "for", "j", "in", "range", "(", "len", "(", "k", ")", ")", ":", "for", "l", ",", "ev", "in", "enumerate", "(", "ev_list", ")", ":", "k_filled", "[", "j", ",", "ev", "]", "=", "k", "[", "j", "]", "[", "l", "]", "if", "'K'", "not", "in", "locals", "(", ")", ":", "K", "=", "k_filled", "else", ":", "K", "=", "np", ".", "concatenate", "(", "[", "K", ",", "k_filled", "]", ")", "# Remove any empty rows", "K_nonempty", "=", "[", "]", "events_out", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "K_width", ")", ":", "if", "not", "np", ".", "all", "(", "K", "[", ":", ",", "i", "]", "==", "0", ")", ":", "K_nonempty", ".", "append", "(", "K", "[", ":", ",", "i", "]", ")", "events_out", ".", "append", "(", "i", ")", "K", "=", "np", ".", "array", "(", "K_nonempty", ")", ".", "T", "K", "=", "K", ".", "tolist", "(", ")", "K_width", "=", "len", "(", "K", "[", "0", "]", ")", "# Add an extra row to K, so average moment = 1", "K", ".", "append", "(", "np", ".", "ones", "(", "K_width", ")", "*", "(", "1.", "/", "K_width", ")", ")", "print", "(", "\"Created Kernel matrix: \"", ")", "del", "row", "print", "(", "'\\n'", ".", "join", "(", "[", "''", ".", "join", "(", "[", "str", "(", "round", "(", "float", "(", "item", ")", ",", "3", ")", ")", ".", "ljust", "(", "6", ")", "for", "item", "in", "row", "]", ")", "for", "row", "in", "K", "]", ")", ")", "Krounded", "=", "np", ".", "around", "(", "K", ",", "decimals", "=", "4", ")", "# Create a weighting matrix to put emphasis on the final row.", "W", "=", "np", ".", "matrix", "(", "np", ".", "identity", "(", "len", "(", "K", ")", ")", ")", "# the final element of W = the number of stations*number of events", "W", "[", "-", "1", ",", "-", "1", "]", "=", "len", "(", "K", ")", "-", "1", "# Make K into a matrix", "K", "=", "np", ".", "matrix", "(", "K", ")", "############", "# Solve using the weighted least squares equation, K.T is K transpose", "Kinv", "=", "np", ".", "array", "(", "np", ".", "linalg", ".", 
"inv", "(", "K", ".", "T", "*", "W", "*", "K", ")", "*", "K", ".", "T", "*", "W", ")", "# M are the relative moments of the events", "M", "=", "Kinv", "[", ":", ",", "-", "1", "]", "# XXX TODO This still needs an outlier removal step", "return", "M", ",", "events_out" ]
Calculate relative moments/amplitudes using singular-value decomposition. Convert basis vectors calculated by singular value \ decomposition (see the SVD functions in clustering) into relative \ moments. For more information see the paper by \ `Rubinstein & Ellsworth (2010). <http://www.bssaonline.org/content/100/5A/1952.short>`_ :type u: list :param u: List of the :class:`numpy.ndarray` input basis vectors from the SVD, one array for each channel used. :type s: list :param s: List of the :class:`numpy.ndarray` of singular values, one array for each channel. :type v: list :param v: List of :class:`numpy.ndarray` of output basis vectors from SVD, one array per channel. :type stachans: list :param stachans: List of station.channel input :type event_list: list :param event_list: List of events for which you have data, such that \ event_list[i] corresponds to stachans[i], U[i] etc. and \ event_list[i][j] corresponds to event j in U[i]. These are a series \ of indexes that map the basis vectors to their relative events and \ channels - if you have every channel for every event generating these \ is trivial (see example). :type n_svs: int :param n_svs: Number of singular values to use, defaults to 4. :returns: M, array of relative moments :rtype: :class:`numpy.ndarray` :returns: events_out, list of events that relate to M (in order), \ does not include the magnitude information in the events, see note. :rtype: :class:`obspy.core.event.event.Event` .. note:: M is an array of relative moments (or amplitudes), these cannot be directly compared to true moments without calibration. .. note:: When comparing this method with the method used for creation of subspace detectors (Harris 2006) it is important to note that the input `design set` matrix in Harris contains waveforms as columns, whereas in Rubinstein & Ellsworth it contains waveforms as rows (i.e. the transpose of the Harris data matrix). The U and V matrices are therefore swapped between the two approaches. This is accounted for in EQcorrscan but may lead to confusion when reviewing the code. Here we use the Harris approach. .. rubric:: Example >>> from eqcorrscan.utils.mag_calc import svd_moments >>> from obspy import read >>> import glob >>> import os >>> from eqcorrscan.utils.clustering import svd >>> import numpy as np >>> # Do the set-up >>> testing_path = 'eqcorrscan/tests/test_data/similar_events' >>> stream_files = glob.glob(os.path.join(testing_path, '*')) >>> stream_list = [read(stream_file) for stream_file in stream_files] >>> event_list = [] >>> remove_list = [('WHAT2', 'SH1'), ('WV04', 'SHZ'), ('GCSZ', 'EHZ')] >>> for i, stream in enumerate(stream_list): ... st_list = [] ... for tr in stream: ... if (tr.stats.station, tr.stats.channel) not in remove_list: ... stream.remove(tr) ... continue ... tr.detrend('simple') ... tr.filter('bandpass', freqmin=5.0, freqmax=15.0) ... tr.trim(tr.stats.starttime + 40, tr.stats.endtime - 45) ... st_list.append(i) ... event_list.append(st_list) # doctest: +SKIP >>> event_list = np.asarray(event_list).T.tolist() >>> SVec, SVal, U, stachans = svd(stream_list=stream_list) # doctest: +SKIP ['GCSZ.EHZ', 'WV04.SHZ', 'WHAT2.SH1'] >>> M, events_out = svd_moments(u=U, s=SVal, v=SVec, stachans=stachans, ... event_list=event_list) # doctest: +SKIP
[ "Calculate", "relative", "moments", "/", "amplitudes", "using", "singular", "-", "value", "decomposition", "." ]
python
train
43.91133
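The final inversion step of the routine above is an ordinary weighted least-squares solve; a small self-contained numpy sketch of just that step, with a made-up 3-event kernel rather than data from the example:

import numpy as np

# Toy kernel: each of the first rows encodes one relative-amplitude pair
# (-1 against a normalised weight); the last row forces the average moment to 1.
K = np.array([[-1.0, 1.7, 0.0],
              [0.0, -1.0, 1.3],
              [1.0 / 3, 1.0 / 3, 1.0 / 3]])

# Weight matrix that emphasises the final "average moment" constraint.
W = np.identity(len(K))
W[-1, -1] = len(K) - 1

# Weighted least squares, mirroring the routine above:
#   Kinv = (K^T W K)^-1 K^T W, and the relative moments are its last column.
Kinv = np.linalg.inv(K.T @ W @ K) @ K.T @ W
M = Kinv[:, -1]
print(M)   # relative moments whose mean is (approximately) 1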
xtream1101/web-wrapper
web_wrapper/selenium_utils.py
https://github.com/xtream1101/web-wrapper/blob/2bfc63caa7d316564088951f01a490db493ea240/web_wrapper/selenium_utils.py#L63-L101
def _get_site(self, url, headers, cookies, timeout, driver_args, driver_kwargs):
    """
    Try and return page content in the requested format using selenium
    """
    try:
        # **TODO**: Find what exception this will throw and catch it and call
        # self.driver.execute_script("window.stop()")
        # Then still try and get the source from the page
        self.driver.set_page_load_timeout(timeout)
        self.driver.get(url)
        header_data = self.get_selenium_header()
        status_code = header_data['status-code']
        # Set data to access from script
        self.status_code = status_code
        self.url = self.driver.current_url

    except TimeoutException:
        logger.warning("Page timeout: {}".format(url))
        try:
            scraper_monitor.failed_url(url, 'Timeout')
        except (NameError, AttributeError):
            # Happens when scraper_monitor is not being used/setup
            pass
        except Exception:
            logger.exception("Unknown problem with scraper_monitor sending a failed url")

    except Exception as e:
        raise e.with_traceback(sys.exc_info()[2])

    else:
        # If an exception was not thrown then check the http status code
        if status_code < 400:
            # If the http status code is not an error
            return self.driver.page_source
        else:
            # If http status code is 400 or greater
            raise SeleniumHTTPError("Status code >= 400", status_code=status_code)
[ "def", "_get_site", "(", "self", ",", "url", ",", "headers", ",", "cookies", ",", "timeout", ",", "driver_args", ",", "driver_kwargs", ")", ":", "try", ":", "# **TODO**: Find what exception this will throw and catch it and call", "# self.driver.execute_script(\"window.stop()\")", "# Then still try and get the source from the page", "self", ".", "driver", ".", "set_page_load_timeout", "(", "timeout", ")", "self", ".", "driver", ".", "get", "(", "url", ")", "header_data", "=", "self", ".", "get_selenium_header", "(", ")", "status_code", "=", "header_data", "[", "'status-code'", "]", "# Set data to access from script", "self", ".", "status_code", "=", "status_code", "self", ".", "url", "=", "self", ".", "driver", ".", "current_url", "except", "TimeoutException", ":", "logger", ".", "warning", "(", "\"Page timeout: {}\"", ".", "format", "(", "url", ")", ")", "try", ":", "scraper_monitor", ".", "failed_url", "(", "url", ",", "'Timeout'", ")", "except", "(", "NameError", ",", "AttributeError", ")", ":", "# Happens when scraper_monitor is not being used/setup", "pass", "except", "Exception", ":", "logger", ".", "exception", "(", "\"Unknown problem with scraper_monitor sending a failed url\"", ")", "except", "Exception", "as", "e", ":", "raise", "e", ".", "with_traceback", "(", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ")", "else", ":", "# If an exception was not thrown then check the http status code", "if", "status_code", "<", "400", ":", "# If the http status code is not an error", "return", "self", ".", "driver", ".", "page_source", "else", ":", "# If http status code is 400 or greater", "raise", "SeleniumHTTPError", "(", "\"Status code >= 400\"", ",", "status_code", "=", "status_code", ")" ]
Try and return page content in the requested format using selenium
[ "Try", "and", "return", "page", "content", "in", "the", "requested", "format", "using", "selenium" ]
python
train
41.025641
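For the timeout handling above, a minimal standalone selenium sketch (assuming a locally available Chrome driver; the URL is illustrative) would be:

from selenium import webdriver
from selenium.common.exceptions import TimeoutException

driver = webdriver.Chrome()
driver.set_page_load_timeout(10)   # seconds before TimeoutException is raised

try:
    driver.get("https://example.com/slow-page")
    html = driver.page_source
except TimeoutException:
    # Stop the load and salvage whatever has rendered so far, as the TODO
    # comment in the row above suggests.
    driver.execute_script("window.stop()")
    html = driver.page_source
finally:
    driver.quit()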
LuminosoInsight/luminoso-api-client-python
luminoso_api/v4_json_stream.py
https://github.com/LuminosoInsight/luminoso-api-client-python/blob/3bedf2a454aee39214c11fbf556ead3eecc27881/luminoso_api/v4_json_stream.py#L289-L306
def main():
    """
    Handle command line arguments to convert a file to a JSON stream
    as a script.
    """
    logging.basicConfig(level=logging.INFO)
    import argparse
    parser = argparse.ArgumentParser(
        description="Translate CSV or JSON input to a JSON stream, or verify "
                    "something that is already a JSON stream."
    )
    parser.add_argument('input',
                        help='A CSV, JSON, or JSON stream file to read.')
    parser.add_argument('output', nargs='?', default=None,
                        help="The filename to output to. Recommended extension is .jsons. "
                             "If omitted, use standard output.")
    args = parser.parse_args()
    transcode(args.input, args.output)
[ "def", "main", "(", ")", ":", "logging", ".", "basicConfig", "(", "level", "=", "logging", ".", "INFO", ")", "import", "argparse", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Translate CSV or JSON input to a JSON stream, or verify \"", "\"something that is already a JSON stream.\"", ")", "parser", ".", "add_argument", "(", "'input'", ",", "help", "=", "'A CSV, JSON, or JSON stream file to read.'", ")", "parser", ".", "add_argument", "(", "'output'", ",", "nargs", "=", "'?'", ",", "default", "=", "None", ",", "help", "=", "\"The filename to output to. Recommended extension is .jsons. \"", "\"If omitted, use standard output.\"", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "transcode", "(", "args", ".", "input", ",", "args", ".", "output", ")" ]
Handle command line arguments to convert a file to a JSON stream as a script.
[ "Handle", "command", "line", "arguments", "to", "convert", "a", "file", "to", "a", "JSON", "stream", "as", "a", "script", "." ]
python
test
38.388889
SUNCAT-Center/CatHub
cathub/reaction_networks.py
https://github.com/SUNCAT-Center/CatHub/blob/324625d1d8e740673f139658b2de4c9e1059739e/cathub/reaction_networks.py#L689-L721
def set_intermediates(self, intermediates, betas=None, transition_states=None):
    """Sets up intermediates and specifies whether it's an electrochemical step.
    Either provide individual contributions or net contributions.
    If both are given, only the net contributions are used.

    intermediate_list: list of basestrings
    transition_states: list of True and False
    electrochemical_steps: list of True and False
    betas = list of charge transfer coefficients
    net_corrections: A sum of all contributions per intermediate.
    """
    self.intermediates = intermediates
    self.betas = betas
    self.transition_states = transition_states

    if self.corrections is None:
        self.net_corrections = [0.0 for _ in intermediates]

    if not self.betas:
        self.betas = [0.0 for _ in intermediates]

    if not self.transition_states:
        self.transition_states = [False for _ in intermediates]

    # check if all lists have same length:
    props = [len(self.intermediates),
             len(self.net_corrections),
             len(self.transition_states),
             len(self.betas)]
    if not len(set(props)) <= 1:
        raise ValueError('intermediate, net_corrections, transition_states and , '
                         'betas all have to have the same length')
    self.get_corrections()
    return(True)
[ "def", "set_intermediates", "(", "self", ",", "intermediates", ",", "betas", "=", "None", ",", "transition_states", "=", "None", ")", ":", "self", ".", "intermediates", "=", "intermediates", "self", ".", "betas", "=", "betas", "self", ".", "transition_states", "=", "transition_states", "if", "self", ".", "corrections", "is", "None", ":", "self", ".", "net_corrections", "=", "[", "0.0", "for", "_", "in", "intermediates", "]", "if", "not", "self", ".", "betas", ":", "self", ".", "betas", "=", "[", "0.0", "for", "_", "in", "intermediates", "]", "if", "not", "self", ".", "transition_states", ":", "self", ".", "transition_states", "=", "[", "False", "for", "_", "in", "intermediates", "]", "# check if all lists have same length:", "props", "=", "[", "len", "(", "self", ".", "intermediates", ")", ",", "len", "(", "self", ".", "net_corrections", ")", ",", "len", "(", "self", ".", "transition_states", ")", ",", "len", "(", "self", ".", "betas", ")", "]", "if", "not", "len", "(", "set", "(", "props", ")", ")", "<=", "1", ":", "raise", "ValueError", "(", "'intermediate, net_corrections, transition_states and , '", "'betas all have to have the same length'", ")", "self", ".", "get_corrections", "(", ")", "return", "(", "True", ")" ]
Sets up intermediates and specifies whether it's an electrochemical step. Either provide individual contributions or net contributions. If both are given, only the net contributions are used. intermediate_list: list of basestrings transition_states: list of True and False electrochemical_steps: list of True and False betas = list of charge transfer coefficients net_corrections: A sum of all contributions per intermediate.
[ "Sets", "up", "intermediates", "and", "specifies", "whether", "it", "s", "an", "electrochemical", "step", ".", "Either", "provide", "individual", "contributions", "or", "net", "contributions", ".", "If", "both", "are", "given", "only", "the", "net", "contributions", "are", "used", "." ]
python
train
42.242424
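The consistency check at the end of the routine above boils down to comparing list lengths through a set; a tiny standalone illustration with invented adsorbate labels:

# Invented labels: one beta and one transition-state flag per intermediate.
intermediates = ["*", "OH*", "O*"]
betas = [0.0, 0.5, 0.5]
transition_states = [False, False, False]

# If all lists have the same length, the set of lengths collapses to one value.
lengths = {len(intermediates), len(betas), len(transition_states)}
if len(lengths) > 1:
    raise ValueError("intermediates, betas and transition_states "
                     "must all have the same length")
print("consistent lists of length", lengths.pop())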
inasafe/inasafe
safe/plugin.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/plugin.py#L338-L347
def _create_geonode_uploader_action(self):
    """Create action for Geonode uploader dialog."""
    icon = resources_path('img', 'icons', 'geonode.png')
    label = tr('Geonode Uploader')
    self.action_geonode = QAction(
        QIcon(icon), label, self.iface.mainWindow())
    self.action_geonode.setStatusTip(label)
    self.action_geonode.setWhatsThis(label)
    self.action_geonode.triggered.connect(self.show_geonode_uploader)
    self.add_action(self.action_geonode, add_to_toolbar=False)
[ "def", "_create_geonode_uploader_action", "(", "self", ")", ":", "icon", "=", "resources_path", "(", "'img'", ",", "'icons'", ",", "'geonode.png'", ")", "label", "=", "tr", "(", "'Geonode Uploader'", ")", "self", ".", "action_geonode", "=", "QAction", "(", "QIcon", "(", "icon", ")", ",", "label", ",", "self", ".", "iface", ".", "mainWindow", "(", ")", ")", "self", ".", "action_geonode", ".", "setStatusTip", "(", "label", ")", "self", ".", "action_geonode", ".", "setWhatsThis", "(", "label", ")", "self", ".", "action_geonode", ".", "triggered", ".", "connect", "(", "self", ".", "show_geonode_uploader", ")", "self", ".", "add_action", "(", "self", ".", "action_geonode", ",", "add_to_toolbar", "=", "False", ")" ]
Create action for Geonode uploader dialog.
[ "Create", "action", "for", "Geonode", "uploader", "dialog", "." ]
python
train
52.3
saltstack/salt
salt/modules/state.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/state.py#L527-L576
def template(tem, queue=False, **kwargs):
    '''
    Execute the information stored in a template file on the minion.

    This function does not ask a master for a SLS file to render but
    instead directly processes the file at the provided path on the minion.

    CLI Example:

    .. code-block:: bash

        salt '*' state.template '<Path to template on the minion>'
    '''
    if 'env' in kwargs:
        # "env" is not supported; Use "saltenv".
        kwargs.pop('env')

    conflict = _check_queue(queue, kwargs)
    if conflict is not None:
        return conflict

    opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
    try:
        st_ = salt.state.HighState(opts,
                                   context=__context__,
                                   proxy=__proxy__,
                                   initial_pillar=_get_initial_pillar(opts))
    except NameError:
        st_ = salt.state.HighState(opts,
                                   context=__context__,
                                   initial_pillar=_get_initial_pillar(opts))

    errors = _get_pillar_errors(kwargs, pillar=st_.opts['pillar'])
    if errors:
        __context__['retcode'] = salt.defaults.exitcodes.EX_PILLAR_FAILURE
        raise CommandExecutionError('Pillar failed to render', info=errors)

    if not tem.endswith('.sls'):
        tem = '{sls}.sls'.format(sls=tem)
    high_state, errors = st_.render_state(tem,
                                          kwargs.get('saltenv', ''),
                                          '',
                                          None,
                                          local=True)
    if errors:
        __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
        return errors
    ret = st_.state.call_high(high_state)
    _set_retcode(ret, highstate=high_state)
    return ret
[ "def", "template", "(", "tem", ",", "queue", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "'env'", "in", "kwargs", ":", "# \"env\" is not supported; Use \"saltenv\".", "kwargs", ".", "pop", "(", "'env'", ")", "conflict", "=", "_check_queue", "(", "queue", ",", "kwargs", ")", "if", "conflict", "is", "not", "None", ":", "return", "conflict", "opts", "=", "salt", ".", "utils", ".", "state", ".", "get_sls_opts", "(", "__opts__", ",", "*", "*", "kwargs", ")", "try", ":", "st_", "=", "salt", ".", "state", ".", "HighState", "(", "opts", ",", "context", "=", "__context__", ",", "proxy", "=", "__proxy__", ",", "initial_pillar", "=", "_get_initial_pillar", "(", "opts", ")", ")", "except", "NameError", ":", "st_", "=", "salt", ".", "state", ".", "HighState", "(", "opts", ",", "context", "=", "__context__", ",", "initial_pillar", "=", "_get_initial_pillar", "(", "opts", ")", ")", "errors", "=", "_get_pillar_errors", "(", "kwargs", ",", "pillar", "=", "st_", ".", "opts", "[", "'pillar'", "]", ")", "if", "errors", ":", "__context__", "[", "'retcode'", "]", "=", "salt", ".", "defaults", ".", "exitcodes", ".", "EX_PILLAR_FAILURE", "raise", "CommandExecutionError", "(", "'Pillar failed to render'", ",", "info", "=", "errors", ")", "if", "not", "tem", ".", "endswith", "(", "'.sls'", ")", ":", "tem", "=", "'{sls}.sls'", ".", "format", "(", "sls", "=", "tem", ")", "high_state", ",", "errors", "=", "st_", ".", "render_state", "(", "tem", ",", "kwargs", ".", "get", "(", "'saltenv'", ",", "''", ")", ",", "''", ",", "None", ",", "local", "=", "True", ")", "if", "errors", ":", "__context__", "[", "'retcode'", "]", "=", "salt", ".", "defaults", ".", "exitcodes", ".", "EX_STATE_COMPILER_ERROR", "return", "errors", "ret", "=", "st_", ".", "state", ".", "call_high", "(", "high_state", ")", "_set_retcode", "(", "ret", ",", "highstate", "=", "high_state", ")", "return", "ret" ]
Execute the information stored in a template file on the minion.

This function does not ask a master for a SLS file to render but
instead directly processes the file at the provided path on the minion.

CLI Example:

.. code-block:: bash

    salt '*' state.template '<Path to template on the minion>'
[ "Execute", "the", "information", "stored", "in", "a", "template", "file", "on", "the", "minion", "." ]
python
train
36.34
saltstack/salt
salt/modules/osquery.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/osquery.py#L209-L221
def shared_memory(attrs=None, where=None):
    '''
    Return shared_memory information from osquery

    CLI Example:

    .. code-block:: bash

        salt '*' osquery.shared_memory
    '''
    if __grains__['os_family'] in ['RedHat', 'Debian']:
        return _osquery_cmd(table='shared_memory', attrs=attrs, where=where)
    return {'result': False, 'comment': 'Only available on Red Hat or Debian based systems.'}
[ "def", "shared_memory", "(", "attrs", "=", "None", ",", "where", "=", "None", ")", ":", "if", "__grains__", "[", "'os_family'", "]", "in", "[", "'RedHat'", ",", "'Debian'", "]", ":", "return", "_osquery_cmd", "(", "table", "=", "'shared_memory'", ",", "attrs", "=", "attrs", ",", "where", "=", "where", ")", "return", "{", "'result'", ":", "False", ",", "'comment'", ":", "'Only available on Red Hat or Debian based systems.'", "}" ]
Return shared_memory information from osquery CLI Example: .. code-block:: bash salt '*' osquery.shared_memory
[ "Return", "shared_memory", "information", "from", "osquery" ]
python
train
31.307692
tanghaibao/jcvi
jcvi/assembly/allmaps.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/assembly/allmaps.py#L203-L221
def prepare_ec(self, scaffolds, tour, weights):
    """
    Prepare Evolutionary Computation. This converts scaffold names into
    indices (integer) in the scaffolds array.
    """
    scaffolds_ii = dict((s, i) for i, s in enumerate(scaffolds))
    scfs = []
    ww = []
    for mlg in self.linkage_groups:
        w = float(weights[mlg.mapname])
        scf = {}
        for s, o in tour:
            si = scaffolds_ii[s]
            scf[si] = self.get_series(mlg.lg, s, orientation=o)
        scfs.append(scf)
        ww.append(w)
    tour = [scaffolds_ii[x] for x, o in tour]
    return scfs, tour, ww
[ "def", "prepare_ec", "(", "self", ",", "scaffolds", ",", "tour", ",", "weights", ")", ":", "scaffolds_ii", "=", "dict", "(", "(", "s", ",", "i", ")", "for", "i", ",", "s", "in", "enumerate", "(", "scaffolds", ")", ")", "scfs", "=", "[", "]", "ww", "=", "[", "]", "for", "mlg", "in", "self", ".", "linkage_groups", ":", "w", "=", "float", "(", "weights", "[", "mlg", ".", "mapname", "]", ")", "scf", "=", "{", "}", "for", "s", ",", "o", "in", "tour", ":", "si", "=", "scaffolds_ii", "[", "s", "]", "scf", "[", "si", "]", "=", "self", ".", "get_series", "(", "mlg", ".", "lg", ",", "s", ",", "orientation", "=", "o", ")", "scfs", ".", "append", "(", "scf", ")", "ww", ".", "append", "(", "w", ")", "tour", "=", "[", "scaffolds_ii", "[", "x", "]", "for", "x", ",", "o", "in", "tour", "]", "return", "scfs", ",", "tour", ",", "ww" ]
Prepare Evolutionary Computation. This converts scaffold names into indices (integer) in the scaffolds array.
[ "Prepare", "Evolutionary", "Computation", ".", "This", "converts", "scaffold", "names", "into", "indices", "(", "integer", ")", "in", "the", "scaffolds", "array", "." ]
python
train
34.578947
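A self-contained sketch of the name-to-index conversion that prepare_ec performs on the tour; the scaffold names and orientations below are made up, and the per-linkage-group series building is omitted:

    scaffolds = ['scaffold_1', 'scaffold_7', 'scaffold_3']
    tour = [('scaffold_7', '+'), ('scaffold_1', '-')]        # (name, orientation)

    # Same mapping as dict((s, i) for i, s in enumerate(scaffolds)) above.
    scaffolds_ii = {s: i for i, s in enumerate(scaffolds)}
    tour_ii = [scaffolds_ii[name] for name, orientation in tour]
    print(tour_ii)   # [1, 0] -- scaffold names replaced by integer indices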
BernardFW/bernard
src/bernard/server/views.py
https://github.com/BernardFW/bernard/blob/9c55703e5ffe5717c9fa39793df59dbfa5b4c5ab/src/bernard/server/views.py#L99-L108
async def postback_send(msg: BaseMessage, platform: Platform) -> Response: """ Injects the POST body into the FSM as a Postback message. """ await platform.inject_message(msg) return json_response({ 'status': 'ok', })
[ "async", "def", "postback_send", "(", "msg", ":", "BaseMessage", ",", "platform", ":", "Platform", ")", "->", "Response", ":", "await", "platform", ".", "inject_message", "(", "msg", ")", "return", "json_response", "(", "{", "'status'", ":", "'ok'", ",", "}", ")" ]
Injects the POST body into the FSM as a Postback message.
[ "Injects", "the", "POST", "body", "into", "the", "FSM", "as", "a", "Postback", "message", "." ]
python
train
24.2
gem/oq-engine
openquake/hazardlib/gsim/bindi_2011.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/bindi_2011.py#L135-L148
def _compute_magnitude(self, rup, C): """ Compute the third term of the equation 1: e1 + b1 * (M-Mh) + b2 * (M-Mh)**2 for M<=Mh e1 + b3 * (M-Mh) otherwise """ m_h = 6.75 b_3 = 0.0 if rup.mag <= m_h: return C["e1"] + (C['b1'] * (rup.mag - m_h)) +\ (C['b2'] * (rup.mag - m_h) ** 2) else: return C["e1"] + (b_3 * (rup.mag - m_h))
[ "def", "_compute_magnitude", "(", "self", ",", "rup", ",", "C", ")", ":", "m_h", "=", "6.75", "b_3", "=", "0.0", "if", "rup", ".", "mag", "<=", "m_h", ":", "return", "C", "[", "\"e1\"", "]", "+", "(", "C", "[", "'b1'", "]", "*", "(", "rup", ".", "mag", "-", "m_h", ")", ")", "+", "(", "C", "[", "'b2'", "]", "*", "(", "rup", ".", "mag", "-", "m_h", ")", "**", "2", ")", "else", ":", "return", "C", "[", "\"e1\"", "]", "+", "(", "b_3", "*", "(", "rup", ".", "mag", "-", "m_h", ")", ")" ]
Compute the third term of the equation 1: e1 + b1 * (M-Mh) + b2 * (M-Mh)**2 for M<=Mh e1 + b3 * (M-Mh) otherwise
[ "Compute", "the", "third", "term", "of", "the", "equation", "1", ":" ]
python
train
30.428571
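The _compute_magnitude record encodes the piecewise magnitude term of equation 1, with the hinge magnitude fixed at 6.75 and b3 fixed at 0. A standalone sketch of that term; the coefficients in the example call are invented for illustration and are not the published Bindi et al. (2011) values:

    def magnitude_term(mag, e1, b1, b2, b3=0.0, m_h=6.75):
        # e1 + b1*(M - Mh) + b2*(M - Mh)**2 for M <= Mh, else e1 + b3*(M - Mh)
        if mag <= m_h:
            return e1 + b1 * (mag - m_h) + b2 * (mag - m_h) ** 2
        return e1 + b3 * (mag - m_h)

    print(magnitude_term(5.5, e1=3.0, b1=0.2, b2=-0.1))   # 2.59375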
Alignak-monitoring/alignak
alignak/external_command.py
https://github.com/Alignak-monitoring/alignak/blob/f3c145207e83159b799d3714e4241399c7740a64/alignak/external_command.py#L3507-L3537
def schedule_hostgroup_host_downtime(self, hostgroup, start_time, end_time, fixed, trigger_id, duration, author, comment): """Schedule a downtime for each host of a hostgroup Format of the line that triggers function call:: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;<end_time>; <fixed>;<trigger_id>;<duration>;<author>;<comment> :param hostgroup: hostgroup to schedule :type hostgroup: alignak.objects.hostgroup.Hostgroup :param start_time: downtime start time :type start_time: :param end_time: downtime end time :type end_time: :param fixed: is downtime fixed :type fixed: :param trigger_id: downtime id that triggered this one :type trigger_id: str :param duration: downtime duration :type duration: int :param author: downtime author :type author: str :param comment: downtime comment :type comment: str :return: None """ for host_id in hostgroup.get_hosts(): if host_id in self.daemon.hosts: host = self.daemon.hosts[host_id] self.schedule_host_downtime(host, start_time, end_time, fixed, trigger_id, duration, author, comment)
[ "def", "schedule_hostgroup_host_downtime", "(", "self", ",", "hostgroup", ",", "start_time", ",", "end_time", ",", "fixed", ",", "trigger_id", ",", "duration", ",", "author", ",", "comment", ")", ":", "for", "host_id", "in", "hostgroup", ".", "get_hosts", "(", ")", ":", "if", "host_id", "in", "self", ".", "daemon", ".", "hosts", ":", "host", "=", "self", ".", "daemon", ".", "hosts", "[", "host_id", "]", "self", ".", "schedule_host_downtime", "(", "host", ",", "start_time", ",", "end_time", ",", "fixed", ",", "trigger_id", ",", "duration", ",", "author", ",", "comment", ")" ]
Schedule a downtime for each host of a hostgroup Format of the line that triggers function call:: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;<hostgroup_name>;<start_time>;<end_time>; <fixed>;<trigger_id>;<duration>;<author>;<comment> :param hostgroup: hostgroup to schedule :type hostgroup: alignak.objects.hostgroup.Hostgroup :param start_time: downtime start time :type start_time: :param end_time: downtime end time :type end_time: :param fixed: is downtime fixed :type fixed: :param trigger_id: downtime id that triggered this one :type trigger_id: str :param duration: downtime duration :type duration: int :param author: downtime author :type author: str :param comment: downtime comment :type comment: str :return: None
[ "Schedule", "a", "downtime", "for", "each", "host", "of", "a", "hostgroup", "Format", "of", "the", "line", "that", "triggers", "function", "call", "::" ]
python
train
43.16129
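Grounded in the command format quoted in the docstring above, a sketch of composing such an external command in Python; the hostgroup name, author, comment, and times are hypothetical, and the flag meanings (fixed=1 for a fixed window, trigger_id=0 for none) follow the usual Nagios-style conventions rather than anything stated in this record:

    import time

    start = int(time.time())
    duration = 2 * 3600                       # two-hour downtime
    cmd = ';'.join(str(field) for field in (
        'SCHEDULE_HOSTGROUP_HOST_DOWNTIME',
        'web-servers',                        # <hostgroup_name>
        start,                                # <start_time>
        start + duration,                     # <end_time>
        1,                                    # <fixed>
        0,                                    # <trigger_id>
        duration,                             # <duration>
        'admin',                              # <author>
        'planned kernel upgrade',             # <comment>
    ))
    print(cmd)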
dropbox/stone
stone/frontend/ir_generator.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/frontend/ir_generator.py#L1056-L1136
def _resolve_type(self, env, type_ref, enforce_fully_defined=False): """ Resolves the data type referenced by type_ref. If `enforce_fully_defined` is True, then the referenced type must be fully populated (fields, parent_type, ...), and not simply a forward reference. """ loc = type_ref.lineno, type_ref.path orig_namespace_name = env.namespace_name if type_ref.ns: # TODO(kelkabany): If a spec file imports a namespace, it is # available to all spec files that are part of the same namespace. # Might want to introduce the concept of an environment specific # to a file. if type_ref.ns not in env: raise InvalidSpec( 'Namespace %s is not imported' % quote(type_ref.ns), *loc) env = env[type_ref.ns] if not isinstance(env, Environment): raise InvalidSpec( '%s is not a namespace.' % quote(type_ref.ns), *loc) if type_ref.name not in env: raise InvalidSpec( 'Symbol %s is undefined.' % quote(type_ref.name), *loc) obj = env[type_ref.name] if obj is Void and type_ref.nullable: raise InvalidSpec('Void cannot be marked nullable.', *loc) elif inspect.isclass(obj): resolved_data_type_args = self._resolve_args(env, type_ref.args) data_type = self._instantiate_data_type( obj, resolved_data_type_args, (type_ref.lineno, type_ref.path)) elif isinstance(obj, ApiRoutesByVersion): raise InvalidSpec('A route cannot be referenced here.', *loc) elif type_ref.args[0] or type_ref.args[1]: # An instance of a type cannot have any additional # attributes specified. raise InvalidSpec('Attributes cannot be specified for ' 'instantiated type %s.' % quote(type_ref.name), *loc) else: data_type = env[type_ref.name] if type_ref.ns: # Add the source namespace as an import. namespace = self.api.ensure_namespace(orig_namespace_name) if isinstance(data_type, UserDefined): namespace.add_imported_namespace( self.api.ensure_namespace(type_ref.ns), imported_data_type=True) elif isinstance(data_type, Alias): namespace.add_imported_namespace( self.api.ensure_namespace(type_ref.ns), imported_alias=True) if (enforce_fully_defined and isinstance(data_type, UserDefined) and data_type._is_forward_ref): if data_type in self._resolution_in_progress: raise InvalidSpec( 'Unresolvable circular reference for type %s.' % quote(type_ref.name), *loc) self._resolution_in_progress.add(data_type) if isinstance(data_type, Struct): self._populate_struct_type_attributes(env, data_type) elif isinstance(data_type, Union): self._populate_union_type_attributes(env, data_type) self._resolution_in_progress.remove(data_type) if type_ref.nullable: unwrapped_dt, _ = unwrap_aliases(data_type) if isinstance(unwrapped_dt, Nullable): raise InvalidSpec( 'Cannot mark reference to nullable type as nullable.', *loc) data_type = Nullable(data_type) return data_type
[ "def", "_resolve_type", "(", "self", ",", "env", ",", "type_ref", ",", "enforce_fully_defined", "=", "False", ")", ":", "loc", "=", "type_ref", ".", "lineno", ",", "type_ref", ".", "path", "orig_namespace_name", "=", "env", ".", "namespace_name", "if", "type_ref", ".", "ns", ":", "# TODO(kelkabany): If a spec file imports a namespace, it is", "# available to all spec files that are part of the same namespace.", "# Might want to introduce the concept of an environment specific", "# to a file.", "if", "type_ref", ".", "ns", "not", "in", "env", ":", "raise", "InvalidSpec", "(", "'Namespace %s is not imported'", "%", "quote", "(", "type_ref", ".", "ns", ")", ",", "*", "loc", ")", "env", "=", "env", "[", "type_ref", ".", "ns", "]", "if", "not", "isinstance", "(", "env", ",", "Environment", ")", ":", "raise", "InvalidSpec", "(", "'%s is not a namespace.'", "%", "quote", "(", "type_ref", ".", "ns", ")", ",", "*", "loc", ")", "if", "type_ref", ".", "name", "not", "in", "env", ":", "raise", "InvalidSpec", "(", "'Symbol %s is undefined.'", "%", "quote", "(", "type_ref", ".", "name", ")", ",", "*", "loc", ")", "obj", "=", "env", "[", "type_ref", ".", "name", "]", "if", "obj", "is", "Void", "and", "type_ref", ".", "nullable", ":", "raise", "InvalidSpec", "(", "'Void cannot be marked nullable.'", ",", "*", "loc", ")", "elif", "inspect", ".", "isclass", "(", "obj", ")", ":", "resolved_data_type_args", "=", "self", ".", "_resolve_args", "(", "env", ",", "type_ref", ".", "args", ")", "data_type", "=", "self", ".", "_instantiate_data_type", "(", "obj", ",", "resolved_data_type_args", ",", "(", "type_ref", ".", "lineno", ",", "type_ref", ".", "path", ")", ")", "elif", "isinstance", "(", "obj", ",", "ApiRoutesByVersion", ")", ":", "raise", "InvalidSpec", "(", "'A route cannot be referenced here.'", ",", "*", "loc", ")", "elif", "type_ref", ".", "args", "[", "0", "]", "or", "type_ref", ".", "args", "[", "1", "]", ":", "# An instance of a type cannot have any additional", "# attributes specified.", "raise", "InvalidSpec", "(", "'Attributes cannot be specified for '", "'instantiated type %s.'", "%", "quote", "(", "type_ref", ".", "name", ")", ",", "*", "loc", ")", "else", ":", "data_type", "=", "env", "[", "type_ref", ".", "name", "]", "if", "type_ref", ".", "ns", ":", "# Add the source namespace as an import.", "namespace", "=", "self", ".", "api", ".", "ensure_namespace", "(", "orig_namespace_name", ")", "if", "isinstance", "(", "data_type", ",", "UserDefined", ")", ":", "namespace", ".", "add_imported_namespace", "(", "self", ".", "api", ".", "ensure_namespace", "(", "type_ref", ".", "ns", ")", ",", "imported_data_type", "=", "True", ")", "elif", "isinstance", "(", "data_type", ",", "Alias", ")", ":", "namespace", ".", "add_imported_namespace", "(", "self", ".", "api", ".", "ensure_namespace", "(", "type_ref", ".", "ns", ")", ",", "imported_alias", "=", "True", ")", "if", "(", "enforce_fully_defined", "and", "isinstance", "(", "data_type", ",", "UserDefined", ")", "and", "data_type", ".", "_is_forward_ref", ")", ":", "if", "data_type", "in", "self", ".", "_resolution_in_progress", ":", "raise", "InvalidSpec", "(", "'Unresolvable circular reference for type %s.'", "%", "quote", "(", "type_ref", ".", "name", ")", ",", "*", "loc", ")", "self", ".", "_resolution_in_progress", ".", "add", "(", "data_type", ")", "if", "isinstance", "(", "data_type", ",", "Struct", ")", ":", "self", ".", "_populate_struct_type_attributes", "(", "env", ",", "data_type", ")", "elif", "isinstance", "(", "data_type", ",", "Union", 
")", ":", "self", ".", "_populate_union_type_attributes", "(", "env", ",", "data_type", ")", "self", ".", "_resolution_in_progress", ".", "remove", "(", "data_type", ")", "if", "type_ref", ".", "nullable", ":", "unwrapped_dt", ",", "_", "=", "unwrap_aliases", "(", "data_type", ")", "if", "isinstance", "(", "unwrapped_dt", ",", "Nullable", ")", ":", "raise", "InvalidSpec", "(", "'Cannot mark reference to nullable type as nullable.'", ",", "*", "loc", ")", "data_type", "=", "Nullable", "(", "data_type", ")", "return", "data_type" ]
Resolves the data type referenced by type_ref. If `enforce_fully_defined` is True, then the referenced type must be fully populated (fields, parent_type, ...), and not simply a forward reference.
[ "Resolves", "the", "data", "type", "referenced", "by", "type_ref", "." ]
python
train
45.345679
juju/charm-helpers
charmhelpers/contrib/openstack/utils.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/utils.py#L1077-L1090
def _check_running_services(services): """Check that the services dict provided is actually running and provide a list of (service, boolean) tuples for each service. Returns both a zipped list of (service, boolean) and a list of booleans in the same order as the services. @param services: OrderedDict of strings: [ports], one for each service to check. @returns [(service, boolean), ...], : results for checks [boolean] : just the result of the service checks """ services_running = [service_running(s) for s in services] return list(zip(services, services_running)), services_running
[ "def", "_check_running_services", "(", "services", ")", ":", "services_running", "=", "[", "service_running", "(", "s", ")", "for", "s", "in", "services", "]", "return", "list", "(", "zip", "(", "services", ",", "services_running", ")", ")", ",", "services_running" ]
Check that the services dict provided is actually running and provide a list of (service, boolean) tuples for each service. Returns both a zipped list of (service, boolean) and a list of booleans in the same order as the services. @param services: OrderedDict of strings: [ports], one for each service to check. @returns [(service, boolean), ...], : results for checks [boolean] : just the result of the service checks
[ "Check", "that", "the", "services", "dict", "provided", "is", "actually", "running", "and", "provide", "a", "list", "of", "(", "service", "boolean", ")", "tuples", "for", "each", "service", "." ]
python
train
47.214286
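A runnable illustration of the (service, boolean) pairs and the parallel boolean list that _check_running_services returns; the service names are arbitrary and the local service_running stub stands in for charmhelpers' real check:

    from collections import OrderedDict

    def service_running(name):
        # Stub: pretend only haproxy is currently up.
        return name == 'haproxy'

    services = OrderedDict([('haproxy', [80]), ('apache2', [8080])])
    services_running = [service_running(s) for s in services]
    pairs = list(zip(services, services_running))
    print(pairs)              # [('haproxy', True), ('apache2', False)]
    print(services_running)   # [True, False]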
apache/spark
python/pyspark/util.py
https://github.com/apache/spark/blob/618d6bff71073c8c93501ab7392c3cc579730f0b/python/pyspark/util.py#L70-L89
def majorMinorVersion(sparkVersion): """ Given a Spark version string, return the (major version number, minor version number). E.g., for 2.0.1-SNAPSHOT, return (2, 0). >>> sparkVersion = "2.4.0" >>> VersionUtils.majorMinorVersion(sparkVersion) (2, 4) >>> sparkVersion = "2.3.0-SNAPSHOT" >>> VersionUtils.majorMinorVersion(sparkVersion) (2, 3) """ m = re.search(r'^(\d+)\.(\d+)(\..*)?$', sparkVersion) if m is not None: return (int(m.group(1)), int(m.group(2))) else: raise ValueError("Spark tried to parse '%s' as a Spark" % sparkVersion + " version string, but it could not find the major and minor" + " version numbers.")
[ "def", "majorMinorVersion", "(", "sparkVersion", ")", ":", "m", "=", "re", ".", "search", "(", "r'^(\\d+)\\.(\\d+)(\\..*)?$'", ",", "sparkVersion", ")", "if", "m", "is", "not", "None", ":", "return", "(", "int", "(", "m", ".", "group", "(", "1", ")", ")", ",", "int", "(", "m", ".", "group", "(", "2", ")", ")", ")", "else", ":", "raise", "ValueError", "(", "\"Spark tried to parse '%s' as a Spark\"", "%", "sparkVersion", "+", "\" version string, but it could not find the major and minor\"", "+", "\" version numbers.\"", ")" ]
Given a Spark version string, return the (major version number, minor version number). E.g., for 2.0.1-SNAPSHOT, return (2, 0). >>> sparkVersion = "2.4.0" >>> VersionUtils.majorMinorVersion(sparkVersion) (2, 4) >>> sparkVersion = "2.3.0-SNAPSHOT" >>> VersionUtils.majorMinorVersion(sparkVersion) (2, 3)
[ "Given", "a", "Spark", "version", "string", "return", "the", "(", "major", "version", "number", "minor", "version", "number", ")", ".", "E", ".", "g", ".", "for", "2", ".", "0", ".", "1", "-", "SNAPSHOT", "return", "(", "2", "0", ")", "." ]
python
train
39.65
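The docstring above already shows the happy path; a small standalone sketch of the same regex that also exercises the failure branch (the version strings are arbitrary examples, and the helper name is mine, not Spark's):

    import re

    def major_minor(version):
        m = re.search(r'^(\d+)\.(\d+)(\..*)?$', version)
        if m is None:
            raise ValueError('cannot parse %r as a Spark version string' % version)
        return int(m.group(1)), int(m.group(2))

    print(major_minor('2.4.0'))            # (2, 4)
    print(major_minor('3.0.1-SNAPSHOT'))   # (3, 0)
    # major_minor('not-a-version')         # would raise ValueError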
SetBased/py-stratum
pystratum/application/PyStratumApplication.py
https://github.com/SetBased/py-stratum/blob/7c5ffaa2fdd03f865832a5190b5897ff2c0e3155/pystratum/application/PyStratumApplication.py#L25-L38
def get_default_commands(self): """ Returns the default commands of this application. :rtype: list[cleo.Command] """ commands = Application.get_default_commands(self) self.add(ConstantsCommand()) self.add(LoaderCommand()) self.add(PyStratumCommand()) self.add(WrapperCommand()) return commands
[ "def", "get_default_commands", "(", "self", ")", ":", "commands", "=", "Application", ".", "get_default_commands", "(", "self", ")", "self", ".", "add", "(", "ConstantsCommand", "(", ")", ")", "self", ".", "add", "(", "LoaderCommand", "(", ")", ")", "self", ".", "add", "(", "PyStratumCommand", "(", ")", ")", "self", ".", "add", "(", "WrapperCommand", "(", ")", ")", "return", "commands" ]
Returns the default commands of this application. :rtype: list[cleo.Command]
[ "Returns", "the", "default", "commands", "of", "this", "application", "." ]
python
train
25.928571
horazont/aioxmpp
aioxmpp/structs.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/structs.py#L792-L815
def fromstr(cls, s, *, strict=True): """ Construct a JID out of a string containing it. :param s: The string to parse. :type s: :class:`str` :param strict: Whether to enable strict parsing. :type strict: :class:`bool` :raises: See :class:`JID` :return: The parsed JID :rtype: :class:`JID` See the :class:`JID` class level documentation for the semantics of `strict`. """ nodedomain, sep, resource = s.partition("/") if not sep: resource = None localpart, sep, domain = nodedomain.partition("@") if not sep: domain = localpart localpart = None return cls(localpart, domain, resource, strict=strict)
[ "def", "fromstr", "(", "cls", ",", "s", ",", "*", ",", "strict", "=", "True", ")", ":", "nodedomain", ",", "sep", ",", "resource", "=", "s", ".", "partition", "(", "\"/\"", ")", "if", "not", "sep", ":", "resource", "=", "None", "localpart", ",", "sep", ",", "domain", "=", "nodedomain", ".", "partition", "(", "\"@\"", ")", "if", "not", "sep", ":", "domain", "=", "localpart", "localpart", "=", "None", "return", "cls", "(", "localpart", ",", "domain", ",", "resource", ",", "strict", "=", "strict", ")" ]
Construct a JID out of a string containing it. :param s: The string to parse. :type s: :class:`str` :param strict: Whether to enable strict parsing. :type strict: :class:`bool` :raises: See :class:`JID` :return: The parsed JID :rtype: :class:`JID` See the :class:`JID` class level documentation for the semantics of `strict`.
[ "Construct", "a", "JID", "out", "of", "a", "string", "containing", "it", "." ]
python
train
31.208333
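A usage sketch for JID.fromstr as documented above, assuming the class is used through the aioxmpp package as published; the addresses are placeholders:

    import aioxmpp

    full = aioxmpp.JID.fromstr('[email protected]/laptop')
    # full.localpart == 'romeo', full.domain == 'montague.example',
    # full.resource == 'laptop'

    bare = aioxmpp.JID.fromstr('montague.example')
    # no '@' and no '/', so localpart and resource are both None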
mrcagney/make_gtfs
make_gtfs/protofeed.py
https://github.com/mrcagney/make_gtfs/blob/37b6f88e03bac708c2e85d6f4b6d48a0c92e4a59/make_gtfs/protofeed.py#L73-L86
def copy(self): """ Return a copy of this ProtoFeed, that is, a feed with all the same attributes. """ other = ProtoFeed() for key in cs.PROTOFEED_ATTRS: value = getattr(self, key) if isinstance(value, pd.DataFrame): # Pandas copy DataFrame value = value.copy() setattr(other, key, value) return other
[ "def", "copy", "(", "self", ")", ":", "other", "=", "ProtoFeed", "(", ")", "for", "key", "in", "cs", ".", "PROTOFEED_ATTRS", ":", "value", "=", "getattr", "(", "self", ",", "key", ")", "if", "isinstance", "(", "value", ",", "pd", ".", "DataFrame", ")", ":", "# Pandas copy DataFrame", "value", "=", "value", ".", "copy", "(", ")", "setattr", "(", "other", ",", "key", ",", "value", ")", "return", "other" ]
Return a copy of this ProtoFeed, that is, a feed with all the same attributes.
[ "Return", "a", "copy", "of", "this", "ProtoFeed", "that", "is", "a", "feed", "with", "all", "the", "same", "attributes", "." ]
python
train
29.5
timothydmorton/VESPA
vespa/stars/populations.py
https://github.com/timothydmorton/VESPA/blob/0446b54d48009f3655cfd1a3957ceea21d3adcaa/vespa/stars/populations.py#L297-L305
def bands(self): """ Bandpasses for which StarPopulation has magnitude data """ bands = [] for c in self.stars.columns: if re.search('_mag',c): bands.append(c) return bands
[ "def", "bands", "(", "self", ")", ":", "bands", "=", "[", "]", "for", "c", "in", "self", ".", "stars", ".", "columns", ":", "if", "re", ".", "search", "(", "'_mag'", ",", "c", ")", ":", "bands", ".", "append", "(", "c", ")", "return", "bands" ]
Bandpasses for which StarPopulation has magnitude data
[ "Bandpasses", "for", "which", "StarPopulation", "has", "magnitude", "data" ]
python
train
26.666667
orb-framework/orb
orb/core/model.py
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/model.py#L387-L419
def changes(self, columns=None, recurse=True, flags=0, inflated=False): """ Returns a dictionary of changes that have been made to the data from this record. :return { <orb.Column>: ( <variant> old, <variant> new), .. } """ output = {} is_record = self.isRecord() schema = self.schema() columns = [schema.column(c) for c in columns] if columns else \ schema.columns(recurse=recurse, flags=flags).values() context = self.context(inflated=inflated) with ReadLocker(self.__dataLock): for col in columns: old, curr = self.__values.get(col.name(), (None, None)) if col.testFlag(col.Flags.ReadOnly): continue elif not is_record: old = None check_old = col.restore(old, context) check_curr = col.restore(curr, context) try: different = check_old != check_curr except StandardError: different = True if different: output[col] = (check_old, check_curr) return output
[ "def", "changes", "(", "self", ",", "columns", "=", "None", ",", "recurse", "=", "True", ",", "flags", "=", "0", ",", "inflated", "=", "False", ")", ":", "output", "=", "{", "}", "is_record", "=", "self", ".", "isRecord", "(", ")", "schema", "=", "self", ".", "schema", "(", ")", "columns", "=", "[", "schema", ".", "column", "(", "c", ")", "for", "c", "in", "columns", "]", "if", "columns", "else", "schema", ".", "columns", "(", "recurse", "=", "recurse", ",", "flags", "=", "flags", ")", ".", "values", "(", ")", "context", "=", "self", ".", "context", "(", "inflated", "=", "inflated", ")", "with", "ReadLocker", "(", "self", ".", "__dataLock", ")", ":", "for", "col", "in", "columns", ":", "old", ",", "curr", "=", "self", ".", "__values", ".", "get", "(", "col", ".", "name", "(", ")", ",", "(", "None", ",", "None", ")", ")", "if", "col", ".", "testFlag", "(", "col", ".", "Flags", ".", "ReadOnly", ")", ":", "continue", "elif", "not", "is_record", ":", "old", "=", "None", "check_old", "=", "col", ".", "restore", "(", "old", ",", "context", ")", "check_curr", "=", "col", ".", "restore", "(", "curr", ",", "context", ")", "try", ":", "different", "=", "check_old", "!=", "check_curr", "except", "StandardError", ":", "different", "=", "True", "if", "different", ":", "output", "[", "col", "]", "=", "(", "check_old", ",", "check_curr", ")", "return", "output" ]
Returns a dictionary of changes that have been made to the data from this record. :return { <orb.Column>: ( <variant> old, <variant> new), .. }
[ "Returns", "a", "dictionary", "of", "changes", "that", "have", "been", "made", "to", "the", "data", "from", "this", "record", "." ]
python
train
36.030303
jadolg/rocketchat_API
rocketchat_API/rocketchat.py
https://github.com/jadolg/rocketchat_API/blob/f220d094434991cb9892418245f054ea06f28aad/rocketchat_API/rocketchat.py#L596-L598
def im_set_topic(self, room_id, topic, **kwargs): """Sets the topic for the direct message""" return self.__call_api_post('im.setTopic', roomId=room_id, topic=topic, kwargs=kwargs)
[ "def", "im_set_topic", "(", "self", ",", "room_id", ",", "topic", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "__call_api_post", "(", "'im.setTopic'", ",", "roomId", "=", "room_id", ",", "topic", "=", "topic", ",", "kwargs", "=", "kwargs", ")" ]
Sets the topic for the direct message
[ "Sets", "the", "topic", "for", "the", "direct", "message" ]
python
train
64.666667
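A hedged usage sketch for the im_set_topic wrapper above; the server URL, credentials, and room id are placeholders, and the exact JSON returned is up to the Rocket.Chat server:

    from rocketchat_API.rocketchat import RocketChat

    rocket = RocketChat('user', 'pass', server_url='https://demo.rocket.chat')
    resp = rocket.im_set_topic('direct-message-room-id', 'Quarterly planning')
    print(resp.json())   # typically includes {'success': True} on success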
nuagenetworks/monolithe
monolithe/generators/lang/javascript/writers/apiversionwriter.py
https://github.com/nuagenetworks/monolithe/blob/626011af3ff43f73b7bd8aa5e1f93fb5f1f0e181/monolithe/generators/lang/javascript/writers/apiversionwriter.py#L242-L257
def _write_enums(self, entity_name, attributes): """ This method writes the ouput for a particular specification. """ self.enum_attrs_for_locale[entity_name] = attributes; for attribute in attributes: enum_name = "%s%sEnum" % (entity_name, attribute.name[0].upper() + attribute.name[1:]) self.enum_list.append(enum_name) filename = "%s%s.js" % (self._class_prefix, enum_name) self.write(destination = self.enum_directory, filename=filename, template_name="enum.js.tpl", class_prefix = self._class_prefix, enum_name = enum_name, allowed_choices = set(attribute.allowed_choices))
[ "def", "_write_enums", "(", "self", ",", "entity_name", ",", "attributes", ")", ":", "self", ".", "enum_attrs_for_locale", "[", "entity_name", "]", "=", "attributes", "for", "attribute", "in", "attributes", ":", "enum_name", "=", "\"%s%sEnum\"", "%", "(", "entity_name", ",", "attribute", ".", "name", "[", "0", "]", ".", "upper", "(", ")", "+", "attribute", ".", "name", "[", "1", ":", "]", ")", "self", ".", "enum_list", ".", "append", "(", "enum_name", ")", "filename", "=", "\"%s%s.js\"", "%", "(", "self", ".", "_class_prefix", ",", "enum_name", ")", "self", ".", "write", "(", "destination", "=", "self", ".", "enum_directory", ",", "filename", "=", "filename", ",", "template_name", "=", "\"enum.js.tpl\"", ",", "class_prefix", "=", "self", ".", "_class_prefix", ",", "enum_name", "=", "enum_name", ",", "allowed_choices", "=", "set", "(", "attribute", ".", "allowed_choices", ")", ")" ]
This method writes the output for a particular specification.
[ "This", "method", "writes", "the", "ouput", "for", "a", "particular", "specification", "." ]
python
train
48.25
draperjames/qtpandas
qtpandas/models/DataFrameModel.py
https://github.com/draperjames/qtpandas/blob/64294fb69f1839e53dee5ea453337266bfaf24f4/qtpandas/models/DataFrameModel.py#L50-L72
def read_sql(sql, con, filePath, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize=None): """ Read SQL query or database table into a DataFrameModel. Provide a filePath argument in addition to the *args/**kwargs from pandas.read_sql and get a DataFrameModel. NOTE: The chunksize option is overridden to None always (for now). Reference: http://pandas.pydata.org/pandas-docs/version/0.18.1/generated/pandas.read_sql.html pandas.read_sql(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize=None) :return: DataFrameModel """ # TODO: Decide if chunksize is worth keeping and how to handle? df = pandas.read_sql(sql, con, index_col, coerce_float, params, parse_dates, columns, chunksize=None) return DataFrameModel(df, filePath=filePath)
[ "def", "read_sql", "(", "sql", ",", "con", ",", "filePath", ",", "index_col", "=", "None", ",", "coerce_float", "=", "True", ",", "params", "=", "None", ",", "parse_dates", "=", "None", ",", "columns", "=", "None", ",", "chunksize", "=", "None", ")", ":", "# TODO: Decide if chunksize is worth keeping and how to handle?", "df", "=", "pandas", ".", "read_sql", "(", "sql", ",", "con", ",", "index_col", ",", "coerce_float", ",", "params", ",", "parse_dates", ",", "columns", ",", "chunksize", "=", "None", ")", "return", "DataFrameModel", "(", "df", ",", "filePath", "=", "filePath", ")" ]
Read SQL query or database table into a DataFrameModel. Provide a filePath argument in addition to the *args/**kwargs from pandas.read_sql and get a DataFrameModel. NOTE: The chunksize option is overridden to None always (for now). Reference: http://pandas.pydata.org/pandas-docs/version/0.18.1/generated/pandas.read_sql.html pandas.read_sql(sql, con, index_col=None, coerce_float=True, params=None, parse_dates=None, columns=None, chunksize=None) :return: DataFrameModel
[ "Read", "SQL", "query", "or", "database", "table", "into", "a", "DataFrameModel", ".", "Provide", "a", "filePath", "argument", "in", "addition", "to", "the", "*", "args", "/", "**", "kwargs", "from", "pandas", ".", "read_sql", "and", "get", "a", "DataFrameModel", "." ]
python
train
39.521739
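A usage sketch for the read_sql wrapper documented above; the database file, table, and column names are hypothetical, and the import path is inferred from the record's module path:

    import sqlite3
    from qtpandas.models.DataFrameModel import read_sql   # assumed import path

    con = sqlite3.connect('inventory.db')
    model = read_sql('SELECT * FROM widgets', con,
                     filePath='inventory.db', index_col='id')
    # `model` is a DataFrameModel wrapping the query result and carrying the
    # filePath it was given; note chunksize is always forced to None.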
SiLab-Bonn/pyBAR
pybar/analysis/analysis_utils.py
https://github.com/SiLab-Bonn/pyBAR/blob/5ad95bbcd41cd358825823fb78f396cfce23593e/pybar/analysis/analysis_utils.py#L1615-L1632
def print_raw_data_file(input_file, start_index=0, limit=200, flavor='fei4b', select=None, tdc_trig_dist=False, trigger_data_mode=0, meta_data_v2=True): """Printing FEI4 data from raw data file for debugging. """ with tb.open_file(input_file + '.h5', mode="r") as file_h5: if meta_data_v2: index_start = file_h5.root.meta_data.read(field='index_start') index_stop = file_h5.root.meta_data.read(field='index_stop') else: index_start = file_h5.root.meta_data.read(field='start_index') index_stop = file_h5.root.meta_data.read(field='stop_index') total_words = 0 for read_out_index, (index_start, index_stop) in enumerate(np.column_stack((index_start, index_stop))): if start_index < index_stop: print "\nchunk %d with length %d (from index %d to %d)\n" % (read_out_index, (index_stop - index_start), index_start, index_stop) raw_data = file_h5.root.raw_data.read(index_start, index_stop) total_words += print_raw_data(raw_data=raw_data, start_index=max(start_index - index_start, 0), limit=limit - total_words, flavor=flavor, index_offset=index_start, select=select, tdc_trig_dist=tdc_trig_dist, trigger_data_mode=trigger_data_mode) if limit and total_words >= limit: break
[ "def", "print_raw_data_file", "(", "input_file", ",", "start_index", "=", "0", ",", "limit", "=", "200", ",", "flavor", "=", "'fei4b'", ",", "select", "=", "None", ",", "tdc_trig_dist", "=", "False", ",", "trigger_data_mode", "=", "0", ",", "meta_data_v2", "=", "True", ")", ":", "with", "tb", ".", "open_file", "(", "input_file", "+", "'.h5'", ",", "mode", "=", "\"r\"", ")", "as", "file_h5", ":", "if", "meta_data_v2", ":", "index_start", "=", "file_h5", ".", "root", ".", "meta_data", ".", "read", "(", "field", "=", "'index_start'", ")", "index_stop", "=", "file_h5", ".", "root", ".", "meta_data", ".", "read", "(", "field", "=", "'index_stop'", ")", "else", ":", "index_start", "=", "file_h5", ".", "root", ".", "meta_data", ".", "read", "(", "field", "=", "'start_index'", ")", "index_stop", "=", "file_h5", ".", "root", ".", "meta_data", ".", "read", "(", "field", "=", "'stop_index'", ")", "total_words", "=", "0", "for", "read_out_index", ",", "(", "index_start", ",", "index_stop", ")", "in", "enumerate", "(", "np", ".", "column_stack", "(", "(", "index_start", ",", "index_stop", ")", ")", ")", ":", "if", "start_index", "<", "index_stop", ":", "print", "\"\\nchunk %d with length %d (from index %d to %d)\\n\"", "%", "(", "read_out_index", ",", "(", "index_stop", "-", "index_start", ")", ",", "index_start", ",", "index_stop", ")", "raw_data", "=", "file_h5", ".", "root", ".", "raw_data", ".", "read", "(", "index_start", ",", "index_stop", ")", "total_words", "+=", "print_raw_data", "(", "raw_data", "=", "raw_data", ",", "start_index", "=", "max", "(", "start_index", "-", "index_start", ",", "0", ")", ",", "limit", "=", "limit", "-", "total_words", ",", "flavor", "=", "flavor", ",", "index_offset", "=", "index_start", ",", "select", "=", "select", ",", "tdc_trig_dist", "=", "tdc_trig_dist", ",", "trigger_data_mode", "=", "trigger_data_mode", ")", "if", "limit", "and", "total_words", ">=", "limit", ":", "break" ]
Printing FEI4 data from raw data file for debugging.
[ "Printing", "FEI4", "data", "from", "raw", "data", "file", "for", "debugging", "." ]
python
train
74.555556
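A minimal usage sketch; note that the function appends '.h5' to whatever name it is given, so the (hypothetical) path below is written without the suffix, and the import path is inferred from the record's module path:

    from pybar.analysis.analysis_utils import print_raw_data_file   # assumed path

    # Dump the first 50 FEI4 raw-data words of a run for debugging.
    print_raw_data_file('run_42_raw_data', start_index=0, limit=50, flavor='fei4b')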
fozzle/python-brotherprint
brotherprint/brotherprint.py
https://github.com/fozzle/python-brotherprint/blob/5fb92df11b599c30a7da3d6ac7ed60acff230044/brotherprint/brotherprint.py#L989-L1004
def select_delim(self, delim): '''Select desired delimeter Args: delim: The delimeter character you want. Returns: None Raises: RuntimeError: Delimeter too long. ''' size = len(delim) if size > 20: raise RuntimeError('Delimeter too long') n1 = size/10 n2 = size%10 self.send('^SS'+chr(n1)+chr(n2))
[ "def", "select_delim", "(", "self", ",", "delim", ")", ":", "size", "=", "len", "(", "delim", ")", "if", "size", ">", "20", ":", "raise", "RuntimeError", "(", "'Delimeter too long'", ")", "n1", "=", "size", "/", "10", "n2", "=", "size", "%", "10", "self", ".", "send", "(", "'^SS'", "+", "chr", "(", "n1", ")", "+", "chr", "(", "n2", ")", ")" ]
Select desired delimeter Args: delim: The delimeter character you want. Returns: None Raises: RuntimeError: Delimeter too long.
[ "Select", "desired", "delimeter", "Args", ":", "delim", ":", "The", "delimeter", "character", "you", "want", ".", "Returns", ":", "None", "Raises", ":", "RuntimeError", ":", "Delimeter", "too", "long", "." ]
python
train
26.1875
joke2k/faker
faker/providers/ssn/it_IT/__init__.py
https://github.com/joke2k/faker/blob/965824b61132e52d92d1a6ce470396dbbe01c96c/faker/providers/ssn/it_IT/__init__.py#L18-L24
def checksum(value): """ Calculates the checksum char used for the 16th char. Author: Vincenzo Palazzo """ return chr(65 + sum(CHECKSUM_TABLE[index % 2][ALPHANUMERICS_DICT[char]] for index, char in enumerate(value)) % 26)
[ "def", "checksum", "(", "value", ")", ":", "return", "chr", "(", "65", "+", "sum", "(", "CHECKSUM_TABLE", "[", "index", "%", "2", "]", "[", "ALPHANUMERICS_DICT", "[", "char", "]", "]", "for", "index", ",", "char", "in", "enumerate", "(", "value", ")", ")", "%", "26", ")" ]
Calculates the checksum char used for the 16th char. Author: Vincenzo Palazzo
[ "Calculates", "the", "checksum", "char", "used", "for", "the", "16th", "char", ".", "Author", ":", "Vincenzo", "Palazzo" ]
python
train
37
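The checksum above reduces per-character table lookups to a total modulo 26 and maps it onto 'A'-'Z'; a tiny illustration of that final step, with a made-up running total since the lookup tables themselves are not shown in this record:

    running_total = 54                          # hypothetical sum of table lookups
    check_char = chr(65 + running_total % 26)
    print(check_char)                           # 54 % 26 == 2, so chr(67) == 'C'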
resonai/ybt
yabt/dot.py
https://github.com/resonai/ybt/blob/5b40df0922ef3383eb85f2b04a26a2db4b81b3fd/yabt/dot.py#L62-L79
def write_dot(build_context, conf: Config, out_f): """Write build graph in dot format to `out_f` file-like object.""" not_buildenv_targets = get_not_buildenv_targets(build_context) prebuilt_targets = get_prebuilt_targets(build_context) out_f.write('strict digraph {\n') for node in build_context.target_graph.nodes: if conf.show_buildenv_deps or node in not_buildenv_targets: cached = node in prebuilt_targets fillcolor = 'fillcolor="grey",style=filled' if cached else '' color = TARGETS_COLORS.get( build_context.targets[node].builder_name, 'black') out_f.write(' "{}" [color="{}",{}];\n'.format(node, color, fillcolor)) out_f.writelines(' "{}" -> "{}";\n'.format(u, v) for u, v in build_context.target_graph.edges if conf.show_buildenv_deps or (u in not_buildenv_targets and v in not_buildenv_targets)) out_f.write('}\n\n')
[ "def", "write_dot", "(", "build_context", ",", "conf", ":", "Config", ",", "out_f", ")", ":", "not_buildenv_targets", "=", "get_not_buildenv_targets", "(", "build_context", ")", "prebuilt_targets", "=", "get_prebuilt_targets", "(", "build_context", ")", "out_f", ".", "write", "(", "'strict digraph {\\n'", ")", "for", "node", "in", "build_context", ".", "target_graph", ".", "nodes", ":", "if", "conf", ".", "show_buildenv_deps", "or", "node", "in", "not_buildenv_targets", ":", "cached", "=", "node", "in", "prebuilt_targets", "fillcolor", "=", "'fillcolor=\"grey\",style=filled'", "if", "cached", "else", "''", "color", "=", "TARGETS_COLORS", ".", "get", "(", "build_context", ".", "targets", "[", "node", "]", ".", "builder_name", ",", "'black'", ")", "out_f", ".", "write", "(", "' \"{}\" [color=\"{}\",{}];\\n'", ".", "format", "(", "node", ",", "color", ",", "fillcolor", ")", ")", "out_f", ".", "writelines", "(", "' \"{}\" -> \"{}\";\\n'", ".", "format", "(", "u", ",", "v", ")", "for", "u", ",", "v", "in", "build_context", ".", "target_graph", ".", "edges", "if", "conf", ".", "show_buildenv_deps", "or", "(", "u", "in", "not_buildenv_targets", "and", "v", "in", "not_buildenv_targets", ")", ")", "out_f", ".", "write", "(", "'}\\n\\n'", ")" ]
Write build graph in dot format to `out_f` file-like object.
[ "Write", "build", "graph", "in", "dot", "format", "to", "out_f", "file", "-", "like", "object", "." ]
python
train
57.388889
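Grounded in the format strings in write_dot above, a sketch of the text it would emit for a tiny two-target graph in which one target is prebuilt; the target names are invented:

    # strict digraph {
    #  "app:lib" [color="black",fillcolor="grey",style=filled];
    #  "app:main" [color="black",];
    #  "app:main" -> "app:lib";
    # }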
tek/proteome
proteome/project.py
https://github.com/tek/proteome/blob/b4fea6ca633a5b9ff56eaaf2507028fc6ff078b9/proteome/project.py#L305-L323
def from_json(self, json: Map) -> Maybe[Project]: ''' Try to instantiate a Project from the given json object. Convert the **type** key to **tpe** and its value to Maybe. Make sure **root** is a directory, fall back to resolution by **tpe/name**. Reinsert the root dir into the json dict, filter out all keys that aren't contained in Project's fields. Try to instantiate. ''' root = json.get('root')\ .map(mkpath)\ .or_else( json.get_all('type', 'name') .flat_map2(self.resolver.type_name)) valid_fields = root\ .map(lambda a: json ** Map(root=a, tpe=json.get('type')))\ .map(lambda a: a.at(*Project._pclass_fields)) return Try(lambda: valid_fields.map(lambda kw: Project(**kw))) | Empty()
[ "def", "from_json", "(", "self", ",", "json", ":", "Map", ")", "->", "Maybe", "[", "Project", "]", ":", "root", "=", "json", ".", "get", "(", "'root'", ")", ".", "map", "(", "mkpath", ")", ".", "or_else", "(", "json", ".", "get_all", "(", "'type'", ",", "'name'", ")", ".", "flat_map2", "(", "self", ".", "resolver", ".", "type_name", ")", ")", "valid_fields", "=", "root", ".", "map", "(", "lambda", "a", ":", "json", "**", "Map", "(", "root", "=", "a", ",", "tpe", "=", "json", ".", "get", "(", "'type'", ")", ")", ")", ".", "map", "(", "lambda", "a", ":", "a", ".", "at", "(", "*", "Project", ".", "_pclass_fields", ")", ")", "return", "Try", "(", "lambda", ":", "valid_fields", ".", "map", "(", "lambda", "kw", ":", "Project", "(", "*", "*", "kw", ")", ")", ")", "|", "Empty", "(", ")" ]
Try to instantiate a Project from the given json object. Convert the **type** key to **tpe** and its value to Maybe. Make sure **root** is a directory, fall back to resolution by **tpe/name**. Reinsert the root dir into the json dict, filter out all keys that aren't contained in Project's fields. Try to instantiate.
[ "Try", "to", "instantiate", "a", "Project", "from", "the", "given", "json", "object", ".", "Convert", "the", "**", "type", "**", "key", "to", "**", "tpe", "**", "and", "its", "value", "to", "Maybe", ".", "Make", "sure", "**", "root", "**", "is", "a", "directory", "fall", "back", "to", "resolution", "by", "**", "tpe", "/", "name", "**", ".", "Reinsert", "the", "root", "dir", "into", "the", "json", "dict", "filter", "out", "all", "keys", "that", "aren", "t", "contained", "in", "Project", "s", "fields", ".", "Try", "to", "instantiate", "." ]
python
train
44.578947
jab/bidict
bidict/_orderedbidict.py
https://github.com/jab/bidict/blob/1a1ba9758651aed9c4f58384eff006d2e2ad6835/bidict/_orderedbidict.py#L47-L59
def popitem(self, last=True): # pylint: disable=arguments-differ u"""*x.popitem() → (k, v)* Remove and return the most recently added item as a (key, value) pair if *last* is True, else the least recently added item. :raises KeyError: if *x* is empty. """ if not self: raise KeyError('mapping is empty') key = next((reversed if last else iter)(self)) val = self._pop(key) return key, val
[ "def", "popitem", "(", "self", ",", "last", "=", "True", ")", ":", "# pylint: disable=arguments-differ", "if", "not", "self", ":", "raise", "KeyError", "(", "'mapping is empty'", ")", "key", "=", "next", "(", "(", "reversed", "if", "last", "else", "iter", ")", "(", "self", ")", ")", "val", "=", "self", ".", "_pop", "(", "key", ")", "return", "key", ",", "val" ]
u"""*x.popitem() → (k, v)* Remove and return the most recently added item as a (key, value) pair if *last* is True, else the least recently added item. :raises KeyError: if *x* is empty.
[ "u", "*", "x", ".", "popitem", "()", "→", "(", "k", "v", ")", "*" ]
python
test
35.538462
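A usage sketch for the popitem behaviour documented above, assuming the ordered bidirectional mapping is used via the published bidict package; the contents are arbitrary:

    from bidict import OrderedBidict

    ob = OrderedBidict([('one', 1), ('two', 2), ('three', 3)])
    print(ob.popitem())             # ('three', 3) -- most recently added pair
    print(ob.popitem(last=False))   # ('one', 1)   -- least recently added pair
    # calling popitem() on an empty OrderedBidict raises KeyError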
yatiml/yatiml
yatiml/helpers.py
https://github.com/yatiml/yatiml/blob/4f55c058b72388350f0af3076ac3ea9bc1c142b0/yatiml/helpers.py#L427-L525
def map_attribute_to_seq(self, attribute: str, key_attribute: str, value_attribute: Optional[str] = None) -> None: """Converts a mapping attribute to a sequence. This function takes an attribute of this Node whose value \ is a mapping or a mapping of mappings and turns it into a \ sequence of mappings. Each entry in the original mapping is \ converted to an entry in the list. If only a key attribute is \ given, then each entry in the original mapping must map to a \ (sub)mapping. This submapping becomes the corresponding list \ entry, with the key added to it as an additional attribute. If a \ value attribute is also given, then an entry in the original \ mapping may map to any object. If the mapped-to object is a \ mapping, the conversion is as before, otherwise a new \ submapping is created, and key and value are added using the \ given key and value attribute names. An example probably helps. If you have a Node representing \ this piece of YAML:: items: item1: description: Basic widget price: 100.0 item2: description: Premium quality widget price: 200.0 and call map_attribute_to_seq('items', 'item_id'), then the \ Node will be modified to represent this:: items: - item_id: item1 description: Basic widget price: 100.0 - item_id: item2 description: Premium quality widget price: 200.0 which once converted to an object is often easier to deal with \ in code. Slightly more complicated, this YAML:: items: item1: Basic widget item2: description: Premium quality widget price: 200.0 when passed through map_attribute_to_seq('items', 'item_id', \ 'description'), will result in th equivalent of:: items: - item_id: item1 description: Basic widget - item_id: item2 description: Premium quality widget price: 200.0 If the attribute does not exist, or is not a mapping, this \ function will silently do nothing. With thanks to the makers of the Common Workflow Language for \ the idea. Args: attribute: Name of the attribute whose value to modify. key_attribute: Name of the new attribute in each item to \ add with the value of the key. value_attribute: Name of the new attribute in each item to \ add with the value of the key. """ if not self.has_attribute(attribute): return attr_node = self.get_attribute(attribute) if not attr_node.is_mapping(): return start_mark = attr_node.yaml_node.start_mark end_mark = attr_node.yaml_node.end_mark object_list = [] for item_key, item_value in attr_node.yaml_node.value: item_value_node = Node(item_value) if not item_value_node.is_mapping(): if value_attribute is None: return ynode = item_value_node.yaml_node item_value_node.make_mapping() item_value_node.set_attribute(value_attribute, ynode) item_value_node.set_attribute(key_attribute, item_key.value) object_list.append(item_value_node.yaml_node) seq_node = yaml.SequenceNode('tag:yaml.org,2002:seq', object_list, start_mark, end_mark) self.set_attribute(attribute, seq_node)
[ "def", "map_attribute_to_seq", "(", "self", ",", "attribute", ":", "str", ",", "key_attribute", ":", "str", ",", "value_attribute", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "None", ":", "if", "not", "self", ".", "has_attribute", "(", "attribute", ")", ":", "return", "attr_node", "=", "self", ".", "get_attribute", "(", "attribute", ")", "if", "not", "attr_node", ".", "is_mapping", "(", ")", ":", "return", "start_mark", "=", "attr_node", ".", "yaml_node", ".", "start_mark", "end_mark", "=", "attr_node", ".", "yaml_node", ".", "end_mark", "object_list", "=", "[", "]", "for", "item_key", ",", "item_value", "in", "attr_node", ".", "yaml_node", ".", "value", ":", "item_value_node", "=", "Node", "(", "item_value", ")", "if", "not", "item_value_node", ".", "is_mapping", "(", ")", ":", "if", "value_attribute", "is", "None", ":", "return", "ynode", "=", "item_value_node", ".", "yaml_node", "item_value_node", ".", "make_mapping", "(", ")", "item_value_node", ".", "set_attribute", "(", "value_attribute", ",", "ynode", ")", "item_value_node", ".", "set_attribute", "(", "key_attribute", ",", "item_key", ".", "value", ")", "object_list", ".", "append", "(", "item_value_node", ".", "yaml_node", ")", "seq_node", "=", "yaml", ".", "SequenceNode", "(", "'tag:yaml.org,2002:seq'", ",", "object_list", ",", "start_mark", ",", "end_mark", ")", "self", ".", "set_attribute", "(", "attribute", ",", "seq_node", ")" ]
Converts a mapping attribute to a sequence. This function takes an attribute of this Node whose value \ is a mapping or a mapping of mappings and turns it into a \ sequence of mappings. Each entry in the original mapping is \ converted to an entry in the list. If only a key attribute is \ given, then each entry in the original mapping must map to a \ (sub)mapping. This submapping becomes the corresponding list \ entry, with the key added to it as an additional attribute. If a \ value attribute is also given, then an entry in the original \ mapping may map to any object. If the mapped-to object is a \ mapping, the conversion is as before, otherwise a new \ submapping is created, and key and value are added using the \ given key and value attribute names. An example probably helps. If you have a Node representing \ this piece of YAML:: items: item1: description: Basic widget price: 100.0 item2: description: Premium quality widget price: 200.0 and call map_attribute_to_seq('items', 'item_id'), then the \ Node will be modified to represent this:: items: - item_id: item1 description: Basic widget price: 100.0 - item_id: item2 description: Premium quality widget price: 200.0 which once converted to an object is often easier to deal with \ in code. Slightly more complicated, this YAML:: items: item1: Basic widget item2: description: Premium quality widget price: 200.0 when passed through map_attribute_to_seq('items', 'item_id', \ 'description'), will result in th equivalent of:: items: - item_id: item1 description: Basic widget - item_id: item2 description: Premium quality widget price: 200.0 If the attribute does not exist, or is not a mapping, this \ function will silently do nothing. With thanks to the makers of the Common Workflow Language for \ the idea. Args: attribute: Name of the attribute whose value to modify. key_attribute: Name of the new attribute in each item to \ add with the value of the key. value_attribute: Name of the new attribute in each item to \ add with the value of the key.
[ "Converts", "a", "mapping", "attribute", "to", "a", "sequence", "." ]
python
train
38.454545
learningequality/le-utils
le_utils/constants/languages.py
https://github.com/learningequality/le-utils/blob/07a776e3d5c288818764a25170de2e9ec5850c0d/le_utils/constants/languages.py#L200-L231
def getlang_by_alpha2(code): """ Lookup a Language object for language code `code` based on these strategies: - Special case rules for Hebrew and Chinese Hans/Hant scripts - Using `alpha_2` lookup in `pycountry.languages` followed by lookup for a language with the same `name` in the internal representaion Returns `None` if no matching language is found. """ # Handle special cases for language codes returned by YouTube API if code == 'iw': # handle old Hebrew code 'iw' and return modern code 'he' return getlang('he') elif 'zh-Hans' in code: return getlang('zh-CN') # use code `zh-CN` for all simplified Chinese elif 'zh-Hant' in code or re.match('zh(.*)?-HK', code): return getlang('zh-TW') # use code `zh-TW` for all traditional Chinese # extract prefix only if specified with subcode: e.g. zh-Hans --> zh first_part = code.split('-')[0] # See if pycountry can find this language try: pyc_lang = pycountry.languages.get(alpha_2=first_part) if pyc_lang: if hasattr(pyc_lang, 'inverted_name'): lang_name = pyc_lang.inverted_name else: lang_name = pyc_lang.name return getlang_by_name(lang_name) else: return None except KeyError: return None
[ "def", "getlang_by_alpha2", "(", "code", ")", ":", "# Handle special cases for language codes returned by YouTube API", "if", "code", "==", "'iw'", ":", "# handle old Hebrew code 'iw' and return modern code 'he'", "return", "getlang", "(", "'he'", ")", "elif", "'zh-Hans'", "in", "code", ":", "return", "getlang", "(", "'zh-CN'", ")", "# use code `zh-CN` for all simplified Chinese", "elif", "'zh-Hant'", "in", "code", "or", "re", ".", "match", "(", "'zh(.*)?-HK'", ",", "code", ")", ":", "return", "getlang", "(", "'zh-TW'", ")", "# use code `zh-TW` for all traditional Chinese", "# extract prefix only if specified with subcode: e.g. zh-Hans --> zh", "first_part", "=", "code", ".", "split", "(", "'-'", ")", "[", "0", "]", "# See if pycountry can find this language", "try", ":", "pyc_lang", "=", "pycountry", ".", "languages", ".", "get", "(", "alpha_2", "=", "first_part", ")", "if", "pyc_lang", ":", "if", "hasattr", "(", "pyc_lang", ",", "'inverted_name'", ")", ":", "lang_name", "=", "pyc_lang", ".", "inverted_name", "else", ":", "lang_name", "=", "pyc_lang", ".", "name", "return", "getlang_by_name", "(", "lang_name", ")", "else", ":", "return", "None", "except", "KeyError", ":", "return", "None" ]
Lookup a Language object for language code `code` based on these strategies: - Special case rules for Hebrew and Chinese Hans/Hant scripts - Using `alpha_2` lookup in `pycountry.languages` followed by lookup for a language with the same `name` in the internal representation Returns `None` if no matching language is found.
[ "Lookup", "a", "Language", "object", "for", "language", "code", "code", "based", "on", "these", "strategies", ":", "-", "Special", "case", "rules", "for", "Hebrew", "and", "Chinese", "Hans", "/", "Hant", "scripts", "-", "Using", "alpha_2", "lookup", "in", "pycountry", ".", "languages", "followed", "by", "lookup", "for", "a", "language", "with", "the", "same", "name", "in", "the", "internal", "representaion", "Returns", "None", "if", "no", "matching", "language", "is", "found", "." ]
python
train
41.5625
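A sketch of the special cases visible in getlang_by_alpha2 above; the expected results are read off that code path rather than executed here, and the import path is inferred from the record's module path:

    from le_utils.constants.languages import getlang_by_alpha2   # assumed path

    getlang_by_alpha2('iw')        # legacy Hebrew code, resolved as getlang('he')
    getlang_by_alpha2('zh-Hans')   # simplified Chinese, resolved as getlang('zh-CN')
    getlang_by_alpha2('zh-Hant')   # traditional Chinese, resolved as getlang('zh-TW')
    getlang_by_alpha2('en-GB')     # falls through to the pycountry lookup on 'en'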
Arubacloud/pyArubaCloud
ArubaCloud/PyArubaAPI.py
https://github.com/Arubacloud/pyArubaCloud/blob/ec4aecd8ca342b1e1a4f16b7cc87cb5e697cfcd4/ArubaCloud/PyArubaAPI.py#L309-L323
def get_package_id(self, name): """ Retrieve the smart package id given is English name @param (str) name: the Aruba Smart package size name, ie: "small", "medium", "large", "extra large". @return: The package id that depends on the Data center and the size choosen. """ json_scheme = self.gen_def_json_scheme('GetPreConfiguredPackages', dict(HypervisorType=4)) json_obj = self.call_method_post(method='GetPreConfiguredPackages ', json_scheme=json_scheme) for package in json_obj['Value']: packageId = package['PackageID'] for description in package['Descriptions']: languageID = description['LanguageID'] packageName = description['Text'] if languageID == 2 and packageName.lower() == name.lower(): return packageId
[ "def", "get_package_id", "(", "self", ",", "name", ")", ":", "json_scheme", "=", "self", ".", "gen_def_json_scheme", "(", "'GetPreConfiguredPackages'", ",", "dict", "(", "HypervisorType", "=", "4", ")", ")", "json_obj", "=", "self", ".", "call_method_post", "(", "method", "=", "'GetPreConfiguredPackages '", ",", "json_scheme", "=", "json_scheme", ")", "for", "package", "in", "json_obj", "[", "'Value'", "]", ":", "packageId", "=", "package", "[", "'PackageID'", "]", "for", "description", "in", "package", "[", "'Descriptions'", "]", ":", "languageID", "=", "description", "[", "'LanguageID'", "]", "packageName", "=", "description", "[", "'Text'", "]", "if", "languageID", "==", "2", "and", "packageName", ".", "lower", "(", ")", "==", "name", ".", "lower", "(", ")", ":", "return", "packageId" ]
Retrieve the smart package id given its English name @param (str) name: the Aruba Smart package size name, ie: "small", "medium", "large", "extra large". @return: The package id that depends on the Data center and the size chosen.
[ "Retrieve", "the", "smart", "package", "id", "given", "is", "English", "name" ]
python
train
57.266667
pahaz/sshtunnel
sshtunnel.py
https://github.com/pahaz/sshtunnel/blob/66a923e4c6c8e41b8348420523fbf5ddfd53176c/sshtunnel.py#L1061-L1097
def _consolidate_auth(ssh_password=None, ssh_pkey=None, ssh_pkey_password=None, allow_agent=True, host_pkey_directories=None, logger=None): """ Get sure authentication information is in place. ``ssh_pkey`` may be of classes: - ``str`` - in this case it represents a private key file; public key will be obtained from it - ``paramiko.Pkey`` - it will be transparently added to loaded keys """ ssh_loaded_pkeys = SSHTunnelForwarder.get_keys( logger=logger, host_pkey_directories=host_pkey_directories, allow_agent=allow_agent ) if isinstance(ssh_pkey, string_types): ssh_pkey_expanded = os.path.expanduser(ssh_pkey) if os.path.exists(ssh_pkey_expanded): ssh_pkey = SSHTunnelForwarder.read_private_key_file( pkey_file=ssh_pkey_expanded, pkey_password=ssh_pkey_password or ssh_password, logger=logger ) elif logger: logger.warning('Private key file not found: {0}' .format(ssh_pkey)) if isinstance(ssh_pkey, paramiko.pkey.PKey): ssh_loaded_pkeys.insert(0, ssh_pkey) if not ssh_password and not ssh_loaded_pkeys: raise ValueError('No password or public key available!') return (ssh_password, ssh_loaded_pkeys)
[ "def", "_consolidate_auth", "(", "ssh_password", "=", "None", ",", "ssh_pkey", "=", "None", ",", "ssh_pkey_password", "=", "None", ",", "allow_agent", "=", "True", ",", "host_pkey_directories", "=", "None", ",", "logger", "=", "None", ")", ":", "ssh_loaded_pkeys", "=", "SSHTunnelForwarder", ".", "get_keys", "(", "logger", "=", "logger", ",", "host_pkey_directories", "=", "host_pkey_directories", ",", "allow_agent", "=", "allow_agent", ")", "if", "isinstance", "(", "ssh_pkey", ",", "string_types", ")", ":", "ssh_pkey_expanded", "=", "os", ".", "path", ".", "expanduser", "(", "ssh_pkey", ")", "if", "os", ".", "path", ".", "exists", "(", "ssh_pkey_expanded", ")", ":", "ssh_pkey", "=", "SSHTunnelForwarder", ".", "read_private_key_file", "(", "pkey_file", "=", "ssh_pkey_expanded", ",", "pkey_password", "=", "ssh_pkey_password", "or", "ssh_password", ",", "logger", "=", "logger", ")", "elif", "logger", ":", "logger", ".", "warning", "(", "'Private key file not found: {0}'", ".", "format", "(", "ssh_pkey", ")", ")", "if", "isinstance", "(", "ssh_pkey", ",", "paramiko", ".", "pkey", ".", "PKey", ")", ":", "ssh_loaded_pkeys", ".", "insert", "(", "0", ",", "ssh_pkey", ")", "if", "not", "ssh_password", "and", "not", "ssh_loaded_pkeys", ":", "raise", "ValueError", "(", "'No password or public key available!'", ")", "return", "(", "ssh_password", ",", "ssh_loaded_pkeys", ")" ]
Make sure authentication information is in place. ``ssh_pkey`` may be of classes: - ``str`` - in this case it represents a private key file; public key will be obtained from it - ``paramiko.Pkey`` - it will be transparently added to loaded keys
[ "Get", "sure", "authentication", "information", "is", "in", "place", ".", "ssh_pkey", "may", "be", "of", "classes", ":", "-", "str", "-", "in", "this", "case", "it", "represents", "a", "private", "key", "file", ";", "public", "key", "will", "be", "obtained", "from", "it", "-", "paramiko", ".", "Pkey", "-", "it", "will", "be", "transparently", "added", "to", "loaded", "keys" ]
python
train
41.945946
jilljenn/tryalgo
tryalgo/range_minimum_query.py
https://github.com/jilljenn/tryalgo/blob/89a4dd9655e7b6b0a176f72b4c60d0196420dfe1/tryalgo/range_minimum_query.py#L126-L150
def _clear(self, node, left, right): """propagates the lazy updates for this node to the subtrees. as a result the maxval, minval, sumval values for the node are up to date. """ if self.lazyset[node] is not None: # first do the pending set val = self.lazyset[node] self.minval[node] = val self.maxval[node] = val self.sumval[node] = val * (right - left) self.lazyset[node] = None if left < right - 1: # not a leaf self.lazyset[2 * node] = val # propagate to direct descendents self.lazyadd[2 * node] = 0 self.lazyset[2 * node + 1] = val self.lazyadd[2 * node + 1] = 0 if self.lazyadd[node] != 0: # then do the pending add val = self.lazyadd[node] self.minval[node] += val self.maxval[node] += val self.sumval[node] += val * (right - left) self.lazyadd[node] = 0 if left < right - 1: # not at a leaf self.lazyadd[2 * node] += val # propagate to direct descendents self.lazyadd[2 * node + 1] += val
[ "def", "_clear", "(", "self", ",", "node", ",", "left", ",", "right", ")", ":", "if", "self", ".", "lazyset", "[", "node", "]", "is", "not", "None", ":", "# first do the pending set", "val", "=", "self", ".", "lazyset", "[", "node", "]", "self", ".", "minval", "[", "node", "]", "=", "val", "self", ".", "maxval", "[", "node", "]", "=", "val", "self", ".", "sumval", "[", "node", "]", "=", "val", "*", "(", "right", "-", "left", ")", "self", ".", "lazyset", "[", "node", "]", "=", "None", "if", "left", "<", "right", "-", "1", ":", "# not a leaf", "self", ".", "lazyset", "[", "2", "*", "node", "]", "=", "val", "# propagate to direct descendents", "self", ".", "lazyadd", "[", "2", "*", "node", "]", "=", "0", "self", ".", "lazyset", "[", "2", "*", "node", "+", "1", "]", "=", "val", "self", ".", "lazyadd", "[", "2", "*", "node", "+", "1", "]", "=", "0", "if", "self", ".", "lazyadd", "[", "node", "]", "!=", "0", ":", "# then do the pending add", "val", "=", "self", ".", "lazyadd", "[", "node", "]", "self", ".", "minval", "[", "node", "]", "+=", "val", "self", ".", "maxval", "[", "node", "]", "+=", "val", "self", ".", "sumval", "[", "node", "]", "+=", "val", "*", "(", "right", "-", "left", ")", "self", ".", "lazyadd", "[", "node", "]", "=", "0", "if", "left", "<", "right", "-", "1", ":", "# not at a leaf", "self", ".", "lazyadd", "[", "2", "*", "node", "]", "+=", "val", "# propagate to direct descendents", "self", ".", "lazyadd", "[", "2", "*", "node", "+", "1", "]", "+=", "val" ]
propagates the lazy updates for this node to the subtrees. as a result the maxval, minval, sumval values for the node are up to date.
[ "propagates", "the", "lazy", "updates", "for", "this", "node", "to", "the", "subtrees", ".", "as", "a", "result", "the", "maxval", "minval", "sumval", "values", "for", "the", "node", "are", "up", "to", "date", "." ]
python
train
47.88
decryptus/sonicprobe
sonicprobe/libs/anysql.py
https://github.com/decryptus/sonicprobe/blob/72f73f3a40d2982d79ad68686e36aa31d94b76f8/sonicprobe/libs/anysql.py#L538-L549
def c14n_uri(sqluri): """ Ask the backend to c14n the uri. See register_uri_backend() for details. If no backend is found for this uri method, a NotImplementedError will be raised. """ uri_c14n_method = _get_methods_by_uri(sqluri)[METHOD_C14N_URI] if not uri_c14n_method: return sqluri return uri_c14n_method(sqluri)
[ "def", "c14n_uri", "(", "sqluri", ")", ":", "uri_c14n_method", "=", "_get_methods_by_uri", "(", "sqluri", ")", "[", "METHOD_C14N_URI", "]", "if", "not", "uri_c14n_method", ":", "return", "sqluri", "return", "uri_c14n_method", "(", "sqluri", ")" ]
Ask the backend to c14n the uri. See register_uri_backend() for details. If no backend is found for this uri method, a NotImplementedError will be raised.
[ "Ask", "the", "backend", "to", "c14n", "the", "uri", ".", "See", "register_uri_backend", "()", "for", "details", "." ]
python
train
29.166667
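The function above assumes a registry that maps a URI scheme to a table of backend methods. The sketch below reproduces that dispatch shape with made-up backends and registry names; it is not anysql's actual internals.
METHOD_C14N_URI = 'c14n_uri'

_BACKENDS = {
    'sqlite3': {METHOD_C14N_URI: lambda uri: uri.rstrip('/')},
    'mysql': {METHOD_C14N_URI: None},  # backend registered without a canonicalizer
}

def c14n_uri_sketch(sqluri):
    methods = _BACKENDS[sqluri.split(':', 1)[0]]  # KeyError stands in for "no backend"
    func = methods[METHOD_C14N_URI]
    return func(sqluri) if func else sqluri

print(c14n_uri_sketch('sqlite3:/tmp/app.db/'))  # sqlite3:/tmp/app.db
print(c14n_uri_sketch('mysql://host/db'))       # returned unchanged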
telminov/sw-python-utils
swutils/date.py
https://github.com/telminov/sw-python-utils/blob/68f976122dd26a581b8d833c023f7f06542ca85c/swutils/date.py#L112-L118
def age_to_date(age): """ Converts an age into a year of birth. (For filtering by date of birth) """ today = datetime.date.today() date = datetime.date(today.year - age - 1, today.month, today.day) + datetime.timedelta(days=1) return date
[ "def", "age_to_date", "(", "age", ")", ":", "today", "=", "datetime", ".", "date", ".", "today", "(", ")", "date", "=", "datetime", ".", "date", "(", "today", ".", "year", "-", "age", "-", "1", ",", "today", ".", "month", ",", "today", ".", "day", ")", "+", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "return", "date" ]
Converts an age into a year of birth. (For filtering by date of birth)
[ "Converts", "an", "age", "into", "a", "year", "of", "birth", ".", "(", "For", "filtering", "by", "date", "of", "birth", ")" ]
python
train
37
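Usage sketch for the record above: the returned date is the earliest birth date of someone who is at most N years old today, so it works directly as a lower bound in a birth-date filter. The ORM call in the comment is hypothetical.
from swutils.date import age_to_date

cutoff = age_to_date(30)
# e.g. Person.objects.filter(birth_date__gte=cutoff)  -> people aged 30 or younger
print(cutoff)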
tcalmant/ipopo
pelix/misc/log.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/misc/log.py#L241-L270
def _store_entry(self, entry): """ Stores a new log entry and notifies listeners :param entry: A LogEntry object """ # Get the logger and log the message self.__logs.append(entry) # Notify listeners for listener in self.__listeners.copy(): try: listener.logged(entry) except Exception as ex: # Create a new log entry, without using logging nor notifying # listener (to avoid a recursion) err_entry = LogEntry( logging.WARNING, "Error notifying logging listener {0}: {1}".format( listener, ex ), sys.exc_info(), self._context.get_bundle(), None, ) # Insert the new entry before the real one self.__logs.pop() self.__logs.append(err_entry) self.__logs.append(entry)
[ "def", "_store_entry", "(", "self", ",", "entry", ")", ":", "# Get the logger and log the message", "self", ".", "__logs", ".", "append", "(", "entry", ")", "# Notify listeners", "for", "listener", "in", "self", ".", "__listeners", ".", "copy", "(", ")", ":", "try", ":", "listener", ".", "logged", "(", "entry", ")", "except", "Exception", "as", "ex", ":", "# Create a new log entry, without using logging nor notifying", "# listener (to avoid a recursion)", "err_entry", "=", "LogEntry", "(", "logging", ".", "WARNING", ",", "\"Error notifying logging listener {0}: {1}\"", ".", "format", "(", "listener", ",", "ex", ")", ",", "sys", ".", "exc_info", "(", ")", ",", "self", ".", "_context", ".", "get_bundle", "(", ")", ",", "None", ",", ")", "# Insert the new entry before the real one", "self", ".", "__logs", ".", "pop", "(", ")", "self", ".", "__logs", ".", "append", "(", "err_entry", ")", "self", ".", "__logs", ".", "append", "(", "entry", ")" ]
Stores a new log entry and notifies listeners :param entry: A LogEntry object
[ "Stores", "a", "new", "log", "entry", "and", "notifies", "listeners" ]
python
train
33.766667
binux/pyspider
pyspider/fetcher/tornado_fetcher.py
https://github.com/binux/pyspider/blob/3fccfabe2b057b7a56d4a4c79dc0dd6cd2239fe9/pyspider/fetcher/tornado_fetcher.py#L431-L529
def phantomjs_fetch(self, url, task): '''Fetch with phantomjs proxy''' start_time = time.time() self.on_fetch('phantomjs', task) handle_error = lambda x: self.handle_error('phantomjs', url, task, start_time, x) # check phantomjs proxy is enabled if not self.phantomjs_proxy: result = { "orig_url": url, "content": "phantomjs is not enabled.", "headers": {}, "status_code": 501, "url": url, "time": time.time() - start_time, "cookies": {}, "save": task.get('fetch', {}).get('save') } logger.warning("[501] %s:%s %s 0s", task.get('project'), task.get('taskid'), url) raise gen.Return(result) # setup request parameters fetch = self.pack_tornado_request_parameters(url, task) task_fetch = task.get('fetch', {}) for each in task_fetch: if each not in fetch: fetch[each] = task_fetch[each] # robots.txt if task_fetch.get('robots_txt', False): user_agent = fetch['headers']['User-Agent'] can_fetch = yield self.can_fetch(user_agent, url) if not can_fetch: error = tornado.httpclient.HTTPError(403, 'Disallowed by robots.txt') raise gen.Return(handle_error(error)) request_conf = { 'follow_redirects': False } request_conf['connect_timeout'] = fetch.get('connect_timeout', 20) request_conf['request_timeout'] = fetch.get('request_timeout', 120) + 1 session = cookies.RequestsCookieJar() if 'Cookie' in fetch['headers']: c = http_cookies.SimpleCookie() try: c.load(fetch['headers']['Cookie']) except AttributeError: c.load(utils.utf8(fetch['headers']['Cookie'])) for key in c: session.set(key, c[key]) del fetch['headers']['Cookie'] if 'cookies' in fetch: session.update(fetch['cookies']) del fetch['cookies'] request = tornado.httpclient.HTTPRequest(url=fetch['url']) cookie_header = cookies.get_cookie_header(session, request) if cookie_header: fetch['headers']['Cookie'] = cookie_header # making requests fetch['headers'] = dict(fetch['headers']) try: request = tornado.httpclient.HTTPRequest( url=self.phantomjs_proxy, method="POST", body=json.dumps(fetch), **request_conf) except Exception as e: raise gen.Return(handle_error(e)) try: response = yield gen.maybe_future(self.http_client.fetch(request)) except tornado.httpclient.HTTPError as e: if e.response: response = e.response else: raise gen.Return(handle_error(e)) if not response.body: raise gen.Return(handle_error(Exception('no response from phantomjs: %r' % response))) result = {} try: result = json.loads(utils.text(response.body)) assert 'status_code' in result, result except Exception as e: if response.error: result['error'] = utils.text(response.error) raise gen.Return(handle_error(e)) if result.get('status_code', 200): logger.info("[%d] %s:%s %s %.2fs", result['status_code'], task.get('project'), task.get('taskid'), url, result['time']) else: logger.error("[%d] %s:%s %s, %r %.2fs", result['status_code'], task.get('project'), task.get('taskid'), url, result['content'], result['time']) raise gen.Return(result)
[ "def", "phantomjs_fetch", "(", "self", ",", "url", ",", "task", ")", ":", "start_time", "=", "time", ".", "time", "(", ")", "self", ".", "on_fetch", "(", "'phantomjs'", ",", "task", ")", "handle_error", "=", "lambda", "x", ":", "self", ".", "handle_error", "(", "'phantomjs'", ",", "url", ",", "task", ",", "start_time", ",", "x", ")", "# check phantomjs proxy is enabled", "if", "not", "self", ".", "phantomjs_proxy", ":", "result", "=", "{", "\"orig_url\"", ":", "url", ",", "\"content\"", ":", "\"phantomjs is not enabled.\"", ",", "\"headers\"", ":", "{", "}", ",", "\"status_code\"", ":", "501", ",", "\"url\"", ":", "url", ",", "\"time\"", ":", "time", ".", "time", "(", ")", "-", "start_time", ",", "\"cookies\"", ":", "{", "}", ",", "\"save\"", ":", "task", ".", "get", "(", "'fetch'", ",", "{", "}", ")", ".", "get", "(", "'save'", ")", "}", "logger", ".", "warning", "(", "\"[501] %s:%s %s 0s\"", ",", "task", ".", "get", "(", "'project'", ")", ",", "task", ".", "get", "(", "'taskid'", ")", ",", "url", ")", "raise", "gen", ".", "Return", "(", "result", ")", "# setup request parameters", "fetch", "=", "self", ".", "pack_tornado_request_parameters", "(", "url", ",", "task", ")", "task_fetch", "=", "task", ".", "get", "(", "'fetch'", ",", "{", "}", ")", "for", "each", "in", "task_fetch", ":", "if", "each", "not", "in", "fetch", ":", "fetch", "[", "each", "]", "=", "task_fetch", "[", "each", "]", "# robots.txt", "if", "task_fetch", ".", "get", "(", "'robots_txt'", ",", "False", ")", ":", "user_agent", "=", "fetch", "[", "'headers'", "]", "[", "'User-Agent'", "]", "can_fetch", "=", "yield", "self", ".", "can_fetch", "(", "user_agent", ",", "url", ")", "if", "not", "can_fetch", ":", "error", "=", "tornado", ".", "httpclient", ".", "HTTPError", "(", "403", ",", "'Disallowed by robots.txt'", ")", "raise", "gen", ".", "Return", "(", "handle_error", "(", "error", ")", ")", "request_conf", "=", "{", "'follow_redirects'", ":", "False", "}", "request_conf", "[", "'connect_timeout'", "]", "=", "fetch", ".", "get", "(", "'connect_timeout'", ",", "20", ")", "request_conf", "[", "'request_timeout'", "]", "=", "fetch", ".", "get", "(", "'request_timeout'", ",", "120", ")", "+", "1", "session", "=", "cookies", ".", "RequestsCookieJar", "(", ")", "if", "'Cookie'", "in", "fetch", "[", "'headers'", "]", ":", "c", "=", "http_cookies", ".", "SimpleCookie", "(", ")", "try", ":", "c", ".", "load", "(", "fetch", "[", "'headers'", "]", "[", "'Cookie'", "]", ")", "except", "AttributeError", ":", "c", ".", "load", "(", "utils", ".", "utf8", "(", "fetch", "[", "'headers'", "]", "[", "'Cookie'", "]", ")", ")", "for", "key", "in", "c", ":", "session", ".", "set", "(", "key", ",", "c", "[", "key", "]", ")", "del", "fetch", "[", "'headers'", "]", "[", "'Cookie'", "]", "if", "'cookies'", "in", "fetch", ":", "session", ".", "update", "(", "fetch", "[", "'cookies'", "]", ")", "del", "fetch", "[", "'cookies'", "]", "request", "=", "tornado", ".", "httpclient", ".", "HTTPRequest", "(", "url", "=", "fetch", "[", "'url'", "]", ")", "cookie_header", "=", "cookies", ".", "get_cookie_header", "(", "session", ",", "request", ")", "if", "cookie_header", ":", "fetch", "[", "'headers'", "]", "[", "'Cookie'", "]", "=", "cookie_header", "# making requests", "fetch", "[", "'headers'", "]", "=", "dict", "(", "fetch", "[", "'headers'", "]", ")", "try", ":", "request", "=", "tornado", ".", "httpclient", ".", "HTTPRequest", "(", "url", "=", "self", ".", "phantomjs_proxy", ",", "method", "=", "\"POST\"", ",", "body", "=", "json", ".", "dumps", 
"(", "fetch", ")", ",", "*", "*", "request_conf", ")", "except", "Exception", "as", "e", ":", "raise", "gen", ".", "Return", "(", "handle_error", "(", "e", ")", ")", "try", ":", "response", "=", "yield", "gen", ".", "maybe_future", "(", "self", ".", "http_client", ".", "fetch", "(", "request", ")", ")", "except", "tornado", ".", "httpclient", ".", "HTTPError", "as", "e", ":", "if", "e", ".", "response", ":", "response", "=", "e", ".", "response", "else", ":", "raise", "gen", ".", "Return", "(", "handle_error", "(", "e", ")", ")", "if", "not", "response", ".", "body", ":", "raise", "gen", ".", "Return", "(", "handle_error", "(", "Exception", "(", "'no response from phantomjs: %r'", "%", "response", ")", ")", ")", "result", "=", "{", "}", "try", ":", "result", "=", "json", ".", "loads", "(", "utils", ".", "text", "(", "response", ".", "body", ")", ")", "assert", "'status_code'", "in", "result", ",", "result", "except", "Exception", "as", "e", ":", "if", "response", ".", "error", ":", "result", "[", "'error'", "]", "=", "utils", ".", "text", "(", "response", ".", "error", ")", "raise", "gen", ".", "Return", "(", "handle_error", "(", "e", ")", ")", "if", "result", ".", "get", "(", "'status_code'", ",", "200", ")", ":", "logger", ".", "info", "(", "\"[%d] %s:%s %s %.2fs\"", ",", "result", "[", "'status_code'", "]", ",", "task", ".", "get", "(", "'project'", ")", ",", "task", ".", "get", "(", "'taskid'", ")", ",", "url", ",", "result", "[", "'time'", "]", ")", "else", ":", "logger", ".", "error", "(", "\"[%d] %s:%s %s, %r %.2fs\"", ",", "result", "[", "'status_code'", "]", ",", "task", ".", "get", "(", "'project'", ")", ",", "task", ".", "get", "(", "'taskid'", ")", ",", "url", ",", "result", "[", "'content'", "]", ",", "result", "[", "'time'", "]", ")", "raise", "gen", ".", "Return", "(", "result", ")" ]
Fetch with phantomjs proxy
[ "Fetch", "with", "phantomjs", "proxy" ]
python
train
38.383838
clalancette/pycdlib
pycdlib/rockridge.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/rockridge.py#L218-L231
def record(self): # type: () -> bytes ''' Generate a string representing the Rock Ridge Rock Ridge record. Parameters: None. Returns: String containing the Rock Ridge record. ''' if not self._initialized: raise pycdlibexception.PyCdlibInternalError('RR record not yet initialized!') return b'RR' + struct.pack('=BBB', RRRRRecord.length(), SU_ENTRY_VERSION, self.rr_flags)
[ "def", "record", "(", "self", ")", ":", "# type: () -> bytes", "if", "not", "self", ".", "_initialized", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'RR record not yet initialized!'", ")", "return", "b'RR'", "+", "struct", ".", "pack", "(", "'=BBB'", ",", "RRRRRecord", ".", "length", "(", ")", ",", "SU_ENTRY_VERSION", ",", "self", ".", "rr_flags", ")" ]
Generate a string representing the Rock Ridge Rock Ridge record. Parameters: None. Returns: String containing the Rock Ridge record.
[ "Generate", "a", "string", "representing", "the", "Rock", "Ridge", "Rock", "Ridge", "record", "." ]
python
train
32.428571
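A quick look at the byte layout produced by the record() method above. The length, version and flags values below are illustrative stand-ins rather than fields read from a real pycdlib object.
import struct

record = b'RR' + struct.pack('=BBB', 5, 1, 0x89)
print(record)       # b'RR\x05\x01\x89'
print(len(record))  # 5 bytes: 'RR' signature, length, SU entry version, flags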
Yubico/yubikey-manager
ykman/cli/oath.py
https://github.com/Yubico/yubikey-manager/blob/3ac27bc59ae76a59db9d09a530494add2edbbabf/ykman/cli/oath.py#L307-L326
def list(ctx, show_hidden, oath_type, period): """ List all credentials. List all credentials stored on your YubiKey. """ ensure_validated(ctx) controller = ctx.obj['controller'] creds = [cred for cred in controller.list() if show_hidden or not cred.is_hidden ] creds.sort() for cred in creds: click.echo(cred.printable_key, nl=False) if oath_type: click.echo(u', {}'.format(cred.oath_type.name), nl=False) if period: click.echo(', {}'.format(cred.period), nl=False) click.echo()
[ "def", "list", "(", "ctx", ",", "show_hidden", ",", "oath_type", ",", "period", ")", ":", "ensure_validated", "(", "ctx", ")", "controller", "=", "ctx", ".", "obj", "[", "'controller'", "]", "creds", "=", "[", "cred", "for", "cred", "in", "controller", ".", "list", "(", ")", "if", "show_hidden", "or", "not", "cred", ".", "is_hidden", "]", "creds", ".", "sort", "(", ")", "for", "cred", "in", "creds", ":", "click", ".", "echo", "(", "cred", ".", "printable_key", ",", "nl", "=", "False", ")", "if", "oath_type", ":", "click", ".", "echo", "(", "u', {}'", ".", "format", "(", "cred", ".", "oath_type", ".", "name", ")", ",", "nl", "=", "False", ")", "if", "period", ":", "click", ".", "echo", "(", "', {}'", ".", "format", "(", "cred", ".", "period", ")", ",", "nl", "=", "False", ")", "click", ".", "echo", "(", ")" ]
List all credentials. List all credentials stored on your YubiKey.
[ "List", "all", "credentials", "." ]
python
train
29.6
tjvr/kurt
kurt/scratch14/objtable.py
https://github.com/tjvr/kurt/blob/fcccd80cae11dc233f6dd02b40ec9a388c62f259/kurt/scratch14/objtable.py#L60-L69
def _encode(self, obj, context): """Encodes a class to a lower-level object using the class' own to_construct function. If no such function is defined, returns the object unchanged. """ func = getattr(obj, 'to_construct', None) if callable(func): return func(context) else: return obj
[ "def", "_encode", "(", "self", ",", "obj", ",", "context", ")", ":", "func", "=", "getattr", "(", "obj", ",", "'to_construct'", ",", "None", ")", "if", "callable", "(", "func", ")", ":", "return", "func", "(", "context", ")", "else", ":", "return", "obj" ]
Encodes a class to a lower-level object using the class' own to_construct function. If no such function is defined, returns the object unchanged.
[ "Encodes", "a", "class", "to", "a", "lower", "-", "level", "object", "using", "the", "class", "own", "to_construct", "function", ".", "If", "no", "such", "function", "is", "defined", "returns", "the", "object", "unchanged", "." ]
python
train
35.5
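The adapter above is plain duck typing: ask the object to serialize itself if it knows how, otherwise pass it through. A self-contained sketch with a hypothetical class:
def encode(obj, context):
    func = getattr(obj, 'to_construct', None)
    return func(context) if callable(func) else obj

class Colour:
    def __init__(self, r, g, b):
        self.rgb = (r, g, b)
    def to_construct(self, context):
        return {'rgb': self.rgb, 'depth': context.get('depth', 24)}

print(encode(Colour(255, 0, 0), {'depth': 32}))  # {'rgb': (255, 0, 0), 'depth': 32}
print(encode(42, {}))                            # 42, passed through unchanged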
intiocean/pyinter
pyinter/examples/daterange.py
https://github.com/intiocean/pyinter/blob/fb6e904307477fa43123cc9ab326680aa1a8cd62/pyinter/examples/daterange.py#L5-L11
def daterange(start, end, delta=timedelta(days=1), lower=Interval.CLOSED, upper=Interval.OPEN): """Returns a generator which creates the next value in the range on demand""" date_interval = Interval(lower=lower, lower_value=start, upper_value=end, upper=upper) current = start if start in date_interval else start + delta while current in date_interval: yield current current = current + delta
[ "def", "daterange", "(", "start", ",", "end", ",", "delta", "=", "timedelta", "(", "days", "=", "1", ")", ",", "lower", "=", "Interval", ".", "CLOSED", ",", "upper", "=", "Interval", ".", "OPEN", ")", ":", "date_interval", "=", "Interval", "(", "lower", "=", "lower", ",", "lower_value", "=", "start", ",", "upper_value", "=", "end", ",", "upper", "=", "upper", ")", "current", "=", "start", "if", "start", "in", "date_interval", "else", "start", "+", "delta", "while", "current", "in", "date_interval", ":", "yield", "current", "current", "=", "current", "+", "delta" ]
Returns a generator which creates the next value in the range on demand
[ "Returns", "a", "generator", "which", "creates", "the", "next", "value", "in", "the", "range", "on", "demand" ]
python
train
59.857143
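Usage sketch for the generator above; the import path mirrors the file shown in this record but may differ depending on how pyinter is installed.
from datetime import date, timedelta
from pyinter.examples.daterange import daterange

# Default bounds are closed-open, so the end date is excluded.
print(list(daterange(date(2024, 1, 1), date(2024, 1, 4))))
# [datetime.date(2024, 1, 1), datetime.date(2024, 1, 2), datetime.date(2024, 1, 3)]

print(list(daterange(date(2024, 1, 1), date(2024, 1, 7), delta=timedelta(days=2))))
# [datetime.date(2024, 1, 1), datetime.date(2024, 1, 3), datetime.date(2024, 1, 5)]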
kennethreitz/clint
clint/resources.py
https://github.com/kennethreitz/clint/blob/9d3693d644b8587d985972b6075d970096f6439e/clint/resources.py#L67-L73
def open(self, filename, mode='r'): """Returns file object from given filename.""" self._raise_if_none() fn = path_join(self.path, filename) return open(fn, mode)
[ "def", "open", "(", "self", ",", "filename", ",", "mode", "=", "'r'", ")", ":", "self", ".", "_raise_if_none", "(", ")", "fn", "=", "path_join", "(", "self", ".", "path", ",", "filename", ")", "return", "open", "(", "fn", ",", "mode", ")" ]
Returns file object from given filename.
[ "Returns", "file", "object", "from", "given", "filename", "." ]
python
train
27.142857
jxtech/wechatpy
wechatpy/client/api/wxa.py
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/wxa.py#L145-L164
def commit(self, template_id, ext_json, version, description): """ Upload mini program code for an authorized mini program account. For details, please refer to https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&id=open1489140610_Uavc4 :param template_id: ID of the code template in the code library :param ext_json: custom configuration defined by the third party :param version: code version number, customizable by the developer :param description: code description, customizable by the developer """ return self._post( 'wxa/commit', data={ 'template_id': template_id, 'ext_json': ext_json, 'user_version': version, 'user_desc': description, }, )
[ "def", "commit", "(", "self", ",", "template_id", ",", "ext_json", ",", "version", ",", "description", ")", ":", "return", "self", ".", "_post", "(", "'wxa/commit'", ",", "data", "=", "{", "'template_id'", ":", "template_id", ",", "'ext_json'", ":", "ext_json", ",", "'user_version'", ":", "version", ",", "'user_desc'", ":", "description", ",", "}", ",", ")" ]
Upload mini program code for an authorized mini program account. For details, please refer to https://open.weixin.qq.com/cgi-bin/showdocument?action=dir_list&id=open1489140610_Uavc4 :param template_id: ID of the code template in the code library :param ext_json: custom configuration defined by the third party :param version: code version number, customizable by the developer :param description: code description, customizable by the developer
[ "Upload", "mini", "program", "code", "for", "an", "authorized", "mini", "program", "account", ".", "For", "details", "please", "refer", "to", "https", ":", "//", "open", ".", "weixin", ".", "qq", ".", "com", "/", "cgi", "-", "bin", "/", "showdocument?action", "=", "dir_list&id", "=", "open1489140610_Uavc4" ]
python
train
31
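A hedged usage sketch, assuming the method is reached through a WeChatClient's wxa API as in current wechatpy; the appid, secret and ext_json payload are placeholders.
import json
from wechatpy import WeChatClient

client = WeChatClient('component-appid', 'component-secret')  # placeholder credentials
result = client.wxa.commit(
    template_id=12,
    ext_json=json.dumps({'extAppid': 'wx-authorized-appid', 'ext': {'name': 'demo'}}),
    version='v0.1.0',
    description='first upload of the mini program code',
)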
materialsproject/custodian
custodian/ansible/actions.py
https://github.com/materialsproject/custodian/blob/b33b01574fc899f959acb3c495398fd3d0fc41d0/custodian/ansible/actions.py#L154-L168
def file_create(filename, settings): """ Creates a file. Args: filename (str): Filename. settings (dict): Must be {"content": actual_content} """ if len(settings) != 1: raise ValueError("Settings must only contain one item with key " "'content'.") for k, v in settings.items(): if k == "content": with open(filename, 'w') as f: f.write(v)
[ "def", "file_create", "(", "filename", ",", "settings", ")", ":", "if", "len", "(", "settings", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"Settings must only contain one item with key \"", "\"'content'.\"", ")", "for", "k", ",", "v", "in", "settings", ".", "items", "(", ")", ":", "if", "k", "==", "\"content\"", ":", "with", "open", "(", "filename", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "v", ")" ]
Creates a file. Args: filename (str): Filename. settings (dict): Must be {"content": actual_content}
[ "Creates", "a", "file", "." ]
python
train
32.4
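Usage sketch, assuming file_create is exposed as a static method on custodian's FileActions class (the import path is an assumption); the INCAR content is an arbitrary placeholder.
from custodian.ansible.actions import FileActions

FileActions.file_create('INCAR', {'content': 'ALGO = Fast\nEDIFF = 1e-6\n'})
# Passing more than the single 'content' key raises ValueError:
# FileActions.file_create('INCAR', {'content': '...', 'mode': 'w'})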
galaxyproject/pulsar
tools/install_venv.py
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/tools/install_venv.py#L92-L105
def create_virtualenv(venv=VENV): """Creates the virtual environment and installs PIP only into the virtual environment """ print 'Creating venv...', run_command(['virtualenv', '-q', '--no-site-packages', VENV]) print 'done.' print 'Installing pip in virtualenv...', if not run_command([WITH_VENV, 'easy_install', 'pip']).strip(): die("Failed to install pip.") print 'done.' print 'Installing distribute in virtualenv...' pip_install('distribute>=0.6.24') print 'done.'
[ "def", "create_virtualenv", "(", "venv", "=", "VENV", ")", ":", "print", "'Creating venv...'", ",", "run_command", "(", "[", "'virtualenv'", ",", "'-q'", ",", "'--no-site-packages'", ",", "VENV", "]", ")", "print", "'done.'", "print", "'Installing pip in virtualenv...'", ",", "if", "not", "run_command", "(", "[", "WITH_VENV", ",", "'easy_install'", ",", "'pip'", "]", ")", ".", "strip", "(", ")", ":", "die", "(", "\"Failed to install pip.\"", ")", "print", "'done.'", "print", "'Installing distribute in virtualenv...'", "pip_install", "(", "'distribute>=0.6.24'", ")", "print", "'done.'" ]
Creates the virtual environment and installs PIP only into the virtual environment
[ "Creates", "the", "virtual", "environment", "and", "installs", "PIP", "only", "into", "the", "virtual", "environment" ]
python
train
36.571429
saltstack/salt
salt/modules/zfs.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zfs.py#L575-L639
def diff(name_a, name_b=None, **kwargs): ''' Display the difference between a snapshot of a given filesystem and another snapshot of that filesystem from a later time or the current contents of the filesystem. name_a : string name of snapshot name_b : string (optional) name of snapshot or filesystem show_changetime : boolean display the path's inode change time as the first column of output. (default = True) show_indication : boolean display an indication of the type of file. (default = True) parsable : boolean if true we don't parse the timestamp to a more readable date (default = True) .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' zfs.diff myzpool/mydataset@yesterday myzpool/mydataset ''' ## Configure command # NOTE: initialize the defaults flags = ['-H'] target = [] # NOTE: set extra config from kwargs if kwargs.get('show_changetime', True): flags.append('-t') if kwargs.get('show_indication', True): flags.append('-F') # NOTE: update target target.append(name_a) if name_b: target.append(name_b) ## Diff filesystem/snapshot res = __salt__['cmd.run_all']( __utils__['zfs.zfs_command']( command='diff', flags=flags, target=target, ), python_shell=False, ) if res['retcode'] != 0: return __utils__['zfs.parse_command_result'](res) else: if not kwargs.get('parsable', True) and kwargs.get('show_changetime', True): ret = OrderedDict() for entry in res['stdout'].splitlines(): entry = entry.split() entry_timestamp = __utils__['dateutils.strftime'](entry[0], '%Y-%m-%d.%H:%M:%S.%f') entry_data = "\t\t".join(entry[1:]) ret[entry_timestamp] = entry_data else: ret = res['stdout'].splitlines() return ret
[ "def", "diff", "(", "name_a", ",", "name_b", "=", "None", ",", "*", "*", "kwargs", ")", ":", "## Configure command", "# NOTE: initialize the defaults", "flags", "=", "[", "'-H'", "]", "target", "=", "[", "]", "# NOTE: set extra config from kwargs", "if", "kwargs", ".", "get", "(", "'show_changetime'", ",", "True", ")", ":", "flags", ".", "append", "(", "'-t'", ")", "if", "kwargs", ".", "get", "(", "'show_indication'", ",", "True", ")", ":", "flags", ".", "append", "(", "'-F'", ")", "# NOTE: update target", "target", ".", "append", "(", "name_a", ")", "if", "name_b", ":", "target", ".", "append", "(", "name_b", ")", "## Diff filesystem/snapshot", "res", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "__utils__", "[", "'zfs.zfs_command'", "]", "(", "command", "=", "'diff'", ",", "flags", "=", "flags", ",", "target", "=", "target", ",", ")", ",", "python_shell", "=", "False", ",", ")", "if", "res", "[", "'retcode'", "]", "!=", "0", ":", "return", "__utils__", "[", "'zfs.parse_command_result'", "]", "(", "res", ")", "else", ":", "if", "not", "kwargs", ".", "get", "(", "'parsable'", ",", "True", ")", "and", "kwargs", ".", "get", "(", "'show_changetime'", ",", "True", ")", ":", "ret", "=", "OrderedDict", "(", ")", "for", "entry", "in", "res", "[", "'stdout'", "]", ".", "splitlines", "(", ")", ":", "entry", "=", "entry", ".", "split", "(", ")", "entry_timestamp", "=", "__utils__", "[", "'dateutils.strftime'", "]", "(", "entry", "[", "0", "]", ",", "'%Y-%m-%d.%H:%M:%S.%f'", ")", "entry_data", "=", "\"\\t\\t\"", ".", "join", "(", "entry", "[", "1", ":", "]", ")", "ret", "[", "entry_timestamp", "]", "=", "entry_data", "else", ":", "ret", "=", "res", "[", "'stdout'", "]", ".", "splitlines", "(", ")", "return", "ret" ]
Display the difference between a snapshot of a given filesystem and another snapshot of that filesystem from a later time or the current contents of the filesystem. name_a : string name of snapshot name_b : string (optional) name of snapshot or filesystem show_changetime : boolean display the path's inode change time as the first column of output. (default = True) show_indication : boolean display an indication of the type of file. (default = True) parsable : boolean if true we don't parse the timestamp to a more readable date (default = True) .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' zfs.diff myzpool/mydataset@yesterday myzpool/mydataset
[ "Display", "the", "difference", "between", "a", "snapshot", "of", "a", "given", "filesystem", "and", "another", "snapshot", "of", "that", "filesystem", "from", "a", "later", "time", "or", "the", "current", "contents", "of", "the", "filesystem", "." ]
python
train
30.138462
instaloader/instaloader
instaloader/instaloadercontext.py
https://github.com/instaloader/instaloader/blob/87d877e650cd8020b04b8b51be120599a441fd5b/instaloader/instaloadercontext.py#L286-L301
def _graphql_query_waittime(self, query_hash: str, current_time: float, untracked_queries: bool = False) -> int: """Calculate time needed to wait before GraphQL query can be executed.""" sliding_window = 660 if query_hash not in self._graphql_query_timestamps: self._graphql_query_timestamps[query_hash] = [] self._graphql_query_timestamps[query_hash] = list(filter(lambda t: t > current_time - 60 * 60, self._graphql_query_timestamps[query_hash])) reqs_in_sliding_window = list(filter(lambda t: t > current_time - sliding_window, self._graphql_query_timestamps[query_hash])) count_per_sliding_window = self._graphql_request_count_per_sliding_window(query_hash) if len(reqs_in_sliding_window) < count_per_sliding_window and not untracked_queries: return max(0, self._graphql_earliest_next_request_time - current_time) next_request_time = min(reqs_in_sliding_window) + sliding_window + 6 if untracked_queries: self._graphql_earliest_next_request_time = next_request_time return round(max(next_request_time, self._graphql_earliest_next_request_time) - current_time)
[ "def", "_graphql_query_waittime", "(", "self", ",", "query_hash", ":", "str", ",", "current_time", ":", "float", ",", "untracked_queries", ":", "bool", "=", "False", ")", "->", "int", ":", "sliding_window", "=", "660", "if", "query_hash", "not", "in", "self", ".", "_graphql_query_timestamps", ":", "self", ".", "_graphql_query_timestamps", "[", "query_hash", "]", "=", "[", "]", "self", ".", "_graphql_query_timestamps", "[", "query_hash", "]", "=", "list", "(", "filter", "(", "lambda", "t", ":", "t", ">", "current_time", "-", "60", "*", "60", ",", "self", ".", "_graphql_query_timestamps", "[", "query_hash", "]", ")", ")", "reqs_in_sliding_window", "=", "list", "(", "filter", "(", "lambda", "t", ":", "t", ">", "current_time", "-", "sliding_window", ",", "self", ".", "_graphql_query_timestamps", "[", "query_hash", "]", ")", ")", "count_per_sliding_window", "=", "self", ".", "_graphql_request_count_per_sliding_window", "(", "query_hash", ")", "if", "len", "(", "reqs_in_sliding_window", ")", "<", "count_per_sliding_window", "and", "not", "untracked_queries", ":", "return", "max", "(", "0", ",", "self", ".", "_graphql_earliest_next_request_time", "-", "current_time", ")", "next_request_time", "=", "min", "(", "reqs_in_sliding_window", ")", "+", "sliding_window", "+", "6", "if", "untracked_queries", ":", "self", ".", "_graphql_earliest_next_request_time", "=", "next_request_time", "return", "round", "(", "max", "(", "next_request_time", ",", "self", ".", "_graphql_earliest_next_request_time", ")", "-", "current_time", ")" ]
Calculate time needed to wait before GraphQL query can be executed.
[ "Calculate", "time", "needed", "to", "wait", "before", "GraphQL", "query", "can", "be", "executed", "." ]
python
train
79.625
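A standalone sketch of the sliding-window throttle idea the method above implements: remember request timestamps, and once the window is full, wait until the oldest request ages out. The window length, budget and safety margin below are illustrative, not Instaloader's tuned values.
import time

WINDOW = 660        # seconds
BUDGET = 100        # requests allowed per window
_timestamps = []    # monotonic timestamps of past requests

def waittime(now=None):
    now = time.monotonic() if now is None else now
    recent = [t for t in _timestamps if t > now - WINDOW]
    if len(recent) < BUDGET:
        return 0.0
    # window is full: wait until the oldest request leaves it, plus a small margin
    return max(0.0, min(recent) + WINDOW + 6 - now)

def record_request(now=None):
    _timestamps.append(time.monotonic() if now is None else now)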
melizalab/libtfr
examples/tfr_tm.py
https://github.com/melizalab/libtfr/blob/9f7e7705793d258a0b205f185b20e3bbcda473da/examples/tfr_tm.py#L14-L56
def fmsin(N, fnormin=0.05, fnormax=0.45, period=None, t0=None, fnorm0=0.25, pm1=1): """ Signal with sinusoidal frequency modulation. generates a frequency modulation with a sinusoidal frequency. This sinusoidal modulation is designed such that the instantaneous frequency at time T0 is equal to FNORM0, and the ambiguity between increasing or decreasing frequency is solved by PM1. N : number of points. FNORMIN : smallest normalized frequency (default: 0.05) FNORMAX : highest normalized frequency (default: 0.45) PERIOD : period of the sinusoidal fm (default: N ) T0 : time reference for the phase (default: N/2 ) FNORM0 : normalized frequency at time T0 (default: 0.25) PM1 : frequency direction at T0 (-1 or +1) (default: +1 ) Returns: Y : signal IFLAW : its instantaneous frequency law Example: z,i=fmsin(140,0.05,0.45,100,20,0.3,-1.0) Original MATLAB code F. Auger, July 1995. (note: Licensed under GPL; see main LICENSE file) """ if period==None: period = N if t0==None: t0 = N/2 pm1 = nx.sign(pm1) fnormid=0.5*(fnormax+fnormin); delta =0.5*(fnormax-fnormin); phi =-pm1*nx.arccos((fnorm0-fnormid)/delta); time =nx.arange(1,N)-t0; phase =2*nx.pi*fnormid*time+delta*period*(nx.sin(2*nx.pi*time/period+phi)-nx.sin(phi)); y =nx.exp(1j*phase) iflaw =fnormid+delta*nx.cos(2*nx.pi*time/period+phi); return y,iflaw
[ "def", "fmsin", "(", "N", ",", "fnormin", "=", "0.05", ",", "fnormax", "=", "0.45", ",", "period", "=", "None", ",", "t0", "=", "None", ",", "fnorm0", "=", "0.25", ",", "pm1", "=", "1", ")", ":", "if", "period", "==", "None", ":", "period", "=", "N", "if", "t0", "==", "None", ":", "t0", "=", "N", "/", "2", "pm1", "=", "nx", ".", "sign", "(", "pm1", ")", "fnormid", "=", "0.5", "*", "(", "fnormax", "+", "fnormin", ")", "delta", "=", "0.5", "*", "(", "fnormax", "-", "fnormin", ")", "phi", "=", "-", "pm1", "*", "nx", ".", "arccos", "(", "(", "fnorm0", "-", "fnormid", ")", "/", "delta", ")", "time", "=", "nx", ".", "arange", "(", "1", ",", "N", ")", "-", "t0", "phase", "=", "2", "*", "nx", ".", "pi", "*", "fnormid", "*", "time", "+", "delta", "*", "period", "*", "(", "nx", ".", "sin", "(", "2", "*", "nx", ".", "pi", "*", "time", "/", "period", "+", "phi", ")", "-", "nx", ".", "sin", "(", "phi", ")", ")", "y", "=", "nx", ".", "exp", "(", "1j", "*", "phase", ")", "iflaw", "=", "fnormid", "+", "delta", "*", "nx", ".", "cos", "(", "2", "*", "nx", ".", "pi", "*", "time", "/", "period", "+", "phi", ")", "return", "y", ",", "iflaw" ]
Signal with sinusoidal frequency modulation. generates a frequency modulation with a sinusoidal frequency. This sinusoidal modulation is designed such that the instantaneous frequency at time T0 is equal to FNORM0, and the ambiguity between increasing or decreasing frequency is solved by PM1. N : number of points. FNORMIN : smallest normalized frequency (default: 0.05) FNORMAX : highest normalized frequency (default: 0.45) PERIOD : period of the sinusoidal fm (default: N ) T0 : time reference for the phase (default: N/2 ) FNORM0 : normalized frequency at time T0 (default: 0.25) PM1 : frequency direction at T0 (-1 or +1) (default: +1 ) Returns: Y : signal IFLAW : its instantaneous frequency law Example: z,i=fmsin(140,0.05,0.45,100,20,0.3,-1.0) Original MATLAB code F. Auger, July 1995. (note: Licensed under GPL; see main LICENSE file)
[ "Signal", "with", "sinusoidal", "frequency", "modulation", "." ]
python
train
34.953488
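A usage sketch that cross-checks the returned signal against its frequency law: the phase increment between consecutive samples should track iflaw. It assumes fmsin from the record above is in scope and uses numpy directly rather than the module's nx alias.
import numpy as np

z, iflaw = fmsin(140, 0.05, 0.45, 100, 20, 0.3, -1.0)
inst_freq = np.angle(z[1:] * np.conj(z[:-1])) / (2 * np.pi)
print(np.max(np.abs(inst_freq - iflaw[1:])))  # small, limited by the finite difference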
matthieugouel/gibica
gibica/interpreter.py
https://github.com/matthieugouel/gibica/blob/65f937f7a6255078cc22eb7691a2897466032909/gibica/interpreter.py#L73-L85
def visit_FunctionBody(self, node): """Visitor for `FunctionBody` AST node.""" for child in node.children: return_value = self.visit(child) if isinstance(child, ReturnStatement): return return_value if isinstance(child, (IfStatement, WhileStatement)): if return_value is not None: return return_value return NoneType()
[ "def", "visit_FunctionBody", "(", "self", ",", "node", ")", ":", "for", "child", "in", "node", ".", "children", ":", "return_value", "=", "self", ".", "visit", "(", "child", ")", "if", "isinstance", "(", "child", ",", "ReturnStatement", ")", ":", "return", "return_value", "if", "isinstance", "(", "child", ",", "(", "IfStatement", ",", "WhileStatement", ")", ")", ":", "if", "return_value", "is", "not", "None", ":", "return", "return_value", "return", "NoneType", "(", ")" ]
Visitor for `FunctionBody` AST node.
[ "Visitor", "for", "FunctionBody", "AST", "node", "." ]
python
train
32.384615
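visit_FunctionBody plugs into a visit_<ClassName> dispatcher. The sketch below shows that dispatch shape with made-up node classes; it is not gibica's real AST or interpreter.
class NodeVisitor:
    def visit(self, node):
        method = getattr(self, 'visit_' + type(node).__name__, self.generic_visit)
        return method(node)

    def generic_visit(self, node):
        raise RuntimeError('No visit_{} method'.format(type(node).__name__))

class Number:
    def __init__(self, value):
        self.value = value

class Printer(NodeVisitor):
    def visit_Number(self, node):
        return node.value

print(Printer().visit(Number(7)))  # 7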
casacore/python-casacore
casacore/images/image.py
https://github.com/casacore/python-casacore/blob/975510861ea005f7919dd9e438b5f98a1682eebe/casacore/images/image.py#L354-L375
def putmask(self, value, blc=(), trc=(), inc=()): """Put image mask. Using the arguments blc (bottom left corner), trc (top right corner), and inc (stride) it is possible to put a data slice. Not all axes need to be specified. Missing values default to begin, end, and 1. The data should be a numpy array. Its dimensionality must be the same as the dimensionality of the image. Note that the casacore images use the convention that a mask value True means good and False means bad. However, numpy uses the opposite. Therefore the mask will be negated, so a numpy masked array can be given directly. The mask is not written if the image has no mask and if the entire mask is False. In that case the mask most likely comes from a getmask operation on an image without a mask. """ # casa and numpy have opposite flags return self._putmask(~value, self._adjustBlc(blc), self._adjustInc(inc))
[ "def", "putmask", "(", "self", ",", "value", ",", "blc", "=", "(", ")", ",", "trc", "=", "(", ")", ",", "inc", "=", "(", ")", ")", ":", "# casa and numpy have opposite flags", "return", "self", ".", "_putmask", "(", "~", "value", ",", "self", ".", "_adjustBlc", "(", "blc", ")", ",", "self", ".", "_adjustInc", "(", "inc", ")", ")" ]
Put image mask. Using the arguments blc (bottom left corner), trc (top right corner), and inc (stride) it is possible to put a data slice. Not all axes need to be specified. Missing values default to begin, end, and 1. The data should be a numpy array. Its dimensionality must be the same as the dimensionality of the image. Note that the casacore images use the convention that a mask value True means good and False means bad. However, numpy uses the opposite. Therefore the mask will be negated, so a numpy masked array can be given directly. The mask is not written if the image has no mask and if the entire mask is False. In that case the mask most likely comes from a getmask operation on an image without a mask.
[ "Put", "image", "mask", "." ]
python
train
46.545455
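A small numpy illustration of the convention flip described above: numpy masks use True for bad values, casacore images use True for good ones, hence the negation before writing. The image object itself is assumed to exist already and is only referenced in a comment.
import numpy as np

data = np.ma.masked_invalid(np.array([[1.0, np.nan], [3.0, 4.0]]))
print(data.mask)    # True marks the bad (NaN) pixel for numpy
# im.putmask(data.mask) would store ~data.mask, where True now means "good"
print(~data.mask)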