docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Returns a help string for all known flags. Args: prefix: str, per-line output prefix. include_special_flags: bool, whether to include description of SPECIAL_FLAGS, i.e. --flagfile and --undefok. Returns: str, formatted help message.
def get_help(self, prefix='', include_special_flags=True):
  """Returns a help string for all known flags.

  Args:
    prefix: str, per-line output prefix.
    include_special_flags: bool, whether to include description of
      SPECIAL_FLAGS, i.e. --flagfile and --undefok.

  Returns:
    str, formatted help message.
  """
  per_module = self.flags_by_module_dict()
  if not per_module:
    # No per-module registration info: render one flat list of flags.
    lines = []
    flag_values = six.itervalues(self._flags())
    if include_special_flags:
      flag_values = itertools.chain(
          flag_values,
          six.itervalues(_helpers.SPECIAL_FLAGS._flags()))  # pylint: disable=protected-access
    self._render_flag_list(flag_values, lines, prefix)
    return '\n'.join(lines)
  module_names = sorted(per_module)
  # Print the help for the main module first, if possible.
  main_module = sys.argv[0]
  if main_module in module_names:
    module_names.remove(main_module)
    module_names.insert(0, main_module)
  return self._get_help_for_modules(module_names, prefix, include_special_flags)
175,781
Returns the help string for a list of modules. Private to absl.flags package. Args: modules: List[str], a list of modules to get the help string for. prefix: str, a string that is prepended to each generated help line. include_special_flags: bool, whether to include description of SPECIAL_FLAGS, i.e. --flagfile and --undefok.
def _get_help_for_modules(self, modules, prefix, include_special_flags):
  """Returns the help string for a list of modules.

  Private to absl.flags package.

  Args:
    modules: List[str], a list of modules to get the help string for.
    prefix: str, a string that is prepended to each generated help line.
    include_special_flags: bool, whether to include description of
      SPECIAL_FLAGS, i.e. --flagfile and --undefok.
  """
  lines = []
  for mod in modules:
    self._render_our_module_flags(mod, lines, prefix)
  if include_special_flags:
    # SPECIAL_FLAGS are documented under the absl.flags module itself.
    self._render_module_flags(
        'absl.flags',
        six.itervalues(_helpers.SPECIAL_FLAGS._flags()),  # pylint: disable=protected-access
        lines,
        prefix)
  return '\n'.join(lines)
175,782
Returns a help string for the key flags of a given module. Args: module: module|str, the module to render key flags for. output_lines: [str], a list of strings. The generated help message lines will be appended to this list. prefix: str, a string that is prepended to each generated help line.
def _render_our_module_key_flags(self, module, output_lines, prefix=''):
  """Returns a help string for the key flags of a given module.

  Args:
    module: module|str, the module to render key flags for.
    output_lines: [str], a list of strings.  The generated help message
      lines will be appended to this list.
    prefix: str, a string that is prepended to each generated help line.
  """
  flags_for_module = self.get_key_flags_for_module(module)
  # Render nothing at all when the module declares no key flags.
  if flags_for_module:
    self._render_module_flags(module, flags_for_module, output_lines, prefix)
175,785
Describes the key flags of a module. Args: module: module|str, the module to describe the key flags for. Returns: str, describing the key flags of a module.
def module_help(self, module):
  """Describes the key flags of a module.

  Args:
    module: module|str, the module to describe the key flags for.

  Returns:
    str, describing the key flags of a module.
  """
  lines = []
  self._render_our_module_key_flags(module, lines)
  return '\n'.join(lines)
175,786
Returns the value of a flag (if not None) or a default value. Args: name: str, the name of a flag. default: Default value to use if the flag value is None. Returns: Requested flag value or default.
def get_flag_value(self, name, default):  # pylint: disable=invalid-name
  """Returns the value of a flag (if not None) or a default value.

  Args:
    name: str, the name of a flag.
    default: Default value to use if the flag value is None.

  Returns:
    Requested flag value or default.
  """
  value = self.__getattr__(name)
  # Explicit None test: values such as 0, '' or False are legitimate flag
  # values and must not fall back to the default.
  return default if value is None else value
175,788
Returns filename from a flagfile_str of form -[-]flagfile=filename. The cases of --flagfile foo and -flagfile foo shouldn't be hitting this function, as they are dealt with in the level above this function. Args: flagfile_str: str, the flagfile string. Returns: str, the filename from a flagfile_str of form -[-]flagfile=filename. Raises: Error: Raised when illegal --flagfile is provided.
def _extract_filename(self, flagfile_str):
  """Returns filename from a flagfile_str of form -[-]flagfile=filename.

  The cases of --flagfile foo and -flagfile foo shouldn't be hitting
  this function, as they are dealt with in the level above this
  function.

  Args:
    flagfile_str: str, the flagfile string.

  Returns:
    str, the filename from a flagfile_str of form -[-]flagfile=filename.

  Raises:
    Error: Raised when illegal --flagfile is provided.
  """
  # Accept both the long and the short spelling of the option.
  for marker in ('--flagfile=', '-flagfile='):
    if flagfile_str.startswith(marker):
      return os.path.expanduser(flagfile_str[len(marker):].strip())
  raise _exceptions.Error(
      'Hit illegal --flagfile type: %s' % flagfile_str)
175,790
Appends all flags assignments from this FlagInfo object to a file. Output will be in the format of a flagfile. NOTE: MUST mirror the behavior of the C++ AppendFlagsIntoFile from https://github.com/gflags/gflags. Args: filename: str, name of the file.
def append_flags_into_file(self, filename):
  """Appends all flags assignments from this FlagInfo object to a file.

  Output will be in the format of a flagfile.

  NOTE: MUST mirror the behavior of the C++ AppendFlagsIntoFile
  from https://github.com/gflags/gflags.

  Args:
    filename: str, name of the file.
  """
  serialized = self.flags_into_string()
  # Append (never truncate) so repeated calls accumulate flagfile entries.
  with open(filename, 'a') as out_file:
    out_file.write(serialized)
175,794
Outputs flag documentation in XML format. NOTE: We use element names that are consistent with those used by the C++ command-line flag library, from https://github.com/gflags/gflags. We also use a few new elements (e.g., <key>), but we do not interfere / overlap with existing XML elements used by the C++ library. Please maintain this consistency. Args: outfile: File object we write to. Default None means sys.stdout.
def write_help_in_xml_format(self, outfile=None):
  """Outputs flag documentation in XML format.

  NOTE: We use element names that are consistent with those used by
  the C++ command-line flag library, from
  https://github.com/gflags/gflags.
  We also use a few new elements (e.g., <key>), but we do not
  interfere / overlap with existing XML elements used by the C++
  library.  Please maintain this consistency.

  Args:
    outfile: File object we write to.  Default None means sys.stdout.
  """
  doc = minidom.Document()
  all_flag = doc.createElement('AllFlags')
  doc.appendChild(all_flag)

  # <program>: basename of the running program.
  all_flag.appendChild(_helpers.create_xml_dom_element(
      doc, 'program', os.path.basename(sys.argv[0])))

  # <usage>: the main module's docstring, or a default usage line.
  usage_doc = sys.modules['__main__'].__doc__
  if not usage_doc:
    usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
  else:
    usage_doc = usage_doc.replace('%s', sys.argv[0])
  all_flag.appendChild(_helpers.create_xml_dom_element(
      doc, 'usage', usage_doc))

  # Get list of key flags for the main module.
  key_flags = self.get_key_flags_for_module(sys.argv[0])

  # Sort flags by declaring module name and next by flag name.
  flags_by_module = self.flags_by_module_dict()
  all_module_names = list(flags_by_module.keys())
  all_module_names.sort()
  for module_name in all_module_names:
    flag_list = [(f.name, f) for f in flags_by_module[module_name]]
    flag_list.sort()
    for unused_flag_name, flag in flag_list:
      is_key = flag in key_flags
      all_flag.appendChild(flag._create_xml_dom_element(  # pylint: disable=protected-access
          doc, module_name, is_key=is_key))

  outfile = outfile or sys.stdout
  # On Python 2 toprettyxml(encoding=...) returns bytes that can be written
  # directly; on Python 3 they must be decoded to text first.
  if six.PY2:
    outfile.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
  else:
    outfile.write(
        doc.toprettyxml(indent=' ', encoding='utf-8').decode('utf-8'))
  outfile.flush()
175,795
Converts an absl log level to a cpp log level. Args: level: int, an absl.logging level. Raises: TypeError: Raised when level is not an integer. Returns: The corresponding integer level for use in Abseil C++.
def absl_to_cpp(level):
  """Converts an absl log level to a cpp log level.

  Args:
    level: int, an absl.logging level.

  Raises:
    TypeError: Raised when level is not an integer.

  Returns:
    The corresponding integer level for use in Abseil C++.
  """
  if not isinstance(level, int):
    raise TypeError('Expect an int level, found {}'.format(type(level)))
  # C++ log levels must be >= 0: non-negative absl levels collapse to 0,
  # negative (more severe) levels map to their magnitude.
  return 0 if level >= 0 else -level
175,797
Converts an integer level from the absl value to the standard value. Args: level: int, an absl.logging level. Raises: TypeError: Raised when level is not an integer. Returns: The corresponding integer level for use in standard logging.
def absl_to_standard(level):
  """Converts an integer level from the absl value to the standard value.

  Args:
    level: int, an absl.logging level.

  Raises:
    TypeError: Raised when level is not an integer.

  Returns:
    The corresponding integer level for use in standard logging.
  """
  if not isinstance(level, int):
    raise TypeError('Expect an int level, found {}'.format(type(level)))
  # Clamp anything more severe than FATAL up to FATAL.
  level = max(level, ABSL_FATAL)
  if level <= ABSL_DEBUG:
    return ABSL_TO_STANDARD[level]
  # Maps to vlog levels.
  return STANDARD_DEBUG - level + 1
175,798
Converts an integer level from the standard value to the absl value. Args: level: int, a Python standard logging level. Raises: TypeError: Raised when level is not an integer. Returns: The corresponding integer level for use in absl logging.
def standard_to_absl(level):
  """Converts an integer level from the standard value to the absl value.

  Args:
    level: int, a Python standard logging level.

  Raises:
    TypeError: Raised when level is not an integer.

  Returns:
    The corresponding integer level for use in absl logging.
  """
  if not isinstance(level, int):
    raise TypeError('Expect an int level, found {}'.format(type(level)))
  if level < 0:
    level = 0
  if level < STANDARD_DEBUG:
    # Maps to vlog levels.
    return STANDARD_DEBUG - level + 1
  if level < STANDARD_INFO:
    return ABSL_DEBUG
  if level < STANDARD_WARNING:
    return ABSL_INFO
  if level < STANDARD_ERROR:
    return ABSL_WARNING
  if level < STANDARD_CRITICAL:
    return ABSL_ERROR
  return ABSL_FATAL
175,799
Tries to parse the flags, print usage, and exit if unparseable. Args: args: [str], a non-empty list of the command line arguments including program name. Returns: [str], a non-empty list of remaining command line arguments after parsing flags, including program name.
def parse_flags_with_usage(args):
  """Tries to parse the flags, print usage, and exit if unparseable.

  Args:
    args: [str], a non-empty list of the command line arguments including
      program name.

  Returns:
    [str], a non-empty list of remaining command line arguments after parsing
    flags, including program name.
  """
  try:
    return FLAGS(args)
  except flags.Error as error:
    # Mirror the C++ behavior: report the parsing failure and die.
    message = 'FATAL Flags parsing error: %s\n' % error
    sys.stderr.write(message)
    sys.stderr.write('Pass --helpshort or --helpfull to see help on flags.\n')
    sys.exit(1)
175,800
Writes __main__'s docstring to stderr with some help text. Args: shorthelp: bool, if True, prints only flags from the main module, rather than all flags. writeto_stdout: bool, if True, writes help message to stdout, rather than to stderr. detailed_error: str, additional detail about why usage info was presented. exitcode: optional integer, if set, exits with this status code after writing help.
def usage(shorthelp=False, writeto_stdout=False, detailed_error=None,
          exitcode=None):
  """Writes __main__'s docstring to stderr with some help text.

  Args:
    shorthelp: bool, if True, prints only flags from the main module,
        rather than all flags.
    writeto_stdout: bool, if True, writes help message to stdout,
        rather than to stderr.
    detailed_error: str, additional detail about why usage info was presented.
    exitcode: optional integer, if set, exits with this status code after
        writing help.
  """
  if writeto_stdout:
    stdfile = sys.stdout
  else:
    stdfile = sys.stderr
  doc = sys.modules['__main__'].__doc__
  if not doc:
    # Main module has no docstring: synthesize a minimal usage line.
    doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
    doc = flags.text_wrap(doc, indent=' ', firstline_indent='')
  else:
    # Replace all '%s' with sys.argv[0], and all '%%' with '%'.
    num_specifiers = doc.count('%') - 2 * doc.count('%%')
    try:
      doc %= (sys.argv[0],) * num_specifiers
    except (OverflowError, TypeError, ValueError):
      # Just display the docstring as-is.
      pass
  if shorthelp:
    flag_str = FLAGS.main_module_help()
  else:
    flag_str = FLAGS.get_help()
  try:
    stdfile.write(doc)
    if flag_str:
      stdfile.write('\nflags:\n')
      stdfile.write(flag_str)
    stdfile.write('\n')
    if detailed_error is not None:
      stdfile.write('\n%s\n' % detailed_error)
  except IOError as e:
    # We avoid printing a huge backtrace if we get EPIPE, because
    # "foo.par --help | less" is a frequent use case.
    if e.errno != errno.EPIPE:
      raise
  if exitcode is not None:
    sys.exit(exitcode)
175,807
Installs an exception handler. Args: handler: ExceptionHandler, the exception handler to install. Raises: TypeError: Raised when the handler was not of the correct type. All installed exception handlers will be called if main() exits via an abnormal exception, i.e. not one of SystemExit, KeyboardInterrupt, FlagsError or UsageError.
def install_exception_handler(handler):
  """Installs an exception handler.

  All installed exception handlers will be called if main() exits via an
  abnormal exception, i.e. not one of SystemExit, KeyboardInterrupt,
  FlagsError or UsageError.

  Args:
    handler: ExceptionHandler, the exception handler to install.

  Raises:
    TypeError: Raised when the handler was not of the correct type.
  """
  if not isinstance(handler, ExceptionHandler):
    raise TypeError(
        'handler of type %s does not inherit from ExceptionHandler'
        % type(handler))
  EXCEPTION_HANDLERS.append(handler)
175,808
Parses one or more arguments with the installed parser. Args: arguments: a single argument or a list of arguments (typically a list of default values); a single argument is converted internally into a list containing one item.
def parse(self, arguments):
  """Parses one or more arguments with the installed parser.

  Args:
    arguments: a single argument or a list of arguments (typically a
      list of default values); a single argument is converted
      internally into a list containing one item.
  """
  parsed = self._parse(arguments)
  if self.present:
    # The flag has already been seen: accumulate the new occurrences.
    self.value.extend(parsed)
  else:
    self.value = parsed
  self.present += len(parsed)
175,819
Ensures that flags are not None during program execution. Recommended usage: if __name__ == '__main__': flags.mark_flags_as_required(['flag1', 'flag2', 'flag3']) app.run() Args: flag_names: Sequence[str], names of the flags. flag_values: flags.FlagValues, optional FlagValues instance where the flags are defined. Raises: AttributeError: If any of flag name has not already been defined as a flag.
def mark_flags_as_required(flag_names, flag_values=_flagvalues.FLAGS):
  """Ensures that flags are not None during program execution.

  Recommended usage:

    if __name__ == '__main__':
      flags.mark_flags_as_required(['flag1', 'flag2', 'flag3'])
      app.run()

  Args:
    flag_names: Sequence[str], names of the flags.
    flag_values: flags.FlagValues, optional FlagValues instance where the
      flags are defined.

  Raises:
    AttributeError: If any of flag name has not already been defined as a flag.
  """
  # Delegate to the single-flag variant, one flag at a time.
  for name in flag_names:
    mark_flag_as_required(name, flag_values)
175,825
Ensures that only one flag among flag_names is True. Args: flag_names: [str], names of the flags. required: bool. If true, exactly one flag must be True. Otherwise, at most one flag can be True, and it is valid for all flags to be False. flag_values: flags.FlagValues, optional FlagValues instance where the flags are defined.
def mark_bool_flags_as_mutual_exclusive(flag_names, required=False,
                                        flag_values=_flagvalues.FLAGS):
  """Ensures that only one flag among flag_names is True.

  Args:
    flag_names: [str], names of the flags.
    required: bool. If true, exactly one flag must be True. Otherwise, at
      most one flag can be True, and it is valid for all flags to be False.
    flag_values: flags.FlagValues, optional FlagValues instance where the
      flags are defined.
  """
  # Reject non-boolean flags up front, before registering the validator.
  for flag_name in flag_names:
    if not flag_values[flag_name].boolean:
      raise _exceptions.ValidationError(
          'Flag --{} is not Boolean, which is required for flags used in '
          'mark_bool_flags_as_mutual_exclusive.'.format(flag_name))

  def validate_boolean_mutual_exclusion(flags_dict):
    flag_count = sum(1 for val in flags_dict.values() if val)
    if flag_count > 1 or (required and flag_count == 0):
      raise _exceptions.ValidationError(
          '{} one of ({}) must be True.'.format(
              'Exactly' if required else 'At most', ', '.join(flag_names)))
    return True

  register_multi_flags_validator(
      flag_names, validate_boolean_mutual_exclusion, flag_values=flag_values)
175,827
Register new flags validator to be checked. Args: fv: flags.FlagValues, the FlagValues instance to add the validator. validator_instance: validators.Validator, the validator to add. Raises: KeyError: Raised when validators work with a non-existing flag.
def _add_validator(fv, validator_instance):
  """Register new flags validator to be checked.

  Args:
    fv: flags.FlagValues, the FlagValues instance to add the validator.
    validator_instance: validators.Validator, the validator to add.

  Raises:
    KeyError: Raised when validators work with a non-existing flag.
  """
  # fv[name] raises KeyError for flags that were never defined.
  for name in validator_instance.get_flags_names():
    fv[name].validators.append(validator_instance)
175,828
Constructor to create all validators. Args: checker: function to verify the constraint. Input of this method varies, see SingleFlagValidator and multi_flags_validator for a detailed description. message: str, error message to be shown to the user.
def __init__(self, checker, message):
  """Constructor to create all validators.

  Args:
    checker: function to verify the constraint.  Input of this method
      varies, see SingleFlagValidator and multi_flags_validator for a
      detailed description.
    message: str, error message to be shown to the user.
  """
  self.message = message
  self.checker = checker
  # Bump the class-wide counter and remember our position in it, so that
  # validators can later be run in the order they were registered.
  Validator.validators_count += 1
  self.insertion_index = Validator.validators_count
175,829
Verifies that constraint is satisfied. flags library calls this method to verify Validator's constraint. Args: flag_values: flags.FlagValues, the FlagValues instance to get flags from. Raises: Error: Raised if constraint is not satisfied.
def verify(self, flag_values):
  """Verifies that constraint is satisfied.

  flags library calls this method to verify Validator's constraint.

  Args:
    flag_values: flags.FlagValues, the FlagValues instance to get flags from.

  Raises:
    Error: Raised if constraint is not satisfied.
  """
  checker_input = self._get_input_to_checker_function(flag_values)
  if self.checker(checker_input):
    return
  raise _exceptions.ValidationError(self.message)
175,830
Given flag values, returns the input to be given to checker. Args: flag_values: flags.FlagValues, the FlagValues instance to get flags from. Returns: dict, with keys() being self.flag_names, and value for each key being the value of the corresponding flag (string, boolean, etc).
def _get_input_to_checker_function(self, flag_values):
  """Given flag values, returns the input to be given to checker.

  Args:
    flag_values: flags.FlagValues, the FlagValues instance to get flags from.

  Returns:
    dict, with keys() being self.flag_names, and value for each key being
    the value of the corresponding flag (string, boolean, etc).
  """
  return {name: flag_values[name].value for name in self.flag_names}
175,833
Sets the logging verbosity. Causes all messages of level <= v to be logged, and all messages of level > v to be silently discarded. Args: v: int|str, the verbosity level as an integer or string. Legal string values are those that can be coerced to an integer as well as case-insensitive 'debug', 'info', 'warning', 'error', and 'fatal'.
def set_verbosity(v):
  """Sets the logging verbosity.

  Causes all messages of level <= v to be logged, and all messages of
  level > v to be silently discarded.

  Args:
    v: int|str, the verbosity level as an integer or string.  Legal string
      values are those that can be coerced to an integer as well as
      case-insensitive 'debug', 'info', 'warning', 'error', and 'fatal'.
  """
  try:
    level = int(v)
  except ValueError:
    # Not numeric: look the value up as a named absl level.
    level = converter.ABSL_NAMES[v.upper()]
  FLAGS.verbosity = level
175,837
Sets the stderr threshold to the value passed in. Args: s: str|int, valid strings values are case-insensitive 'debug', 'info', 'warning', 'error', and 'fatal'; valid integer values are logging.DEBUG|INFO|WARNING|ERROR|FATAL. Raises: ValueError: Raised when s is an invalid value.
def set_stderrthreshold(s):
  """Sets the stderr threshold to the value passed in.

  Args:
    s: str|int, valid strings values are case-insensitive 'debug', 'info',
      'warning', 'error', and 'fatal'; valid integer values are
      logging.DEBUG|INFO|WARNING|ERROR|FATAL.

  Raises:
    ValueError: Raised when s is an invalid value.
  """
  # Integer absl level: translate to its canonical name.
  if s in converter.ABSL_LEVELS:
    FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
    return
  # Named level: stored as-is (case-insensitively validated).
  if isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
    FLAGS.stderrthreshold = s
    return
  raise ValueError(
      'set_stderrthreshold only accepts integer absl logging level '
      'from -3 to 1, or case-insensitive string values '
      "'debug', 'info', 'warning', 'error', and 'fatal'. "
      'But found "{}" ({}).'.format(s, type(s)))
175,838
Logs 'msg % args' at level 'level' once per 'n' times. Logs the 1st call, (N+1)st call, (2N+1)st call, etc. Not threadsafe. Args: level: int, the absl logging level at which to log. msg: str, the message to be logged. n: int, the number of times this should be called before it is logged. *args: The args to be substituted into the msg.
def log_every_n(level, msg, n, *args):
  """Logs 'msg % args' at level 'level' once per 'n' times.

  Logs the 1st call, (N+1)st call, (2N+1)st call,  etc.
  Not threadsafe.

  Args:
    level: int, the absl logging level at which to log.
    msg: str, the message to be logged.
    n: int, the number of times this should be called before it is logged.
    *args: The args to be substituted into the msg.
  """
  # A separate counter is kept per call site (file + line).
  count = _get_next_log_count_per_token(get_absl_logger().findCaller())
  should_log = count % n == 0
  log_if(level, msg, should_log, *args)
175,839
Tests if 'num_seconds' have passed since 'token' was requested. Not strictly thread-safe - may log with the wrong frequency if called concurrently from multiple threads. Accuracy depends on resolution of 'timeit.default_timer()'. Always returns True on the first call for a given 'token'. Args: token: The token for which to look up the count. num_seconds: The number of seconds to test for. Returns: Whether it has been >= 'num_seconds' since 'token' was last requested.
def _seconds_have_elapsed(token, num_seconds):
  """Tests if 'num_seconds' have passed since 'token' was requested.

  Not strictly thread-safe - may log with the wrong frequency if called
  concurrently from multiple threads. Accuracy depends on resolution of
  'timeit.default_timer()'.

  Always returns True on the first call for a given 'token'.

  Args:
    token: The token for which to look up the count.
    num_seconds: The number of seconds to test for.

  Returns:
    Whether it has been >= 'num_seconds' since 'token' was last requested.
  """
  now = timeit.default_timer()
  then = _log_timer_per_token.get(token, None)
  if then is not None and (now - then) < num_seconds:
    # Seen recently: not enough time has passed.
    return False
  _log_timer_per_token[token] = now
  return True
175,840
Logs 'msg % args' at level 'level' iff 'n_seconds' elapsed since last call. Logs the first call, logs subsequent calls if 'n' seconds have elapsed since the last logging call from the same call site (file + line). Not thread-safe. Args: level: int, the absl logging level at which to log. msg: str, the message to be logged. n_seconds: float or int, seconds which should elapse before logging again. *args: The args to be substituted into the msg.
def log_every_n_seconds(level, msg, n_seconds, *args):
  """Logs 'msg % args' at level 'level' iff 'n_seconds' elapsed since last call.

  Logs the first call, logs subsequent calls if 'n' seconds have elapsed
  since the last logging call from the same call site (file + line).
  Not thread-safe.

  Args:
    level: int, the absl logging level at which to log.
    msg: str, the message to be logged.
    n_seconds: float or int, seconds which should elapse before logging again.
    *args: The args to be substituted into the msg.
  """
  # The caller's location keys the per-call-site timer.
  token = get_absl_logger().findCaller()
  log_if(level, msg, _seconds_have_elapsed(token, n_seconds), *args)
175,841
Checks if vlog is enabled for the given level in caller's source file. Args: level: int, the C++ verbose logging level at which to log the message, e.g. 1, 2, 3, 4... While absl level constants are also supported, callers should prefer level_debug|level_info|... calls for checking those. Returns: True if logging is turned on for that level.
def vlog_is_on(level):
  """Checks if vlog is enabled for the given level in caller's source file.

  Args:
    level: int, the C++ verbose logging level at which to log the message,
      e.g. 1, 2, 3, 4... While absl level constants are also supported,
      callers should prefer level_debug|level_info|... calls for
      checking those.

  Returns:
    True if logging is turned on for that level.
  """
  if level > converter.ABSL_DEBUG:
    # Even though this function supports level that is greater than 1, users
    # should use logging.vlog instead for such cases.
    # Treat this as vlog, 1 is equivalent to DEBUG.
    standard_level = converter.STANDARD_DEBUG - (level - 1)
  else:
    # Clamp to FATAL (the most severe absl level) before converting.
    standard_level = converter.absl_to_standard(
        max(level, converter.ABSL_FATAL))
  return _absl_logger.isEnabledFor(standard_level)
175,844
Returns the name of the log file. For Python logging, only one file is used and level is ignored. And it returns empty string if it logs to stderr/stdout or the log stream has no `name` attribute. Args: level: int, the absl.logging level. Raises: ValueError: Raised when `level` has an invalid value.
def get_log_file_name(level=INFO):
  """Returns the name of the log file.

  For Python logging, only one file is used and level is ignored. And it
  returns empty string if it logs to stderr/stdout or the log stream has
  no `name` attribute.

  Args:
    level: int, the absl.logging level.

  Raises:
    ValueError: Raised when `level` has an invalid value.
  """
  if level not in converter.ABSL_LEVELS:
    raise ValueError('Invalid absl.logging level {}'.format(level))
  stream = get_absl_handler().python_handler.stream
  # Streams without a name (or the standard streams) have no file name.
  if stream in (sys.stderr, sys.stdout) or not hasattr(stream, 'name'):
    return ''
  return stream.name
175,845
Returns the most suitable directory to put log files into. Args: log_dir: str|None, if specified, the logfile(s) will be created in that directory. Otherwise if the --log_dir command-line flag is provided, the logfile will be created in that directory. Otherwise the logfile will be created in a standard location.
def find_log_dir(log_dir=None):
  """Returns the most suitable directory to put log files into.

  Args:
    log_dir: str|None, if specified, the logfile(s) will be created in that
      directory.  Otherwise if the --log_dir command-line flag is provided,
      the logfile will be created in that directory.  Otherwise the logfile
      will be created in a standard location.
  """
  # Build the candidate list in priority order.
  if log_dir:
    # log_dir was explicitly specified as an arg, so use it and it alone.
    candidates = [log_dir]
  elif FLAGS['log_dir'].value:
    # log_dir flag was provided, so use it and it alone (this mimics the
    # behavior of the same flag in logging.cc).
    candidates = [FLAGS['log_dir'].value]
  else:
    candidates = ['/tmp/', './']
  # Return the first candidate that is a writable directory.
  for candidate in candidates:
    if os.path.isdir(candidate) and os.access(candidate, os.W_OK):
      return candidate
  _absl_logger.fatal("Can't find a writable directory for logs, tried %s",
                     candidates)
175,847
Returns the absl log prefix for the log record. Args: record: logging.LogRecord, the record to get prefix for.
def get_absl_log_prefix(record):
  """Returns the absl log prefix for the log record.

  Args:
    record: logging.LogRecord, the record to get prefix for.
  """
  created_tuple = time.localtime(record.created)
  # Sub-second part of the timestamp, expressed in microseconds.
  created_microsecond = int(record.created % 1.0 * 1e6)

  critical_prefix = ''
  level = record.levelno
  if _is_non_absl_fatal_record(record):
    # When the level is FATAL, but not logged from absl, lower the level so
    # it's treated as ERROR.
    level = logging.ERROR
    critical_prefix = _CRITICAL_PREFIX
  severity = converter.get_initial_for_level(level)

  # Prefix layout (glog-compatible):
  # <severity initial><mmdd> <hh:mm:ss.microseconds> <thread id>
  # <filename>:<line>] <optional critical prefix>
  return '%c%02d%02d %02d:%02d:%02d.%06d %5d %s:%d] %s' % (
      severity,
      created_tuple.tm_mon,
      created_tuple.tm_mday,
      created_tuple.tm_hour,
      created_tuple.tm_min,
      created_tuple.tm_sec,
      created_microsecond,
      _get_thread_id(),
      record.filename,
      record.lineno,
      critical_prefix)
175,848
Emits the record to stderr. This temporarily sets the handler stream to stderr, calls StreamHandler.emit, then reverts the stream back. Args: record: logging.LogRecord, the record to log.
def _log_to_stderr(self, record):
  """Emits the record to stderr.

  This temporarily sets the handler stream to stderr, calls
  StreamHandler.emit, then reverts the stream back.

  Args:
    record: logging.LogRecord, the record to log.
  """
  # emit() is protected by a lock in logging.Handler, so we don't need to
  # protect here again.
  saved_stream = self.stream
  self.stream = sys.stderr
  try:
    super(PythonHandler, self).emit(record)
  finally:
    # Restore the original stream even when emit raises.
    self.stream = saved_stream
175,861
Prints a record out to some streams. If FLAGS.logtostderr is set, it will print to sys.stderr ONLY. If FLAGS.alsologtostderr is set, it will print to sys.stderr. If FLAGS.logtostderr is not set, it will log to the stream associated with the current thread. Args: record: logging.LogRecord, the record to emit.
def emit(self, record):
  """Prints a record out to some streams.

  If FLAGS.logtostderr is set, it will print to sys.stderr ONLY.
  If FLAGS.alsologtostderr is set, it will print to sys.stderr.
  If FLAGS.logtostderr is not set, it will log to the stream associated
  with the current thread.

  Args:
    record: logging.LogRecord, the record to emit.
  """
  # People occasionally call logging functions at import time before
  # our flags may have even been defined yet, let alone even parsed, as we
  # rely on the C++ side to define some flags for us and app init to
  # deal with parsing. Match the C++ library behavior of notify and emit
  # such messages to stderr. It encourages people to clean-up and does
  # not hide the message.
  level = record.levelno
  if not FLAGS.is_parsed():  # Also implies "before flag has been defined".
    global _warn_preinit_stderr
    if _warn_preinit_stderr:
      sys.stderr.write(
          'WARNING: Logging before flag parsing goes to stderr.\n')
      _warn_preinit_stderr = False
    self._log_to_stderr(record)
  elif FLAGS['logtostderr'].value:
    self._log_to_stderr(record)
  else:
    super(PythonHandler, self).emit(record)
    stderr_threshold = converter.string_to_standard(
        FLAGS['stderrthreshold'].value)
    # Mirror to stderr when requested (or when severe enough), but only if
    # the record was not already written to stderr above.
    if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and
        self.stream != sys.stderr):
      self._log_to_stderr(record)
  # Die when the record is created from ABSLLogger and level is FATAL.
  if _is_absl_fatal_record(record):
    self.flush()  # Flush the log before dying.
    # In threaded python, sys.exit() from a non-main thread only
    # exits the thread in question.
    os.abort()
175,862
Appends the message from the record to the results of the prefix. Args: record: logging.LogRecord, the record to be formatted. Returns: The formatted string representing the record.
def format(self, record):
  """Appends the message from the record to the results of the prefix.

  Args:
    record: logging.LogRecord, the record to be formatted.

  Returns:
    The formatted string representing the record.
  """
  # Plain INFO messages printed to stderr get no prefix when the user asked
  # for no prefix and verbosity is at the default INFO level.
  suppress_prefix = (
      not FLAGS['showprefixforinfo'].value and
      FLAGS['verbosity'].value == converter.ABSL_INFO and
      record.levelno == logging.INFO and
      _absl_handler.python_handler.stream == sys.stderr)
  prefix = '' if suppress_prefix else get_absl_log_prefix(record)
  return prefix + super(PythonFormatter, self).format(record)
175,866
Logs a message at a certain level substituting in the supplied arguments. This method behaves differently in python and c++ modes. Args: level: int, the standard logging level at which to log the message. msg: str, the text of the message to log. *args: The arguments to substitute in the message. **kwargs: The keyword arguments to substitute in the message.
def log(self, level, msg, *args, **kwargs):
  """Logs a message at a certain level substituting in the supplied arguments.

  This method behaves differently in python and c++ modes.

  Args:
    level: int, the standard logging level at which to log the message.
    msg: str, the text of the message to log.
    *args: The arguments to substitute in the message.
    **kwargs: The keyword arguments to substitute in the message.
  """
  if level >= logging.FATAL:
    # Add property to the LogRecord created by this logger.
    # This will be used by the ABSLHandler to determine whether it should
    # treat CRITICAL/FATAL logs as really FATAL.
    kwargs.setdefault('extra', {})[_ABSL_LOG_FATAL] = True
  super(ABSLLogger, self).log(level, msg, *args, **kwargs)
175,870
Initializes ArgumentParser. Args: **kwargs: same as argparse.ArgumentParser, except: 1. It also accepts `inherited_absl_flags`: the absl flags to inherit. The default is the global absl.flags.FLAGS instance. Pass None to ignore absl flags. 2. The `prefix_chars` argument must be the default value '-'. Raises: ValueError: Raised when prefix_chars is not '-'.
def __init__(self, **kwargs):
  """Initializes ArgumentParser.

  Args:
    **kwargs: same as argparse.ArgumentParser, except:
      1. It also accepts `inherited_absl_flags`: the absl flags to inherit.
         The default is the global absl.flags.FLAGS instance. Pass None to
         ignore absl flags.
      2. The `prefix_chars` argument must be the default value '-'.

  Raises:
    ValueError: Raised when prefix_chars is not '-'.
  """
  prefix_chars = kwargs.get('prefix_chars', '-')
  if prefix_chars != '-':
    raise ValueError(
        'argparse_flags.ArgumentParser only supports "-" as the prefix '
        'character, found "{}".'.format(prefix_chars))

  # Remove inherited_absl_flags before calling super.
  self._inherited_absl_flags = kwargs.pop('inherited_absl_flags', flags.FLAGS)
  # Now call super to initialize argparse.ArgumentParser before calling
  # add_argument in _define_absl_flags.
  super(ArgumentParser, self).__init__(**kwargs)

  if self.add_help:
    # -h and --help are defined in super.
    # Also add the --helpshort and --helpfull flags.
    self.add_argument(
        # Action 'help' defines a similar flag to -h/--help.
        '--helpshort', action='help',
        default=argparse.SUPPRESS, help=argparse.SUPPRESS)
    self.add_argument(
        '--helpfull', action=_HelpFullAction,
        default=argparse.SUPPRESS, help='show full help message and exit')
  if self._inherited_absl_flags:
    # --undefok lets callers name flags that may legitimately be undefined.
    self.add_argument('--undefok', help=argparse.SUPPRESS)
    self._define_absl_flags(self._inherited_absl_flags)
175,874
Initializes _FlagAction. Args: option_strings: See argparse.Action. dest: Ignored. The flag is always defined with dest=argparse.SUPPRESS. help: See argparse.Action. metavar: See argparse.Action. flag_instance: absl.flags.Flag, the absl flag instance.
def __init__(self, option_strings, dest, help, metavar, flag_instance):  # pylint: disable=redefined-builtin
  """Initializes _FlagAction.

  Args:
    option_strings: See argparse.Action.
    dest: Ignored. The flag is always defined with dest=argparse.SUPPRESS.
    help: See argparse.Action.
    metavar: See argparse.Action.
    flag_instance: absl.flags.Flag, the absl flag instance.
  """
  del dest  # The absl flag instance owns the parsed value, not argparse.
  self._flag_instance = flag_instance
  super(_FlagAction, self).__init__(
      option_strings=option_strings,
      dest=argparse.SUPPRESS,
      help=help,
      metavar=metavar)
175,878
Initializes _BooleanFlagAction. Args: option_strings: See argparse.Action. dest: Ignored. The flag is always defined with dest=argparse.SUPPRESS. help: See argparse.Action. metavar: See argparse.Action. flag_instance: absl.flags.Flag, the absl flag instance.
def __init__(self, option_strings, dest, help, metavar, flag_instance):  # pylint: disable=redefined-builtin
  """Initializes _BooleanFlagAction.

  Args:
    option_strings: See argparse.Action.
    dest: Ignored. The flag is always defined with dest=argparse.SUPPRESS.
    help: See argparse.Action.
    metavar: See argparse.Action.
    flag_instance: absl.flags.Flag, the absl flag instance.
  """
  del dest  # The absl flag instance owns the parsed value, not argparse.
  self._flag_instance = flag_instance
  # Remember every spelling of this flag (long and, if any, short name).
  names = [self._flag_instance.name]
  if self._flag_instance.short_name:
    names.append(self._flag_instance.short_name)
  self._flag_names = frozenset(names)
  super(_BooleanFlagAction, self).__init__(
      option_strings=option_strings,
      dest=argparse.SUPPRESS,
      nargs=0,  # Does not accept values, only `--bool` or `--nobool`.
      help=help,
      metavar=metavar)
175,880
Returns the module that defines a global environment, and its name. Args: globals_dict: A dictionary that should correspond to an environment providing the values of the globals. Returns: _ModuleObjectAndName - pair of module object & module name. Returns (None, None) if the module could not be identified.
def get_module_object_and_name(globals_dict):
  """Returns the module that defines a global environment, and its name.

  Args:
    globals_dict: A dictionary that should correspond to an environment
      providing the values of the globals.

  Returns:
    _ModuleObjectAndName - pair of module object & module name.
    Returns (None, None) if the module could not be identified.
  """
  name = globals_dict.get('__name__', None)
  module = sys.modules.get(name, None)
  if name == '__main__':
    # Pick a more informative name for the main module.
    name = sys.argv[0]
  return _ModuleObjectAndName(module, name)
175,883
Returns an XML DOM element with name and text value. Args: doc: minidom.Document, the DOM document it should create nodes from. name: str, the tag of XML element. value: object, whose string representation will be used as the value of the XML element. Illegal or highly discouraged xml 1.0 characters are stripped. Returns: An instance of minidom.Element.
def create_xml_dom_element(doc, name, value):
  """Returns an XML DOM element with name and text value.

  Args:
    doc: minidom.Document, the DOM document it should create nodes from.
    name: str, the tag of XML element.
    value: object, whose string representation will be used
      as the value of the XML element. Illegal or highly discouraged xml 1.0
      characters are stripped.

  Returns:
    An instance of minidom.Element.
  """
  text = str_or_unicode(value)
  if six.PY2 and not isinstance(text, unicode):
    # Get a valid unicode string.
    text = text.decode('utf-8', 'ignore')
  if isinstance(value, bool):
    # Display boolean values as the C++ flag library does: no caps.
    text = text.lower()
  # Remove illegal xml characters.
  text = _ILLEGAL_XML_CHARS_REGEX.sub(u'', text)

  element = doc.createElement(name)
  element.appendChild(doc.createTextNode(text))
  return element
175,885
Removes indentation from triple-quoted strings. This is the function specified in PEP 257 to handle docstrings: https://www.python.org/dev/peps/pep-0257/. Args: docstring: str, a python docstring. Returns: str, docstring with indentation removed.
def trim_docstring(docstring):
  """Removes indentation from triple-quoted strings.

  This is the function specified in PEP 257 to handle docstrings:
  https://www.python.org/dev/peps/pep-0257/.

  Args:
    docstring: str, a python docstring.

  Returns:
    str, docstring with indentation removed.
  """
  if not docstring:
    return ''

  # If you've got a line longer than this you have other problems...
  max_indent = 1 << 29

  # Convert tabs to spaces (following the normal Python rules)
  # and split into a list of lines:
  lines = docstring.expandtabs().splitlines()

  # Determine minimum indentation (first line doesn't count):
  indent = max_indent
  for line in lines[1:]:
    stripped = line.lstrip()
    if stripped:
      indent = min(indent, len(line) - len(stripped))

  # Remove indentation (first line is special):
  trimmed = [lines[0].strip()]
  if indent < max_indent:
    trimmed.extend(line[indent:].rstrip() for line in lines[1:])

  # Strip off trailing and leading blank lines:
  while trimmed and not trimmed[-1]:
    trimmed.pop()
  while trimmed and not trimmed[0]:
    trimmed.pop(0)
  return '\n'.join(trimmed)
175,890
Init the pool from a json file. Args: filename (str, optional): if the filename is provided, proxies will be load from it.
def __init__(self, filename=None):
    """Init the pool from a json file.

    Args:
        filename (str, optional): if the filename is provided, proxies
            will be load from it.
    """
    # Round-robin cursors into addr_list, one per protocol.
    self.idx = {'http': 0, 'https': 0}
    # URLs used to probe whether a proxy of each protocol works.
    self.test_url = {
        'http': 'http://www.sina.com.cn',
        'https': 'https://www.taobao.com'
    }
    self.proxies = {'http': {}, 'https': {}}
    self.addr_list = {'http': [], 'https': []}
    # Multiplicative weight-update factors and the eviction threshold.
    self.dec_ratio = 0.9
    self.inc_ratio = 1 / self.dec_ratio
    self.weight_thr = 0.2
    self.logger = logging.getLogger(__name__)
    if filename is not None:
        self.load(filename)
176,097
Get the number of proxies in the pool Args: protocol (str, optional): 'http' or 'https' or None. (default None) Returns: If protocol is None, return the total number of proxies, otherwise, return the number of proxies of corresponding protocol.
def proxy_num(self, protocol=None):
    """Get the number of proxies in the pool

    Args:
        protocol (str, optional): 'http' or 'https' or None. (default None)

    Returns:
        If protocol is None, return the total number of proxies, otherwise,
        return the number of proxies of corresponding protocol.
    """
    counts = {scheme: len(self.proxies[scheme])
              for scheme in ('http', 'https')}
    if protocol in counts:
        return counts[protocol]
    # No (or unknown) protocol given: report the pool-wide total.
    return counts['http'] + counts['https']
176,098
Check if a proxy is valid Args: addr: A string in the form of 'ip:port' protocol: Either 'http' or 'https', different test urls will be used according to protocol. timeout: An integer indicating the timeout of connecting the test url. Returns: dict: If the proxy is valid, returns {'valid': True, 'response_time': xx} otherwise returns {'valid': False, 'msg': 'xxxxxx'}.
def is_valid(self, addr, protocol='http', timeout=5):
    """Check if a proxy is valid

    Args:
        addr: A string in the form of 'ip:port'
        protocol: Either 'http' or 'https', different test urls will be
            used according to protocol.
        timeout: An integer indicating the timeout of connecting the
            test url.

    Returns:
        dict: If the proxy is valid, returns
            {'valid': True, 'response_time': xx} otherwise returns
            {'valid': False, 'msg': 'xxxxxx'}.
    """
    start = time.time()
    try:
        r = requests.get(
            self.test_url[protocol],
            timeout=timeout,
            proxies={protocol: 'http://' + addr})
    except requests.exceptions.Timeout:
        return {'valid': False, 'msg': 'timeout'}
    # Fix: the original used a bare `except:` (with an explicit
    # KeyboardInterrupt re-raise). Catching Exception instead lets
    # KeyboardInterrupt and SystemExit propagate naturally while still
    # treating any request failure as an invalid proxy.
    except Exception:
        return {'valid': False, 'msg': 'exception'}
    if r.status_code == 200:
        response_time = time.time() - start
        return {'valid': True, 'response_time': response_time}
    return {
        'valid': False,
        'msg': 'status code: {}'.format(r.status_code)
    }
176,106
Target function of validation threads Args: proxy_scanner: A ProxyScanner object. expected_num: Max number of valid proxies to be scanned. queue_timeout: Timeout for getting a proxy from the queue. val_timeout: An integer passed to `is_valid` as argument `timeout`.
def validate(self, proxy_scanner, expected_num=20, queue_timeout=3,
             val_timeout=5):
    """Target function of validation threads

    Args:
        proxy_scanner: A ProxyScanner object.
        expected_num: Max number of valid proxies to be scanned.
        queue_timeout: Timeout for getting a proxy from the queue.
        val_timeout: An integer passed to `is_valid` as argument `timeout`.
    """
    while self.proxy_num() < expected_num:
        try:
            candidate_proxy = proxy_scanner.proxy_queue.get(
                timeout=queue_timeout)
        except queue.Empty:
            # Queue drained: keep waiting while the scanner is still
            # producing candidates, otherwise there is nothing left to do.
            if proxy_scanner.is_scanning():
                continue
            else:
                break
        addr = candidate_proxy['addr']
        protocol = candidate_proxy['protocol']
        ret = self.is_valid(addr, protocol, val_timeout)
        # Re-check after the (slow) validation: another thread may have
        # filled the pool in the meantime.
        if self.proxy_num() >= expected_num:
            self.logger.info('Enough valid proxies, thread {} exit.'
                             .format(threading.current_thread().name))
            break
        if ret['valid']:
            self.add_proxy(Proxy(addr, protocol))
            self.logger.info('{} ok, {:.2f}s'.format(addr, ret[
                'response_time']))
        else:
            self.logger.info('{} invalid, {}'.format(addr, ret['msg']))
176,107
Register a scan function Args: func_name: The function name of a scan function. func_kwargs: A dict containing arguments of the scan function.
def register_func(self, func_name, func_kwargs):
    """Register a scan function

    Args:
        func_name: The function name of a scan function.
        func_kwargs: A dict containing arguments of the scan function.
    """
    # Names and kwargs live in two parallel lists; entry i of each
    # describes one registered scan call.
    self.scan_funcs.append(func_name)
    self.scan_kwargs.append(func_kwargs)
176,111
Scan candidate proxies from http://ip84.com Args: region: Either 'mainland' or 'overseas'. page: An integer indicating how many pages to be scanned.
def scan_ip84(self, region='mainland', page=1):
    """Scan candidate proxies from http://ip84.com.

    Parsed proxies are pushed onto ``self.proxy_queue`` as
    ``{'addr': 'ip:port', 'protocol': 'http'|'https'}`` dicts.

    Args:
        region: Either 'mainland' or 'overseas'; any other value scans the
            site's generic national list.
        page: An integer indicating how many pages to be scanned.
    """
    self.logger.info('start scanning http://ip84.com for proxy list...')
    for i in range(1, page + 1):
        if region == 'mainland':
            url = 'http://ip84.com/dlgn/{}'.format(i)
        elif region == 'overseas':
            url = 'http://ip84.com/gwgn/{}'.format(i)
        else:
            url = 'http://ip84.com/gn/{}'.format(i)
        response = requests.get(url)
        soup = BeautifulSoup(response.content, 'lxml')
        table = soup.find('table', class_='list')
        for tr in table.find_all('tr'):
            # rows containing <th> cells are table headers, not proxies
            if tr.th is not None:
                continue
            info = tr.find_all('td')
            # column layout: 0=ip, 1=port, 4=protocol (site-specific)
            protocol = info[4].string.lower()
            addr = '{}:{}'.format(info[0].string, info[1].string)
            self.proxy_queue.put({'addr': addr, 'protocol': protocol})
176,112
Check whether the item has been in the cache. If the item has not been seen before, then hash it and put it into the cache, otherwise indicates the item is duplicated. When the cache size exceeds capacity, discard the earliest items in the cache. Args: item (object): The item to be checked and stored in cache. It must be immutable or a list/dict. Returns: bool: Whether the item has been in cache.
def is_duplicated(self, item):
    """Check whether ``item`` has been seen before, recording it if not.

    Dicts are canonicalised via sorted JSON and lists via ``frozenset``
    (order- and multiplicity-insensitive) so they become hashable cache
    keys. When the cache exceeds ``cache_capacity`` the oldest entry is
    evicted.

    Args:
        item (object): The item to check; must be immutable or a list/dict.

    Returns:
        bool: True if the item was already in the cache.
    """
    if isinstance(item, dict):
        cache_key = json.dumps(item, sort_keys=True)
    elif isinstance(item, list):
        cache_key = frozenset(item)
    else:
        cache_key = item
    if cache_key in self._cache:
        return True
    over_capacity = (self.cache_capacity > 0 and
                     len(self._cache) >= self.cache_capacity)
    if over_capacity:
        # FIFO eviction: drop the earliest-inserted entry
        self._cache.popitem(False)
    self._cache[cache_key] = 1
    return False
176,118
Set signals. Args: signals: A dict(key-value pairs) of all signals. For example {'signal1': True, 'signal2': 10}
def set(self, **signals):
    """Set signal values.

    The first value ever seen for a name is also remembered in
    ``_init_status`` so the signal can later be reset to it.

    Args:
        signals: A dict(key-value pairs) of all signals. For example
            {'signal1': True, 'signal2': 10}
    """
    for name, value in signals.items():
        if name not in self._signals:
            # first assignment for this name: remember the initial status
            self._init_status[name] = value
        self._signals[name] = value
176,127
Feed urls once Args: url_template: A string with parameters replaced with "{}". keyword: A string indicating the searching keyword. offset: An integer indicating the starting index. max_num: An integer indicating the max number of images to be crawled. page_step: An integer added to offset after each iteration.
def feed(self, url_template, keyword, offset, max_num, page_step):
    """Feed urls once.

    Args:
        url_template: A string with parameters replaced with "{}".
        keyword: A string indicating the searching keyword.
        offset: An integer indicating the starting index.
        max_num: An integer indicating the max number of images to be
            crawled.
        page_step: An integer added to offset after each iteration.
    """
    for page_offset in range(offset, offset + max_num, page_step):
        url = url_template.format(keyword, page_offset)
        self.out_queue.put(url)
        self.logger.debug('put url to url_queue: {}'.format(url))
176,149
Connect two ThreadPools. The ``in_queue`` of the second pool will be set as the ``out_queue`` of the current pool, thus all the output will be input to the second pool. Args: component (ThreadPool): the ThreadPool to be connected. Returns: ThreadPool: the modified second ThreadPool.
def connect(self, component):
    """Connect two ThreadPools.

    The ``in_queue`` of the second pool will be set as the ``out_queue``
    of the current pool, thus all the output will be input to the second
    pool.

    Args:
        component (ThreadPool): the ThreadPool to be connected.

    Returns:
        ThreadPool: the modified second ThreadPool.
    """
    if isinstance(component, ThreadPool):
        # route this pool's output straight into the next pool's input
        component.in_queue = self.out_queue
        return component
    raise TypeError('"component" must be a ThreadPool object')
176,159
Set offset of file index. Args: file_idx_offset: It can be either an integer or 'auto'. If set to an integer, the filename will start from ``file_idx_offset`` + 1. If set to ``'auto'``, the filename will start from existing max file index plus 1.
def set_file_idx_offset(self, file_idx_offset=0):
    """Set offset of file index.

    Args:
        file_idx_offset: Either an integer (filenames start from
            ``file_idx_offset`` + 1) or ``'auto'`` (start from the existing
            max file index plus 1).
    """
    if file_idx_offset == 'auto':
        # continue numbering after the largest file already in storage
        self.file_idx_offset = self.storage.max_file_idx()
    elif isinstance(file_idx_offset, int):
        self.file_idx_offset = file_idx_offset
    else:
        raise ValueError('"file_idx_offset" must be an integer or `auto`')
176,161
Set the path where the image will be saved. The default strategy is to use an increasing 6-digit number as the filename. You can override this method if you want to set custom naming rules. The file extension is kept if it can be obtained from the url, otherwise ``default_ext`` is used as extension. Args: task (dict): The task dict got from ``task_queue``. Output: Filename with extension.
def get_filename(self, task, default_ext):
    """Build the save path for an image as an increasing 6-digit number.

    The file extension is taken from the url when present, otherwise
    ``default_ext`` is used. Override this method for custom naming rules.

    Args:
        task (dict): The task dict got from ``task_queue``.
        default_ext: Extension used when the url path has none.

    Returns:
        str: Filename with extension.
    """
    url_path = urlparse(task['file_url'])[2]
    if '.' in url_path:
        extension = url_path.split('.')[-1]
    else:
        extension = default_ext
    file_idx = self.fetched_num + self.file_idx_offset
    return '{:06d}.{}'.format(file_idx, extension)
176,162
Download the image and save it to the corresponding path. Args: task (dict): The task dict got from ``task_queue``. timeout (int): Timeout of making requests for downloading images. max_retry (int): the max retry times if the request fails. **kwargs: reserved arguments for overriding.
def download(self, task, default_ext, timeout=5, max_retry=3,
             overwrite=False, **kwargs):
    """Download the image and save it to the corresponding path.

    Args:
        task (dict): The task dict got from ``task_queue``; mutated in
            place with 'success' and 'filename' keys.
        default_ext: Extension used when the url path has none.
        timeout (int): Timeout of making requests for downloading images.
        max_retry (int): the max retry times if the request fails.
        overwrite (bool): If False, skip files that already exist in
            storage.
        **kwargs: forwarded to ``keep_file``.
    """
    file_url = task['file_url']
    task['success'] = False
    task['filename'] = None
    retry = max_retry

    if not overwrite:
        with self.lock:
            # tentatively claim the next index to compute the would-be
            # filename; roll back if we actually need to download
            self.fetched_num += 1
            filename = self.get_filename(task, default_ext)
            if self.storage.exists(filename):
                self.logger.info('skip downloading file %s', filename)
                return
            self.fetched_num -= 1

    while retry > 0 and not self.signal.get('reach_max_num'):
        try:
            response = self.session.get(file_url, timeout=timeout)
        except Exception as e:
            self.logger.error('Exception caught when downloading file %s, '
                              'error: %s, remaining retry times: %d',
                              file_url, e, retry - 1)
        else:
            if self.reach_max_num():
                # tell other threads to stop as well
                self.signal.set(reach_max_num=True)
                break
            elif response.status_code != 200:
                self.logger.error('Response status code %d, file %s',
                                  response.status_code, file_url)
                break
            elif not self.keep_file(task, response, **kwargs):
                break
            with self.lock:
                self.fetched_num += 1
                filename = self.get_filename(task, default_ext)
            self.logger.info('image #%s\t%s', self.fetched_num, file_url)
            self.storage.write(filename, response.content)
            task['success'] = True
            task['filename'] = filename
            break
        finally:
            # one attempt consumed regardless of outcome
            retry -= 1
176,164
Decide whether to keep the image Compare image size with ``min_size`` and ``max_size`` to decide. Args: response (Response): response of requests. min_size (tuple or None): minimum size of required images. max_size (tuple or None): maximum size of required images. Returns: bool: whether to keep the image.
def keep_file(self, task, response, min_size=None, max_size=None):
    """Decide whether to keep the downloaded image.

    Compares the image size with ``min_size`` and ``max_size``; also
    records the size into ``task['img_size']``.

    Args:
        task (dict): the task dict, mutated with 'img_size'.
        response (Response): response of requests.
        min_size (tuple or None): minimum size of required images.
        max_size (tuple or None): maximum size of required images.

    Returns:
        bool: whether to keep the image.
    """
    try:
        img = Image.open(BytesIO(response.content))
    except (IOError, OSError):
        # payload is not a decodable image
        return False
    task['img_size'] = img.size
    too_small = min_size and not self._size_gt(img.size, min_size)
    too_large = max_size and not self._size_lt(img.size, max_size)
    return not (too_small or too_large)
176,169
Set storage backend for downloader For full list of storage backend supported, please see :mod:`storage`. Args: storage (dict or BaseStorage): storage backend configuration or instance
def set_storage(self, storage):
    """Set storage backend for downloader.

    For full list of storage backend supported, please see :mod:`storage`.

    Args:
        storage (dict or BaseStorage): storage backend configuration or
            instance. A dict must contain a 'backend' key (class name in
            the storage package or an importable module path); remaining
            keys are passed to the backend constructor. As a shorthand,
            a dict with only 'root_dir' implies the 'FileSystem' backend.
    """
    if isinstance(storage, BaseStorage):
        self.storage = storage
    elif isinstance(storage, dict):
        if 'backend' not in storage and 'root_dir' in storage:
            storage['backend'] = 'FileSystem'
        try:
            backend_cls = getattr(storage_package, storage['backend'])
        except AttributeError:
            # not a bundled backend: try importing it as a module path
            try:
                backend_cls = import_module(storage['backend'])
            except ImportError:
                self.logger.error('cannot find backend module %s',
                                  storage['backend'])
                sys.exit()
        # everything except 'backend' is forwarded to the constructor
        kwargs = storage.copy()
        del kwargs['backend']
        self.storage = backend_cls(**kwargs)
    else:
        raise TypeError('"storage" must be a storage object or dict')
176,182
Init session with default or custom headers Args: headers: A dict of headers (default None, thus using the default header to init the session)
def set_session(self, headers=None):
    """Init session with default or custom headers.

    Args:
        headers: A dict of headers (default None, thus using the default
            header to init the session)
    """
    default_headers = {
        'User-Agent': ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3)'
                       ' AppleWebKit/537.36 (KHTML, like Gecko) '
                       'Chrome/48.0.2564.116 Safari/537.36')
    }
    if headers is None:
        headers = default_headers
    elif not isinstance(headers, dict):
        raise TypeError('"headers" must be a dict object')
    self.session = Session(self.proxy_pool)
    self.session.headers.update(headers)
176,183
Start crawling This method will start feeder, parser and download and wait until all threads exit. Args: feeder_kwargs (dict, optional): Arguments to be passed to ``feeder.start()`` parser_kwargs (dict, optional): Arguments to be passed to ``parser.start()`` downloader_kwargs (dict, optional): Arguments to be passed to ``downloader.start()``
def crawl(self, feeder_kwargs=None, parser_kwargs=None,
          downloader_kwargs=None):
    """Start crawling.

    Starts feeder, parser and downloader threads and waits until all of
    them exit, then drains any leftover queue buffers.

    Args:
        feeder_kwargs (dict, optional): Arguments to be passed to
            ``feeder.start()``
        parser_kwargs (dict, optional): Arguments to be passed to
            ``parser.start()``
        downloader_kwargs (dict, optional): Arguments to be passed to
            ``downloader.start()``
    """
    self.signal.reset()
    self.logger.info('start crawling...')

    # avoid mutable-default pitfalls by normalising to fresh dicts
    feeder_kwargs = {} if feeder_kwargs is None else feeder_kwargs
    parser_kwargs = {} if parser_kwargs is None else parser_kwargs
    downloader_kwargs = {} if downloader_kwargs is None else downloader_kwargs

    self.logger.info('starting %d feeder threads...',
                     self.feeder.thread_num)
    self.feeder.start(**feeder_kwargs)

    self.logger.info('starting %d parser threads...',
                     self.parser.thread_num)
    self.parser.start(**parser_kwargs)

    self.logger.info('starting %d downloader threads...',
                     self.downloader.thread_num)
    self.downloader.start(**downloader_kwargs)

    # poll the three stages; downstream stages learn about upstream
    # completion through the signal object
    while True:
        if not self.feeder.is_alive():
            self.signal.set(feeder_exited=True)
        if not self.parser.is_alive():
            self.signal.set(parser_exited=True)
        if not self.downloader.is_alive():
            break
        time.sleep(1)

    # drop whatever is still queued once all workers have stopped
    if not self.feeder.in_queue.empty():
        self.feeder.clear_buffer()
    if not self.parser.in_queue.empty():
        self.parser.clear_buffer()
    if not self.downloader.in_queue.empty():
        self.downloader.clear_buffer(True)

    self.logger.info('Crawling task done!')
176,184
CapitalFlow constructor. Args: * amount (float): Amount to adjust by
def __init__(self, amount):
    """CapitalFlow constructor.

    Args:
        * amount (float): Amount to adjust by
    """
    super(CapitalFlow, self).__init__()
    # coerce to float so later arithmetic is always on a float amount
    self.amount = float(amount)
176,231
Close a child position - alias for rebalance(0, child). This will also flatten (close out all) the child's children. Args: * child (str): Child, specified by name.
def close(self, child): c = self.children[child] # flatten if children not None if c.children is not None and len(c.children) != 0: c.flatten() if c.value != 0. and not np.isnan(c.value): c.allocate(-c.value)
176,259
Set commission (transaction fee) function. Args: fn (fn(quantity, price)): Function used to determine commission amount.
def set_commissions(self, fn):
    """Set commission (transaction fee) function.

    The function is also propagated recursively to all child strategies.

    Args:
        fn (fn(quantity, price)): Function used to determine commission
            amount.
    """
    self.commission_fn = fn
    for child in self._childrenv:
        if isinstance(child, StrategyBase):
            child.set_commissions(fn)
176,261
Setup Security with universe. Speeds up future runs. Args: * universe (DataFrame): DataFrame of prices with security's name as one of the columns.
def setup(self, universe):
    """Setup Security with universe. Speeds up future runs.

    Args:
        * universe (DataFrame): DataFrame of prices with security's name
          as one of the columns.
    """
    # if we already have all the prices, we will store them to speed up
    # future updates
    try:
        prices = universe[self.name]
    except KeyError:
        prices = None

    # setup internal data
    if prices is not None:
        self._prices = prices
        self.data = pd.DataFrame(index=universe.index,
                                 columns=['value', 'position'],
                                 data=0.0)
        self._prices_set = True
    else:
        # no price column for this security: keep a 'price' column of our
        # own that updates will fill in
        self.data = pd.DataFrame(index=universe.index,
                                 columns=['price', 'value', 'position'])
        self._prices = self.data['price']
        self._prices_set = False

    # keep live views into the DataFrame columns for fast updates
    self._values = self.data['value']
    self._positions = self.data['position']

    # add _outlay
    self.data['outlay'] = 0.
    self._outlays = self.data['outlay']
176,267
This allocates capital to the Security. This is the method used to buy/sell the security. A given amount of shares will be determined on the current price, a commission will be calculated based on the parent's commission fn, and any remaining capital will be passed back up to parent as an adjustment. Args: * amount (float): Amount of adjustment. * update (bool): Force update?
def allocate(self, amount, update=True):
    """Allocate capital to the Security (i.e. buy/sell it).

    The share quantity is derived from the current price, commission is
    computed via the parent's commission fn, and any remaining capital is
    passed back up to the parent as an adjustment.

    Args:
        * amount (float): Amount of adjustment.
        * update (bool): Force update?
    """
    # will need to update if this has been idle for a while...
    # update if needupdate or if now is stale
    # fetch parent's now since our now is stale
    if self._needupdate or self.now != self.parent.now:
        self.update(self.parent.now)

    # ignore 0 alloc
    # Note that if the price of security has dropped to zero, then it
    # should never be selected by SelectAll, SelectN etc. I.e. we should
    # not open the position at zero price. At the same time, we are able
    # to close it at zero price, because at that point amount=0.
    # Note also that we don't erase the position in an asset which price
    # has dropped to zero (though the weight will indeed be = 0)
    if amount == 0:
        return

    if self.parent is self or self.parent is None:
        raise Exception(
            'Cannot allocate capital to a parentless security')

    if self._price == 0 or np.isnan(self._price):
        raise Exception(
            'Cannot allocate capital to '
            '%s because price is %s as of %s'
            % (self.name, self._price, self.parent.now))

    # buy/sell
    # determine quantity - must also factor in commission
    # closing out?
    if amount == -self._value:
        q = -self._position
    else:
        q = amount / (self._price * self.multiplier)
        if self.integer_positions:
            if (self._position > 0) or ((self._position == 0) and (
                    amount > 0)):
                # if we're going long or changing long position
                q = math.floor(q)
            else:
                # if we're going short or changing short position
                q = math.ceil(q)

    # if q is 0 nothing to do
    if q == 0 or np.isnan(q):
        return

    # unless we are closing out a position (q == -position)
    # we want to ensure that
    #
    # - In the event of a positive amount, this indicates the maximum
    # amount a given security can use up for a purchase. Therefore, if
    # commissions push us above this amount, we cannot buy `q`, and must
    # decrease its value
    #
    # - In the event of a negative amount, we want to 'raise' at least the
    # amount indicated, no less. Therefore, if we have commission, we must
    # sell additional units to fund this requirement. As such, q must once
    # again decrease.
    #
    if not q == -self._position:
        full_outlay, _, _ = self.outlay(q)

        # if full outlay > amount, we must decrease the magnitude of `q`
        # this can potentially lead to an infinite loop if the commission
        # per share > price per share. However, we cannot really detect
        # that in advance since the function can be non-linear (say a fn
        # like max(1, abs(q) * 0.01). Nevertheless, we want to avoid these
        # situations.
        # cap the maximum number of iterations to 1e4 and raise exception
        # if we get there
        # if integer positions then we know we are stuck if q doesn't
        # change
        # if integer positions is false then we want full_outlay == amount
        # if integer positions is true then we want to be at the q where
        # if we bought 1 more then we wouldn't have enough cash
        i = 0
        last_q = q
        last_amount_short = full_outlay - amount
        while not np.isclose(full_outlay, amount, rtol=0.) and q != 0:
            # Newton-like step ignoring transaction costs
            dq_wout_considering_tx_costs = (
                (full_outlay - amount) / (self._price * self.multiplier))
            q = q - dq_wout_considering_tx_costs
            if self.integer_positions:
                q = math.floor(q)
            full_outlay, _, _ = self.outlay(q)

            # if our q is too low and we have integer positions
            # then we know that the correct quantity is the one where
            # the outlay of q + 1 < amount. i.e. if we bought one more
            # position then we wouldn't have enough cash
            if self.integer_positions:
                full_outlay_of_1_more, _, _ = self.outlay(q + 1)
                if full_outlay < amount and full_outlay_of_1_more > amount:
                    break

            # if not integer positions then we should keep going until
            # full_outlay == amount or is close enough
            i = i + 1
            if i > 1e4:
                raise Exception(
                    'Potentially infinite loop detected. This occurred '
                    'while trying to reduce the amount of shares purchased'
                    ' to respect the outlay <= amount rule. This is most '
                    'likely due to a commission function that outputs a '
                    'commission that is greater than the amount of cash '
                    'a short sale can raise.')

            if self.integer_positions and last_q == q:
                raise Exception(
                    'Newton Method like root search for quantity is stuck!'
                    ' q did not change in iterations so it is probably a bug'
                    ' but we are not entirely sure it is wrong! Consider '
                    ' changing to warning.'
                )
            last_q = q

            if np.abs(full_outlay - amount) > np.abs(last_amount_short):
                raise Exception(
                    'The difference between what we have raised with q and'
                    ' the amount we are trying to raise has gotten bigger since'
                    ' last iteration! full_outlay should always be approaching'
                    ' amount! There may be a case where the commission fn is'
                    ' not smooth'
                )
            last_amount_short = full_outlay - amount

    # if last step led to q == 0, then we can return just like above
    if q == 0:
        return

    # this security will need an update, even if pos is 0 (for example if
    # we close the positions, value and pos is 0, but still need to do that
    # last update)
    self._needupdate = True

    # adjust position & value
    self._position += q

    # calculate proper adjustment for parent
    # parent passed down amount so we want to pass
    # -outlay back up to parent to adjust for capital
    # used
    full_outlay, outlay, fee = self.outlay(q)

    # store outlay for future reference
    self._outlay += outlay

    # call parent
    self.parent.adjust(-full_outlay, update=update, flow=False, fee=fee)
176,269
Determines the complete cash outlay (including commission) necessary given a quantity q. Second returning parameter is a commission itself. Args: * q (float): quantity
def outlay(self, q):
    """Determine the complete cash outlay (including commission) for a
    given quantity ``q``.

    Args:
        * q (float): quantity

    Returns:
        tuple: (outlay + commission, outlay, commission)
    """
    # commission is computed on the notional per-unit price
    fee = self.commission(q, self._price * self.multiplier)
    outlay = q * self._price * self.multiplier
    full_outlay = outlay + fee
    return full_outlay, outlay, fee
176,270
Load or create a precise model Args: model_name: Name of model params: Parameters used to create the model Returns: model: Loaded Keras model
def create_model(model_name: Optional[str], params: ModelParams) -> 'Sequential':
    """Load or create a precise model.

    Args:
        model_name: Name of model file; loaded when it exists on disk,
            otherwise a fresh GRU + sigmoid network is built.
        params: Parameters used to create/compile the model.

    Returns:
        model: Loaded (or newly built) Keras model, compiled.
    """
    if model_name and isfile(model_name):
        print('Loading from ' + model_name + '...')
        model = load_precise_model(model_name)
    else:
        # build a new network: single GRU layer over the feature frames
        # followed by a one-unit sigmoid wake-word score
        from keras.layers.core import Dense
        from keras.layers.recurrent import GRU
        from keras.models import Sequential
        model = Sequential()
        model.add(GRU(
            params.recurrent_units, activation='linear',
            input_shape=(pr.n_features, pr.feature_size),
            dropout=params.dropout, name='net'
        ))
        model.add(Dense(1, activation='sigmoid'))

    load_keras()
    # list-multiplication: extra metrics included only when flag is truthy
    metrics = ['accuracy'] + params.extra_metrics * [false_pos, false_neg]
    set_loss_bias(params.loss_bias)
    # freeze the first ``freeze_till`` layers (e.g. for fine-tuning)
    for i in model.layers[:params.freeze_till]:
        i.trainable = False
    model.compile('rmsprop', weighted_log_loss,
                  metrics=(not params.skip_acc) * metrics)
    return model
176,578
Load the vectorized representations of the stored data files Args: train: Whether to load train data test: Whether to load test data
def load(self, train=True, test=True, shuffle=True) -> tuple:
    """Load the vectorized representations of the stored data files.

    Args:
        train: Whether to load train data
        test: Whether to load test data
        shuffle: Whether to shuffle the loaded samples
    """
    # delegate to the private loader, sourcing samples from data files
    return self.__load(self.__load_files, train, test, shuffle=shuffle)
176,597
Converts an HD5F file from Keras to a .pb for use with TensorFlow Args: model_path: location of Keras model out_file: location to write protobuf
def convert(model_path: str, out_file: str):
    """Converts an HD5F file from Keras to a .pb for use with TensorFlow.

    Args:
        model_path: location of Keras model
        out_file: location to write protobuf
    """
    print('Converting', model_path, 'to', out_file, '...')
    import tensorflow as tf
    from precise.model import load_precise_model
    from keras import backend as K

    out_dir, filename = split(out_file)
    out_dir = out_dir or '.'
    os.makedirs(out_dir, exist_ok=True)

    # inference mode: disables training-only behaviour such as dropout
    K.set_learning_phase(0)
    model = load_precise_model(model_path)

    # give the output tensor a stable, well-known name for consumers
    out_name = 'net_output'
    tf.identity(model.output, name=out_name)
    print('Output node name:', out_name)
    print('Output folder:', out_dir)

    sess = K.get_session()

    # Write the graph in human readable
    tf.train.write_graph(sess.graph.as_graph_def(), out_dir,
                         filename + 'txt', as_text=True)
    print('Saved readable graph to:', filename + 'txt')

    # Write the graph in binary .pb file
    from tensorflow.python.framework import graph_util
    from tensorflow.python.framework import graph_io
    cgraph = graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(), [out_name])
    graph_io.write_graph(cgraph, out_dir, filename, as_text=False)

    # carry the companion params file along with the converted model
    if isfile(model_path + '.params'):
        copyfile(model_path + '.params', out_file + '.params')

    print('Saved graph to:', filename)
    del sess
176,664
Gets an AsYouTypeFormatter for the specific region. Arguments: region_code -- The region where the phone number is being entered Return an AsYouTypeFormatter object, which could be used to format phone numbers in the specific region "as you type"
def __init__(self, region_code):
    """Gets an AsYouTypeFormatter for the specific region.

    Arguments:
    region_code -- The region where the phone number is being entered
    """
    # initialise all formatting state before loading region metadata
    self._clear()
    self._default_country = region_code.upper()
    self._current_metadata = _get_metadata_for_region(self._default_country)
    # remember the default metadata so the formatter can be reset to it
    self._default_metadata = self._current_metadata
176,895
Normalizes a string of characters representing a phone number. This converts wide-ascii and arabic-indic numerals to European numerals, and strips punctuation and alpha characters (optional). Arguments: number -- a string representing a phone number keep_non_digits -- whether to keep non-digits Returns the normalized string version of the phone number.
def normalize_digits_only(number, keep_non_digits=False):
    """Normalizes a string of characters representing a phone number.

    This converts wide-ascii and arabic-indic numerals to European
    numerals, and strips punctuation and alpha characters (optional).

    Arguments:
    number -- a string representing a phone number
    keep_non_digits -- whether to keep non-digits

    Returns the normalized string version of the phone number.
    """
    number = unicod(number)
    normalized = []
    for ch in number:
        digit = unicode_digit(ch, -1)
        if digit != -1:
            # a decimal digit in any script: emit its European form
            normalized.append(unicod(digit))
        elif keep_non_digits:
            normalized.append(ch)
    return U_EMPTY_STRING.join(normalized)
176,969
Gets the national significant number of a phone number. Note that a national significant number doesn't contain a national prefix or any formatting. Arguments: numobj -- The PhoneNumber object for which the national significant number is needed. Returns the national significant number of the PhoneNumber object passed in.
def national_significant_number(numobj):
    """Gets the national significant number of a phone number.

    Note that a national significant number doesn't contain a national
    prefix or any formatting.

    Arguments:
    numobj -- The PhoneNumber object for which the national significant
              number is needed.

    Returns the national significant number of the PhoneNumber object
    passed in.
    """
    # If leading zero(s) have been set, we prefix this now. Note this is
    # not a national prefix.
    leading_zeros = U_EMPTY_STRING
    if numobj.italian_leading_zero:
        num_zeros = numobj.number_of_leading_zeros
        if num_zeros is None:
            num_zeros = 1
        if num_zeros > 0:
            leading_zeros = U_ZERO * num_zeros
    return leading_zeros + str(numobj.national_number)
176,989
Gets a valid number for the specified number type (it may belong to any country). Arguments: num_type -- The type of number that is needed. Returns a valid number for the specified type. Returns None when the metadata does not contain such information. This should only happen when no numbers of this type are allocated anywhere in the world anymore.
def _example_number_anywhere_for_type(num_type):
    """Gets a valid number for the specified number type (it may belong to
    any country).

    Arguments:
    num_type -- The type of number that is needed.

    Returns a valid number for the specified type. Returns None when the
    metadata does not contain such information. This should only happen
    when no numbers of this type are allocated anywhere in the world
    anymore.
    """
    for region_code in SUPPORTED_REGIONS:
        example_numobj = example_number_for_type(region_code, num_type)
        if example_numobj is not None:
            return example_numobj
    # If there wasn't an example number for a region, try the non-geographical entities.
    for country_calling_code in COUNTRY_CODES_FOR_NON_GEO_REGIONS:
        metadata = PhoneMetadata.metadata_for_nongeo_region(country_calling_code, None)
        desc = _number_desc_by_type(metadata, num_type)
        if desc is not None and desc.example_number is not None:
            try:
                return parse(_PLUS_SIGN + unicod(country_calling_code) + desc.example_number,
                             UNKNOWN_REGION)
            except NumberParseException:  # pragma no cover
                pass
    # There are no example numbers of this type for any country in the library.
    return None
176,996
Gets a valid number for the specified country calling code for a non-geographical entity. Arguments: country_calling_code -- The country calling code for a non-geographical entity. Returns a valid number for the non-geographical entity. Returns None when the metadata does not contain such information, or the country calling code passed in does not belong to a non-geographical entity.
def example_number_for_non_geo_entity(country_calling_code):
    """Gets a valid number for the specified country calling code for a
    non-geographical entity.

    Arguments:
    country_calling_code -- The country calling code for a
              non-geographical entity.

    Returns a valid number for the non-geographical entity. Returns None
    when the metadata does not contain such information, or the country
    calling code passed in does not belong to a non-geographical entity.
    """
    metadata = PhoneMetadata.metadata_for_nongeo_region(country_calling_code, None)
    if metadata is not None:
        # For geographical entities, fixed-line data is always present. However, for non-geographical
        # entities, this is not the case, so we have to go through different types to find the
        # example number. We don't check fixed-line or personal number since they aren't used by
        # non-geographical entities (if this changes, a unit-test will catch this.)
        for desc in (metadata.mobile, metadata.toll_free, metadata.shared_cost,
                     metadata.voip, metadata.voicemail, metadata.uan,
                     metadata.premium_rate):
            try:
                if (desc is not None and desc.example_number is not None):
                    return parse(_PLUS_SIGN + unicod(country_calling_code) + desc.example_number,
                                 UNKNOWN_REGION)
            except NumberParseException:
                pass
    return None
176,997
Gets the type of a valid phone number. Arguments: numobj -- The PhoneNumber object that we want to know the type of. Returns the type of the phone number, as a PhoneNumberType value; returns PhoneNumberType.UNKNOWN if it is invalid.
def number_type(numobj):
    """Gets the type of a valid phone number.

    Arguments:
    numobj -- The PhoneNumber object that we want to know the type of.

    Returns the type of the phone number, as a PhoneNumberType value;
    returns PhoneNumberType.UNKNOWN if it is invalid.
    """
    region_code = region_code_for_number(numobj)
    metadata = PhoneMetadata.metadata_for_region_or_calling_code(
        numobj.country_code, region_code)
    if metadata is None:
        return PhoneNumberType.UNKNOWN
    return _number_type_helper(national_significant_number(numobj), metadata)
177,000
Returns the region where a phone number is from. This could be used for geocoding at the region level. Only guarantees correct results for valid, full numbers (not short-codes, or invalid numbers). Arguments: numobj -- The phone number object whose origin we want to know Returns the region where the phone number is from, or None if no region matches this calling code.
def region_code_for_number(numobj):
    """Returns the region where a phone number is from.

    This could be used for geocoding at the region level. Only guarantees
    correct results for valid, full numbers (not short-codes, or invalid
    numbers).

    Arguments:
    numobj -- The phone number object whose origin we want to know

    Returns the region where the phone number is from, or None if no
    region matches this calling code.
    """
    regions = COUNTRY_CODE_TO_REGION_CODE.get(numobj.country_code)
    if regions is None:
        return None
    if len(regions) == 1:
        return regions[0]
    # Several regions share this calling code; the number itself decides.
    return _region_code_for_number_from_list(numobj, regions)
177,004
Returns the country calling code for a specific region. For example, this would be 1 for the United States, and 64 for New Zealand. Assumes the region is already valid. Arguments: region_code -- The region that we want to get the country calling code for. Returns the country calling code for the region denoted by region_code.
def country_code_for_valid_region(region_code):
    """Returns the country calling code for a specific region.

    For example, this would be 1 for the United States, and 64 for New
    Zealand. Assumes the region is already valid.

    Arguments:
    region_code -- The region that we want to get the country calling
              code for.

    Returns the country calling code for the region denoted by
    region_code.
    """
    region_id = region_code.upper()
    metadata = PhoneMetadata.metadata_for_region(region_id, None)
    if metadata is None:
        raise Exception("Invalid region code %s" % region_code)
    return metadata.country_code
177,006
Truncate a number object that is too long. Attempts to extract a valid number from a phone number that is too long to be valid, and resets the PhoneNumber object passed in to that valid version. If no valid number could be extracted, the PhoneNumber object passed in will not be modified. Arguments: numobj -- A PhoneNumber object which contains a number that is too long to be valid. Returns True if a valid phone number can be successfully extracted.
def truncate_too_long_number(numobj):
    """Truncate a number object that is too long.

    Attempts to extract a valid number from a phone number that is too
    long to be valid, and resets the PhoneNumber object passed in to that
    valid version. If no valid number could be extracted, the PhoneNumber
    object passed in will not be modified.

    Arguments:
    numobj -- A PhoneNumber object which contains a number that is too
              long to be valid.

    Returns True if a valid phone number can be successfully extracted.
    """
    if is_valid_number(numobj):
        return True
    # Work on a copy so the caller's object stays untouched on failure.
    numobj_copy = PhoneNumber()
    numobj_copy.merge_from(numobj)
    national_number = numobj.national_number
    while not is_valid_number(numobj_copy):
        # Strip a digit off the RHS
        national_number = national_number // 10
        numobj_copy.national_number = national_number
        validation_result = is_possible_number_with_reason(numobj_copy)
        if (validation_result == ValidationResult.TOO_SHORT or
                national_number == 0):
            return False
    # To reach here, numobj_copy is a valid number. Modify the original object
    numobj.national_number = national_number
    return True
177,013
Strip extension from the end of a number string. Strips any extension (as in, the part of the number dialled after the call is connected, usually indicated with extn, ext, x or similar) from the end of the number, and returns it. Arguments: number -- the non-normalized telephone number that we wish to strip the extension from. Returns a 2-tuple of: - the phone extension (or "" or not present) - the number before the extension.
def _maybe_strip_extension(number): match = _EXTN_PATTERN.search(number) # If we find a potential extension, and the number preceding this is a # viable number, we assume it is an extension. if match and _is_viable_phone_number(number[:match.start()]): # The numbers are captured into groups in the regular expression. for group in match.groups(): # We go through the capturing groups until we find one that # captured some digits. If none did, then we will return the empty # string. if group is not None: return (group, number[:match.start()]) return ("", number)
177,019
Returns true if the supplied region supports mobile number portability. Returns false for invalid, unknown or regions that don't support mobile number portability. Arguments: region_code -- the region for which we want to know whether it supports mobile number portability or not.
def is_mobile_number_portable_region(region_code):
    """Returns true if the supplied region supports mobile number
    portability.

    Returns false for invalid, unknown or regions that don't support
    mobile number portability.

    Arguments:
    region_code -- the region for which we want to know whether it
              supports mobile number portability or not.
    """
    metadata = PhoneMetadata.metadata_for_region(region_code, None)
    # unknown/invalid regions have no metadata and are treated as
    # non-portable
    return metadata is not None and metadata.mobile_number_portable_region
177,031
As time_zones_for_geographical_number() but explicitly checks the validity of the number passed in. Arguments: numobj -- a valid phone number for which we want to get the time zones to which it belongs Returns a list of the corresponding time zones or a single element list with the default unknown time zone if no other time zone was found or if the number was invalid
def time_zones_for_number(numobj):
    """As time_zones_for_geographical_number() but explicitly checks the
    validity of the number passed in.

    Arguments:
    numobj -- a valid phone number for which we want to get the time
              zones to which it belongs

    Returns a list of the corresponding time zones or a single element
    list with the default unknown time zone if no other time zone was
    found or if the number was invalid.
    """
    ntype = number_type(numobj)
    if ntype == PhoneNumberType.UNKNOWN:
        # invalid number: we cannot say anything about its time zone
        return _UNKNOWN_TIME_ZONE_LIST
    if not is_number_type_geographical(ntype, numobj.country_code):
        # non-geographical numbers only resolve at country level
        return _country_level_time_zones_for_number(numobj)
    return time_zones_for_geographical_number(numobj)
177,037
Returns the list of time zones corresponding to the country calling code of a number. Arguments: numobj -- the phone number to look up Returns a list of the corresponding time zones or a single element list with the default unknown time zone if no other time zone was found or if the number was invalid
def _country_level_time_zones_for_number(numobj):
    """Returns the list of time zones corresponding to the country calling
    code of a number.

    Arguments:
    numobj -- the phone number to look up

    Returns a list of the corresponding time zones or a single element
    list with the default unknown time zone if no other time zone was
    found or if the number was invalid.
    """
    cc = str(numobj.country_code)
    # Try progressively shorter prefixes of the country code string.
    for prefix_len in range(TIMEZONE_LONGEST_PREFIX, 0, -1):
        # NOTE(review): the slice takes 1 + prefix_len characters —
        # presumably to match the key format used by TIMEZONE_DATA;
        # confirm against the timezone data's prefix encoding.
        prefix = cc[:(1 + prefix_len)]
        if prefix in TIMEZONE_DATA:
            return TIMEZONE_DATA[prefix]
    return _UNKNOWN_TIME_ZONE_LIST
177,038
Returns True if the groups of digits found in our candidate phone number match our expectations. Arguments: numobj -- the original number we found when parsing normalized_candidate -- the candidate number, normalized to only contain ASCII digits, but with non-digits (spaces etc) retained expected_number_groups -- the groups of digits that we would expect to see if we formatted this number Returns True if expectations matched.
def _all_number_groups_remain_grouped(numobj, normalized_candidate,
                                      formatted_number_groups):
    """Returns True if the groups of digits found in our candidate phone
    number match our expectations.

    Arguments:
    numobj -- the original number we found when parsing
    normalized_candidate -- the candidate number, normalized to only
              contain ASCII digits, but with non-digits (spaces etc)
              retained
    formatted_number_groups -- the groups of digits that we would expect
              to see if we formatted this number

    Returns True if expectations matched.
    """
    from_index = 0
    if numobj.country_code_source != CountryCodeSource.FROM_DEFAULT_COUNTRY:
        # First skip the country code if the normalized candidate contained it.
        country_code = str(numobj.country_code)
        from_index = normalized_candidate.find(country_code) + len(country_code)
    # Check each group of consecutive digits are not broken into separate
    # groupings in the candidate string.
    for ii, formatted_number_group in enumerate(formatted_number_groups):
        # Fails if the substring of normalized_candidate starting from
        # from_index doesn't contain the consecutive digits in
        # formatted_number_group.
        from_index = normalized_candidate.find(formatted_number_group, from_index)
        if from_index < 0:
            return False
        # Moves from_index forward.
        from_index += len(formatted_number_group)
        if (ii == 0 and from_index < len(normalized_candidate)):
            # We are at the position right after the NDC. We get the region
            # used for formatting information based on the country code in the
            # phone number, rather than the number itself, as we do not need
            # to distinguish between different countries with the same country
            # calling code and this is faster.
            region = region_code_for_country_code(numobj.country_code)
            if (ndd_prefix_for_region(region, True) is not None and
                    normalized_candidate[from_index].isdigit()):
                # This means there is no formatting symbol after the NDC. In
                # this case, we only accept the number if there is no
                # formatting symbol at all in the number, except for
                # extensions. This is only important for countries with
                # national prefixes.
                nsn = national_significant_number(numobj)
                return normalized_candidate[(from_index - len(formatted_number_group)):].startswith(nsn)
    # The check here makes sure that we haven't mistakenly already used the extension to
    # match the last group of the subscriber number. Note the extension cannot have
    # formatting in-between digits.
    return (normalized_candidate[from_index:].find(numobj.extension or U_EMPTY_STRING) != -1)
177,072
Returns True if the groups of digits found in our candidate phone number match our expectations. Arguments: numobj -- the original number we found when parsing normalized_candidate -- the candidate number, normalized to only contain ASCII digits, but with non-digits (spaces etc) retained expected_number_groups -- the groups of digits that we would expect to see if we formatted this number Returns True if expectations matched.
def _all_number_groups_are_exactly_present(numobj, normalized_candidate, formatted_number_groups): candidate_groups = re.split(NON_DIGITS_PATTERN, normalized_candidate) # Set this to the last group, skipping it if the number has an extension. if numobj.extension is not None: candidate_number_group_index = len(candidate_groups) - 2 else: candidate_number_group_index = len(candidate_groups) - 1 # First we check if the national significant number is formatted as a # block. We use contains and not equals, since the national significant # number may be present with a prefix such as a national number prefix, or # the country code itself. if (len(candidate_groups) == 1 or candidate_groups[candidate_number_group_index].find(national_significant_number(numobj)) != -1): return True # Starting from the end, go through in reverse, excluding the first group, # and check the candidate and number groups are the same. formatted_number_group_index = len(formatted_number_groups) - 1 while (formatted_number_group_index > 0 and candidate_number_group_index >= 0): if (candidate_groups[candidate_number_group_index] != formatted_number_groups[formatted_number_group_index]): return False formatted_number_group_index -= 1 candidate_number_group_index -= 1 # Now check the first group. There may be a national prefix at the start, so we only check # that the candidate group ends with the formatted number group. return (candidate_number_group_index >= 0 and candidate_groups[candidate_number_group_index].endswith(formatted_number_groups[0]))
177,074
Attempts to find the next subsequence in the searched sequence on or after index that represents a phone number. Returns the next match, None if none was found. Arguments: index -- The search index to start searching at. Returns the phone number match found, None if none can be found.
def _find(self, index):
    """Search self.text on or after index for the next phone number.

    Arguments:
    index -- the search index to start searching at
    Returns the PhoneNumberMatch found, or None when none can be found
    (or the retry budget is exhausted).
    """
    while self._max_tries > 0:
        regex_match = _PATTERN.search(self.text, index)
        if regex_match is None:
            break
        start = regex_match.start()
        candidate = self.text[start:regex_match.end()]
        # Check for extra numbers at the end.
        # TODO: This is the place to start when trying to support
        # extraction of multiple phone numbers from split notations
        # (+41 79 123 45 67 / 68).
        candidate = self._trim_after_first_match(_SECOND_NUMBER_START_PATTERN,
                                                 candidate)
        extracted = self._extract_match(candidate, start)
        if extracted is not None:
            return extracted
        # Move along past this candidate and spend one try.
        index = start + len(candidate)
        self._max_tries -= 1
    return None
177,081
Attempts to extract a match from a candidate string. Arguments: candidate -- The candidate text that might contain a phone number. offset -- The offset of candidate within self.text Returns the match found, None if none can be found
def _extract_match(self, candidate, offset):
    """Attempt to extract a match from a candidate string.

    Arguments:
    candidate -- the candidate text that might contain a phone number
    offset -- the offset of candidate within self.text
    Returns the match found, None if none can be found.
    """
    # Reject anything more likely a publication page reference or a date.
    if _SLASH_SEPARATED_DATES.search(candidate) is not None:
        return None
    # Reject potential time-stamps.
    if _TIME_STAMPS.search(candidate) is not None:
        trailing = self.text[offset + len(candidate):]
        if _TIME_STAMPS_SUFFIX.match(trailing):
            return None
    # Try the entire candidate first; failing that, look for an "inner
    # match" -- there might be a phone number within this candidate.
    whole = self._parse_and_verify(candidate, offset)
    if whole is None:
        return self._extract_inner_match(candidate, offset)
    return whole
177,085
Attempts to extract a match from candidate if the whole candidate does not qualify as a match. Arguments: candidate -- The candidate text that might contain a phone number offset -- The current offset of candidate within text Returns the match found, None if none can be found
def _extract_inner_match(self, candidate, offset):
    """Attempts to extract a match from candidate if the whole candidate
    does not qualify as a match, by trying each _INNER_MATCHES pattern to
    find a phone number embedded within the candidate text.

    Arguments:
    candidate -- The candidate text that might contain a phone number
    offset -- The current offset of candidate within self.text
    Returns the match found, None if none can be found.
    """
    for possible_inner_match in _INNER_MATCHES:
        group_match = possible_inner_match.search(candidate)
        is_first_match = True
        # Keep trying later occurrences of the pattern while tries remain.
        while group_match and self._max_tries > 0:
            if is_first_match:
                # We should handle any group before this one too.
                group = self._trim_after_first_match(_UNWANTED_END_CHAR_PATTERN,
                                                     candidate[:group_match.start()])
                match = self._parse_and_verify(group, offset)
                if match is not None:
                    return match
                self._max_tries -= 1
                is_first_match = False
            # Now the captured group itself, trimmed of unwanted trailing
            # characters, at its absolute offset within the text.
            group = self._trim_after_first_match(_UNWANTED_END_CHAR_PATTERN,
                                                 group_match.group(1))
            match = self._parse_and_verify(group, offset + group_match.start(1))
            if match is not None:
                return match
            self._max_tries -= 1
            group_match = possible_inner_match.search(candidate, group_match.start() + 1)
    return None
177,086
Parses a phone number from the candidate using phonenumberutil.parse and verifies it matches the requested leniency. If parsing and verification succeed, a corresponding PhoneNumberMatch is returned, otherwise this method returns None. Arguments: candidate -- The candidate match. offset -- The offset of candidate within self.text. Returns the parsed and validated phone number match, or None.
def _parse_and_verify(self, candidate, offset):
    """Parses a phone number from the candidate using phonenumberutil.parse
    and verifies it matches the requested leniency. If parsing and
    verification succeed, a corresponding PhoneNumberMatch is returned;
    otherwise this method returns None.

    Arguments:
    candidate -- The candidate match.
    offset -- The offset of candidate within self.text.
    Returns the parsed and validated phone number match, or None.
    """
    try:
        # Check the candidate doesn't contain any formatting which would
        # indicate that it really isn't a phone number.
        if (not fullmatch(_MATCHING_BRACKETS, candidate) or
                _PUB_PAGES.search(candidate)):
            return None
        # If leniency is set to VALID or stricter, we also want to skip
        # numbers that are surrounded by Latin alphabetic characters, to
        # skip cases like abc8005001234 or 8005001234def.
        if self.leniency >= Leniency.VALID:
            # If the candidate is not at the start of the text, and does
            # not start with phone-number punctuation, check the previous
            # character
            if (offset > 0 and not _LEAD_PATTERN.match(candidate)):
                previous_char = self.text[offset - 1]
                # We return None if it is a latin letter or an invalid
                # punctuation symbol
                if (self._is_invalid_punctuation_symbol(previous_char) or
                        self._is_latin_letter(previous_char)):
                    return None
            last_char_index = offset + len(candidate)
            if last_char_index < len(self.text):
                next_char = self.text[last_char_index]
                if (self._is_invalid_punctuation_symbol(next_char) or
                        self._is_latin_letter(next_char)):
                    return None
        numobj = parse(candidate, self.preferred_region, keep_raw_input=True)
        if _verify(self.leniency, numobj, candidate, self):
            # We used parse(keep_raw_input=True) to create this number,
            # but for now we don't return the extra values parsed.
            # TODO: stop clearing all values here and switch all users
            # over to using raw_input rather than the raw_string of
            # PhoneNumberMatch.
            numobj.country_code_source = CountryCodeSource.UNSPECIFIED
            numobj.raw_input = None
            numobj.preferred_domestic_carrier_code = None
            return PhoneNumberMatch(offset, candidate, numobj)
    except NumberParseException:
        # ignore and continue
        pass
    return None
177,087
Check whether a short number is a possible number when dialled from a region. This provides a more lenient check than is_valid_short_number_for_region. Arguments: short_numobj -- the short number to check as a PhoneNumber object. region_dialing_from -- the region from which the number is dialed Return whether the number is a possible short number.
def is_possible_short_number_for_region(short_numobj, region_dialing_from):
    """Check whether a short number is a possible number when dialled
    from a region.

    This provides a more lenient check than
    is_valid_short_number_for_region: only the length of the national
    significant number is compared against the region's possible lengths.

    Arguments:
    short_numobj -- the short number to check as a PhoneNumber object.
    region_dialing_from -- the region from which the number is dialed
    Returns whether the number is a possible short number.
    """
    if not _region_dialing_from_matches_number(short_numobj, region_dialing_from):
        return False
    smd = PhoneMetadata.short_metadata_for_region(region_dialing_from)
    if smd is None:  # pragma no cover
        return False
    return len(national_significant_number(short_numobj)) in smd.general_desc.possible_length
177,093
Check whether a short number is a possible number. If a country calling code is shared by multiple regions, this returns True if it's possible in any of them. This provides a more lenient check than is_valid_short_number. Arguments: numobj -- the short number to check Return whether the number is a possible short number.
def is_possible_short_number(numobj):
    """Check whether a short number is a possible number.

    If a country calling code is shared by multiple regions, this returns
    True if it's possible in any of them. This provides a more lenient
    check than is_valid_short_number.

    Arguments:
    numobj -- the short number to check
    Returns whether the number is a possible short number.
    """
    nsn_len = len(national_significant_number(numobj))
    region_metadata = (PhoneMetadata.short_metadata_for_region(rc)
                       for rc in region_codes_for_country_code(numobj.country_code))
    # Regions without short-number metadata are skipped.
    return any(md is not None and nsn_len in md.general_desc.possible_length
               for md in region_metadata)
177,094
Tests whether a short number matches a valid pattern in a region. Note that this doesn't verify the number is actually in use, which is impossible to tell by just looking at the number itself. Arguments: short_numobj -- the short number to check as a PhoneNumber object. region_dialing_from -- the region from which the number is dialed Return whether the short number matches a valid pattern
def is_valid_short_number_for_region(short_numobj, region_dialing_from):
    """Tests whether a short number matches a valid pattern in a region.

    Note that this doesn't verify the number is actually in use, which is
    impossible to tell by just looking at the number itself.

    Arguments:
    short_numobj -- the short number to check as a PhoneNumber object.
    region_dialing_from -- the region from which the number is dialed
    Returns whether the short number matches a valid pattern.
    """
    if not _region_dialing_from_matches_number(short_numobj, region_dialing_from):
        return False
    metadata = PhoneMetadata.short_metadata_for_region(region_dialing_from)
    if metadata is None:  # pragma no cover
        return False
    nsn = national_significant_number(short_numobj)
    # It must match the general description for the region first...
    if not _matches_possible_number_and_national_number(nsn, metadata.general_desc):
        return False
    # ...and then the specific short-code description.
    short_desc = metadata.short_code
    if short_desc.national_number_pattern is None:  # pragma no cover
        return False
    return _matches_possible_number_and_national_number(nsn, short_desc)
177,095
Tests whether a short number matches a valid pattern. If a country calling code is shared by multiple regions, this returns True if it's valid in any of them. Note that this doesn't verify the number is actually in use, which is impossible to tell by just looking at the number itself. See is_valid_short_number_for_region for details. Arguments: numobj - the short number for which we want to test the validity Return whether the short number matches a valid pattern
def is_valid_short_number(numobj):
    """Tests whether a short number matches a valid pattern.

    If a country calling code is shared by multiple regions, this returns
    True if it's valid in any of them. Note that this doesn't verify the
    number is actually in use. See is_valid_short_number_for_region for
    details.

    Arguments:
    numobj -- the short number for which we want to test the validity
    Returns whether the short number matches a valid pattern.
    """
    candidate_regions = region_codes_for_country_code(numobj.country_code)
    chosen_region = _region_code_for_short_number_from_region_list(numobj, candidate_regions)
    if chosen_region is not None and len(candidate_regions) > 1:
        # A matching region was found among two or more regions sharing
        # the calling code, which already implies validity there.
        return True
    return is_valid_short_number_for_region(numobj, chosen_region)
177,096
Gets a valid short number for the specified region. Arguments: region_code -- the region for which an example short number is needed. Returns a valid short number for the specified region. Returns an empty string when the metadata does not contain such information.
def _example_short_number(region_code):
    """Gets a valid short number for the specified region.

    Arguments:
    region_code -- the region for which an example short number is needed.
    Returns a valid short number for the region, or an empty string when
    the metadata does not contain such information.
    """
    metadata = PhoneMetadata.short_metadata_for_region(region_code)
    if metadata is None:
        return U_EMPTY_STRING
    example = metadata.short_code.example_number
    return example if example is not None else U_EMPTY_STRING
177,100
Gets a valid short number for the specified cost category. Arguments: region_code -- the region for which an example short number is needed. cost -- the cost category of number that is needed. Returns a valid short number for the specified region and cost category. Returns an empty string when the metadata does not contain such information, or the cost is UNKNOWN_COST.
def _example_short_number_for_cost(region_code, cost):
    """Gets a valid short number for the specified cost category.

    Arguments:
    region_code -- the region for which an example short number is needed.
    cost -- the cost category of number that is needed.
    Returns a valid short number for the region and cost category, or an
    empty string when the metadata does not contain such information, or
    the cost is UNKNOWN_COST.
    """
    metadata = PhoneMetadata.short_metadata_for_region(region_code)
    if metadata is None:
        return U_EMPTY_STRING
    # ShortNumberCost.UNKNOWN_COST numbers are computed by the process of
    # elimination from the other cost categories, so they have no entry.
    desc_for_cost = {
        ShortNumberCost.TOLL_FREE: metadata.toll_free,
        ShortNumberCost.STANDARD_RATE: metadata.standard_rate,
        ShortNumberCost.PREMIUM_RATE: metadata.premium_rate,
    }
    desc = desc_for_cost.get(cost)
    if desc is None or desc.example_number is None:
        return U_EMPTY_STRING
    return desc.example_number
177,101
Calculates refund transactions based on line items and shipping. When you want to create a refund, you should first use the calculate endpoint to generate accurate refund transactions. Args: order_id: Order ID for which the Refund has to created. shipping: Specify how much shipping to refund. refund_line_items: A list of line item IDs and quantities to refund. Returns: Unsaved refund record
def calculate(cls, order_id, shipping=None, refund_line_items=None):
    """Calculates refund transactions based on line items and shipping.

    When you want to create a refund, you should first use this calculate
    endpoint to generate accurate refund transactions.

    Args:
        order_id: Order ID for which the Refund has to be created.
        shipping: Specify how much shipping to refund.
        refund_line_items: A list of line item IDs and quantities to refund.
    Returns:
        Unsaved refund record.
    """
    refund = {}
    if shipping:
        refund['shipping'] = shipping
    refund['refund_line_items'] = refund_line_items or []
    payload = json.dumps({'refund': refund}).encode()
    response = cls.post("calculate", order_id=order_id, body=payload)
    attributes = cls.format.decode(response.body)
    return cls(attributes, prefix_options={'order_id': order_id})
177,145
Retrieves project name for given project id Args: projects: List of projects project_id: project id Returns: Project name or None if there is no match
def get_project_name(project_id, projects):
    """Retrieves the project name for a given project id.

    Args:
        project_id: project id
        projects: list of projects (each with `id` and `name` attributes)
    Returns:
        Project name, or None if there is no match.
    """
    return next((p.name for p in projects if p.id == project_id), None)
179,908
Create an instance of the AuthenticatedClient class. Args: key (str): Your API key. b64secret (str): The secret key matching your API key. passphrase (str): Passphrase chosen when setting up key. api_url (Optional[str]): API URL. Defaults to cbpro API.
def __init__(self, key, b64secret, passphrase, api_url="https://api.pro.coinbase.com"):
    """Create an instance of the AuthenticatedClient class.

    Args:
        key (str): Your API key.
        b64secret (str): The secret key matching your API key.
        passphrase (str): Passphrase chosen when setting up key.
        api_url (Optional[str]): API URL. Defaults to cbpro API.
    """
    super(AuthenticatedClient, self).__init__(api_url)
    # A fresh session, plus the signing handler applied to every request.
    self.session = requests.Session()
    self.auth = CBProAuth(key, b64secret, passphrase)
180,219
Repay funding. Repays the older funding records first. Args: amount (int): Amount of currency to repay currency (str): The currency, example USD Returns: Not specified by cbpro.
def repay_funding(self, amount, currency):
    """Repay funding. Repays the older funding records first.

    Args:
        amount (int): Amount of currency to repay.
        currency (str): The currency, example USD.
    Returns:
        Not specified by cbpro.
    """
    body = json.dumps({'amount': amount,
                       'currency': currency})  # example currency: USD
    return self._send_message('post', '/funding/repay', data=body)
180,231
Close position. Args: repay_only (bool): Undocumented by cbpro. Returns: Undocumented
def close_position(self, repay_only):
    """Close position.

    Args:
        repay_only (bool): Undocumented by cbpro; passed through as-is.
    Returns:
        Undocumented.
    """
    body = json.dumps({'repay_only': repay_only})
    return self._send_message('post', '/position/close', data=body)
180,233
Withdraw funds to a crypto address. Args: amount (Decimal): The amount to withdraw currency (str): The type of currency (eg. 'BTC') crypto_address (str): Crypto address to withdraw to. Returns: dict: Withdraw details. Example:: { "id":"593533d2-ff31-46e0-b22e-ca754147a96a", "amount":"10.00", "currency": "BTC", }
def crypto_withdraw(self, amount, currency, crypto_address):
    """Withdraw funds to a crypto address.

    Args:
        amount (Decimal): The amount to withdraw.
        currency (str): The type of currency (eg. 'BTC').
        crypto_address (str): Crypto address to withdraw to.
    Returns:
        dict: Withdraw details, e.g.::

            {
                "id": "593533d2-ff31-46e0-b22e-ca754147a96a",
                "amount": "10.00",
                "currency": "BTC",
            }
    """
    payload = {'amount': amount,
               'currency': currency,
               'crypto_address': crypto_address}
    return self._send_message('post', '/withdrawals/crypto',
                              data=json.dumps(payload))
180,236
Create cbpro API public client. Args: api_url (Optional[str]): API URL. Defaults to cbpro API.
def __init__(self, api_url='https://api.pro.coinbase.com', timeout=30):
    """Create cbpro API public client.

    Args:
        api_url (Optional[str]): API URL. Defaults to cbpro API.
        timeout (Optional[int]): Per-request timeout in seconds.
    """
    self.url = api_url.rstrip('/')
    self.auth = None
    self.session = requests.Session()
    # Bug fix: `timeout` was previously accepted but silently discarded.
    # Store it so request helpers can honour the configured value.
    self.timeout = timeout
180,247
Send API request. Args: method (str): HTTP method (get, post, delete, etc.) endpoint (str): Endpoint (to be added to base URL) params (Optional[dict]): HTTP request parameters data (Optional[str]): JSON-encoded string payload for POST Returns: dict/list: JSON response
def _send_message(self, method, endpoint, params=None, data=None):
    """Send API request.

    Args:
        method (str): HTTP method (get, post, delete, etc.)
        endpoint (str): Endpoint (to be added to base URL)
        params (Optional[dict]): HTTP request parameters
        data (Optional[str]): JSON-encoded string payload for POST
    Returns:
        dict/list: JSON response
    """
    url = self.url + endpoint
    # Honour a per-client timeout when one was configured; fall back to
    # the historical hard-coded 30 seconds otherwise (previously the
    # constructor's timeout argument was ignored here).
    timeout = getattr(self, 'timeout', 30)
    r = self.session.request(method, url, params=params, data=data,
                             auth=self.auth, timeout=timeout)
    return r.json()
180,251
Change the bytecode to use the given library address. Args: hex_code (bin): The bytecode encoded in hexadecimal. library_symbol (str): The placeholder symbol of the library that will be resolved. library_address (str): The address of the library. Returns: bin: The bytecode encoded in hexadecimal with the library references resolved.
def solidity_resolve_address(hex_code, library_symbol, library_address):
    """Change the bytecode to use the given library address.

    Args:
        hex_code (bin): The bytecode encoded in hexadecimal.
        library_symbol (str): The 40-character placeholder symbol of the
            library to resolve.
        library_address (str): The address of the library (40 hex chars,
            no 0x prefix).
    Returns:
        bin: The bytecode with the library references resolved.
    """
    if library_address.startswith('0x'):
        raise ValueError('Address should not contain the 0x prefix')
    # Validate the address really is hex-encoded.
    try:
        decode_hex(library_address)
    except TypeError:
        raise ValueError(
            'library_address contains invalid characters, it must be hex encoded.')
    if not (len(library_symbol) == len(library_address) == 40):
        raise ValueError('Address with wrong length')
    return hex_code.replace(library_symbol, library_address)
181,196