Columns: docstring (string, lengths 52–499) · function (string, lengths 67–35.2k) · __index_level_0__ (int64, 52.6k–1.16M)
Convert JSON in the body of the request to the parameters for the wrapped function. If the JSON is a list, add it to ``*args``. If it is a dict, add it to ``**kwargs`` in non-rewrite mode (no key already present in ``**kwargs`` will be overwritten). If it is a single value, add it to ``*args``.

Args:
    return_json (bool, default True): Should the decorator automatically convert the returned value to JSON?
def json_to_params(fn=None, return_json=True):
    def json_to_params_decorator(fn):
        @handle_type_error
        @wraps(fn)
        def json_to_params_wrapper(*args, **kwargs):
            data = decode_json_body()

            if type(data) in [tuple, list]:
                args = list(args) + data
            elif type(data) == dict:
                # transport only items that are not already in kwargs
                allowed_keys = set(data.keys()) - set(kwargs.keys())
                for key in allowed_keys:
                    kwargs[key] = data[key]
            elif type(data) in PRIMITIVE_TYPES:
                args = list(args)
                args.append(data)

            if not return_json:
                return fn(*args, **kwargs)

            return encode_json_body(fn(*args, **kwargs))

        return json_to_params_wrapper

    if fn:  # python decorator with optional parameters pattern
        return json_to_params_decorator(fn)

    return json_to_params_decorator
864,343
Decode JSON from the request and add it as the ``data`` parameter for the wrapped function.

Args:
    return_json (bool, default True): Should the decorator automatically convert the returned value to JSON?
def json_to_data(fn=None, return_json=True):
    def json_to_data_decorator(fn):
        @handle_type_error
        @wraps(fn)
        def get_data_wrapper(*args, **kwargs):
            kwargs["data"] = decode_json_body()

            if not return_json:
                return fn(*args, **kwargs)

            return encode_json_body(fn(*args, **kwargs))

        return get_data_wrapper

    if fn:  # python decorator with optional parameters pattern
        return json_to_data_decorator(fn)

    return json_to_data_decorator
864,344
Convert a bottle forms request to parameters for the wrapped function.

Args:
    return_json (bool, default True): Should the decorator automatically convert the returned value to JSON?
def form_to_params(fn=None, return_json=True):
    def forms_to_params_decorator(fn):
        @handle_type_error
        @wraps(fn)
        def forms_to_params_wrapper(*args, **kwargs):
            kwargs.update(dict(request.forms))

            if not return_json:
                return fn(*args, **kwargs)

            return encode_json_body(fn(*args, **kwargs))

        return forms_to_params_wrapper

    if fn:  # python decorator with optional parameters pattern
        return forms_to_params_decorator(fn)

    return forms_to_params_decorator
864,345
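The three decorators above share the same optional-argument pattern, so both ``@json_to_params`` and ``@json_to_params()`` work. A minimal usage sketch, assuming a bottle application and the ``decode_json_body``/``encode_json_body`` helpers from this module:

    # Hypothetical route: POSTing {"a": 1, "b": 2} returns {"sum": 3}.
    from bottle import post

    @post('/add')
    @json_to_params()
    def add(a, b):
        # a and b arrive via **kwargs parsed from the JSON body
        return {'sum': a + b}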
Get a GCEEnricher client. A factory function that validates configuration and returns an enricher client (:interface:`gordon.interfaces.IMessageHandler`) provider. Args: config (dict): Google Compute Engine API related configuration. metrics (obj): :interface:`IMetricRelay` implementation. kwargs (dict): Additional keyword arguments to pass to the enricher. Returns: A :class:`GCEEnricher` instance.
def get_enricher(config, metrics, **kwargs):
    builder = enricher.GCEEnricherBuilder(config, metrics, **kwargs)
    return builder.build_enricher()
864,439
Get a GDNSPublisher client. A factory function that validates configuration and returns a publisher client (:interface:`gordon.interfaces.IMessageHandler`) provider. Args: config (dict): Google Cloud DNS API related configuration. metrics (obj): :interface:`IMetricRelay` implementation. kwargs (dict): Additional keyword arguments to pass to the publisher. Returns: A :class:`GDNSPublisher` instance.
def get_gdns_publisher(config, metrics, **kwargs):
    builder = gdns_publisher.GDNSPublisherBuilder(config, metrics, **kwargs)
    return builder.build_publisher()
864,440
Get all active projects.

You can find the endpoint documentation `here
<https://cloud.google.com/resource-manager/reference/rest/v1/projects/list>`__.

Args:
    page_size (int): hint for the client to only retrieve up to this number
        of results per API call.

Returns:
    list(dicts): all active projects
async def list_all_active_projects(self, page_size=1000):
    url = f'{self.BASE_URL}/{self.api_version}/projects'
    params = {'pageSize': page_size}
    responses = await self.list_all(url, params)
    projects = self._parse_rsps_for_projects(responses)
    return [
        project for project in projects
        if project.get('lifecycleState', '').lower() == 'active'
    ]
864,597
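A hedged usage sketch for the method above, assuming ``client`` is an instance of the client class it belongs to:

    import asyncio

    async def main():
        # returns only projects whose lifecycleState is ACTIVE
        projects = await client.list_all_active_projects(page_size=500)
        print([p.get('projectId') for p in projects])

    asyncio.run(main())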
r""" Args: other (?): CommandLine: python -m sortedcontainers.sortedlist join2 Example: >>> from utool.experimental.dynamic_connectivity import * # NOQA >>> self = EulerTourList([1, 2, 3, 2, 4, 2, 1], load=3) >>> other = EulerTourList([0, 5, 9, 5, 0], load=3) >>> result = self.join(other) >>> print(result)
def join(self, other):
    assert self._load == other._load, 'loads must be the same'
    self._lists.extend(other._lists)
    self._cumlen.extend([c + self._len for c in other._cumlen])
    self._len += other._len
865,046
r""" Args: field_list (list): list of either a tuples to denote a keyword, or a strings for relacement t3ext Returns: str: repl for regex Example: >>> # ENABLE_DOCTEST >>> from utool.util_regex import * # NOQA >>> field_list = [('key',), 'unspecial string'] >>> repl = named_field_repl(field_list) >>> result = repl >>> print(result) \g<key>unspecial string
def named_field_repl(field_list):
    # Allow for unnamed patterns
    bref_field_list = [
        backref_field(key[0]) if isinstance(key, tuple) else key
        for key in field_list
    ]
    repl = ''.join(bref_field_list)
    return repl
865,092
r""" regex_parse Args: regex (str): text (str): fromstart (bool): Returns: dict or None: Example: >>> # DISABLE_DOCTEST >>> from utool.util_regex import * # NOQA >>> regex = r'(?P<string>\'[^\']*\')' >>> text = " 'just' 'a' sentance with 'strings' in it " >>> fromstart = False >>> result = regex_parse(regex, text, fromstart)['string'] >>> print(result)
def regex_parse(regex, text, fromstart=True):
    match = regex_get_match(regex, text, fromstart=fromstart)
    if match is not None:
        parse_dict = match.groupdict()
        return parse_dict
    return None
865,099
r""" Prompts user to accept or checks command line for -y Args: msg (str): Returns: bool: accept or not
def are_you_sure(msg=''):
    print(msg)
    from utool import util_arg
    from utool import util_str
    override = util_arg.get_argflag(('--yes', '--y', '-y'))
    if override:
        print('accepting based on command line flag')
        return True
    valid_ans = ['yes', 'y']
    valid_prompt = util_str.conj_phrase(valid_ans, 'or')
    ans = input('Are you sure?\n Enter %s to accept\n' % valid_prompt)
    return ans.lower() in valid_ans
865,157
r""" Args: fpath (str): file path string CommandLine: python -m utool.util_dev --test-autopep8_diff --fpath ingest_data.py Example: >>> # DISABLE_DOCTEST >>> from utool.util_dev import * # NOQA >>> fpath = ut.get_argval('--fpath', type_=str, default='ingest_data.py') >>> result = autopep8_diff(fpath) >>> print(result)
def autopep8_diff(fpath):
    import utool as ut
    args = ('autopep8', fpath, '--diff')
    res = ut.cmd(args, verbose=False)
    out, err, ret = res
    ut.print_difftext(out)
865,181
r""" Reads text from a file. Automatically returns utf8. Args: fpath (str): file path aslines (bool): if True returns list of lines verbose (bool): verbosity flag Returns: str: text from fpath (this is unicode) Ignore: x = b'''/whaleshark_003_fors\xc3\xb8g.wmv" />\r\n''' ut.writeto('foo.txt', x) y = ut.readfrom('foo.txt') y.encode('utf8') == x
def read_from(fpath, verbose=None, aslines=False, strict=True, n=None,
              errors='replace'):
    if n is None:
        n = __READ_TAIL_N__
    verbose = _rectify_verb_read(verbose)
    if verbose:
        print('[util_io] * Reading text file: %r ' % util_path.tail(fpath, n=n))
    try:
        if not util_path.checkpath(fpath, verbose=verbose, n=n):
            raise IOError('[io] * FILE DOES NOT EXIST!')
        # open in binary mode and decode explicitly; the python2 and python3
        # branches in the original were identical, so they are merged here
        with open(fpath, 'rb') as file_:
            if aslines:
                text = [line.decode('utf8', errors=errors)
                        for line in file_.readlines()]
            else:
                text = file_.read().decode('utf8', errors=errors)
        return text
    except IOError as ex:
        from utool import util_dbg
        if verbose or strict:
            util_dbg.printex(ex, ' * Error reading fpath=%r' %
                             util_path.tail(fpath, n=n), '[io]')
        if strict:
            raise
865,341
Finds a local variable somewhere in the stack and returns the value

Args:
    varname (str): variable name

Returns:
    None if varname is not found else its value
def search_stack_for_localvar(varname):
    curr_frame = inspect.currentframe()
    print(' * Searching parent frames for: ' + six.text_type(varname))
    frame_no = 0
    while curr_frame.f_back is not None:
        if varname in curr_frame.f_locals.keys():
            print(' * Found in frame: ' + six.text_type(frame_no))
            return curr_frame.f_locals[varname]
        frame_no += 1
        curr_frame = curr_frame.f_back
    print('... Found nothing in all ' + six.text_type(frame_no) + ' frames.')
    return None
865,456
Finds a variable (local or global) somewhere in the stack and returns the value

Args:
    varname (str): variable name

Returns:
    None if varname is not found else its value
def search_stack_for_var(varname, verbose=util_arg.NOT_QUIET):
    curr_frame = inspect.currentframe()
    if verbose:
        print(' * Searching parent frames for: ' + six.text_type(varname))
    frame_no = 0
    while curr_frame.f_back is not None:
        if varname in curr_frame.f_locals.keys():
            if verbose:
                print(' * Found local in frame: ' + six.text_type(frame_no))
            return curr_frame.f_locals[varname]
        if varname in curr_frame.f_globals.keys():
            if verbose:
                print(' * Found global in frame: ' + six.text_type(frame_no))
            return curr_frame.f_globals[varname]
        frame_no += 1
        curr_frame = curr_frame.f_back
    if verbose:
        print('... Found nothing in all ' + six.text_type(frame_no) + ' frames.')
    return None
865,457
Finds the string name for which ``locals_[name] is val``. Checks that the
varname is in the parent namespace. This will only work with objects, not
primitives.

Args:
    val: some value
    locals_ (dict): local dictionary to search
    default (str):
    strict (bool):

Returns:
    str: the varname whose value is val (if it exists)
def get_varname_from_locals(val, locals_, default='varname-not-found',
                            strict=False, cmpfunc_=operator.is_):
    if val is None or isinstance(val, (int, float, bool)):
        # Cannot work on primitive types
        return default
    try:
        for count, val_ in enumerate(six.itervalues(locals_)):
            if cmpfunc_(val, val_):
                index_ = count
        # raises NameError if no match ever set index_
        varname = six.text_type(list(locals_.keys())[index_])
    except NameError:
        varname = default
        if strict:
            raise
    return varname
865,474
r""" Args: list_ (list): seed (int): Returns: list: list_ CommandLine: python -m utool.util_numpy --test-deterministic_shuffle Example: >>> # ENABLE_DOCTEST >>> from utool.util_numpy import * # NOQA >>> list_ = [1, 2, 3, 4, 5, 6] >>> seed = 1 >>> list_ = deterministic_shuffle(list_, seed) >>> result = str(list_) >>> print(result) [3, 2, 5, 1, 4, 6]
def deterministic_shuffle(list_, seed=0, rng=None):
    rng = ensure_rng(seed if rng is None else rng)
    rng.shuffle(list_)
    return list_
865,591
If jedi-vim supports google style docstrings you should be able to
autocomplete ColumnLists methods for `data`

Args:
    data (utool.ColumnLists): a column list object
    ibs (ibeis.IBEISController): an object
def _insource_jedi_vim_test(data, ibs):
    # TESTME: type a dot and tab. Hopefully autocomplete will happen.
    data
    ibs
    import utool as ut
    xdata = ut.ColumnLists()
    xdata
    import ibeis
    xibs = ibeis.IBEISController()
    xibs
865,599
r""" Args: str_ (str): Returns: float: timedelta CommandLine: python -m utool.util_time --exec-parse_timedelta_str Example: >>> # ENABLE_DOCTEST >>> from utool.util_time import * # NOQA >>> str_ = '24h' >>> timedelta = parse_timedelta_str(str_) >>> result = ('timedelta = %s' % (str(timedelta),)) >>> print(result) timedelta = 86400.0
def parse_timedelta_str(str_):
    if str_.endswith('m'):
        timedelta = float(str_[0:-1]) * 60
    elif str_.endswith('h'):
        timedelta = float(str_[0:-1]) * 60 * 60
    elif str_.endswith('s'):
        timedelta = float(str_[0:-1])
    else:
        raise NotImplementedError('Unknown timedelta format %r' % (str_,))
    return timedelta
865,909
r""" Ensures that directory will exist. creates new dir with sticky bits by default Args: path (str): dpath to ensure. Can also be a tuple to send to join info (bool): if True prints extra information mode (int): octal mode of directory (default 0o1777) Returns: str: path - the ensured directory
def ensuredir(path_, verbose=None, info=False, mode=0o1777):
    if verbose is None:
        verbose = VERYVERBOSE
    if isinstance(path_, (list, tuple)):
        path_ = join(*path_)
    if HAVE_PATHLIB and isinstance(path_, pathlib.Path):
        path_ = str(path_)
    if not checkpath(path_, verbose=verbose, info=info):
        if verbose:
            print('[util_path] mkdir(%r)' % path_)
        try:
            os.makedirs(normpath(path_), mode=mode)
        except OSError as ex:
            util_dbg.printex(ex, 'check that the longest existing path '
                             'is not a bad windows symlink.', keys=['path_'])
            raise
    return path_
866,061
r""" Creates file if it doesnt exist Args: fpath (str): file path times (None): verbose (bool): Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> fpath = '?' >>> times = None >>> verbose = True >>> result = touch(fpath, times, verbose) >>> print(result) References: http://stackoverflow.com/questions/1158076/implement-touch-using-python
def touch(fpath, times=None, verbose=True):
    try:
        if verbose:
            print('[util_path] touching %r' % fpath)
        with open(fpath, 'a'):
            os.utime(fpath, times)
    except Exception as ex:
        import utool
        utool.printex(ex, 'touch %s' % fpath)
        raise
    return fpath
866,062
r""" Args: src (str): file or directory to copy dst (str): directory or new file to copy to Copies src file or folder to dst. If src is a folder this copy is recursive.
def copy_single(src, dst, overwrite=True, verbose=True, deeplink=True,
                dryrun=False):
    try:
        if exists(src):
            if not isdir(src) and isdir(dst):
                # copying file to directory
                dst = join(dst, basename(src))
            if exists(dst):
                if overwrite:
                    prefix = 'C+O'
                    if verbose:
                        print('[util_path] [Copying + Overwrite]:')
                else:
                    prefix = 'Skip'
                    if verbose:
                        print('[%s] ->%s' % (prefix, dst))
                    return
            else:
                prefix = 'C'
                if verbose:
                    if dryrun:
                        print('[util_path] [DryRun]: ')
                    else:
                        print('[util_path] [Copying]: ')
            if verbose:
                print('[%s] | %s' % (prefix, src))
                print('[%s] ->%s' % (prefix, dst))
            if not dryrun:
                if not deeplink and islink(src):
                    linkto = os.readlink(src)
                    symlink(linkto, dst)
                elif isdir(src):
                    print('isdir')
                    shutil.copytree(src, dst)
                else:
                    shutil.copy2(src, dst)
        else:
            prefix = 'Miss'
            if verbose:
                print('[util_path] [Cannot Copy]: ')
                print('[%s] src=%s does not exist!' % (prefix, src))
                print('[%s] dst=%s' % (prefix, dst))
    except Exception as ex:
        from utool import util_dbg
        util_dbg.printex(ex, 'Error copying single', keys=['src', 'dst'])
        raise
866,066
r""" ensure_crossplat_path Args: path (str): Returns: str: crossplat_path Example(DOCTEST): >>> # ENABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> path = r'C:\somedir' >>> cplat_path = ensure_crossplat_path(path) >>> result = cplat_path >>> print(result) C:/somedir
def ensure_crossplat_path(path, winroot='C:'):
    cplat_path = path.replace('\\', '/')
    if cplat_path == winroot:
        cplat_path += '/'
    return cplat_path
866,079
Returns the importable name from a file path

Args:
    module_fpath (str): module filepath

Returns:
    str: modname

Example:
    >>> # ENABLE_DOCTEST
    >>> from utool.util_path import *  # NOQA
    >>> import utool as ut
    >>> module_fpath = ut.util_path.__file__
    >>> modname = ut.get_modname_from_modpath(module_fpath)
    >>> result = modname
    >>> print(result)
    utool.util_path
def get_modname_from_modpath(module_fpath):
    modsubdir_list = get_module_subdir_list(module_fpath)
    modname = '.'.join(modsubdir_list)
    modname = modname.replace('.__init__', '').strip()
    modname = modname.replace('.__main__', '').strip()
    return modname
866,082
get_module_subdir_list

Args:
    module_fpath (str):

Example:
    >>> # ENABLE_DOCTEST
    >>> from utool.util_path import *  # NOQA
    >>> import utool as ut
    >>> module_fpath = ut.util_path.__file__
    >>> modsubdir_list = get_module_subdir_list(module_fpath)
    >>> result = modsubdir_list
    >>> print(result)
    ['utool', 'util_path']
def get_module_subdir_list(module_fpath):
    module_fpath = truepath(module_fpath)
    dpath, fname_ext = split(module_fpath)
    fname, ext = splitext(fname_ext)
    _modsubdir_list = [fname]
    while is_module_dir(dpath):
        dpath, dname = split(dpath)
        _modsubdir_list.append(dname)
    modsubdir_list = _modsubdir_list[::-1]
    return modsubdir_list
866,083
Python implementation of sed. NOT FINISHED

Searches and replaces text in files

Args:
    regexpr (str): regex pattern to find
    repl (str): text to replace
    force (bool):
    recursive (bool):
    dpath_list (list): directories to search (defaults to cwd)
def sed(regexpr, repl, force=False, recursive=False, dpath_list=None,
        fpath_list=None, verbose=None, include_patterns=None,
        exclude_patterns=[]):
    # imported at the top so ut.NOT_QUIET below does not raise NameError
    import utool as ut
    if include_patterns is None:
        include_patterns = ['*.py', '*.pyx', '*.pxi', '*.cxx', '*.cpp',
                            '*.hxx', '*.hpp', '*.c', '*.h', '*.html', '*.tex']
    if dpath_list is None:
        dpath_list = [os.getcwd()]
    if verbose is None:
        verbose = ut.NOT_QUIET
    if fpath_list is None:
        greater_exclude_dirs = get_standard_exclude_dnames()
        exclude_dirs = []
        fpath_generator = matching_fpaths(
            dpath_list, include_patterns, exclude_dirs,
            greater_exclude_dirs=greater_exclude_dirs,
            recursive=recursive, exclude_patterns=exclude_patterns)
    else:
        fpath_generator = fpath_list
    if verbose:
        print('sed-ing %r' % (dpath_list,))
        print(' * regular expression : %r' % (regexpr,))
        print(' * replacement        : %r' % (repl,))
        print(' * include_patterns   : %r' % (include_patterns,))
        print(' * recursive: %r' % (recursive,))
        print(' * force: %r' % (force,))
        from utool import util_str
        print(' * fpath_list: %s' % (util_str.repr3(fpath_list),))
    regexpr = extend_regex(regexpr)
    # Walk through each directory recursively
    num_changed = 0
    num_files_checked = 0
    fpaths_changed = []
    for fpath in fpath_generator:
        num_files_checked += 1
        changed_lines = sedfile(fpath, regexpr, repl, force, verbose=verbose)
        if changed_lines is not None:
            fpaths_changed.append(fpath)
            num_changed += len(changed_lines)
    print('num_files_checked = %r' % (num_files_checked,))
    print('fpaths_changed = %s' % (ut.repr3(sorted(fpaths_changed)),))
    print('total lines changed = %r' % (num_changed,))
866,091
r""" replaces windows drives with mingw style drives Args: win32_path (str): CommandLine: python -m utool.util_path --test-ensure_mingw_drive Example: >>> # DISABLE_DOCTEST >>> from utool.util_path import * # NOQA >>> win32_path = r'C:/Program Files/Foobar' >>> result = ensure_mingw_drive(win32_path) >>> print(result) /c/Program Files/Foobar
def ensure_mingw_drive(win32_path):
    win32_drive, _path = splitdrive(win32_path)
    mingw_drive = '/' + win32_drive[:-1].lower()
    mingw_path = mingw_drive + _path
    return mingw_path
866,103
r""" Args: short (bool): (default = False) Returns: str: Example: >>> # ENABLE_DOCTEST >>> from utool.util_cplat import * # NOQA >>> short = False >>> result = python_executable(short) >>> print(result)
def python_executable(check=True, short=False):
    if not check:
        python_exe = 'python'
    else:
        from os.path import isdir
        python_exe_long = unixpath(sys.executable)
        python_exe = python_exe_long
        if short:
            python_exe_short = basename(python_exe_long)
            found = search_env_paths(python_exe_short, key_list=['PATH'],
                                     verbose=False)
            found = [f for f in found if not isdir(f)]
            if len(found) > 0:
                if found[0] == python_exe_long:
                    # Safe to use the short name in this env
                    python_exe = python_exe_short
    return python_exe
866,158
Trying to clean up cmd Args: command (str): string command shell (bool): if True, process is run in shell detatch (bool): if True, process is run in background verbose (int): verbosity mode verbout (bool): if True, `command` writes to stdout in realtime. defaults to True iff verbose > 0 Returns: dict: info - information about command status
def cmd2(command, shell=False, detatch=False, verbose=False, verbout=None):
    import shlex
    if isinstance(command, (list, tuple)):
        raise ValueError('command tuple not supported yet')
    args = shlex.split(command, posix=not WIN32)
    if verbose is True:
        verbose = 2
    if verbout is None:
        verbout = verbose >= 1
    if verbose >= 2:
        print('+=== START CMD2 ===')
        print('Command:')
        print(command)
        if verbout:
            print('----')
            print('Stdout:')
    proc = subprocess.Popen(args, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, shell=shell,
                            universal_newlines=True)
    if detatch:
        info = {'proc': proc}
    else:
        write_fn = sys.stdout.write
        flush_fn = sys.stdout.flush
        logged_out = []
        for line in _run_process(proc):
            if len(line) > 0:
                if verbout:
                    write_fn(line)
                    flush_fn()
                logged_out.append(line)
        try:
            out = ''.join(logged_out)
        except UnicodeDecodeError:
            from utool import util_str  # NOQA
            logged_out = util_str.ensure_unicode_strlist(logged_out)
            out = ''.join(logged_out)
        (out_, err) = proc.communicate()
        ret = proc.wait()
        info = {
            'out': out,
            'err': err,
            'ret': ret,
        }
    if verbose >= 2:
        print('L___ END CMD2 ___')
    return info
866,174
Create Heroku Connect schema. Note: This function is only meant to be used for local development. In a production environment the schema will be created by Heroku Connect. Args: using (str): Alias for database connection. Returns: bool: ``True`` if the schema was created, ``False`` if the schema already exists.
def create_heroku_connect_schema(using=DEFAULT_DB_ALIAS):
    connection = connections[using]

    with connection.cursor() as cursor:
        cursor.execute(_SCHEMA_EXISTS_QUERY, [settings.HEROKU_CONNECT_SCHEMA])
        schema_exists = cursor.fetchone()[0]
        if schema_exists:
            return False

        cursor.execute("CREATE SCHEMA %s;", [AsIs(settings.HEROKU_CONNECT_SCHEMA)])

    with connection.schema_editor() as editor:
        for model in get_heroku_connect_models():
            editor.create_model(model)

        # Needs PostgreSQL and database superuser privileges (which is the case on Heroku):
        editor.execute('CREATE EXTENSION IF NOT EXISTS "hstore";')

        from heroku_connect.models import (TriggerLog, TriggerLogArchive)
        for cls in [TriggerLog, TriggerLogArchive]:
            editor.create_model(cls)
    return True
866,219
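A short usage sketch for local development (the import path is an assumption; adjust to wherever the function lives in this package):

    # e.g. from a Django shell, after configuring HEROKU_CONNECT_SCHEMA
    if create_heroku_connect_schema():
        print('Heroku Connect schema created.')
    else:
        print('Schema already exists; nothing to do.')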
Import Heroku Connect mapping for the given connection.

Args:
    connection_id (str): Heroku Connect connection ID.
    mapping (dict): Heroku Connect mapping.

Raises:
    requests.HTTPError: If an error occurs uploading the mapping.
    ValueError: If the mapping is not JSON serializable.
def import_mapping(connection_id, mapping):
    url = os.path.join(settings.HEROKU_CONNECT_API_ENDPOINT,
                       'connections', connection_id, 'actions', 'import')
    response = requests.post(
        url=url,
        json=mapping,
        headers=_get_authorization_headers()
    )
    response.raise_for_status()
866,222
SHOULD TURN ANY REGISTERED ARGS INTO A NEW PARSING CONFIG FILE FOR BETTER --help COMMANDS

import utool as ut
__REGISTERED_ARGS__ = ut.util_arg.__REGISTERED_ARGS__

Args:
    extra_args (list): (default = [])

CommandLine:
    python -m utool.util_arg --test-autogen_argparse_block

Example:
    >>> # DISABLE_DOCTEST
    >>> import utool as ut
    >>> extra_args = []
    >>> result = ut.autogen_argparse_block(extra_args)
    >>> print(result)
def autogen_argparse_block(extra_args=[]):
    # TODO FINISHME
    grouped_args = []
    # Group similar args
    for argtup in __REGISTERED_ARGS__:
        argstr_list, type_, default, help_ = argtup
        argstr_set = set(argstr_list)
        # <MULTIKEY_SETATTR>
        # hack in multikey setattr n**2 yuck
        found = False
        for index, (keyset, vals) in enumerate(grouped_args):
            if len(keyset.intersection(argstr_set)) > 0:
                # update
                keyset.update(argstr_set)
                vals.append(argtup)
                found = True
                break
        if not found:
            new_keyset = argstr_set
            new_vals = [argtup]
            grouped_args.append((new_keyset, new_vals))
        # </MULTIKEY_SETATTR>
    # DEBUG
    multi_groups = []
    for keyset, vals in grouped_args:
        if len(vals) > 1:
            multi_groups.append(vals)
    if len(multi_groups) > 0:
        import utool as ut
        print('Following arg was specified multiple times')
        print(ut.repr4(multi_groups, newlines=2))
866,364
Plot the distribution of a real-valued feature conditioned by the target. Examples: `plot_real_feature(X, 'emb_mean_euclidean')` Args: df: Pandas dataframe containing the target column (named 'target'). feature_name: The name of the feature to plot. bins: The number of histogram bins for the distribution plot. figsize: The size of the plotted figure.
def plot_real_feature(df, feature_name, bins=50, figsize=(15, 15)):
    ix_negative_target = df[df.target == 0].index
    ix_positive_target = df[df.target == 1].index

    plt.figure(figsize=figsize)
    ax_overall_dist = plt.subplot2grid((3, 2), (0, 0), colspan=2)
    ax_target_conditional_dist = plt.subplot2grid((3, 2), (1, 0), colspan=2)
    ax_boxplot = plt.subplot2grid((3, 2), (2, 0))
    ax_violin_plot = plt.subplot2grid((3, 2), (2, 1))

    ax_overall_dist.set_title('Distribution of {}'.format(feature_name), fontsize=16)
    sns.distplot(
        df[feature_name],
        bins=bins,
        ax=ax_overall_dist
    )

    sns.distplot(
        df.loc[ix_positive_target][feature_name],
        bins=bins,
        ax=ax_target_conditional_dist,
        label='Positive Target'
    )
    sns.distplot(
        df.loc[ix_negative_target][feature_name],
        bins=bins,
        ax=ax_target_conditional_dist,
        label='Negative Target'
    )
    ax_target_conditional_dist.legend(loc='upper right', prop={'size': 14})

    sns.boxplot(
        y=feature_name,
        x='target',
        data=df,
        ax=ax_boxplot
    )
    sns.violinplot(
        y=feature_name,
        x='target',
        data=df,
        ax=ax_violin_plot
    )
    plt.show()
866,398
Plot a scatterplot of two features against one another, and calculate the Pearson correlation coefficient.

Examples:
    `plot_pair(X, 'emb_mean_euclidean', 'emb_mean_cosine')`

Args:
    df: Pandas dataframe containing the features.
    feature_name_1: The name of the first feature.
    feature_name_2: The name of the second feature.
    kind: One of the values { 'scatter' | 'reg' | 'resid' | 'kde' | 'hex' }.
    alpha: Alpha channel value.
    **kwargs: Additional arguments for 'sns.jointplot'.
def plot_pair(df, feature_name_1, feature_name_2, kind='scatter', alpha=0.01,
              **kwargs):
    plt.figure()
    sns.jointplot(
        feature_name_1,
        feature_name_2,
        df,
        alpha=alpha,
        kind=kind,
        **kwargs
    )
    plt.show()
866,399
Plot a correlation heatmap between every feature pair. Args: df: Pandas dataframe containing the target column (named 'target'). features: The list of features to include in the correlation plot. font_size: Font size for heatmap cells and axis labels. figsize: The size of the plot. save_filename: (Optional) The path of the file to save a high-res version of the plot to.
def plot_feature_correlation_heatmap(df, features, font_size=9,
                                     figsize=(15, 15), save_filename=None):
    features = features[:]
    features += ['target']

    mcorr = df[features].corr()
    mask = np.zeros_like(mcorr, dtype=bool)  # np.bool is removed in recent NumPy
    mask[np.triu_indices_from(mask)] = True
    cmap = sns.diverging_palette(220, 10, as_cmap=True)

    fig = plt.figure(figsize=figsize)
    heatmap = sns.heatmap(
        mcorr,
        mask=mask,
        cmap=cmap,
        square=True,
        annot=True,
        fmt='0.2f',
        annot_kws={'size': font_size},
    )
    heatmap.tick_params(axis='both', which='major', labelsize=font_size)
    heatmap.tick_params(axis='both', which='minor', labelsize=font_size)
    heatmap.set_xticklabels(features, rotation=90)
    heatmap.set_yticklabels(reversed(features))
    plt.show()

    if save_filename is not None:
        fig.savefig(save_filename, dpi=300)
866,400
Plot a scatterplot matrix for a list of features, colored by target value. Example: `scatterplot_matrix(X, X.columns.tolist(), downsample_frac=0.01)` Args: df: Pandas dataframe containing the target column (named 'target'). features: The list of features to include in the correlation plot. downsample_frac: Dataframe downsampling rate (0.1 to include 10% of the dataset). figsize: The size of the plot.
def scatterplot_matrix(df, features, downsample_frac=None, figsize=(15, 15)):
    if downsample_frac:
        df = df.sample(frac=downsample_frac)

    plt.figure(figsize=figsize)
    sns.pairplot(df[features], hue='target')
    plt.show()
866,401
r""" Inspects members of a class Args: obj (class or module): CommandLine: python -m utool.util_inspect help_members Example: >>> # ENABLE_DOCTEST >>> from utool.util_inspect import * # NOQA >>> import utool as ut >>> obj = ut.DynStruct >>> result = help_members(obj) >>> print(result)
def help_members(obj, use_other=False):
    import utool as ut
    attrnames = dir(obj)
    attr_list = [getattr(obj, attrname) for attrname in attrnames]
    attr_types = ut.lmap(ut.type_str, map(type, attr_list))
    unique_types, groupxs = ut.group_indices(attr_types)
    type_to_items = ut.dzip(unique_types, ut.apply_grouping(attr_list, groupxs))
    type_to_itemname = ut.dzip(unique_types,
                               ut.apply_grouping(attrnames, groupxs))
    memtypes = ['instancemethod']  # , 'method-wrapper']
    func_mems = ut.dict_subset(type_to_items, memtypes, [])

    func_list = ut.flatten(func_mems.values())
    defsig_list = []
    num_unbound_args_list = []
    num_args_list = []
    for func in func_list:
        argspec = ut.get_func_argspec(func)
        args = argspec.args
        unbound_args = get_unbound_args(argspec)
        defsig = ut.func_defsig(func)
        defsig_list.append(defsig)
        num_unbound_args_list.append(len(unbound_args))
        num_args_list.append(len(args))

    group = ut.hierarchical_group_items(
        defsig_list, [num_unbound_args_list, num_args_list])
    print(repr(obj))
    print(ut.repr3(group, strvals=True))

    if use_other:
        other_mems = ut.delete_keys(type_to_items.copy(), memtypes)
        other_mems_attrnames = ut.dict_subset(type_to_itemname,
                                              other_mems.keys())
        named_other_attrs = ut.dict_union_combine(
            other_mems_attrnames, other_mems, lambda x, y: list(zip(x, y)))
        print(ut.repr4(named_other_attrs, nl=2, strvals=True))
866,551
list_class_funcnames

Args:
    fname (str): filepath
    blank_pats (list): defaults to ['    #']

Returns:
    list: funcname_list

Example:
    >>> # DISABLE_DOCTEST
    >>> from utool.util_inspect import *  # NOQA
    >>> fname = 'util_class.py'
    >>> blank_pats = ['    #']
    >>> funcname_list = list_class_funcnames(fname, blank_pats)
    >>> print(funcname_list)
def list_class_funcnames(fname, blank_pats=['    #']):
    with open(fname, 'r') as file_:
        lines = file_.readlines()
    funcname_list = []
    for lx, line in enumerate(lines):
        if any([line.startswith(pat) for pat in blank_pats]):
            funcname_list.append('')
        if line.startswith('    def '):
            def_x = line.find('def')
            rparen_x = line.find('(')
            funcname = line[(def_x + 3):rparen_x]
            funcname_list.append(funcname)
    return funcname_list
866,560
r""" Args: func (func): Returns: dict: CommandLine: python -m utool.util_inspect get_kwdefaults Example: >>> # ENABLE_DOCTEST >>> from utool.util_inspect import * # NOQA >>> import utool as ut >>> func = dummy_func >>> parse_source = True >>> kwdefaults = get_kwdefaults(func, parse_source) >>> print('kwdefaults = %s' % (ut.repr4(kwdefaults),))
def get_kwdefaults(func, parse_source=False):
    argspec = inspect.getargspec(func)
    kwdefaults = {}
    if argspec.args is None or argspec.defaults is None:
        pass
    else:
        args = argspec.args
        defaults = argspec.defaults
        kwpos = len(args) - len(defaults)
        kwdefaults = OrderedDict(zip(args[kwpos:], defaults))
    if parse_source and argspec.keywords:
        # TODO parse for kwargs.get/pop
        keyword_defaults = parse_func_kwarg_keys(func, with_vals=True)
        for key, val in keyword_defaults:
            assert key not in kwdefaults, 'parsing error'
            kwdefaults[key] = val
    return kwdefaults
866,564
Rebinds all class methods

Args:
    self (object): class instance to reload
    class_ (type): type to reload as

Example:
    >>> # DISABLE_DOCTEST
    >>> from utool.util_class import *  # NOQA
    >>> self = '?'
    >>> class_ = '?'
    >>> result = reload_class_methods(self, class_)
    >>> print(result)
def reload_class_methods(self, class_, verbose=True):
    if verbose:
        print('[util_class] Reloading self=%r as class_=%r' % (self, class_))
    self.__class__ = class_
    for key in dir(class_):
        # Get unbound reloaded method
        func = getattr(class_, key)
        if isinstance(func, types.MethodType):
            # inject it into the old instance
            inject_func_as_method(self, func, class_=class_,
                                  allow_override=True, verbose=verbose)
866,668
Wait until the connection state is no longer ``IMPORT_CONFIGURATION``.

Args:
    connection_id (str): Heroku Connect connection to monitor.
    wait_interval (int): How frequently to poll in seconds.

Raises:
    CommandError: If fetching connection information fails.
def wait_for_import(self, connection_id, wait_interval):
    self.stdout.write(self.style.NOTICE('Waiting for import'), ending='')
    state = utils.ConnectionStates.IMPORT_CONFIGURATION
    while state == utils.ConnectionStates.IMPORT_CONFIGURATION:
        # before you get the first state, the API can be a bit behind
        self.stdout.write(self.style.NOTICE('.'), ending='')
        time.sleep(wait_interval)  # take a breath
        try:
            connection = utils.get_connection(connection_id)
        except requests.HTTPError as e:
            raise CommandError("Failed to fetch connection information.") from e
        else:
            state = connection['state']
    self.stdout.write(self.style.NOTICE(' Done!'))
866,939
Get a QuerySet for all trigger log objects for the same connected model. Args: exclude_self (bool): Whether to exclude this log object from the result list
def related(self, *, exclude_self=False):
    manager = type(self)._default_manager
    queryset = manager.related_to(self)
    if exclude_self:
        queryset = queryset.exclude(id=self.id)
    return queryset
867,052
Make a new, non-archived :class:`.TriggerLog` instance with duplicate data. Args: **kwargs: Set as attributes of the new instance, overriding what would otherwise be copied from ``self``. Returns: The new (unpersisted) :class:`TriggerLog` instance.
def _to_live_trigger_log(self, **kwargs):
    field_names = (field.name for field in TriggerLogAbstract._meta.get_fields())
    attributes = {name: getattr(self, name) for name in field_names}
    del attributes['id']  # this is a completely new log, it should get its own id on save
    attributes.update(kwargs)
    return TriggerLog(**attributes)
867,057
Allocates more space if need be. Ensures ``len(list_) >= size_`` by padding
with None; never truncates. A quick illustration follows the function.

Args:
    list_ (list): ``list`` to extend
    size_ (int): target minimum length
def ensure_list_size(list_, size_):
    lendiff = (size_) - len(list_)
    if lendiff > 0:
        extension = [None for _ in range(lendiff)]
        list_.extend(extension)
867,158
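A quick illustration of the padding behavior:

    lst = [1]
    ensure_list_size(lst, 4)
    assert lst == [1, None, None, None]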
Rebuilds unflat list from invertible_flatten1

Args:
    flat_list (list): the flattened list
    reverse_list (list): the list which undoes flattening

Returns:
    unflat_list2: original nested list

SeeAlso:
    invertible_flatten1
    invertible_flatten2
    unflatten2
def unflatten1(flat_list, reverse_list):
    unflat_list2 = [[flat_list[index] for index in tup] for tup in reverse_list]
    return unflat_list2
867,166
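A quick illustration, where reverse_list holds the index groups produced by the flattening step:

    assert unflatten1([1, 2, 3, 4], [[0, 1], [2], [3]]) == [[1, 2], [3], [4]]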
checks to see if list is equal everywhere Args: list_ (list): Returns: True if all items in the list are equal
def allsame(list_, strict=True):
    if len(list_) == 0:
        return True
    first_item = list_[0]
    return list_all_eq_to(list_, first_item, strict)
867,176
checks to see if list is equal everywhere to a value Args: list_ (list): val : value to check against Returns: True if all items in the list are equal to val
def list_all_eq_to(list_, val, strict=True):
    if util_type.HAVE_NUMPY and isinstance(val, np.ndarray):
        return all([np.all(item == val) for item in list_])
    try:
        # FutureWarning: comparison to `None` will result in an elementwise
        # object comparison in the future.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', category=FutureWarning)
            flags = [item == val for item in list_]
            return all([np.all(flag) if hasattr(flag, '__array__') else flag
                        for flag in flags])
    except ValueError:
        if not strict:
            return all([repr(item) == repr(val) for item in list_])
        else:
            raise
867,177
Returns each item in item_list whose corresponding flag in flag_list is False

Args:
    item_list (list):
    flag_list (list):

Returns:
    dirty_items
def get_dirty_items(item_list, flag_list):
    assert len(item_list) == len(flag_list)
    dirty_items = [item for (item, flag) in zip(item_list, flag_list)
                   if not flag]
    return dirty_items
867,178
like np.compress but for lists Returns items in item list where the corresponding item in flag list is True Args: item_list (list): list of items to mask flag_list (list): list of booleans used as a mask Returns: list : filtered_items - masked items
def compress(item_list, flag_list):
    assert len(item_list) == len(flag_list), (
        'lists should correspond. len(item_list)=%r len(flag_list)=%r' %
        (len(item_list), len(flag_list)))
    filtered_items = list(util_iter.iter_compress(item_list, flag_list))
    return filtered_items
867,179
Returns items in item_list where the corresponding flag in flag_list is false

Args:
    item_list (list): list of items
    flag_list (list): list of truthy values

Returns:
    filtered_items : items where the corresponding flag was falsey

SeeAlso:
    util_iter.ifilterfalse_items
def filterfalse_items(item_list, flag_list):
    assert len(item_list) == len(flag_list)
    filtered_items = list(util_iter.ifilterfalse_items(item_list, flag_list))
    return filtered_items
867,183
Returns a list of flags corresponding to the first time an item is seen Args: list_ (list): list of items Returns: flag_iter
def iflag_unique_items(list_):
    seen = set()

    def unseen(item):
        if item in seen:
            return False
        seen.add(item)
        return True

    flag_iter = (unseen(item) for item in list_)
    return flag_iter
867,191
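An illustration of the flag semantics, where only the first occurrence of each item is flagged True:

    assert list(iflag_unique_items([4, 4, 5, 4, 6])) == [True, False, True, False, True]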
Like np.argsort but for lists

Args:
    *args: multiple lists to sort by
    **kwargs:
        reverse (bool): sort order is descending if True else ascending

CommandLine:
    python -m utool.util_list argsort

Example:
    >>> # DISABLE_DOCTEST
    >>> from utool.util_list import *  # NOQA
    >>> result = ut.argsort({'a': 3, 'b': 2, 'c': 100})
    >>> print(result)
def argsort(*args, **kwargs):
    if len(args) == 1 and isinstance(args[0], dict):
        dict_ = args[0]
        index_list = list(dict_.keys())
        value_list = list(dict_.values())
        return sortedby2(index_list, value_list)
    else:
        index_list = list(range(len(args[0])))
        return sortedby2(index_list, *args, **kwargs)
867,198
Returns index / key of the item with the smallest value. Args: input_ (dict or list): Note: a[argmin(a, key=key)] == min(a, key=key)
def argmin(input_, key=None):
    # note: the original body referenced the builtin `input` instead of the
    # `input_` argument, which made the parameter dead; fixed here
    if isinstance(input_, dict):
        return list(input_.keys())[argmin(list(input_.values()), key=key)]
    else:
        if key is None:
            def _key(item):
                return item[1]
        else:
            def _key(item):
                return key(item[1])
        return min(enumerate(input_), key=_key)[0]
867,201
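A quick sanity check of the corrected behavior on both supported input types:

    assert argmin([3, 1, 2]) == 1                     # index of the smallest item
    assert argmin({'a': 3, 'b': 2, 'c': 100}) == 'b'  # key of the smallest value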
Map like function Args: func: function to apply input_ : either an iterable or scalar value Returns: If ``input_`` is iterable this function behaves like map otherwise applies func to ``input_``
def scalar_input_map(func, input_):
    if util_iter.isiterable(input_):
        return list(map(func, input_))
    else:
        return func(input_)
867,213
Determines if a list is sorted Args: list_ (list): op (func): sorted operation (default=operator.le) Returns: bool : True if the list is sorted
def issorted(list_, op=operator.le):
    return all(op(list_[ix], list_[ix + 1]) for ix in range(len(list_) - 1))
867,218
r""" Args: items (list): Returns: dict: duplicate_map of indexes CommandLine: python -m utool.util_list --test-find_duplicate_items Example: >>> # DISABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> items = [0, 1, 2, 3, 3, 0, 12, 2, 9] >>> duplicate_map = find_duplicate_items(items) >>> result = str(duplicate_map) >>> print(result)
def find_duplicate_items(items, k=2):
    import utool as ut
    # Build item histogram
    duplicate_map = ut.ddict(list)
    for count, item in enumerate(items):
        duplicate_map[item].append(count)
    # remove singleton items
    singleton_keys = []
    for key in six.iterkeys(duplicate_map):
        if len(duplicate_map[key]) == 1:
            singleton_keys.append(key)
    for key in singleton_keys:
        del duplicate_map[key]
    duplicate_map = dict(duplicate_map)
    return duplicate_map
867,223
Returns the deepest level of nesting within a list of lists

Args:
    list_: a nested listlike object
    func: depth aggregation strategy (defaults to max)
    _depth: internal var

Example:
    >>> # ENABLE_DOCTEST
    >>> from utool.util_list import *  # NOQA
    >>> list_ = [[[[[1]]], [3]], [[1], [3]], [[1], [3]]]
    >>> result = (list_depth(list_, _depth=0))
    >>> print(result)
def list_depth(list_, func=max, _depth=0):
    depth_list = [list_depth(item, func=func, _depth=_depth + 1)
                  for item in list_ if util_type.is_listlike(item)]
    if len(depth_list) > 0:
        return func(depth_list)
    else:
        return _depth
867,226
r""" Args: list_ (list): Returns: list: argmaxima CommandLine: python -m utool.util_list --exec-list_argmaxima Example: >>> # ENABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> list_ = np.array([1, 2, 3, 3, 3, 2, 1]) >>> argmaxima = list_argmaxima(list_) >>> result = ('argmaxima = %s' % (str(argmaxima),)) >>> print(result) argmaxima = [2 3 4]
def list_argmaxima(list_):
    argmax = list_argmax(list_)
    maxval = list_[argmax]
    argmaxima = np.where(np.isclose(maxval, list_))[0]
    return argmaxima
867,237
r""" Args: data (?): Returns: ?: CommandLine: python -m utool.util_hash _covert_to_hashable Example: >>> # DISABLE_DOCTEST >>> from utool.util_hash import * # NOQA >>> from utool.util_hash import _covert_to_hashable # NOQA >>> import utool as ut >>> data = np.array([1], dtype=np.int64) >>> result = _covert_to_hashable(data) >>> print(result)
def _covert_to_hashable(data):
    if isinstance(data, six.binary_type):
        hashable = data
        prefix = b'TXT'
    elif util_type.HAVE_NUMPY and isinstance(data, np.ndarray):
        if data.dtype.kind == 'O':
            msg = '[ut] hashing ndarrays with dtype=object is unstable'
            warnings.warn(msg, RuntimeWarning)
            hashable = data.dumps()
        else:
            hashable = data.tobytes()
        prefix = b'NDARR'
    elif isinstance(data, six.text_type):
        # convert unicode into bytes
        hashable = data.encode('utf-8')
        prefix = b'TXT'
    elif isinstance(data, uuid.UUID):
        hashable = data.bytes
        prefix = b'UUID'
    elif isinstance(data, int):
        hashable = _int_to_bytes(data)
        prefix = b'INT'
    elif util_type.HAVE_NUMPY and isinstance(data, np.int64):
        return _covert_to_hashable(int(data))
    elif util_type.HAVE_NUMPY and isinstance(data, np.float64):
        a, b = float(data).as_integer_ratio()
        hashable = (a.to_bytes(8, byteorder='big') +
                    b.to_bytes(8, byteorder='big'))
        prefix = b'FLOAT'
    else:
        raise TypeError('unknown hashable type=%r' % (type(data)))
    prefix = b''  # note: the type prefixes above are currently discarded
    return prefix, hashable
867,263
r""" Args: num (scalar): low (scalar): high (scalar): msg (str):
def assert_inbounds(num, low, high, msg='', eq=False,
                    verbose=not util_arg.QUIET):
    from utool import util_str
    if util_arg.NO_ASSERTS:
        return
    passed = util_alg.inbounds(num, low, high, eq=eq)
    if isinstance(passed, np.ndarray):
        passflag = np.all(passed)
    else:
        passflag = passed
    if not passflag:
        failednum = num.compress(~passed) if isinstance(num, np.ndarray) else num
        failedlow = low.compress(~passed) if isinstance(low, np.ndarray) else low
        failedhigh = high.compress(~passed) if isinstance(high, np.ndarray) else high
        msg_ = 'num=%r is out of bounds=(%r, %r)' % (failednum, failedlow, failedhigh)
        raise AssertionError(msg_ + '\n' + msg)
    else:
        if verbose:
            op = '<=' if eq else '<'
            fmtstr = 'Passed assert_inbounds: {low} {op} {num} {op} {high}'
            print(fmtstr.format(low=low, op=op,
                                num=util_str.truncate_str(str(num)),
                                high=high))
867,513
r""" Args: arr_test (ndarray or list): arr_target (ndarray or list): thresh (scalar or ndarray or list):
def assert_almost_eq(arr_test, arr_target, thresh=1E-11):
    if util_arg.NO_ASSERTS:
        return
    import utool as ut
    arr1 = np.array(arr_test)
    arr2 = np.array(arr_target)
    passed, error = ut.almost_eq(arr1, arr2, thresh, ret_error=True)
    if not np.all(passed):
        failed_xs = np.where(np.logical_not(passed))
        failed_error = error.take(failed_xs)
        failed_arr_test = arr1.take(failed_xs)
        failed_arr_target = arr2.take(failed_xs)
        msg_list = [
            'FAILED ASSERT ALMOST EQUAL',
            ' * failed_xs = %r' % (failed_xs,),
            ' * failed_error = %r' % (failed_error,),
            ' * failed_arr_test = %r' % (failed_arr_test,),
            ' * failed_arr_target = %r' % (failed_arr_target,),
        ]
        msg = '\n'.join(msg_list)
        raise AssertionError(msg)
    return error
867,514
r""" Args: arr_test (ndarray or list): arr_target (ndarray or list): thresh (scalar or ndarray or list):
def assert_lessthan(arr_test, arr_max, msg=''):
    if util_arg.NO_ASSERTS:
        return
    arr1 = np.array(arr_test)
    arr2 = np.array(arr_max)
    error = arr_max - arr_test
    passed = error >= 0
    if not np.all(passed):
        failed_xs = np.where(np.logical_not(passed))
        failed_error = error.take(failed_xs)
        failed_arr_test = arr1.take(failed_xs)
        failed_arr_target = arr2.take(failed_xs)
        msg_list = [
            'FAILED ASSERT LESSTHAN',
            msg,
            ' * failed_xs = %r' % (failed_xs,),
            ' * failed_error = %r' % (failed_error,),
            ' * failed_arr_test = %r' % (failed_arr_test,),
            ' * failed_arr_target = %r' % (failed_arr_target,),
        ]
        msg = '\n'.join(msg_list)
        raise AssertionError(msg)
    return error
867,515
r""" Removes template comments and vim sentinals Args: code_text (str): Returns: str: code_text_
def remove_codeblock_syntax_sentinals(code_text):
    flags = re.MULTILINE | re.DOTALL
    code_text_ = code_text
    code_text_ = re.sub(r'^ *# *REM [^\n]*$\n?', '', code_text_, flags=flags)
    code_text_ = re.sub(r'^ *# STARTBLOCK *$\n', '', code_text_, flags=flags)
    code_text_ = re.sub(r'^ *# ENDBLOCK *$\n?', '', code_text_, flags=flags)
    code_text_ = code_text_.rstrip()
    return code_text_
867,588
Upper diagonal of the cartesian product of self and self. Weird name. fixme

Args:
    list_ (list):

Returns:
    list:

CommandLine:
    python -m utool.util_alg --exec-upper_diag_self_prodx

Example:
    >>> # ENABLE_DOCTEST
    >>> from utool.util_alg import *  # NOQA
    >>> list_ = [1, 2, 3]
    >>> result = upper_diag_self_prodx(list_)
    >>> print(result)
    [(1, 2), (1, 3), (2, 3)]
def upper_diag_self_prodx(list_):
    return [(item1, item2)
            for n1, item1 in enumerate(list_)
            for n2, item2 in enumerate(list_)
            if n1 < n2]
867,671
r""" Args: num (float): References: stackoverflow.com/questions/6189956/finding-decimal-places Example: >>> # ENABLE_DOCTEST >>> from utool.util_alg import * # NOQA >>> num = 15.05 >>> result = number_of_decimals(num) >>> print(result) 2
def number_of_decimals(num):
    exp = decimal.Decimal(str(num)).as_tuple().exponent
    return max(0, -exp)
867,693
Import a module, or a typename within a module, from its name.

Arguments:
    name: An absolute or relative (starts with a .) Python path
    path: If name is relative, path is prepended to it.
    base_path: (DEPRECATED) Same as path
    typename: (DEPRECATED) Same as name
def import_symbol(name=None, path=None, typename=None, base_path=None):
    _, symbol = _import(name or typename, path or base_path)
    return symbol
867,770
r""" returns a copy of dict_ without keys in the negative_keys list Args: dict_ (dict): negative_keys (list):
def dict_setdiff(dict_, negative_keys):
    keys = [key for key in six.iterkeys(dict_)
            if key not in set(negative_keys)]
    subdict_ = dict_subset(dict_, keys)
    return subdict_
867,803
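For example, assuming dict_subset preserves the remaining items as a mapping:

    assert dict_setdiff({'a': 1, 'b': 2, 'c': 3}, ['b']) == {'a': 1, 'c': 3}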
dictinfo

In depth debugging info

Args:
    dict_ (dict):

Returns:
    str

Example:
    >>> # DISABLE_DOCTEST
    >>> from utool.util_dict import *  # NOQA
    >>> dict_ = {}
    >>> result = dictinfo(dict_)
    >>> print(result)
def dictinfo(dict_): import utool as ut if not isinstance(dict_, dict): return 'expected dict got %r' % type(dict_) keys = list(dict_.keys()) vals = list(dict_.values()) num_keys = len(keys) key_types = list(set(map(type, keys))) val_types = list(set(map(type, vals))) fmtstr_ = '\n' + ut.unindent(.strip('\n')) if len(val_types) == 1: if val_types[0] == np.ndarray: # each key holds an ndarray val_shape_stats = ut.get_stats(set(map(np.shape, vals)), axis=0) val_shape_stats_str = ut.repr4(val_shape_stats, strvals=True, newlines=False) val_dtypes = list(set([val.dtype for val in vals])) fmtstr_ += ut.unindent(.strip('\n')) elif val_types[0] == list: # each key holds a list val_len_stats = ut.get_stats(set(map(len, vals))) val_len_stats_str = ut.repr4(val_len_stats, strvals=True, newlines=False) depth = ut.list_depth(vals) deep_val_types = list(set(ut.list_deep_types(vals))) fmtstr_ += ut.unindent(.strip('\n')) if len(deep_val_types) == 1: if deep_val_types[0] == np.ndarray: deep_val_dtypes = list(set([val.dtype for val in vals])) fmtstr_ += ut.unindent().strip('\n') elif val_types[0] in [np.uint8, np.int8, np.int32, np.int64, np.float16, np.float32, np.float64]: # each key holds a scalar val_stats = ut.get_stats(vals) fmtstr_ += ut.unindent().strip('\n') fmtstr = fmtstr_.format(**locals()) return ut.indent(fmtstr)
867,814
Groups a list of items using the first element in each pair as the item and the second element as the groupid. Args: pair_list (list): list of 2-tuples (item, groupid) Returns: dict: groupid_to_items: maps a groupid to a list of items SeeAlso: group_items
def group_pairs(pair_list):
    # Initialize dict of lists
    groupid_to_items = defaultdict(list)
    # Insert each item into the correct group
    for item, groupid in pair_list:
        groupid_to_items[groupid].append(item)
    return groupid_to_items
867,829
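For example:

    assert group_pairs([('a', 1), ('b', 1), ('c', 2)]) == {1: ['a', 'b'], 2: ['c']}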
Load pickled features for train and test sets, assuming they are saved in the `features` folder along with their column names. Args: feature_lists: A list containing the names of the feature lists to load. Returns: A tuple containing 3 items: train dataframe, test dataframe, and a list describing the index ranges for the feature lists.
def load_feature_lists(self, feature_lists):
    column_names = []
    feature_ranges = []
    running_feature_count = 0

    for list_id in feature_lists:
        feature_list_names = load_lines(
            self.features_dir + 'X_train_{}.names'.format(list_id))
        column_names.extend(feature_list_names)

        start_index = running_feature_count
        end_index = running_feature_count + len(feature_list_names) - 1
        running_feature_count += len(feature_list_names)
        feature_ranges.append([list_id, start_index, end_index])

    X_train = np.hstack([
        load(self.features_dir + 'X_train_{}.pickle'.format(list_id))
        for list_id in feature_lists
    ])
    X_test = np.hstack([
        load(self.features_dir + 'X_test_{}.pickle'.format(list_id))
        for list_id in feature_lists
    ])

    df_train = pd.DataFrame(X_train, columns=column_names)
    df_test = pd.DataFrame(X_test, columns=column_names)

    return df_train, df_test, feature_ranges
867,873
Save features for the training and test sets to disk, along with their metadata. Args: train_features: A NumPy array of features for the training set. test_features: A NumPy array of features for the test set. feature_names: A list containing the names of the feature columns. feature_list_id: The name for this feature list.
def save_features(self, train_features, test_features, feature_names,
                  feature_list_id):
    self.save_feature_names(feature_names, feature_list_id)
    self.save_feature_list(train_features, 'train', feature_list_id)
    self.save_feature_list(test_features, 'test', feature_list_id)
867,874
Save the names of the features for the given feature list to a metadata file. Example: `save_feature_names(['num_employees', 'stock_price'], 'company')`. Args: feature_names: A list containing the names of the features, matching the column order. feature_list_id: The name for this feature list.
def save_feature_names(self, feature_names, feature_list_id):
    save_lines(feature_names,
               self.features_dir + 'X_train_{}.names'.format(feature_list_id))
867,875
Pickle the specified feature list to a file.

Example: `save_feature_list(X_tfidf_train, 'train', 'tfidf')`.

Args:
    obj: The object to pickle (e.g., a numpy array or a Pandas dataframe).
    set_id: The id of the subset (e.g., 'train' or 'test').
    feature_list_id: The name for this feature list.
def save_feature_list(self, obj, set_id, feature_list_id):
    save(obj, self.features_dir + 'X_{}_{}.pickle'.format(set_id, feature_list_id))
867,876
Returns the weights for each class based on the frequencies of the samples. Args: y: A list of true labels (the labels must be hashable). smooth_factor: A factor that smooths extremely uneven weights. Returns: A dictionary with the weight for each class.
def get_class_weights(y, smooth_factor=0):
    from collections import Counter
    counter = Counter(y)

    if smooth_factor > 0:
        p = max(counter.values()) * smooth_factor
        for k in counter.keys():
            counter[k] += p

    majority = max(counter.values())
    return {cls: float(majority / count) for cls, count in counter.items()}
867,984
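A short usage sketch (the model call is illustrative, not part of this module):

    y_train = [0, 0, 0, 0, 1, 1, 2]
    class_weights = get_class_weights(y_train, smooth_factor=0.1)
    # e.g. pass to a Keras-style trainer:
    # model.fit(X, y, class_weight=class_weights)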
Plots the learning history for a Keras model, assuming the validation data was provided to the 'fit' function. Args: history: The return value from the 'fit' function. figsize: The size of the plot.
def plot_loss_history(history, figsize=(15, 8)):
    plt.figure(figsize=figsize)
    plt.plot(history.history["loss"])
    plt.plot(history.history["val_loss"])
    plt.xlabel("# Epochs")
    plt.ylabel("Loss")
    plt.legend(["Training", "Validation"])
    plt.title("Loss over time")
    plt.show()
867,985
Retry calling the decorated function using an exponential backoff.

Args:
    exceptions: The exception to check. May be a tuple of exceptions to check.
    tries: Number of times to try (not retry) before giving up.
    delay: Initial delay between retries in seconds.
    backoff: Backoff multiplier (e.g. value of 2 will double the delay each retry).
    logger: Logger to use. If None, print.
def retry(exceptions, tries=5, delay=1, backoff=2, logger=None):
    def deco_retry(func):
        @wraps(func)
        async def f_retry(self, *args, **kwargs):
            if not iscoroutine(func):
                f = coroutine(func)
            else:
                f = func
            mtries, mdelay = tries, delay
            while mtries > 1:
                try:
                    return await f(self, *args, **kwargs)
                except exceptions:
                    if logger:
                        logger.info('Retrying %s after %s seconds',
                                    f.__name__, mdelay)
                    sleep(mdelay)
                    mtries -= 1
                    mdelay *= backoff
            return await f(self, *args, **kwargs)
        return f_retry
    return deco_retry
868,008
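A hedged usage sketch; note ``f_retry`` takes ``self``, so the decorator as written targets methods. ``aiohttp`` here is only an example dependency:

    import aiohttp

    class Client:
        @retry(aiohttp.ClientError, tries=3, delay=1, backoff=2)
        async def fetch(self, session, url):
            async with session.get(url) as resp:
                resp.raise_for_status()
                return await resp.text()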
r""" Args: start (int): (default = 0) step (int): (default = 1) Returns: func: next_ CommandLine: python -m utool.util_iter --test-next_counter Example: >>> # ENABLE_DOCTEST >>> from utool.util_iter import * # NOQA >>> start = 1 >>> step = 1 >>> next_ = next_counter(start, step) >>> result = str([next_(), next_(), next_()]) >>> print(result) [1, 2, 3]
def next_counter(start=0, step=1):
    count_gen = it.count(start, step)
    next_ = functools.partial(six.next, count_gen)
    return next_
868,024
iter_compress - like numpy compress

Args:
    item_iter (list):
    flag_iter (list): of bools

Returns:
    list: true_items

Example:
    >>> # ENABLE_DOCTEST
    >>> from utool.util_iter import *  # NOQA
    >>> item_iter = [1, 2, 3, 4, 5]
    >>> flag_iter = [False, True, True, False, True]
    >>> true_items = iter_compress(item_iter, flag_iter)
    >>> result = list(true_items)
    >>> print(result)
    [2, 3, 5]
def iter_compress(item_iter, flag_iter):
    # TODO: Just use it.compress
    true_items = (item for (item, flag) in zip(item_iter, flag_iter) if flag)
    return true_items
868,029
ifilterfalse_items

Args:
    item_iter (list):
    flag_iter (list): of bools

Example:
    >>> # ENABLE_DOCTEST
    >>> from utool.util_iter import *  # NOQA
    >>> item_iter = [1, 2, 3, 4, 5]
    >>> flag_iter = [False, True, True, False, True]
    >>> false_items = ifilterfalse_items(item_iter, flag_iter)
    >>> result = list(false_items)
    >>> print(result)
    [1, 4]
def ifilterfalse_items(item_iter, flag_iter):
    false_items = (item for (item, flag) in zip(item_iter, flag_iter) if not flag)
    return false_items
868,030
Yields `num` items from the cartesian product of items in a random order.

Args:
    items (list of sequences): items to get cartesian product of packed in a
        list or tuple. (note this deviates from api of it.product)

Example:
    import utool as ut
    items = [(1, 2, 3), (4, 5, 6, 7)]
    rng = 0
    list(ut.random_product(items, rng=0))
    list(ut.random_product(items, num=3, rng=0))
def random_product(items, num=None, rng=None):
    import utool as ut
    rng = ut.ensure_rng(rng, 'python')
    seen = set()
    items = [list(g) for g in items]
    max_num = ut.prod(map(len, items))
    if num is None:
        num = max_num
    if num > max_num:
        raise ValueError('num exceeds maximum number of products')
    # TODO: make this more efficient when num is large
    if num > max_num // 2:
        # yield only the first `num` of the shuffled products
        for prod in ut.shuffle(list(it.product(*items)), rng=rng)[:num]:
            yield prod
    else:
        while len(seen) < num:
            idxs = tuple(rng.randint(0, len(g) - 1) for g in items)
            if idxs not in seen:
                seen.add(idxs)
                prod = tuple(g[x] for g, x in zip(items, idxs))
                yield prod
868,039
removes all chars in char_list from str_

Args:
    str_ (str):
    char_list (list):

Returns:
    str: outstr

Example:
    >>> # ENABLE_DOCTEST
    >>> from utool.util_str import *  # NOQA
    >>> str_ = '1, 2, 3, 4'
    >>> char_list = [',']
    >>> result = remove_chars(str_, char_list)
    >>> print(result)
    1 2 3 4
def remove_chars(str_, char_list):
    outstr = str_[:]
    for char in char_list:
        outstr = outstr.replace(char, '')
    return outstr
868,204
r""" returns the number of preceding spaces Args: text (str): unicode text Returns: int: indentation CommandLine: python -m utool.util_str --exec-get_minimum_indentation --show Example: >>> # ENABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> import utool as ut >>> text = ' foo\n bar' >>> result = get_minimum_indentation(text) >>> print(result) 3
def get_minimum_indentation(text):
    lines = text.split('\n')
    indentations = [get_indentation(line_)
                    for line_ in lines if len(line_.strip()) > 0]
    if len(indentations) == 0:
        return 0
    return min(indentations)
868,205
r""" Convineince indentjoin similar to '\n '.join(strlist) but indent is also prefixed Args: strlist (?): indent (str): suffix (str): Returns: str: joined list
def indentjoin(strlist, indent='\n    ', suffix=''):
    # default indent restored to newline + four spaces (the literal's interior
    # whitespace was collapsed in extraction)
    indent_ = indent
    strlist = list(strlist)
    if len(strlist) == 0:
        return ''
    return indent_ + indent_.join([six.text_type(str_) + suffix
                                   for str_ in strlist])
868,208
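A short usage sketch for indentjoin (six.text_type is str on Python 3); the output in the comment follows directly from the implementation above.

body = indentjoin(['x = 1', 'y = 2'], indent='\n    ', suffix=';')
print('{' + body + '\n}')
# {
#     x = 1;
#     y = 2;
# }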
r""" Args: text (str): CommandLine: python -m utool.util_str --exec-pack_paragraph --show Example: >>> # DISABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> import utool as ut >>> width = 80 >>> text = lorium_ipsum() >>> result = packtext(text) >>> print(result)
def packtext(text, width=80):
    import utool as ut
    import textwrap
    new_text = '\n'.join(textwrap.wrap(text, width))
    new_text = ut.remove_doublspaces(new_text).strip()
    return new_text
868,212
String of function call signature

Args:
    func (function): live python function

Returns:
    str: callsig

CommandLine:
    python -m utool.util_str --exec-func_callsig

Example:
    >>> # ENABLE_DOCTEST
    >>> from utool.util_str import *  # NOQA
    >>> func = func_str
    >>> callsig = func_callsig(func)
    >>> result = str(callsig)
    >>> print(result)
    func_str(func, args, kwargs, type_aliases, packed, packkw, truncate)
def func_callsig(func, with_name=True):
    import inspect
    # NOTE: inspect.getargspec is removed in Python 3.11;
    # inspect.getfullargspec is the modern equivalent
    argspec = inspect.getargspec(func)
    (args, varargs, varkw, defaults) = argspec
    callsig = inspect.formatargspec(*argspec[0:3])
    if with_name:
        callsig = get_callable_name(func) + callsig
    return callsig
868,226
r""" Returns: list: a list of human-readable dictionary items Args: explicit : if True uses dict(key=val,...) format instead of {key:val,...}
def dict_itemstr_list(dict_, **dictkw):
    import utool as ut
    explicit = dictkw.get('explicit', False)
    dictkw['explicit'] = _rectify_countdown_or_bool(explicit)
    dosort = dictkw.get('sorted_', None)
    if dosort is None:
        dosort = True
    if dosort and not isinstance(dict_, collections.OrderedDict):
        key_order = dictkw.get('key_order', None)
        def iteritems(d):
            if key_order is None:
                # specify order explicitly
                try:
                    return iter(sorted(six.iteritems(d)))
                except TypeError:
                    # catches case where keys are of different types
                    return six.iteritems(d)
            else:
                # Enforce specific key order
                # TODO: deprecate and just use ordered dicts
                unordered_keys = list(d.keys())
                other_keys = sorted(list(set(unordered_keys) - set(key_order)))
                keys = key_order + other_keys
                return ((key, d[key]) for key in keys)
    else:
        iteritems = six.iteritems
    _valstr = _make_valstr(**dictkw)
    precision = dictkw.get('precision', None)
    kvsep = dictkw.get('kvsep', ': ')
    if explicit:
        kvsep = '='
    def make_item_str(key, val):
        if explicit or dictkw.get('strkeys', False):
            key_str = six.text_type(key)
        else:
            key_str = repr2(key, precision=precision)
        prefix = key_str + kvsep
        val_str = _valstr(val)
        # FIXME: get indentation right
        if util_type.HAVE_NUMPY and isinstance(val, np.ndarray):
            item_str = hz_str(prefix, val_str)
        else:
            # padded_indent = ' ' * min(len(indent_), len(prefix))
            # val_str = val_str.replace('\n', '\n' + padded_indent)
            item_str = prefix + val_str
        return item_str
    itemstr_list = [make_item_str(key, val)
                    for (key, val) in iteritems(dict_)]
    reverse = False
    key_order_metric = dictkw.get('key_order_metric', None)
    if key_order_metric is not None:
        if key_order_metric.startswith('-'):
            key_order_metric = key_order_metric[1:]
            reverse = True
        if key_order_metric == 'strlen':
            metric_list = [len(itemstr) for itemstr in itemstr_list]
            itemstr_list = ut.sortedby(itemstr_list, metric_list, reverse=reverse)
        elif key_order_metric == 'val':
            metric_list = [val for (key, val) in iteritems(dict_)]
            itemstr_list = ut.sortedby(itemstr_list, metric_list, reverse=reverse)
    maxlen = dictkw.get('maxlen', None)
    if maxlen is not None and len(itemstr_list) > maxlen:
        itemstr_list = itemstr_list[0:maxlen]
    return itemstr_list
868,236
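A hedged usage sketch, assuming utool is installed so the module-level helpers the function relies on (_make_valstr, repr2, and friends) resolve; the exact spacing of the output depends on those helpers, so the commented output is approximate.

import utool as ut

itemstrs = ut.dict_itemstr_list({'b': [1, 2], 'a': 3})
print('\n'.join(itemstrs))
# keys are sorted by default, roughly:
# 'a': 3
# 'b': [1, 2]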
Works on most function-like objects including str, which has no func_name

Args:
    func (function):

Returns:
    str:

CommandLine:
    python -m utool.util_str --exec-get_callable_name

Example:
    >>> # ENABLE_DOCTEST
    >>> from utool.util_str import *  # NOQA
    >>> func = len
    >>> result = get_callable_name(func)
    >>> print(result)
    len
def get_callable_name(func):
    try:
        return meta_util_six.get_funcname(func)
    except AttributeError:
        if isinstance(func, type):
            return repr(func).replace('<type \'', '').replace('\'>', '')
        elif hasattr(func, '__name__'):
            return func.__name__
        else:
            raise NotImplementedError(('cannot get func_name of func=%r '
                                       'type(func)=%r') % (func, type(func)))
868,242
r""" Args: text (str): Returns: str: text_with_lineno - string with numbered lines
def number_text_lines(text):
    numbered_linelist = [
        ''.join((('%2d' % (count + 1)), ' >>> ', line))
        for count, line in enumerate(text.splitlines())
    ]
    text_with_lineno = '\n'.join(numbered_linelist)
    return text_with_lineno
868,250
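A short usage sketch for number_text_lines; the commented output follows directly from the '%2d' and ' >>> ' formatting above.

print(number_text_lines('first\nsecond'))
#  1 >>> first
#  2 >>> second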
r""" Uses pyfiglet to create bubble text. Args: font (str): default=cybermedium, other fonts include: cybersmall and cyberlarge. References: http://www.figlet.org/ Example: >>> # ENABLE_DOCTEST >>> import utool as ut >>> bubble_text = ut.bubbletext('TESTING BUBBLE TEXT', font='cybermedium') >>> print(bubble_text)
def bubbletext(text, font='cybermedium'):
    import utool as ut
    pyfiglet = ut.tryimport('pyfiglet', 'git+https://github.com/pwaller/pyfiglet')
    if pyfiglet is None:
        return text
    else:
        bubble_text = pyfiglet.figlet_format(text, font=font)
        return bubble_text
868,257
r""" Args: underscore_case (?): Returns: str: title_str CommandLine: python -m utool.util_str --exec-to_title_caps Example: >>> # DISABLE_DOCTEST >>> from utool.util_str import * # NOQA >>> underscore_case = 'the_foo_bar_func' >>> title_str = to_title_caps(underscore_case) >>> result = ('title_str = %s' % (str(title_str),)) >>> print(result) title_str = The Foo Bar Func
def to_title_caps(underscore_case):
    words = underscore_case.split('_')
    words2 = [
        word[0].upper() + word[1:]
        for count, word in enumerate(words)
    ]
    title_str = ' '.join(words2)
    return title_str
868,259
r""" Args: fpath_list (list): dpath (str): directory relative to main tex file Returns: str: figure_str CommandLine: python -m utool.util_latex --test-get_latex_figure_str Example: >>> # DISABLE_DOCTEST >>> from utool.util_latex import * # NOQA >>> fpath_list = ['figures/foo.png'] >>> figure_str = get_latex_figure_str(fpath_list) >>> result = str(figure_str) >>> print(result)
def get_latex_figure_str(fpath_list, caption_str=None, label_str=None,
                         width_str=r'\textwidth', height_str=None, nCols=None,
                         dpath=None, colpos_sep=' ', nlsep='',
                         use_sublbls=None, use_frame=False):
    import utool as ut
    if nCols is None:
        nCols = len(fpath_list)
    USE_SUBFIGURE = True
    if width_str is not None:
        colwidth = (1.0 / nCols)
        if USE_SUBFIGURE:
            colwidth *= .95
            graphics_sizestr = ('%.2f' % (colwidth,)) + width_str
        else:
            graphics_sizestr = '[width=%.1f%s]' % (colwidth, width_str)
    elif height_str is not None:
        graphics_sizestr = '[height=%s]' % (height_str)
    else:
        graphics_sizestr = ''
    if dpath is not None:
        fpath_list = [ut.relpath_unix(fpath_, dpath) for fpath_ in fpath_list]
    if USE_SUBFIGURE:
        # References: https://en.wikibooks.org/wiki/LaTeX/Floats,_Figures_and_Captions#Subfloats
        # TODO ? http://tex.stackexchange.com/questions/159290/how-can-i-place-a-vertical-rule-between-subfigures
        # Use subfigures
        graphics_list = []
        sublbl_prefix = label_str if label_str is not None else ''
        for count, fpath in enumerate(fpath_list):
            CHRLBLS = True
            if CHRLBLS:
                #subchar = chr(97 + count)
                subchar = chr(65 + count)
            else:
                subchar = str(count)
            parts = []
            subfigure_str = ''
            if len(fpath_list) > 1:
                parts.append('\\begin{subfigure}[h]{' + graphics_sizestr + '}')
                parts.append('\\centering')
            graphics_part = '\\includegraphics[width=%s]{%s}' % (width_str, fpath,)
            if use_frame:
                parts.append('\\fbox{%s}' % (graphics_part,))
            else:
                parts.append(graphics_part)
            if use_sublbls is True or (use_sublbls is None and len(fpath_list) > 1):
                parts.append('\\caption{}\\label{sub:' + sublbl_prefix + subchar + '}')
            if len(fpath_list) > 1:
                parts.append('\\end{subfigure}')
            subfigure_str = ''.join(parts)
            graphics_list.append(subfigure_str)
    else:
        if True:
            graphics_list = [
                r'\includegraphics%s{%s}\captionof{figure}{%s}' % (
                    graphics_sizestr, fpath,
                    'fd',
                    #'(' + str(count) + ')'
                    #'(' + chr(97 + count) + ')'
                )
                for count, fpath in enumerate(fpath_list)]
        else:
            graphics_list = [r'\includegraphics%s{%s}' % (graphics_sizestr, fpath,)
                             for fpath in fpath_list]
    #graphics_list = [r'\includegraphics%s{%s}' % (graphics_sizestr, fpath,) ]
    #nRows = len(graphics_list) // nCols
    # Add separators
    NL = '\n'
    if USE_SUBFIGURE:
        col_spacer_mid = NL + '~~' + '% --' + NL
        col_spacer_end = NL + r'\\' + '% --' + NL
    else:
        col_spacer_mid = NL + '&' + NL
        col_spacer_end = NL + r'\\' + nlsep + NL
    sep_list = [
        col_spacer_mid if count % nCols > 0 else col_spacer_end
        for count in range(1, len(graphics_list) + 1)
    ]
    if len(sep_list) > 0:
        sep_list[-1] = ''
    graphics_list_ = [graphstr + sep
                      for graphstr, sep in zip(graphics_list, sep_list)]
    #graphics_body = '\n&\n'.join(graphics_list)
    graphics_body = ''.join(graphics_list_)
    header_str = colpos_sep.join(['c'] * nCols)
    if USE_SUBFIGURE:
        figure_body = graphics_body
    else:
        # NOTE: the template string below was lost in extraction; this tabular
        # wrapper is a plausible reconstruction, not verbatim source
        figure_body = ut.codeblock(
            r'''
            \begin{tabular}{%s}
            %s
            \end{tabular}
            ''') % (header_str, graphics_body)
    if caption_str is not None:
        #tabular_body += '\n\\caption{\\footnotesize{%s}}' % (caption_str,)
        if label_str is not None:
            figure_body += '\n\\caption[%s]{%s}' % (label_str, caption_str,)
        else:
            figure_body += '\n\\caption{%s}' % (caption_str,)
    if label_str is not None:
        figure_body += '\n\\label{fig:%s}' % (label_str,)
    # NOTE: reconstructed template (original string lost in extraction)
    figure_fmtstr = ut.codeblock(
        r'''
        \begin{figure}[ht!]
        \centering
        %s
        \end{figure}
        ''')
    figure_str = figure_fmtstr % (figure_body)
    return figure_str
868,292
r""" Args: _cmdname (?): Returns: ?: command_name CommandLine: python -m utool.util_latex --exec-latex_sanitize_command_name Example: >>> # DISABLE_DOCTEST >>> from utool.util_latex import * # NOQA >>> _cmdname = '#foo bar.' >>> command_name = latex_sanitize_command_name(_cmdname) >>> result = ('command_name = %s' % (str(command_name),)) >>> print(result) FooBar
def latex_sanitize_command_name(_cmdname):
    import utool as ut
    command_name = _cmdname
    try:
        def subroman(match):
            import roman
            try:
                groupdict = match.groupdict()
                num = int(groupdict['num'])
                if num == 0:
                    return ''
                return roman.toRoman(num)
            except Exception as ex:
                ut.printex(ex, keys=['groupdict'])
                raise
        command_name = re.sub(ut.named_field('num', r'\d+'), subroman,
                              command_name)
    except ImportError as ex:
        if ut.SUPER_STRICT:
            ut.printex(ex)
            raise
    # remove numbers
    command_name = re.sub(r'[\d' + re.escape('#()[]{}.') + ']', '', command_name)
    # Remove _ for camel case
    #def to_camel_case(str_list):
    #    # hacky
    #    return ''.join([str_ if len(str_) < 1 else str_[0].upper() + str_[1:]
    #                    for str_ in str_list])
    #command_name = to_cammel_case(re.split('[_ ]', command_name)[::2])
    str_list = re.split('[_ ]', command_name)
    #command_name = to_cammel_case(str_list)
    command_name = ut.to_camel_case('_'.join(str_list), mixed=True)
    return command_name
868,297
Invoke the lexer on an input string and return the list of tokens.

This is relatively inefficient and should only be used for
testing/debugging as it slurps up all tokens into one list.

Args:
    data: The input to be tokenized.

Returns:
    A list of LexTokens
def tokenize(self, data, *args, **kwargs):
    self.lexer.input(data)
    tokens = list()
    while True:
        token = self.lexer.token()
        if not token:
            break
        tokens.append(token)
    return tokens
868,443
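A hedged usage sketch: this assumes `tokenize` is a method of the JbossLexer wrapper referenced in `__init__` below, and the token type names in the comment are illustrative rather than taken from the source grammar.

lexer = JbossLexer()
for tok in lexer.tokenize('{"key": [1, 2]}'):
    print(tok.type, tok.value)  # e.g. LBRACE '{', STRING 'key', ...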
Constructs the JsonParser based on the grammar contained herein.

Successful construction builds the ply.yacc instance and sets self.parser.

Args:
    lexer: A ply.lex or JsonLexer instance that will produce JSON_TOKENS.
def __init__(self, lexer=None, **kwargs):
    if lexer is not None:
        if isinstance(lexer, JbossLexer):
            self.lexer = lexer.lexer
        else:
            # Assume that the lexer is a ply.lex instance or similar
            self.lexer = lexer
    else:
        self.lexer = JbossLexer().lexer
    kwargs.setdefault('debug', False)
    kwargs.setdefault('write_tables', False)
    self.parser = ply.yacc.yacc(module=self, **kwargs)
868,444
Parse the input JSON data string into a python data structure.

Args:
    data: An input data string
    lexer: An optional ply.lex instance that overrides the default lexer.

Returns:
    A python dict or list representing the input JSON data.
def parse(self, data, lexer=None, *args, **kwargs):
    if lexer is None:
        lexer = self.lexer
    return self.parser.parse(data, lexer=lexer, *args, **kwargs)
868,448
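A hedged end-to-end sketch for the parser class above (the class name JsonParser is taken from the docstring; the exact module layout is an assumption).

parser = JsonParser()                  # builds the lexer and yacc tables
result = parser.parse('{"servers": ["a", "b"], "count": 2}')
print(result)  # expected: {'servers': ['a', 'b'], 'count': 2}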
Generates attribute values of specific edges

Args:
    on_missing (str): Strategy for handling nodes missing from G.
        Can be {'error', 'default'}. Defaults to 'error'.
    on_keyerr (str): Strategy for handling keys missing from node dicts.
        Can be {'error', 'default'}. Defaults to 'default' if default is
        specified, otherwise defaults to 'error'.
def nx_gen_edge_values(G, key, edges=None, default=util_const.NoParam,
                       on_missing='error', on_keyerr='default'):
    if edges is None:
        edges = G.edges()
    if on_missing is None:
        on_missing = 'error'
    if on_keyerr is None:
        on_keyerr = 'default'
    if default is util_const.NoParam and on_keyerr == 'default':
        on_keyerr = 'error'
    # Generate `data_iter` edges and data dictionary
    if on_missing == 'error':
        data_iter = (G.adj[u][v] for u, v in edges)
    elif on_missing == 'default':
        data_iter = (G.adj[u][v] if G.has_edge(u, v) else {}
                     for u, v in edges)
    else:
        raise KeyError('on_missing={} must be error or default'.format(
            on_missing))
    # Get `value_iter` desired value out of dictionary
    if on_keyerr == 'error':
        value_iter = (d[key] for d in data_iter)
    elif on_keyerr == 'default':
        value_iter = (d.get(key, default) for d in data_iter)
    else:
        raise KeyError('on_keyerr={} must be error or default'.format(on_keyerr))
    return value_iter
868,674
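A usage sketch with networkx; `default` is passed explicitly here so edges missing the attribute fall back to it instead of raising.

import networkx as nx

G = nx.Graph()
G.add_edge('a', 'b', weight=1.5)
G.add_edge('b', 'c')  # no weight attribute

values = nx_gen_edge_values(G, 'weight', edges=[('a', 'b'), ('b', 'c')],
                            default=0.0, on_keyerr='default')
print(list(values))  # [1.5, 0.0]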
Read a single Python file in as code and extract members from it.

Args:
    url -- a URL, either absolute (contains ':') or relative
    base_path -- if url is relative, base_path is prepended to it.

The resulting URL needs to look something like this:
https://github.com/foo/bar/blob/master/bibliopixel/myfile.MyClass
def load_location(url, base_path=None, module=False):
    if base_path and ':' not in url:
        slashes = base_path.endswith('/') + url.startswith('/')
        if slashes == 0:
            url = base_path + '/' + url
        elif slashes == 1:
            url = base_path + url
        else:
            url = base_path[:-1] + url
    slash = url.rfind('/')
    url_root, filepath = url[:slash + 1], url[slash + 1:]
    filename, *python_path = filepath.split('.')
    whitelist.check_url(url_root)
    file_url = url_root + filename + '.py'
    source = data.load(file_url, False)
    compiled = compile(source, file_url, mode='exec')
    local = {}
    exec(compiled, local)
    try:
        names = local['__all__']
    except KeyError:
        names = local
    if python_path and python_path[0] == 'py':
        python_path.pop(0)
    if not python_path:
        if module:
            return local
        python_path = [importer.guess_name(names, filename, url)]
    first, *rest = python_path
    try:
        result = local[first]
    except KeyError:
        # the dict lookup raises KeyError; surface it as the attribute error
        # callers expect (the original used a bare except here)
        raise AttributeError(first)
    for r in rest:
        result = getattr(result, r)
    return result
868,874
r""" Opens a url in the specified or default browser Args: url (str): web url CommandLine: python -m utool.util_grabdata --test-open_url_in_browser Example: >>> # DISABLE_DOCTEST >>> # SCRIPT >>> from utool.util_grabdata import * # NOQA >>> url = 'http://www.jrsoftware.org/isdl.php' >>> open_url_in_browser(url, 'chrome')
def open_url_in_browser(url, browsername=None, fallback=False):
    import webbrowser
    print('[utool] Opening url=%r in browser' % (url,))
    if browsername is None:
        # webbrowser.open both picks the default browser and opens the url;
        # it returns a bool, so no further .open() call is possible here
        browser = webbrowser.open(url)
    else:
        browser = get_prefered_browser(pref_list=[browsername], fallback=fallback)
        return browser.open(url)
868,982
Serialize an object to disk using the pickle protocol.

Args:
    obj: The object to serialize.
    filename: Path to the output file.
    protocol: Version of the pickle protocol.
def save(obj, filename, protocol=4):
    with open(filename, 'wb') as f:
        pickle.dump(obj, f, protocol=protocol)
869,100
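A roundtrip sketch for save; the matching loader is assumed to be plain pickle.load, which is not shown in the source.

import pickle

save({'weights': [0.1, 0.2]}, '/tmp/model.pkl')
with open('/tmp/model.pkl', 'rb') as f:
    restored = pickle.load(f)
print(restored)  # {'weights': [0.1, 0.2]}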
Load a JSON object from the specified file.

Args:
    filename: Path to the input JSON file.
    **kwargs: Additional arguments to `json.load`.

Returns:
    The object deserialized from JSON.
def load_json(filename, **kwargs):
    with open(filename, 'r', encoding='utf-8') as f:
        return json.load(f, **kwargs)
869,101
Save an object as a JSON file.

Args:
    obj: The object to save. Must be JSON-serializable.
    filename: Path to the output file.
    **kwargs: Additional arguments to `json.dump`.
def save_json(obj, filename, **kwargs):
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(obj, f, **kwargs)
869,102
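A roundtrip sketch combining save_json and load_json from above; the config dict and path are illustrative.

import json

save_json({'name': 'gordon', 'retries': 3}, '/tmp/config.json', indent=2)
print(load_json('/tmp/config.json'))  # {'name': 'gordon', 'retries': 3}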