def date_from_string(string, format_string=None):
    if isinstance(format_string, str):
        return datetime.datetime.strptime(string, format_string).date()
    elif format_string is None:
        format_string = [
            "%Y-%m-%d",
            "%m-%d-%Y",
            "%m/%d/%Y",
            "%d/%m/%Y",
        ]

    for fmt in format_string:
        try:
            return datetime.datetime.strptime(string, fmt).date()
        except ValueError:
            continue

    raise ValueError("Could not produce date from string: {}".format(string))
Runs through a few common string formats for dates and attempts to coerce the string into a datetime.date. Alternatively, format_string can provide either a single format string to attempt or an iterable of format strings to attempt.
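For example (assuming date_from_string as defined above with datetime imported; the inputs are made-up):

>>> date_from_string("2015-03-14")
datetime.date(2015, 3, 14)
>>> date_from_string("03/14/2015")
datetime.date(2015, 3, 14)
>>> date_from_string("14 March 2015", format_string="%d %B %Y")
datetime.date(2015, 3, 14)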
def to_datetime(plain_date, hours=0, minutes=0, seconds=0, ms=0):
    # don't mess with datetimes
    if isinstance(plain_date, datetime.datetime):
        return plain_date

    return datetime.datetime(
        plain_date.year,
        plain_date.month,
        plain_date.day,
        hours,
        minutes,
        seconds,
        ms,
    )
given a datetime.date, gives back a datetime.datetime
def get_containing_period(cls, *periods):
    if any(not isinstance(period, TimePeriod) for period in periods):
        raise TypeError("periods must all be TimePeriods: {}".format(periods))

    latest = datetime.datetime.min
    earliest = datetime.datetime.max

    for period in periods:
        # the best we can do to contain None is None!
        if period._latest is None:
            latest = None
        elif latest is not None and period._latest > latest:
            latest = period._latest

        if period._earliest is None:
            earliest = None
        elif earliest is not None and period._earliest < earliest:
            earliest = period._earliest

    return TimePeriod(earliest, latest)
Given a bunch of TimePeriods, return a TimePeriod that most closely contains them.
def get_attr(obj, string_rep, default=_get_attr_raise_on_attribute_error,
             separator="."):
    attribute_chain = string_rep.split(separator)
    current_obj = obj
    for attr in attribute_chain:
        try:
            current_obj = getattr(current_obj, attr)
        except AttributeError:
            if default is _get_attr_raise_on_attribute_error:
                raise AttributeError(
                    "Bad attribute \"{}\" in chain: \"{}\"".format(attr, string_rep)
                )
            return default
    return current_obj
getattr via a chain of attributes like so:

>>> import datetime
>>> some_date = datetime.date.today()
>>> get_attr(some_date, "month.numerator.__doc__")
'int(x[, base]) -> integer\n\nConvert a string or number to an integer, ...
def get_user_password(env, param, force=False):
    username = utils.assemble_username(env, param)
    if not utils.confirm_credential_display(force):
        return

    # Retrieve the credential from the keychain
    password = password_get(username)

    if password:
        return (username, password)
    else:
        return False
Allows the user to print the credential for a particular keyring entry to the screen
def pull_env_credential(env, param, value):
    rex = r"USE_KEYRING\[([\'\"])(.*)\1\]"

    # This is the old-style, per-environment keyring credential
    if value == "USE_KEYRING":
        username = utils.assemble_username(env, param)

    # This is the new-style, global keyring credential that can be applied
    # to multiple environments
    else:
        global_identifier = re.match(rex, value).group(2)
        username = utils.assemble_username('global', global_identifier)

    return (username, password_get(username))
Dissects a keyring credential lookup string from the supernova config file and returns the username/password combo
def password_get(username=None):
    password = keyring.get_password('supernova', username)
    if password is None:
        split_username = tuple(username.split(':'))
        msg = ("Couldn't find a credential for {0}:{1}. You need to set one "
               "with: supernova-keyring -s {0} {1}").format(*split_username)
        raise LookupError(msg)
    else:
        return password.encode('ascii')
Retrieves a password from the keychain based on the environment and configuration parameter pair. Raises a LookupError if no matching credential is found.
def set_user_password(environment, parameter, password):
    username = '%s:%s' % (environment, parameter)
    return password_set(username, password)
Sets a user's password in the keyring storage
def password_set(username=None, password=None):
    result = keyring.set_password('supernova', username, password)

    # NOTE: keyring returns None when the storage is successful. That's weird.
    if result is None:
        return True
    else:
        return False
Stores a password in a keychain for a particular environment and configuration parameter pair.
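For reference, a minimal round trip with the keyring library itself; the 'supernova' service name matches the functions above, while the entry name and password are illustrative:

import keyring

# keyring.set_password returns None on success, which is why
# password_set() above translates None into True.
keyring.set_password('supernova', 'prod:OS_PASSWORD', 's3cret')
assert keyring.get_password('supernova', 'prod:OS_PASSWORD') == 's3cret'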
def prep_shell_environment(nova_env, nova_creds):
    new_env = {}
    for key, value in prep_nova_creds(nova_env, nova_creds):
        if type(value) == six.binary_type:
            value = value.decode()
        new_env[key] = value
    return new_env
Builds the set of new variables to append to the current shell environment temporarily.
def prep_nova_creds(nova_env, nova_creds):
    try:
        raw_creds = dict(nova_creds.get('DEFAULT', {}), **nova_creds[nova_env])
    except KeyError:
        msg = "{0} was not found in your supernova configuration " \
              "file".format(nova_env)
        raise KeyError(msg)

    proxy_re = re.compile(r"(^http_proxy|^https_proxy)")

    creds = []
    for param, value in raw_creds.items():

        if not proxy_re.match(param):
            param = param.upper()

        if not hasattr(value, 'startswith'):
            continue

        # Get values from the keyring if we find a USE_KEYRING constant
        if value.startswith("USE_KEYRING"):
            username, credential = pull_env_credential(nova_env, param, value)
        else:
            credential = value.strip("\"'")

        # Make sure we got something valid from the configuration file or
        # the keyring
        if not credential:
            raise LookupError("No matching credentials found in keyring")

        creds.append((param, credential))

    return creds
Finds relevant config options in the supernova config and cleans them up for novaclient.
def load_config(config_file_override=False):
    supernova_config = get_config_file(config_file_override)
    supernova_config_dir = get_config_directory(config_file_override)

    if not supernova_config and not supernova_config_dir:
        raise Exception("Couldn't find a valid configuration file to parse")

    nova_creds = ConfigObj()

    # Can we successfully read the configuration file?
    if supernova_config:
        try:
            nova_creds.merge(ConfigObj(supernova_config))
        except Exception:
            raise Exception("There's an error in your configuration file")

    if supernova_config_dir:
        for dir_file in os.listdir(supernova_config_dir):
            full_path = ''.join((supernova_config_dir, dir_file))
            try:
                nova_creds.merge(ConfigObj(full_path))
            except Exception:
                msg = "Skipping '{0}', Parsing Error.".format(full_path)
                print(msg)

    create_dynamic_configs(nova_creds)

    return nova_creds
Pulls the supernova configuration file and reads it
def get_config_file(override_files=False):
    if override_files:
        if isinstance(override_files, six.string_types):
            possible_configs = [override_files]
        else:
            raise Exception("Config file override must be a string")
    else:
        xdg_config_home = os.environ.get('XDG_CONFIG_HOME') or \
            os.path.expanduser('~/.config')
        possible_configs = [os.path.join(xdg_config_home, "supernova"),
                            os.path.expanduser("~/.supernova"),
                            ".supernova"]

    for config_file in reversed(possible_configs):
        if os.path.isfile(config_file):
            return config_file

    return False
Looks for the most specific configuration file available. An override can be provided as a string if needed.
def get_config_directory(override_files=False):
    if override_files:
        possible_dirs = [override_files]
    else:
        xdg_config_home = os.environ.get('XDG_CONFIG_HOME') or \
            os.path.expanduser('~/.config')
        possible_dirs = [os.path.join(xdg_config_home, "supernova.d/"),
                         os.path.expanduser("~/.supernova.d/"),
                         ".supernova.d/"]

    for config_dir in reversed(possible_dirs):
        if os.path.isdir(config_dir):
            return config_dir

    return False
Looks for the most specific configuration directory possible, in order to load individual configuration files.
def execute_executable(nova_args, env_vars):
    process = subprocess.Popen(nova_args,
                               stdout=sys.stdout,
                               stderr=subprocess.PIPE,
                               env=env_vars)
    process.wait()
    return process
Executes the executable given by the user. Hey, I know this method has a silly name, but I write the code here and I'm silly.
def check_for_debug(supernova_args, nova_args):
    # Heat requires special handling for debug arguments
    if supernova_args['debug'] and supernova_args['executable'] == 'heat':
        nova_args.insert(0, '-d ')
    elif supernova_args['debug']:
        nova_args.insert(0, '--debug ')
    return nova_args
If the user wanted to run the executable with debugging enabled, we need to apply the correct arguments to the executable. Heat is a corner case since it uses -d instead of --debug.
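A quick sketch of the transformation (the argument dicts are made-up examples; the trailing spaces on the inserted flags are stripped later by run_command):

print(check_for_debug({'debug': True, 'executable': 'heat'}, ['stack-list']))
# ['-d ', 'stack-list']
print(check_for_debug({'debug': True, 'executable': 'nova'}, ['list']))
# ['--debug ', 'list']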
def check_for_executable(supernova_args, env_vars):
    exe = supernova_args.get('executable', 'default')

    if exe != 'default':
        return supernova_args

    if 'OS_EXECUTABLE' in env_vars.keys():
        supernova_args['executable'] = env_vars['OS_EXECUTABLE']
        return supernova_args

    supernova_args['executable'] = 'nova'
    return supernova_args
It's possible that a user might set their custom executable via an environment variable. If we detect one, we should add it to supernova's arguments ONLY IF an executable wasn't set on the command line. The command line executable must take priority.
def check_for_bypass_url(raw_creds, nova_args):
    if 'BYPASS_URL' in raw_creds.keys():
        bypass_args = ['--bypass-url', raw_creds['BYPASS_URL']]
        nova_args = bypass_args + nova_args
    return nova_args
Return a list of extra args that need to be passed on cmdline to nova.
def handle_stderr(stderr_pipe):
    stderr_output = stderr_pipe.read()

    if len(stderr_output) > 0:
        click.secho("\n__ Error Output {0}".format('_' * 62),
                    fg='white', bold=True)
        click.echo(stderr_output)

    return True
Takes stderr from the command's output and displays it AFTER the stdout is printed by run_command().
def run_command(nova_creds, nova_args, supernova_args):
    nova_env = supernova_args['nova_env']

    # (gtmanfred) make a copy of this object. If we don't copy it, the insert
    # to 0 happens multiple times because it is the same object in memory.
    nova_args = copy.copy(nova_args)

    # Get the environment variables ready
    env_vars = os.environ.copy()
    env_vars.update(credentials.prep_shell_environment(nova_env, nova_creds))

    # BYPASS_URL is a weird one, so we need to send it as an argument,
    # not an environment variable.
    nova_args = check_for_bypass_url(nova_creds[nova_env], nova_args)

    # Check for OS_EXECUTABLE
    supernova_args = check_for_executable(supernova_args, env_vars)

    # Check for a debug override
    nova_args = check_for_debug(supernova_args, nova_args)

    # Print a small message for the user (very helpful for groups)
    msg = "Running %s against %s..." % (supernova_args.get('executable'),
                                        nova_env)
    if not supernova_args.get('quiet'):
        click.echo("[%s] %s " % (click.style('SUPERNOVA', fg='green'), msg))

    # Call executable and connect stdout to the current terminal
    # so that any unicode characters from the executable's list will be
    # displayed appropriately.
    #
    # In other news, I hate how python 2.6 does unicode.
    nova_args.insert(0, supernova_args['executable'])
    nova_args = [nova_arg.strip() for nova_arg in nova_args]
    process = execute_executable(nova_args, env_vars)

    # If the user asked us to be quiet, then let's not print stderr
    if not supernova_args.get('quiet'):
        handle_stderr(process.stderr)

    return process.returncode
Sets the environment variables for the executable, runs the executable, and handles the output.
def check_environment_presets():
    presets = [x for x in os.environ.copy().keys()
               if x.startswith('NOVA_') or x.startswith('OS_')]
    if len(presets) < 1:
        return True
    else:
        click.echo("_" * 80)
        click.echo("*WARNING* Found existing environment variables that may "
                   "cause conflicts:")
        for preset in presets:
            click.echo("  - %s" % preset)
        click.echo("_" * 80)
        return False
Checks for environment variables that can cause problems with supernova
def get_envs_in_group(group_name, nova_creds):
    envs = []
    for key, value in nova_creds.items():
        supernova_groups = value.get('SUPERNOVA_GROUP', [])
        if hasattr(supernova_groups, 'startswith'):
            supernova_groups = [supernova_groups]
        if group_name in supernova_groups:
            envs.append(key)
        elif group_name == 'all':
            envs.append(key)
    return envs
Takes a group_name and finds any environments that have a SUPERNOVA_GROUP configuration line that matches the group_name.
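For example, with an illustrative configuration dict:

nova_creds = {
    'prod': {'SUPERNOVA_GROUP': 'raxus'},
    'staging': {'SUPERNOVA_GROUP': ['raxus', 'dev']},
    'lab': {},
}
print(get_envs_in_group('raxus', nova_creds))  # ['prod', 'staging']
print(get_envs_in_group('all', nova_creds))    # all three environments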
def is_valid_group(group_name, nova_creds):
    valid_groups = []
    for key, value in nova_creds.items():
        supernova_groups = value.get('SUPERNOVA_GROUP', [])
        if hasattr(supernova_groups, 'startswith'):
            supernova_groups = [supernova_groups]
        valid_groups.extend(supernova_groups)
    valid_groups.append('all')
    if group_name in valid_groups:
        return True
    else:
        return False
Checks to see if the given group_name matches a SUPERNOVA_GROUP configuration option in the configuration file (or the special group 'all').
def rm_prefix(name):
    if name.startswith('nova_'):
        return name[5:]
    elif name.startswith('novaclient_'):
        return name[11:]
    elif name.startswith('os_'):
        return name[3:]
    else:
        return name
Removes a nova_, os_, or novaclient_ prefix from a string.
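For instance:

>>> rm_prefix('nova_api_key')
'api_key'
>>> rm_prefix('os_username')
'username'
>>> rm_prefix('region')
'region'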
def __pad(strdata):
    if request.args.get('callback'):
        return "%s(%s);" % (request.args.get('callback'), strdata)
    else:
        return strdata
Pads `strdata` with a Request's callback argument, if specified, or does nothing.
def __dumps(*args, **kwargs):
    indent = None
    if (current_app.config.get('JSONIFY_PRETTYPRINT_REGULAR', False)
            and not request.is_xhr):
        indent = 2
    return json.dumps(args[0] if len(args) == 1 else dict(*args, **kwargs),
                      indent=indent)
Serializes `args` and `kwargs` as JSON. Supports serializing an array as the top-level object, if it is the only argument.
def jsonpify(*args, **kwargs):
    return current_app.response_class(__pad(__dumps(*args, **kwargs)),
                                      mimetype=__mimetype())
Creates a :class:`~flask.Response` with the JSON or JSON-P representation of the given arguments with an `application/json` or `application/javascript` mimetype, respectively. The arguments to this function are the same as to the :class:`dict` constructor, but also accept an array. If a `callback` is specified in the request arguments, the response is JSON-Padded.

Example usage::

    @app.route('/_get_current_user')
    def get_current_user():
        return jsonpify(username=g.user.username,
                        email=g.user.email,
                        id=g.user.id)

GET /_get_current_user will send a JSON response like this to the browser::

    {
        "username": "admin",
        "email": "admin@localhost",
        "id": 42
    }

or, if a callback is specified, GET /_get_current_user?callback=displayUsers will result in a JSON-P response like this::

    displayUsers({
        "username": "admin",
        "email": "admin@localhost",
        "id": 42
    });

This requires Python 2.6 or an installed version of simplejson. For security reasons only objects are supported toplevel. For more information about this, have a look at :ref:`json-security`.

.. versionadded:: 0.2
def update_type_lookups(self):
    self.type_to_typestring = dict(zip(self.types,
                                       self.python_type_strings))
    self.typestring_to_type = dict(zip(self.python_type_strings,
                                       self.types))
Update type and typestring lookup dicts.

Must be called once the ``types`` and ``python_type_strings`` attributes are set so that ``type_to_typestring`` and ``typestring_to_type`` are constructed.

.. versionadded:: 0.2

Notes
-----
Subclasses need to call this function explicitly.
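A minimal, hypothetical subclass sketch (it assumes ``TypeMarshaller`` from ``hdf5storage.Marshallers``; the deque marshaller here is purely illustrative):

import collections

from hdf5storage.Marshallers import TypeMarshaller


class DequeMarshaller(TypeMarshaller):
    def __init__(self):
        TypeMarshaller.__init__(self)
        # Set the attributes first, then rebuild the lookup dicts.
        self.types = [collections.deque]
        self.python_type_strings = ['collections.deque']
        self.update_type_lookups()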
def get_type_string(self, data, type_string):
    if type_string is not None:
        return type_string
    else:
        tp = type(data)
        try:
            return self.type_to_typestring[tp]
        except KeyError:
            return self.type_to_typestring[tp.__module__ + '.'
                                           + tp.__name__]
Gets type string.

Finds the type string for 'data' contained in ``python_type_strings`` using its ``type``. A non-``None`` `type_string` overrides whatever type string would be looked up (the override makes it easier for subclasses to convert something that the parent marshaller can write to disk but still put the right type string in place).

Parameters
----------
data : type to be marshalled
    The Python object that is being written to disk.
type_string : str or None
    If it is a ``str``, it overrides any looked up type string. ``None`` means don't override.

Returns
-------
str
    The type string associated with 'data'. Will be `type_string` if it is not ``None``.

Notes
-----
Subclasses probably do not need to override this method.
def write(self, f, grp, name, data, type_string, options):
    raise NotImplementedError("Can't write data type: " + str(type(data)))
Writes an object's metadata to file.

Writes the Python object 'data' to 'name' in h5py.Group 'grp'.

.. versionchanged:: 0.2
   Arguments changed.

Parameters
----------
f : h5py.File
    The HDF5 file handle that is open.
grp : h5py.Group or h5py.File
    The parent HDF5 Group (or File if at '/') that contains the object with the specified name.
name : str
    Name of the object.
data
    The object to write to file.
type_string : str or None
    The type string for `data`. If it is ``None``, one will have to be gotten by ``get_type_string``.
options : hdf5storage.core.Options
    hdf5storage options object.

Raises
------
NotImplementedError
    If writing 'data' to file is currently not supported.
hdf5storage.exceptions.TypeNotMatlabCompatibleError
    If writing a type not compatible with MATLAB and `options.action_for_matlab_incompatible` is set to ``'error'``.

Notes
-----
Must be overridden in a subclass because a ``NotImplementedError`` is thrown immediately.

See Also
--------
hdf5storage.utilities.write_data
def _replace_fun_unescape(m):
    slsh = b'\\'.decode('ascii')
    s = m.group(0)
    count = s.count(slsh)
    if count % 2 == 0:
        return s
    else:
        if sys.hexversion >= 0x03000000:
            c = chr(int(s[(count + 1):], base=16))
        else:
            c = unichr(int(s[(count + 1):], base=16))
        return slsh * (count - 1) + c
Decode single hex/unicode escapes found in regex matches.

Supports single hex/unicode escapes of the form ``'\\xYY'``, ``'\\uYYYY'``, and ``'\\UYYYYYYYY'`` where Y is a hex digit. Only decodes if there is an odd number of backslashes.

.. versionadded:: 0.2

Parameters
----------
m : regex match

Returns
-------
c : str
    The unescaped character.
def escape_path(pth):
    if isinstance(pth, bytes):
        pth = pth.decode('utf-8')
    if sys.hexversion >= 0x03000000:
        if not isinstance(pth, str):
            raise TypeError('pth must be str or bytes.')
        match = _find_dots_re.match(pth)
        if match is None:
            prefix = ''
            s = pth
        else:
            prefix = '\\x2e' * match.end()
            s = pth[match.end():]
    else:
        if not isinstance(pth, unicode):
            raise TypeError('pth must be unicode or str.')
        match = _find_dots_re.match(pth)
        if match is None:
            prefix = unicode('')
            s = pth
        else:
            prefix = unicode('\\x2e') * match.end()
            s = pth[match.end():]
    return prefix + _find_fslashnull_re.sub(_replace_fun_escape, s)
Hex/unicode escapes a path.

Escapes a path so that it can be represented faithfully in an HDF5 file without changing directories. This means that leading ``'.'`` characters must be escaped, and ``'/'`` and null must be escaped too. Backslashes are escaped as double backslashes. Other escaped characters are replaced with ``'\\xYY'``, ``'\\uYYYY'``, or ``'\\UYYYYYYYY'`` where Y are hex digits, depending on the unicode numerical value of the character. For ``'.'``, both slashes, and null, this will be the former (``'\\xYY'``).

.. versionadded:: 0.2

Parameters
----------
pth : str or bytes
    The path to escape.

Returns
-------
epth : str
    The escaped path.

Raises
------
TypeError
    If `pth` is not the right type.

See Also
--------
unescape_path
def unescape_path(pth):
    if isinstance(pth, bytes):
        pth = pth.decode('utf-8')
    if sys.hexversion >= 0x03000000:
        if not isinstance(pth, str):
            raise TypeError('pth must be str or bytes.')
    else:
        if not isinstance(pth, unicode):
            raise TypeError('pth must be unicode or str.')
    # Look for invalid escapes.
    if _find_invalid_escape_re.search(pth) is not None:
        raise ValueError('Invalid escape found.')
    # Do all hex/unicode escapes.
    s = _find_escapes_re.sub(_replace_fun_unescape, pth)
    # Do all double backslash escapes.
    return s.replace(b'\\\\'.decode('ascii'), b'\\'.decode('ascii'))
Hex/unicode unescapes a path.

Unescapes a path. Valid escapes are ``'\\xYY'``, ``'\\uYYYY'``, or ``'\\UYYYYYYYY'`` where Y are hex digits giving the character's unicode numerical value, and double backslashes which are the escape for single backslashes.

.. versionadded:: 0.2

Parameters
----------
pth : str
    The path to unescape.

Returns
-------
unpth : str
    The unescaped path.

Raises
------
TypeError
    If `pth` is not the right type.
ValueError
    If an invalid escape is found.

See Also
--------
escape_path
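As a sanity check, escaping and then unescaping should round-trip any path (a sketch, assuming the two functions above along with their module-level regexes):

for p in ('./results/run.1', 'a\\b', 'dir/file\x00name'):
    assert unescape_path(escape_path(p)) == p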
def does_dtype_have_a_zero_shape(dt):
    components = [dt]
    while 0 != len(components):
        c = components.pop()
        if 0 in c.shape:
            return True
        if c.names is not None:
            components.extend([v[0] for v in c.fields.values()])
        if c.base != c:
            components.append(c.base)
    return False
Determine whether a dtype (or its fields) have zero shape.

Determines whether the given ``numpy.dtype`` has a shape with a zero element or if one of its fields does, or if one of its fields' fields does, and so on recursively. The following dtypes do not have zero shape.

* ``'uint8'``
* ``[('a', 'int32'), ('blah', 'float16', (3, 3))]``
* ``[('a', [('b', 'complex64')], (2, 1, 3))]``

But the following do

* ``('uint8', (1, 0))``
* ``[('a', 'int32'), ('blah', 'float16', (3, 0))]``
* ``[('a', [('b', 'complex64')], (2, 0, 3))]``

Parameters
----------
dt : numpy.dtype
    The dtype to check.

Returns
-------
yesno : bool
    Whether `dt` or one of its fields has a shape with at least one element that is zero.

Raises
------
TypeError
    If `dt` is not a ``numpy.dtype``.
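A quick check with the function above (assuming numpy is available):

import numpy as np

# A scalar dtype has shape (), so there is no zero to find.
assert not does_dtype_have_a_zero_shape(np.dtype('uint8'))

# One field has a 0 in its sub-array shape, so the answer is True.
dt = np.dtype([('a', 'int32'), ('blah', 'float16', (3, 0))])
assert does_dtype_have_a_zero_shape(dt)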
def write_data(f, grp, name, data, type_string, options):
    # Get the marshaller for type(data). The required modules should be
    # here and imported.
    tp = type(data)
    m, has_modules = \
        options.marshaller_collection.get_marshaller_for_type(tp)

    # If a marshaller was found and we have the required modules, use it
    # to write the data. Otherwise, return an error. If we get something
    # other than None back, then we must recurse through the
    # entries. Also, we must set the H5PATH attribute to be the path to
    # the containing group.

    if m is not None and has_modules:
        m.write(f, grp, name, data, type_string, options)
    else:
        raise NotImplementedError("Can't write data type: " + str(tp))
Writes a piece of data into an open HDF5 file.

Low level function to store a Python type (`data`) into the specified Group.

Parameters
----------
f : h5py.File
    The open HDF5 file.
grp : h5py.Group or h5py.File
    The Group to place the data in.
name : str
    The name to write the data to.
data : any
    The data to write.
type_string : str or None
    The type string of the data, or ``None`` to deduce automatically.
options : hdf5storage.core.Options
    The options to use when writing.

Raises
------
NotImplementedError
    If writing `data` is not supported.
TypeNotMatlabCompatibleError
    If writing a type not compatible with MATLAB and `options.action_for_matlab_incompatible` is set to ``'error'``.

See Also
--------
hdf5storage.write : Higher level version.
read_data
hdf5storage.Options
def read_object_array(f, data, options):
    # Go through all the elements of data, read them using their
    # references, and put the output in a new object array.
    data_derefed = np.zeros(shape=data.shape, dtype='object')
    for index, x in np.ndenumerate(data):
        data_derefed[index] = read_data(f, None, None, options,
                                        dsetgrp=f[x])
    return data_derefed
Reads an array of objects recursively.

Read the elements of the given HDF5 Reference array recursively and construct a ``numpy.object_`` array from its elements, which is returned.

Parameters
----------
f : h5py.File
    The HDF5 file handle that is open.
data : numpy.ndarray of h5py.Reference
    The array of HDF5 References to read and make an object array from.
options : hdf5storage.core.Options
    hdf5storage options object.

Raises
------
NotImplementedError
    If reading the object from file is currently not supported.

Returns
-------
obj_array : numpy.ndarray of numpy.object\_
    The Python object array containing the items pointed to by `data`.

See Also
--------
write_object_array
hdf5storage.Options.group_for_references
h5py.Reference
def next_unused_name_in_group(grp, length):
    # While
    #
    # ltrs = string.ascii_letters + string.digits
    # name = ''.join([random.choice(ltrs) for i in range(length)])
    #
    # seems intuitive, its performance is abysmal compared to
    #
    # '%0{0}x'.format(length) % random.getrandbits(length * 4)
    #
    # The difference is a factor of 20. Idea from
    #
    # https://stackoverflow.com/questions/2782229/most-lightweight-way-
    # to-create-a-random-string-and-a-random-hexadecimal-number/
    # 35161595#35161595
    fmt = '%0{0}x'.format(length)
    name = fmt % random.getrandbits(length * 4)
    while name in grp:
        name = fmt % random.getrandbits(length * 4)
    return name
Gives a name that isn't used in a Group.

Generates a name of the desired length that is not a Dataset or Group in the given group. Note, if length is not large enough and `grp` is full enough, there may be no available names meaning that this function will hang.

Parameters
----------
grp : h5py.Group or h5py.File
    The HDF5 Group (or File if at '/') to generate an unused name in.
length : int
    Number of characters the name should be.

Returns
-------
name : str
    A name that isn't already an existing Dataset or Group in `grp`.
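For reference, the random-hex trick on its own (standard library only):

import random

length = 16
fmt = '%0{0}x'.format(length)   # e.g. '%016x'
# length * 4 random bits render as exactly `length` hex digits,
# zero-padded on the left.
name = fmt % random.getrandbits(length * 4)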
def convert_numpy_str_to_uint16(data):
    # An empty string should be an empty uint16
    if data.nbytes == 0:
        return np.uint16([])

    # We need to use the UTF-16 codec for our endianness. Using the
    # right one means we don't have to worry about removing the BOM.
    if sys.byteorder == 'little':
        codec = 'UTF-16LE'
    else:
        codec = 'UTF-16BE'

    # numpy.char.encode can do the conversion element wise. Then, we
    # just have to convert to uint16 with the appropriate dimensions.
    # The dimensions are gotten from the shape of the converted data
    # with the number of columns increased by the number of words
    # (pairs of bytes) in the strings.
    cdata = np.char.encode(np.atleast_1d(data), codec)
    shape = list(cdata.shape)
    shape[-1] *= (cdata.dtype.itemsize // 2)
    return np.ndarray(shape=shape, dtype='uint16',
                      buffer=cdata.tostring())
Converts a numpy.unicode\_ to UTF-16 in numpy.uint16 form.

Convert a ``numpy.unicode_`` or an array of them (they are UTF-32 strings) to UTF-16 in the equivalent array of ``numpy.uint16``. The conversion will throw an exception if any characters cannot be converted to UTF-16. Strings are expanded along rows (across columns) so a 2x3x4 array of 10 element strings will get turned into a 2x30x4 array of uint16's if every UTF-32 character converts easily to a UTF-16 singlet, as opposed to a UTF-16 doublet.

Parameters
----------
data : numpy.unicode\_ or numpy.ndarray of numpy.unicode\_
    The string or array of them to convert.

Returns
-------
array : numpy.ndarray of numpy.uint16
    The result of the conversion.

Raises
------
UnicodeEncodeError
    If a UTF-32 character has no UTF-16 representation.

See Also
--------
convert_numpy_str_to_uint32
convert_to_numpy_str
def convert_numpy_str_to_uint32(data):
    if data.nbytes == 0:
        # An empty string should be an empty uint32.
        return np.uint32([])
    else:
        # We need to calculate the new shape from the current shape,
        # which will have to be expanded along the rows to fit all the
        # characters (the dtype.itemsize gets the number of bytes in
        # each string, which is just 4 times the number of
        # characters). Then it is a matter of getting a view of the
        # string (in flattened form so that it is contiguous) as uint32
        # and then reshaping it.
        shape = list(np.atleast_1d(data).shape)
        shape[-1] *= data.dtype.itemsize // 4
        return data.flatten().view(np.uint32).reshape(tuple(shape))
Converts a numpy.unicode\_ to its numpy.uint32 representation.

Convert a ``numpy.unicode_`` or an array of them (they are UTF-32 strings) into the equivalent array of ``numpy.uint32`` that is byte for byte identical. Strings are expanded along rows (across columns) so a 2x3x4 array of 10 element strings will get turned into a 2x30x4 array of uint32's.

Parameters
----------
data : numpy.unicode\_ or numpy.ndarray of numpy.unicode\_
    The string or array of them to convert.

Returns
-------
array : numpy.ndarray of numpy.uint32
    The result of the conversion.

See Also
--------
convert_numpy_str_to_uint16
convert_to_numpy_str
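A small demonstration of the view trick (assuming numpy and the function above):

import numpy as np

data = np.array(['abc'])  # numpy unicode strings are stored as UTF-32
print(convert_numpy_str_to_uint32(data))
# [97 98 99] -- one uint32 code point per character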
def encode_complex(data, complex_names):
    # Grab the dtype name, and convert it to the right non-complex type
    # if it isn't already one.
    dtype_name = data.dtype.name
    if dtype_name[0:7] == 'complex':
        dtype_name = 'float' + str(int(float(dtype_name[7:]) / 2))

    # Create the new version of the data with the right field names for
    # the real and complex parts. This is easy to do by putting the
    # right dtype in the view function.
    dt = np.dtype([(complex_names[0], dtype_name),
                   (complex_names[1], dtype_name)])
    return data.view(dt).copy()
Encodes complex data to having arbitrary complex field names.

Encodes complex `data` to have the real and imaginary field names given in `complex_names`. This is needed because the field names have to be set so that it can be written to an HDF5 file with the right field names (HDF5 doesn't have a native complex type, so H5T_COMPOUND have to be used).

Parameters
----------
data : arraylike
    The data to encode as a complex type with the desired real and imaginary part field names.
complex_names : tuple of 2 str
    ``tuple`` of the names to use (in order) for the real and imaginary fields.

Returns
-------
d : encoded data
    `data` encoded into having the specified field names for the real and imaginary parts.

See Also
--------
decode_complex
def convert_attribute_to_string(value):
    if value is None:
        return value
    elif (sys.hexversion >= 0x03000000 and isinstance(value, str)) \
            or (sys.hexversion < 0x03000000
                and isinstance(value, unicode)):
        return value
    elif isinstance(value, bytes):
        return value.decode()
    elif isinstance(value, np.unicode_):
        return str(value)
    elif isinstance(value, np.bytes_):
        return value.decode()
    else:
        return None
Convert an attribute value to a string.

Converts the attribute value to a string if possible (``None`` is returned if it isn't a string type).

.. versionadded:: 0.2

Parameters
----------
value :
    The Attribute value.

Returns
-------
s : str or None
    The ``str`` value of the attribute if the conversion is possible, or ``None`` if not.
def set_attribute(target, name, value):
    try:
        target.attrs.modify(name, value)
    except:
        target.attrs.create(name, value)
Sets an attribute on a Dataset or Group.

If the attribute `name` doesn't exist yet, it is created. If it already exists, it is overwritten if it differs from `value`.

Notes
-----
``set_attributes_all`` is the fastest way to set and delete Attributes in bulk.

Parameters
----------
target : Dataset or Group
    Dataset or Group to set the attribute of.
name : str
    Name of the attribute to set.
value : numpy type other than numpy.unicode\_
    Value to set the attribute to.

See Also
--------
set_attributes_all
def set_attribute_string(target, name, value):
    set_attribute(target, name, np.bytes_(value))
Sets an attribute to a string on a Dataset or Group.

If the attribute `name` doesn't exist yet, it is created. If it already exists, it is overwritten if it differs from `value`.

Notes
-----
``set_attributes_all`` is the fastest way to set and delete Attributes in bulk.

Parameters
----------
target : Dataset or Group
    Dataset or Group to set the string attribute of.
name : str
    Name of the attribute to set.
value : string
    Value to set the attribute to. Can be any sort of string type that will convert to a ``numpy.bytes_``.

See Also
--------
set_attributes_all
def set_attribute_string_array(target, name, string_list):
    s_list = [convert_to_str(s) for s in string_list]
    if sys.hexversion >= 0x03000000:
        target.attrs.create(name, s_list,
                            dtype=h5py.special_dtype(vlen=str))
    else:
        target.attrs.create(name, s_list,
                            dtype=h5py.special_dtype(vlen=unicode))
Sets an attribute to an array of strings on a Dataset or Group.

If the attribute `name` doesn't exist yet, it is created. If it already exists, it is overwritten with the list of strings `string_list` (they will be vlen strings).

Notes
-----
``set_attributes_all`` is the fastest way to set and delete Attributes in bulk.

Parameters
----------
target : Dataset or Group
    Dataset or Group to set the string array attribute of.
name : str
    Name of the attribute to set.
string_list : list of str
    List of strings to set the attribute to. Strings must be ``str``.

See Also
--------
set_attributes_all
def find_thirdparty_marshaller_plugins():
    all_plugins = tuple(pkg_resources.iter_entry_points(
        'hdf5storage.marshallers.plugins'))
    return {ver: {p.module_name: p
                  for p in all_plugins if p.name == ver}
            for ver in supported_marshaller_api_versions()}
Find, but don't load, all third party marshaller plugins.

Third party marshaller plugins declare the entry point ``'hdf5storage.marshallers.plugins'`` with the name being the Marshaller API version and the target being a function that returns a ``tuple`` or ``list`` of all the marshallers provided by that plugin when given the hdf5storage version (``str``) as its only argument.

.. versionadded:: 0.2

Returns
-------
plugins : dict
    The entry points for obtaining marshallers from third party plugins. The keys are the Marshaller API versions (``str``) and the values are ``dict`` of the entry points, with the module names as the keys (``str``) and the values being the entry points (``pkg_resources.EntryPoint``).

See Also
--------
supported_marshaller_api_versions
def _import_marshaller_modules(self, m):
    try:
        for name in m.required_modules:
            if name not in sys.modules:
                if _has_importlib:
                    importlib.import_module(name)
                else:
                    __import__(name)
    except ImportError:
        return False
    except:
        raise
    else:
        return True
Imports the modules required by the marshaller.

Parameters
----------
m : marshaller
    The marshaller to load the modules for.

Returns
-------
success : bool
    Whether the modules `m` requires could be imported successfully or not.
def add_marshaller(self, marshallers):
    if not isinstance(marshallers, collections.Iterable):
        marshallers = [marshallers]
    for m in marshallers:
        if not isinstance(m, Marshallers.TypeMarshaller):
            raise TypeError('Each marshaller must inherit from '
                            'hdf5storage.Marshallers.'
                            'TypeMarshaller.')
        if m not in self._user_marshallers:
            self._user_marshallers.append(m)
    self._update_marshallers()
Add a marshaller/s to the user provided list.

Adds a marshaller or a list of them to the user provided set of marshallers. Note that the builtin marshallers take priority when choosing the right marshaller.

.. versionchanged:: 0.2
   All marshallers must now inherit from ``hdf5storage.Marshallers.TypeMarshaller``.

.. versionchanged:: 0.2
   Builtin marshallers take priority over user provided ones.

Parameters
----------
marshallers : marshaller or iterable of marshallers
    The user marshaller/s to add to the user provided collection. Must inherit from ``hdf5storage.Marshallers.TypeMarshaller``.

Raises
------
TypeError
    If one of `marshallers` is the wrong type.

See Also
--------
hdf5storage.Marshallers.TypeMarshaller
def remove_marshaller(self, marshallers):
    if not isinstance(marshallers, collections.Iterable):
        marshallers = [marshallers]
    for m in marshallers:
        if m in self._user_marshallers:
            self._user_marshallers.remove(m)
    self._update_marshallers()
Removes a marshaller/s from the user provided list.

Removes a marshaller or a list of them from the user provided set of marshallers.

Parameters
----------
marshallers : marshaller or list of marshallers
    The user marshaller/s to remove from the user provided collection.
def get_marshaller_for_type(self, tp):
    if not isinstance(tp, str):
        tp = tp.__module__ + '.' + tp.__name__
    if tp in self._types:
        index = self._types[tp]
    else:
        return None, False
    m = self._marshallers[index]
    if self._imported_required_modules[index]:
        return m, True
    if not self._has_required_modules[index]:
        return m, False
    success = self._import_marshaller_modules(m)
    self._has_required_modules[index] = success
    self._imported_required_modules[index] = success
    return m, success
Gets the appropriate marshaller for a type.

Retrieves the marshaller, if any, that can be used to read/write a Python object with type 'tp'. The modules it requires, if available, will be loaded.

Parameters
----------
tp : type or str
    Python object ``type`` (which would be the class reference) or its string representation like ``'collections.deque'``.

Returns
-------
marshaller : marshaller or None
    The marshaller that can read/write the type to file. ``None`` if no appropriate marshaller is found.
has_required_modules : bool
    Whether the required modules for reading the type are present or not.

See Also
--------
hdf5storage.Marshallers.TypeMarshaller.types
def get_marshaller_for_type_string(self, type_string):
    if type_string in self._type_strings:
        index = self._type_strings[type_string]
        m = self._marshallers[index]
        if self._imported_required_modules[index]:
            return m, True
        if not self._has_required_modules[index]:
            return m, False
        success = self._import_marshaller_modules(m)
        self._has_required_modules[index] = success
        self._imported_required_modules[index] = success
        return m, success
    else:
        return None, False
Gets the appropriate marshaller for a type string.

Retrieves the marshaller, if any, that can be used to read/write a Python object with the given type string. The modules it requires, if available, will be loaded.

Parameters
----------
type_string : str
    Type string for a Python object.

Returns
-------
marshaller : marshaller or None
    The marshaller that can read/write the type to file. ``None`` if no appropriate marshaller is found.
has_required_modules : bool
    Whether the required modules for reading the type are present or not.

See Also
--------
hdf5storage.Marshallers.TypeMarshaller.python_type_strings
def get_marshaller_for_matlab_class(self, matlab_class):
    if matlab_class in self._matlab_classes:
        index = self._matlab_classes[matlab_class]
        m = self._marshallers[index]
        if self._imported_required_modules[index]:
            return m, True
        if not self._has_required_modules[index]:
            return m, False
        success = self._import_marshaller_modules(m)
        self._has_required_modules[index] = success
        self._imported_required_modules[index] = success
        return m, success
    else:
        return None, False
Gets the appropriate marshaller for a MATLAB class string.

Retrieves the marshaller, if any, that can be used to read/write a Python object associated with the given MATLAB class string. The modules it requires, if available, will be loaded.

Parameters
----------
matlab_class : str
    MATLAB class string for a Python object.

Returns
-------
marshaller : marshaller or None
    The marshaller that can read/write the type to file. ``None`` if no appropriate marshaller is found.
has_required_modules : bool
    Whether the required modules for reading the type are present or not.

See Also
--------
hdf5storage.Marshallers.TypeMarshaller.python_type_strings
def build_cycle_graph(num_nodes):
    graph = UndirectedGraph()

    if num_nodes > 0:
        first_node = graph.new_node()
        if num_nodes > 1:
            previous_node = first_node
            for _ in range(num_nodes - 1):
                new_node = graph.new_node()
                graph.new_edge(previous_node, new_node)
                previous_node = new_node
            graph.new_edge(previous_node, first_node)

    return graph
Builds a cycle graph with the specified number of nodes. Ref: http://mathworld.wolfram.com/CycleGraph.html
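A usage sketch (assuming the UndirectedGraph API shown later in this section; node ids start at 1):

graph = build_cycle_graph(5)   # C_5, a pentagon
print(graph.num_nodes())       # 5
print(graph.num_edges())       # 5
print(graph.neighbors(1))      # the two nodes adjacent to node 1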
def build_wheel_graph(num_nodes):
    # The easiest way to build a wheel graph is to build
    # C_{n-1} and then add a hub node and spoke edges
    graph = build_cycle_graph(num_nodes - 1)

    cycle_graph_vertices = graph.get_all_node_ids()

    node_id = graph.new_node()
    for cycle_node in cycle_graph_vertices:
        graph.new_edge(node_id, cycle_node)

    return graph
Builds a wheel graph with the specified number of nodes. Ref: http://mathworld.wolfram.com/WheelGraph.html
def build_k5_graph():
    graph = UndirectedGraph()

    # K5 has 5 nodes
    for _ in range(5):
        graph.new_node()

    # K5 has 10 edges
    # --Edge: a
    graph.new_edge(1, 2)
    # --Edge: b
    graph.new_edge(2, 3)
    # --Edge: c
    graph.new_edge(3, 4)
    # --Edge: d
    graph.new_edge(4, 5)
    # --Edge: e
    graph.new_edge(5, 1)
    # --Edge: f
    graph.new_edge(1, 3)
    # --Edge: g
    graph.new_edge(1, 4)
    # --Edge: h
    graph.new_edge(2, 4)
    # --Edge: i
    graph.new_edge(2, 5)
    # --Edge: j
    graph.new_edge(3, 5)

    return graph
Makes a new K5 graph. Ref: http://mathworld.wolfram.com/Pentatope.html
def build_k33_graph():
    graph = UndirectedGraph()

    # K3,3 has 6 nodes
    for _ in range(1, 7):
        graph.new_node()

    # K3,3 has 9 edges
    # --Edge: a
    graph.new_edge(1, 4)
    # --Edge: b
    graph.new_edge(1, 5)
    # --Edge: c
    graph.new_edge(1, 6)
    # --Edge: d
    graph.new_edge(2, 4)
    # --Edge: e
    graph.new_edge(2, 5)
    # --Edge: f
    graph.new_edge(2, 6)
    # --Edge: g
    graph.new_edge(3, 4)
    # --Edge: h
    graph.new_edge(3, 5)
    # --Edge: i
    graph.new_edge(3, 6)

    return graph
Makes a new K3,3 graph. Ref: http://mathworld.wolfram.com/UtilityGraph.html
def build_groetzch_graph():
    # Because the graph is so complicated, we want to
    # build it via adjacency matrix specification

    # -- Initialize the matrix to all zeros
    adj = [[0 for _ in range(11)] for _ in range(11)]

    # -- Add individual edge connections
    row_connections = []

    row_connections.append((1, 2, 7, 10))
    row_connections.append((0, 3, 6, 9))
    row_connections.append((0, 4, 6, 8))
    row_connections.append((1, 4, 8, 10))
    row_connections.append((2, 3, 7, 9))
    row_connections.append((6, 7, 8, 9, 10))
    row_connections.append((1, 2, 5))
    row_connections.append((0, 4, 5))
    row_connections.append((2, 3, 5))
    row_connections.append((1, 4, 5))
    row_connections.append((0, 3, 5))

    for j, tpl in enumerate(row_connections):
        for i in tpl:
            adj[j][i] = 1
            adj[i][j] = 1

    # Debug print the adjacency matrix
    # for row in adj:
    #     print(row)

    graph, _ = create_graph_from_adjacency_matrix(adj)

    return graph
Makes a new Groetzsch graph. Ref: http://mathworld.wolfram.com/GroetzschGraph.html
def build_franklin_graph():
    # The easiest way to build the Franklin graph is to start
    # with C12 and add the additional 6 edges
    graph = build_cycle_graph(12)

    edge_tpls = [
        (1, 8),
        (2, 7),
        (3, 10),
        (4, 9),
        (5, 12),
        (6, 11),
    ]

    for i, j in edge_tpls:
        graph.new_edge(i, j)

    return graph
Makes a new Franklin graph. Ref: http://mathworld.wolfram.com/FranklinGraph.html
def build_chvatal_graph():
    # The easiest way to build the Chvatal graph is to start
    # with C12 and add the additional 12 edges
    graph = build_cycle_graph(12)

    edge_tpls = [
        (1, 7), (1, 9), (2, 5), (2, 11),
        (3, 7), (3, 9), (4, 10), (4, 12),
        (5, 8), (6, 10), (6, 12), (8, 11),
    ]

    for i, j in edge_tpls:
        graph.new_edge(i, j)

    return graph
Makes a new Chvatal graph. Ref: http://mathworld.wolfram.com/ChvatalGraph.html
def new_node(self):
    node_id = self.generate_node_id()
    node = {'id': node_id,
            'edges': [],
            'data': {}}
    self.nodes[node_id] = node
    self._num_nodes += 1
    return node_id
Adds a new, blank node to the graph. Returns the node id of the new node.
def new_edge(self, node_a, node_b, cost=1):
    # Verify that both nodes exist in the graph
    try:
        self.nodes[node_a]
    except KeyError:
        raise NonexistentNodeError(node_a)
    try:
        self.nodes[node_b]
    except KeyError:
        raise NonexistentNodeError(node_b)

    # Create the new edge
    edge_id = self.generate_edge_id()
    edge = {'id': edge_id,
            'vertices': (node_a, node_b),
            'cost': cost,
            'data': {}}
    self.edges[edge_id] = edge
    self.nodes[node_a]['edges'].append(edge_id)
    self._num_edges += 1

    return edge_id
Adds a new edge from node_a to node_b that has a cost. Returns the edge id of the new edge.
def neighbors(self, node_id):
    node = self.get_node(node_id)
    return [self.get_edge(edge_id)['vertices'][1]
            for edge_id in node['edges']]
Find all the nodes where there is an edge from the specified node to that node. Returns a list of node ids.
def adjacent(self, node_a, node_b):
    neighbors = self.neighbors(node_a)
    return node_b in neighbors
Determines whether there is an edge from node_a to node_b. Returns True if such an edge exists, otherwise returns False.
def edge_cost(self, node_a, node_b):
    cost = float('inf')
    node_object_a = self.get_node(node_a)
    for edge_id in node_object_a['edges']:
        edge = self.get_edge(edge_id)
        tpl = (node_a, node_b)
        if edge['vertices'] == tpl:
            cost = edge['cost']
            break
    return cost
Returns the cost of the edge that connects node_a to node_b. Returns +inf if no such edge exists.
def get_node(self, node_id):
    try:
        node_object = self.nodes[node_id]
    except KeyError:
        raise NonexistentNodeError(node_id)
    return node_object
Returns the node object identified by "node_id".
def get_edge(self, edge_id):
    try:
        edge_object = self.edges[edge_id]
    except KeyError:
        raise NonexistentEdgeError(edge_id)
    return edge_object
Returns the edge object identified by "edge_id".
def delete_edge_by_nodes(self, node_a, node_b):
    node = self.get_node(node_a)

    # Determine the edge ids
    edge_ids = []
    for e_id in node['edges']:
        edge = self.get_edge(e_id)
        if edge['vertices'][1] == node_b:
            edge_ids.append(e_id)

    # Delete the edges
    for e in edge_ids:
        self.delete_edge_by_id(e)
Removes all the edges from node_a to node_b from the graph.
def delete_node(self, node_id):
    node = self.get_node(node_id)

    # Remove all edges from the node (iterate over a copy, since
    # delete_edge_by_id mutates node['edges'] as we go)
    for e in list(node['edges']):
        self.delete_edge_by_id(e)

    # Remove all edges to the node
    edges = [edge_id for edge_id, edge in list(self.edges.items())
             if edge['vertices'][1] == node_id]
    for e in edges:
        self.delete_edge_by_id(e)

    # Remove the node from the node list
    del self.nodes[node_id]
    self._num_nodes -= 1
Removes the node identified by node_id from the graph.
def move_edge_source(self, edge_id, node_a, node_b):
    # Grab the edge
    edge = self.get_edge(edge_id)

    # Alter the vertices
    edge['vertices'] = (node_b, edge['vertices'][1])

    # Remove the edge from node_a
    node = self.get_node(node_a)
    node['edges'].remove(edge_id)

    # Add the edge to node_b
    node = self.get_node(node_b)
    node['edges'].append(edge_id)
Moves an edge originating from node_a so that it originates from node_b.
def move_edge_target(self, edge_id, node_a):
    # Grab the edge
    edge = self.get_edge(edge_id)

    # Alter the vertices
    edge['vertices'] = (edge['vertices'][0], node_a)
Moves an edge so that it targets node_a.
def get_edge_ids_by_node_ids(self, node_a, node_b):
    # Check if the nodes are adjacent
    if not self.adjacent(node_a, node_b):
        return []

    # They're adjacent, so pull the list of edges from node_a and
    # determine which ones point to node_b
    node = self.get_node(node_a)
    return [edge_id for edge_id in node['edges']
            if self.get_edge(edge_id)['vertices'][1] == node_b]
Returns a list of edge ids connecting node_a to node_b.
def get_first_edge_id_by_node_ids(self, node_a, node_b):
    ret = self.get_edge_ids_by_node_ids(node_a, node_b)
    if not ret:
        return None
    else:
        return ret[0]
Returns the first (and possibly only) edge connecting node_a and node_b.
def find_biconnected_components(graph):
    list_of_components = []

    # Run the algorithm on each of the connected components of the graph
    components = get_connected_components_as_subgraphs(graph)
    for component in components:
        # --Call the internal biconnected components function to find
        # --the edge lists for this particular connected component
        edge_list = _internal_get_biconnected_components_edge_lists(component)
        list_of_components.extend(edge_list)

    return list_of_components
Finds all the biconnected components in a graph. Returns a list of lists, each containing the edges that form a biconnected component. Returns an empty list for an empty graph.
def find_biconnected_components_as_subgraphs(graph):
    list_of_graphs = []

    list_of_components = find_biconnected_components(graph)
    for edge_list in list_of_components:
        subgraph = get_subgraph_from_edge_list(graph, edge_list)
        list_of_graphs.append(subgraph)

    return list_of_graphs
Finds the biconnected components and returns them as subgraphs.
def find_articulation_vertices(graph):
    articulation_vertices = []

    all_nodes = graph.get_all_node_ids()
    if len(all_nodes) == 0:
        return articulation_vertices

    # Run the algorithm on each of the connected components of the graph
    components = get_connected_components_as_subgraphs(graph)
    for component in components:
        # --Call the internal articulation vertices function to find
        # --the node list for this particular connected component
        vertex_list = _internal_get_cut_vertex_list(component)
        articulation_vertices.extend(vertex_list)

    return articulation_vertices
Finds all of the articulation vertices within a graph. Returns a list of all articulation vertices within the graph. Returns an empty list for an empty graph.
def output_component(graph, edge_stack, u, v):
    edge_list = []
    while len(edge_stack) > 0:
        edge_id = edge_stack.popleft()
        edge_list.append(edge_id)
        edge = graph.get_edge(edge_id)
        tpl_a = (u, v)
        tpl_b = (v, u)
        if tpl_a == edge['vertices'] or tpl_b == edge['vertices']:
            break
    return edge_list
Helper function to pop edges off the stack and produce a list of them.
def depth_first_search(graph, root_node=None):
    ordering, parent_lookup, children_lookup = \
        depth_first_search_with_parent_data(graph, root_node)
    return ordering
Searches through the tree in a depth-first fashion. If root_node is None, an arbitrary node will be used as the root. If root_node is not None, it will be used as the root for the search tree. Returns a list of nodes, in the order that they were reached.
def depth_first_search_with_parent_data(graph, root_node=None,
                                        adjacency_lists=None):
    ordering = []
    parent_lookup = {}
    children_lookup = defaultdict(lambda: [])

    all_nodes = graph.get_all_node_ids()
    if not all_nodes:
        return ordering, parent_lookup, children_lookup

    stack = deque()
    discovered = defaultdict(lambda: False)
    unvisited_nodes = set(all_nodes)

    if root_node is None:
        root_node = all_nodes[0]

    if adjacency_lists is None:
        adj = lambda v: graph.neighbors(v)
    else:
        adj = lambda v: adjacency_lists[v]

    # --Initialize the stack, simulating the DFS call on the root node
    stack.appendleft(root_node)
    parent_lookup[root_node] = root_node

    # We're using a non-recursive implementation of DFS, since Python isn't
    # great for deep recursion
    while True:
        # Main DFS Loop
        while len(stack) > 0:
            u = stack.popleft()

            if not discovered[u]:
                discovered[u] = True
                if u in unvisited_nodes:
                    unvisited_nodes.remove(u)
                ordering.append(u)
                neighbors = adj(u)
                # When adding the new nodes to the stack, we want to add them
                # in reverse order so that the order the nodes are visited is
                # the same as with a recursive DFS implementation
                for n in neighbors[::-1]:
                    if discovered[n]:
                        # If the node already exists in the discovered nodes
                        # list we don't want to re-add it to the stack
                        continue
                    stack.appendleft(n)
                    parent_lookup[n] = u
                    children_lookup[u].append(n)

        # While there are still nodes that need visiting, repopulate the stack
        if len(unvisited_nodes) > 0:
            u = unvisited_nodes.pop()
            stack.appendleft(u)
        else:
            break

    return ordering, parent_lookup, children_lookup
Performs a depth-first search with visiting order of nodes determined by provided adjacency lists, and also returns a parent lookup dict and a children lookup dict.
def breadth_first_search(graph, root_node=None):
    ordering = []

    all_nodes = graph.get_all_node_ids()
    if not all_nodes:
        return ordering

    queue = deque()
    discovered = defaultdict(lambda: False)
    to_visit = set(all_nodes)

    if root_node is None:
        root_node = all_nodes[0]

    discovered[root_node] = True
    queue.appendleft(root_node)

    # We need to make sure we visit all the nodes, including disconnected ones
    while True:
        # BFS Main Loop
        while len(queue) > 0:
            current_node = queue.pop()
            ordering.append(current_node)
            to_visit.remove(current_node)

            for n in graph.neighbors(current_node):
                if not discovered[n]:
                    discovered[n] = True
                    queue.appendleft(n)

        # New root node if we still have more nodes
        if len(to_visit) > 0:
            node = to_visit.pop()
            to_visit.add(node)  # --We need this here because we remove the
                                # node as part of the BFS algorithm
            discovered[node] = True
            queue.appendleft(node)
        else:
            break

    return ordering
Searches through the tree in a breadth-first fashion. If root_node is None, an arbitrary node will be used as the root. If root_node is not None, it will be used as the root for the search tree. Returns a list of nodes, in the order that they were reached.
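A usage sketch combining the builders and traversals above (the exact orders can vary with set iteration order):

graph = build_cycle_graph(6)
print(breadth_first_search(graph, root_node=1))
# e.g. [1, 2, 6, 3, 5, 4] -- rings outward from the root
print(depth_first_search(graph, root_node=1))
# e.g. [1, 2, 3, 4, 5, 6] -- walks the cycle before backtracking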
def graph_to_dot(graph, node_renderer=None, edge_renderer=None):
    node_pairs = list(graph.nodes.items())
    edge_pairs = list(graph.edges.items())

    if node_renderer is None:
        node_renderer_wrapper = lambda nid: ''
    else:
        node_renderer_wrapper = lambda nid: ' [%s]' % ','.join(
            ['%s=%s' % tpl
             for tpl in list(node_renderer(graph, nid).items())])

    # Start the graph
    graph_string = 'digraph G {\n'
    graph_string += 'overlap=scale;\n'

    # Print the nodes (placeholder)
    for node_id, node in node_pairs:
        graph_string += '%i%s;\n' % (node_id, node_renderer_wrapper(node_id))

    # Print the edges
    for edge_id, edge in edge_pairs:
        node_a = edge['vertices'][0]
        node_b = edge['vertices'][1]
        graph_string += '%i -> %i;\n' % (node_a, node_b)

    # Finish the graph
    graph_string += '}'

    return graph_string
Produces a DOT specification string from the provided graph.
def get_connected_components(graph):
    list_of_components = []
    component = []  # Not strictly necessary due to the while loop structure,
                    # but it helps the automated analysis tools

    # Store a list of all unreached vertices
    unreached = set(graph.get_all_node_ids())
    to_explore = deque()
    while len(unreached) > 0:
        # This happens when we reach the end of a connected component and
        # still have more vertices to search through
        if len(to_explore) == 0:
            n = unreached.pop()
            unreached.add(n)
            to_explore.append(n)
            component = []
            list_of_components.append(component)

        # This is the BFS that searches for connected vertices
        while len(to_explore) > 0:
            n = to_explore.pop()
            if n in unreached:
                component.append(n)
                unreached.remove(n)
                nodes = graph.neighbors(n)
                for n in nodes:
                    if n in unreached:
                        to_explore.append(n)

    return list_of_components
Finds all connected components of the graph. Returns a list of lists, each containing the nodes that form a connected component. Returns an empty list for an empty graph.
def get_connected_components_as_subgraphs(graph):
    components = get_connected_components(graph)

    list_of_graphs = []
    for c in components:
        edge_ids = set()
        nodes = [graph.get_node(node) for node in c]
        for n in nodes:
            # --Loop through the edges in each node, to determine if it
            # should be included
            for e in n['edges']:
                # --Only add the edge to the subgraph if both ends are in
                # the subgraph
                edge = graph.get_edge(e)
                a, b = edge['vertices']
                if a in c and b in c:
                    edge_ids.add(e)

        # --Build the subgraph and add it to the list
        list_of_edges = list(edge_ids)
        subgraph = make_subgraph(graph, c, list_of_edges)
        list_of_graphs.append(subgraph)

    return list_of_graphs
Finds all connected components of the graph. Returns a list of graph objects, each representing a connected component. Returns an empty list for an empty graph.
def new_edge(self, node_a, node_b, cost=1):
    edge_id = super(UndirectedGraph, self).new_edge(node_a, node_b, cost)
    self.nodes[node_b]['edges'].append(edge_id)
    return edge_id
Adds a new, undirected edge between node_a and node_b with a cost. Returns the edge id of the new edge.
def neighbors(self, node_id):
    node = self.get_node(node_id)
    flattened_nodes_list = []
    for a, b in [self.get_edge(edge_id)['vertices']
                 for edge_id in node['edges']]:
        flattened_nodes_list.append(a)
        flattened_nodes_list.append(b)
    node_set = set(flattened_nodes_list)
    if node_id in node_set:
        node_set.remove(node_id)
    return [nid for nid in node_set]
Find all the nodes where there is an edge from the specified node to that node. Returns a list of node ids.
def delete_edge_by_id(self, edge_id):
    edge = self.get_edge(edge_id)

    # Remove the edge from the "from node"
    # --Determine the from node
    from_node_id = edge['vertices'][0]
    from_node = self.get_node(from_node_id)
    # --Remove the edge from it
    from_node['edges'].remove(edge_id)

    # Remove the edge from the "to node"
    to_node_id = edge['vertices'][1]
    to_node = self.get_node(to_node_id)
    # --Remove the edge from it
    to_node['edges'].remove(edge_id)

    # Remove the edge from the edge list
    del self.edges[edge_id]

    self._num_edges -= 1
Removes the edge identified by "edge_id" from the graph.
def move_edge_target(self, edge_id, node_a):
    # Grab the edge
    edge = self.get_edge(edge_id)

    # Remove the edge from the original "target node"
    original_target_node_id = edge['vertices'][1]
    original_target_node = self.get_node(original_target_node_id)
    original_target_node['edges'].remove(edge_id)

    # Add the edge to the new target node
    new_target_node_id = node_a
    new_target_node = self.get_node(new_target_node_id)
    new_target_node['edges'].append(edge_id)

    # Alter the vertices on the edge
    edge['vertices'] = (edge['vertices'][0], node_a)
Moves an edge so that it targets node_a.
def find_minimum_spanning_tree(graph):
    mst = []

    if graph.num_nodes() == 0:
        return mst
    if graph.num_edges() == 0:
        return mst

    connected_components = get_connected_components(graph)
    if len(connected_components) > 1:
        raise DisconnectedGraphError

    edge_list = kruskal_mst(graph)
    return edge_list
Calculates a minimum spanning tree for a graph. Returns a list of edges that define the tree. Returns an empty list for an empty graph.
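A usage sketch (assuming the graph builders above):

graph = build_wheel_graph(6)   # a hub joined to a 5-cycle
mst_edges = find_minimum_spanning_tree(graph)
print(len(mst_edges))          # 5 -- a spanning tree has n - 1 edges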
def find_minimum_spanning_tree_as_subgraph(graph):
    edge_list = find_minimum_spanning_tree(graph)
    subgraph = get_subgraph_from_edge_list(graph, edge_list)
    return subgraph
Calculates a minimum spanning tree and returns a graph representation.
def find_minimum_spanning_forest(graph):
    msf = []

    if graph.num_nodes() == 0:
        return msf
    if graph.num_edges() == 0:
        return msf

    connected_components = get_connected_components_as_subgraphs(graph)
    for subgraph in connected_components:
        edge_list = kruskal_mst(subgraph)
        msf.append(edge_list)

    return msf
Calculates the minimum spanning forest of a disconnected graph. Returns a list of lists, each containing the edges that define that tree. Returns an empty list for an empty graph.
def find_minimum_spanning_forest_as_subgraphs(graph):
    forest = find_minimum_spanning_forest(graph)
    list_of_subgraphs = [get_subgraph_from_edge_list(graph, edge_list)
                         for edge_list in forest]
    return list_of_subgraphs
Calculates the minimum spanning forest and returns a list of trees as subgraphs.
def kruskal_mst(graph):
    edges_accepted = 0
    ds = DisjointSet()
    pq = PriorityQueue()
    accepted_edges = []
    label_lookup = {}

    nodes = graph.get_all_node_ids()
    num_vertices = len(nodes)
    for n in nodes:
        label = ds.add_set()
        label_lookup[n] = label

    edges = graph.get_all_edge_objects()
    for e in edges:
        pq.put(e['id'], e['cost'])

    while edges_accepted < (num_vertices - 1):
        edge_id = pq.get()
        edge = graph.get_edge(edge_id)

        node_a, node_b = edge['vertices']
        label_a = label_lookup[node_a]
        label_b = label_lookup[node_b]

        a_set = ds.find(label_a)
        b_set = ds.find(label_b)

        if a_set != b_set:
            edges_accepted += 1
            accepted_edges.append(edge_id)
            ds.union(a_set, b_set)

    return accepted_edges
Implements Kruskal's Algorithm for finding minimum spanning trees. Assumes a non-empty, connected graph.
def __get_cycle(graph, ordering, parent_lookup):
    root_node = ordering[0]
    for i in range(2, len(ordering)):
        current_node = ordering[i]
        if graph.adjacent(current_node, root_node):
            path = []
            while current_node != root_node:
                path.append(current_node)
                current_node = parent_lookup[current_node]
            path.append(root_node)
            path.reverse()
            return path
Gets the main cycle of the dfs tree.
def __get_segments_from_node(node, graph):
    list_of_segments = []
    node_object = graph.get_node(node)
    for e in node_object['edges']:
        list_of_segments.append(e)
    return list_of_segments
Calculates the segments that can emanate from a particular node on the main cycle.
def __get_segments_from_cycle(graph, cycle_path):
    list_of_segments = []

    # We work through the cycle in a bottom-up fashion
    for n in cycle_path[::-1]:
        segments = __get_segments_from_node(n, graph)
        if segments:
            list_of_segments.append(segments)

    return list_of_segments
Calculates the segments that emanate from the main cycle.
def make_subgraph(graph, vertices, edges):
    # Copy the entire graph
    local_graph = copy.deepcopy(graph)

    # Remove all the edges that aren't in the list
    edges_to_delete = [x for x in local_graph.get_all_edge_ids()
                       if x not in edges]
    for e in edges_to_delete:
        local_graph.delete_edge_by_id(e)

    # Remove all the vertices that aren't in the list
    nodes_to_delete = [x for x in local_graph.get_all_node_ids()
                       if x not in vertices]
    for n in nodes_to_delete:
        local_graph.delete_node(n)

    return local_graph
Converts a subgraph given by a list of vertices and edges into a graph object.
def convert_graph_directed_to_undirected(dg):
    udg = UndirectedGraph()

    # Copy the graph
    # --Copy nodes
    # --Copy edges
    udg.nodes = copy.deepcopy(dg.nodes)
    udg.edges = copy.deepcopy(dg.edges)
    udg.next_node_id = dg.next_node_id
    udg.next_edge_id = dg.next_edge_id

    # Convert the directed edges into undirected edges
    for edge_id in udg.get_all_edge_ids():
        edge = udg.get_edge(edge_id)
        target_node_id = edge['vertices'][1]
        target_node = udg.get_node(target_node_id)
        target_node['edges'].append(edge_id)

    return udg
Converts a directed graph into an undirected graph. Directed edges are made undirected.
def remove_duplicate_edges_directed(dg):
    # With directed edges, we can just hash the to and from node id tuples
    # and if an edge happens to conflict with one that already exists, we
    # delete it
    # --For aesthetics, we sort the edge ids so that lower edge ids are kept
    lookup = {}
    edges = sorted(dg.get_all_edge_ids())
    for edge_id in edges:
        e = dg.get_edge(edge_id)
        tpl = e['vertices']
        if tpl in lookup:
            dg.delete_edge_by_id(edge_id)
        else:
            lookup[tpl] = edge_id
Removes duplicate edges from a directed graph.
def remove_duplicate_edges_undirected(udg):
    # With undirected edges, we need to hash both combinations of the
    # to-from node ids, since a-b and b-a are equivalent
    # --For aesthetics, we sort the edge ids so that lower edge ids are kept
    lookup = {}
    edges = sorted(udg.get_all_edge_ids())
    for edge_id in edges:
        e = udg.get_edge(edge_id)
        tpl_a = e['vertices']
        tpl_b = (tpl_a[1], tpl_a[0])
        if tpl_a in lookup or tpl_b in lookup:
            udg.delete_edge_by_id(edge_id)
        else:
            lookup[tpl_a] = edge_id
            lookup[tpl_b] = edge_id
Removes duplicate edges from an undirected graph.
def get_vertices_from_edge_list(graph, edge_list):
    node_set = set()
    for edge_id in edge_list:
        edge = graph.get_edge(edge_id)
        a, b = edge['vertices']
        node_set.add(a)
        node_set.add(b)
    return list(node_set)
Transforms a list of edges into a list of the nodes those edges connect. Returns a list of nodes, or an empty list if given an empty list.
def get_subgraph_from_edge_list(graph, edge_list):
    node_list = get_vertices_from_edge_list(graph, edge_list)
    subgraph = make_subgraph(graph, node_list, edge_list)
    return subgraph
Transforms a list of edges into a subgraph.
def merge_graphs(main_graph, addition_graph):
    node_mapping = {}
    edge_mapping = {}

    for node in addition_graph.get_all_node_objects():
        node_id = node['id']
        new_id = main_graph.new_node()
        node_mapping[node_id] = new_id

    for edge in addition_graph.get_all_edge_objects():
        edge_id = edge['id']
        old_vertex_a_id, old_vertex_b_id = edge['vertices']
        new_vertex_a_id = node_mapping[old_vertex_a_id]
        new_vertex_b_id = node_mapping[old_vertex_b_id]
        new_edge_id = main_graph.new_edge(new_vertex_a_id, new_vertex_b_id)
        edge_mapping[edge_id] = new_edge_id

    return node_mapping, edge_mapping
Merges an ``addition_graph`` into the ``main_graph``. Returns a tuple of dictionaries, mapping old node ids and edge ids to new ids.
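A usage sketch (assuming the graph builders above):

g1 = build_k5_graph()
g2 = build_k33_graph()
node_map, edge_map = merge_graphs(g1, g2)
print(g1.num_nodes())   # 11 -- the 5 original nodes plus the 6 merged in
print(node_map)         # maps each K3,3 node id to its new id in g1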