def getAddr(self, ifname):
    if sys.platform == 'darwin':
        return ifconfig_inet(ifname).get('address')
    return self._getaddr(ifname, self.SIOCGIFADDR)

Get the inet addr for an interface.

@param ifname: interface name
@type ifname: string
def additive_coupling(name, x, mid_channels=512, reverse=False,
                      activation="relu", dropout=0.0):
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
        output_channels = common_layers.shape_list(x)[-1] // 2
        x1, x2 = tf.split(x, num_or_size_splits=2, axis=-1)

        z1 = x1
        shift = conv_stack("nn", x1, mid_channels,
                           output_channels=output_channels,
                           activation=activation, dropout=dropout)

        if not reverse:
            z2 = x2 + shift
        else:
            z2 = x2 - shift
        return tf.concat([z1, z2], axis=3), 0.0

Reversible additive coupling layer.

Args:
  name: variable scope.
  x: 4-D Tensor, shape=(NHWC).
  mid_channels: number of channels in the coupling layer.
  reverse: Forward or reverse operation.
  activation: "relu" or "gatu"
  dropout: default, 0.0

Returns:
  output: 4-D Tensor, shape=(NHWC)
  objective: 0.0
def peak_templates(self):
    peak_templates = []
    for peak_descr in self:
        expanded_dims = [dim_group.dimensions for dim_group in peak_descr]
        templates = product(*expanded_dims)
        for template in templates:
            peak_templates.append(PeakTemplate(template))
    return peak_templates

Create a list of concrete peak templates from a list of general peak descriptions.

:return: List of peak templates.
:rtype: :py:class:`list`
def _clean(self):
    found_ids = {}
    nodes = [self._nodes[_node.Root.ID]]
    while nodes:
        node = nodes.pop()
        found_ids[node.id] = None
        nodes = nodes + node.children

    for node_id in self._nodes:
        if node_id in found_ids:
            continue
        logger.error('Dangling node: %s', node_id)

    for node_id in found_ids:
        if node_id in self._nodes:
            continue
        logger.error('Unregistered node: %s', node_id)
Recursively check that all nodes are reachable.
def _cell_scalar(self, name=None):
    if name is None:
        # use the active scalar array
        field, name = self.active_scalar_info
        if field != CELL_DATA_FIELD:
            raise RuntimeError('Must specify an array to fetch.')

    vtkarr = self.GetCellData().GetArray(name)
    if vtkarr is None:
        raise AssertionError('({}) is not a cell scalar'.format(name))

    # numpy has no bit array type, so convert and remember the name
    if isinstance(vtkarr, vtk.vtkBitArray):
        vtkarr = vtk_bit_array_to_char(vtkarr)
        if name not in self._cell_bool_array_names:
            self._cell_bool_array_names.append(name)

    array = vtk_to_numpy(vtkarr)
    if array.dtype == np.uint8 and name in self._cell_bool_array_names:
        array = array.view(np.bool)
    return array

Returns the cell scalars of a vtk object

Parameters
----------
name : str
    Name of cell scalars to retrieve.

Returns
-------
scalars : np.ndarray
    Numpy array of scalars
def set_boot_arch(arch='default'):
    if arch not in ['i386', 'x86_64', 'default']:
        msg = 'Invalid value passed for arch.\n' \
              'Must be i386, x86_64, or default.\n' \
              'Passed: {0}'.format(arch)
        raise SaltInvocationError(msg)

    cmd = 'systemsetup -setkernelbootarchitecture {0}'.format(arch)
    __utils__['mac_utils.execute_return_success'](cmd)

    return __utils__['mac_utils.confirm_updated'](
        arch,
        get_boot_arch,
    )

Set the kernel to boot in 32 or 64 bit mode on next boot.

.. note::
    When this function fails with the error ``changes to kernel
    architecture failed to save!``, then the boot arch is not updated.
    This is either an Apple bug, not available on the test system, or a
    result of system files being locked down in macOS (SIP Protection).

:param str arch: A string representing the desired architecture. If no
    value is passed, default is assumed. Valid values include:

    - i386
    - x86_64
    - default

:return: True if successful, False if not
:rtype: bool

CLI Example:

.. code-block:: bash

    salt '*' system.set_boot_arch i386
def inv_matrix(self) -> np.ndarray:
    if self._inv_matrix is None:
        self._inv_matrix = inv(self._matrix)
        self._inv_matrix.setflags(write=False)
    return self._inv_matrix
Inverse of lattice matrix.
def _getImageSize(filename):
    result = None
    file = open(filename, 'rb')
    # PNG: check the signature, then scan chunks for the IHDR header.
    if file.read(8) == b'\x89PNG\r\n\x1a\n':
        while 1:
            length, = _struct.unpack('>i', file.read(4))
            chunkID = file.read(4)
            if chunkID == b'':  # was '' (str), which never matches bytes
                break
            if chunkID == b'IHDR':
                result = _struct.unpack('>ii', file.read(8))
                break
            file.seek(4 + length, 1)
        file.close()
        return result
    # BMP: the signature is the two bytes 'BM' (was read(8), which
    # could never equal the two-byte magic).
    file.seek(0)
    if file.read(2) == b'BM':
        file.seek(18, 0)
        result = _struct.unpack('<ii', file.read(8))
    file.close()
    return result

Try to get the width and height of a bmp or png image file
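A minimal usage sketch, assuming `_struct` is an alias for the standard
struct module (as its leading underscore suggests) and that `logo.png` is
a hypothetical image file on disk:

import struct as _struct

size = _getImageSize('logo.png')
if size is not None:
    width, height = size
    print('%d x %d pixels' % (width, height))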
def setup_file_logger(filename, formatting, log_level):
    logger = logging.getLogger()
    # remove any previously attached handler before adding the file handler
    if logger.handlers:
        logger.removeHandler(logger.handlers[0])
    handler = logging.FileHandler(filename)
    logger.addHandler(handler)
    formatter = logging.Formatter(*formatting)
    handler.setFormatter(formatter)
    logger.setLevel(log_level)
    handler.setLevel(log_level)
    return logger
A helper function for creating a file logger. Accepts arguments, as it is used in Status and LoggingWriter.
def djrepo_path(self):
    root, ext = os.path.splitext(self.filepath)
    path = root + ".djrepo"
    return path
The path of the djrepo file. None if file does not exist.
def _setup(self):
    default_settings.reload()
    environment_variable = self._kwargs.get(
        "ENVVAR_FOR_DYNACONF", default_settings.ENVVAR_FOR_DYNACONF
    )
    settings_module = os.environ.get(environment_variable)
    self._wrapped = Settings(
        settings_module=settings_module, **self._kwargs
    )
    self.logger.debug("Lazy Settings _setup ...")
Initial setup, run once.
def _compile(pattern, flags):
    return re.compile(WcParse(pattern, flags & FLAG_MASK).parse())
Compile the pattern to regex.
def badnick(self, me=None, nick=None, **kw):
    if me == '*':
        self.bot.set_nick(self.bot.nick + '_')
    self.bot.log.debug('Trying to regain nickname in 30s...')
    self.nick_handle = self.bot.loop.call_later(
        30, self.bot.set_nick, self.bot.original_nick)
Use alt nick on nick error
def get_assessment_section_mdata():
    return {
        'assessment_taken': {
            'element_label': {
                'text': 'assessment taken',
                'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
                'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
                'formatTypeId': str(DEFAULT_FORMAT_TYPE),
            },
            'instructions': {
                'text': 'accepts an osid.id.Id object',
                'languageTypeId': str(DEFAULT_LANGUAGE_TYPE),
                'scriptTypeId': str(DEFAULT_SCRIPT_TYPE),
                'formatTypeId': str(DEFAULT_FORMAT_TYPE),
            },
            'required': False,
            'read_only': False,
            'linked': False,
            'array': False,
            'default_id_values': [''],
            'syntax': 'ID',
            'id_set': [],
        },
    }
Return default mdata map for AssessmentSection
def _process_unresolved_indirect_jumps(self):
    l.info("%d indirect jumps to resolve.", len(self._indirect_jumps_to_resolve))

    all_targets = set()
    for idx, jump in enumerate(self._indirect_jumps_to_resolve):
        if self._low_priority:
            self._release_gil(idx, 20, 0.0001)
        all_targets |= self._process_one_indirect_jump(jump)

    self._indirect_jumps_to_resolve.clear()
    return all_targets

Resolve all unresolved indirect jumps found in previous scanning.

Currently we support resolving the following types of indirect jumps:
- Ijk_Call: indirect calls where the function address is passed in from a preceding basic block
- Ijk_Boring: jump tables
- For an up-to-date list, see analyses/cfg/indirect_jump_resolvers

:return: A set of concrete indirect jump targets (ints).
:rtype: set
def list_previous_page(self):
    uri = self._paging.get("domain", {}).get("prev_uri")
    if uri is None:
        raise exc.NoMoreResults("There are no previous pages of domains "
                                "to list.")
    return self._list(uri)
When paging through results, this will return the previous page, using the same limit. If there are no more results, a NoMoreResults exception will be raised.
def flatten_all_paths(self, group_filter=lambda x: True,
                      path_filter=lambda x: True,
                      path_conversions=CONVERSIONS):
    return flatten_all_paths(self.tree.getroot(), group_filter,
                             path_filter, path_conversions)
Forward the tree of this document into the more general flatten_all_paths function and return the result.
def __get_package_manager(self):
    package_manager = ""
    args = ""
    sudo_required = True
    if system.is_osx():
        package_manager = "brew"
        sudo_required = False
        args = " install"
    elif system.is_debian():
        package_manager = "apt-get"
        args = " -y install"
    elif system.is_fedora():
        package_manager = "yum"
        args = " install"
    elif system.is_arch():
        package_manager = "pacman"
        args = " --noconfirm -S"
    if lib.which(package_manager) is None:
        self.logger.warn("Package manager %s not installed! "
                         "Packages will not be installed." % package_manager)
        self.package_manager = None
        return  # without this, the assignments below would clobber the None
    self.package_manager = package_manager
    self.sudo_required = sudo_required
    self.args = args

Determine the system package manager and verify that it is installed.
def get_column_for_modelfield(model_field):
    # Walk relations down to the concrete primary key field.
    while model_field.related_model:
        model_field = model_field.related_model._meta.pk
    for ColumnClass, modelfield_classes in COLUMN_CLASSES:
        if isinstance(model_field, tuple(modelfield_classes)):
            return ColumnClass
Return the built-in Column class for a model field class.
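A self-contained sketch of the same registry-lookup pattern: a list of
(column class, model-field classes) pairs is scanned in order and the
first isinstance() match wins. All classes here are hypothetical
stand-ins, not Django's.

class TextColumn: pass
class NumberColumn: pass

class CharField: pass
class IntegerField: pass

COLUMN_CLASSES = [
    (TextColumn, [CharField]),
    (NumberColumn, [IntegerField]),
]

def column_for(field):
    for column_class, field_classes in COLUMN_CLASSES:
        if isinstance(field, tuple(field_classes)):
            return column_class

assert column_for(CharField()) is TextColumn
assert column_for(IntegerField()) is NumberColumn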
def get_hash_as_int(*args, group: cmod.PairingGroup = None):
    group = group if group else cmod.PairingGroup(PAIRING_GROUP)
    h_challenge = sha256()

    serialedArgs = [group.serialize(arg) if isGroupElement(arg)
                    else cmod.Conversion.IP2OS(arg)
                    for arg in args]

    for arg in sorted(serialedArgs):
        h_challenge.update(arg)

    return bytes_to_int(h_challenge.digest())

Enumerate over the input tuple and generate a hash using the tuple values

:param args: sequence of either group or integer elements
:param group: pairing group if an element is a group element
:return:
def _set(self, **kwargs):
    for param, value in kwargs.items():
        p = getattr(self, param)
        if value is not None:
            try:
                value = p.typeConverter(value)
            except TypeError as e:
                raise TypeError('Invalid param value given for param "%s". %s'
                                % (p.name, e))
        self._paramMap[p] = value
    return self
Sets user-supplied params.
def find_best_candidate(self):
    self.fill_percent_done()
    i_b = np.argmax(self.percent_done.ravel())
    if self.percent_done.ravel()[i_b] <= 0:
        return None

    # break ties using the highest maximum elevation
    I = self.percent_done.ravel() == self.percent_done.ravel()[i_b]
    if I.sum() == 1:
        return i_b
    else:
        I2 = np.argmax(self.max_elev.ravel()[I])
        return I.nonzero()[0][I2]
Determine which tile, when processed, would complete the largest percentage of unresolved edge pixels. This is a heuristic function and does not give the optimal tile.
def get(self):
    opts = current_app.config['RECORDS_REST_SORT_OPTIONS'].get(
        self.search_index)

    sort_fields = []
    if opts:
        for key, item in sorted(opts.items(), key=lambda x: x[1]['order']):
            sort_fields.append(
                {key: dict(
                    title=item['title'],
                    default_order=item.get('default_order', 'asc'))}
            )

    return jsonify(dict(
        sort_fields=sort_fields,
        max_result_window=self.max_result_window,
        default_media_type=self.default_media_type,
        search_media_types=sorted(self.search_media_types),
        item_media_types=sorted(self.item_media_types),
    ))
Get options.
def _ExtractGMailSearchQuery(self, url):
    if 'search/' not in url:
        return None

    _, _, line = url.partition('search/')
    line, _, _ = line.partition('/')
    line, _, _ = line.partition('?')

    return line.replace('+', ' ')

Extracts a search query from a GMail search URL.

GMail: https://mail.google.com/mail/u/0/#search/query[/?]

Args:
  url (str): URL.

Returns:
  str: search query or None if no query was found.
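A quick usage sketch; the method never touches `self`, so it can be
exercised standalone (the GMailPlugin class name here is hypothetical):

url = 'https://mail.google.com/mail/u/0/#search/from%3Abob+report/15a8b9'
query = GMailPlugin._ExtractGMailSearchQuery(None, url)
# partition on 'search/' keeps 'from%3Abob+report/15a8b9', the next two
# partitions strip the trailing '/15a8b9' and any '?...' suffix, and
# '+' is mapped back to a space:
assert query == 'from%3Abob report'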
def Range(start, limit, delta):
    # ops return a tuple of outputs; Range has a single output array
    return np.arange(start, limit, delta, dtype=np.int32),
Range op.
def _generate_key_map(entity_list, key, entity_class):
    key_map = {}
    for obj in entity_list:
        key_map[obj[key]] = entity_class(**obj)

    return key_map

Helper method to generate map from key to entity object for given list of dicts.

Args:
  entity_list: List consisting of dict.
  key: Key in each dict which will be key in the map.
  entity_class: Class representing the entity.

Returns:
  Map mapping key to entity object.
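A small, self-contained usage sketch (the Experiment class is a
hypothetical stand-in for the real entity classes):

class Experiment:
    def __init__(self, id, status):
        self.id = id
        self.status = status

rows = [{'id': 'exp_1', 'status': 'Running'},
        {'id': 'exp_2', 'status': 'Paused'}]
experiments = _generate_key_map(rows, 'id', Experiment)
assert experiments['exp_2'].status == 'Paused'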
def escape_latex_str_if_str(value):
    if not isinstance(value, str):
        return value
    for regex, replace_text in REGEX_ESCAPE_CHARS:
        value = re.sub(regex, replace_text, value)
    value = re.sub(REGEX_BACKSLASH, r'\\\\', value)
    return value

Escape a LaTeX string; non-string values are returned unchanged
def lookup_host(self, name):
    res = self.lookup_by_host(name=name)
    try:
        return dict(ip=res["ip-address"], mac=res["hardware-address"],
                    hostname=res["name"].decode('utf-8'))
    except KeyError:
        raise OmapiErrorAttributeNotFound()

Look for a host object with given name and return the name, mac, and ip address

@type name: str
@rtype: dict or None
@raises ValueError:
@raises OmapiError:
@raises OmapiErrorNotFound: if no host object with the given name could be found
@raises OmapiErrorAttributeNotFound: if lease could be found, but object lacks ip, mac or name
@raises socket.error:
def clear(self):
    node = self._first
    while node is not None:
        next_node = node._next
        node._list = node._prev = node._next = None
        node = next_node
    self._size = 0
Remove all nodes from the list.
def populate_user(self, request, sociallogin, data):
    username = data.get('username')
    first_name = data.get('first_name')
    last_name = data.get('last_name')
    email = data.get('email')
    name = data.get('name')
    user = sociallogin.user
    user_username(user, username or '')
    user_email(user, valid_email_or_none(email) or '')
    name_parts = (name or '').partition(' ')
    user_field(user, 'first_name', first_name or name_parts[0])
    user_field(user, 'last_name', last_name or name_parts[2])
    return user

Hook that can be used to further populate the user instance.

For convenience, we populate several common fields.

Note that the user instance being populated represents a suggested User
instance that represents the social user that is in the process of being
logged in.

The User instance need not be completely valid and conflict free. For
example, verifying whether or not the username already exists is not a
responsibility of this hook.
def key_press_event(self, event):
    if event.key() == QtCore.Qt.Key_Return:
        # move the cursor to the end of the block before writing
        cursor = self.edit.textCursor()
        cursor.movePosition(cursor.EndOfBlock)
        self.edit.setTextCursor(cursor)
    code = _qkey_to_ascii(event)
    if code:
        self.process.writeData(code)
        return False
    return True

Directly writes the ascii code of the key to the process' stdin.

:returns: False to prevent the event from being propagated to the parent widget.
def delete(self, block, name):
    self._kvs.delete(self._key(block, name))
Reset the value of the field named `name` to the default
def open(self):
    self.h_info = SetupDiGetClassDevs(byref(self.guid), None, None,
                                      (DIGCF.PRESENT | DIGCF.DEVICEINTERFACE))
    return self.h_info
Calls SetupDiGetClassDevs to obtain a handle to an opaque device information set that describes the device interfaces supported by all the USB collections currently installed in the system. The application should specify DIGCF.PRESENT and DIGCF.INTERFACEDEVICE in the Flags parameter passed to SetupDiGetClassDevs.
def string_to_integer(value, strict=False):
    if is_undefined(value):
        if strict:
            raise ValueError('The value cannot be null')
        return None
    try:
        return int(value)
    except ValueError:
        raise ValueError('The specified string "%s" does not represent an integer' % value)

Return an integer corresponding to the string representation of a number.

@param value: a string representation of an integer number.
@param strict: indicate whether the specified string MUST be of a valid
    integer number representation.

@return: the integer value represented by the string.

@raise ValueError: if the string doesn't represent a valid integer,
    while the argument ``strict`` equals ``True``.
def df(self, version=None, tags=None, ext=None, **kwargs):
    ext = self._find_extension(version=version, tags=tags)
    if ext is None:
        attribs = "{}{}".format(
            "version={} and ".format(version) if version else "",
            "tags={}".format(tags) if tags else "",
        )
        raise MissingDatasetError(
            "No dataset with {} in local store!".format(attribs))
    fpath = self.fpath(version=version, tags=tags, ext=ext)
    fmt = SerializationFormat.by_name(ext)
    return fmt.deserialize(fpath, **kwargs)

Loads an instance of this dataset into a dataframe.

Parameters
----------
version : str, optional
    The version of the instance of this dataset.
tags : list of str, optional
    The tags associated with the desired instance of this dataset.
ext : str, optional
    The file extension to use. If not given, the default extension is used.
**kwargs : extra keyword arguments, optional
    Extra keyword arguments are forwarded to the deserialization method
    of the SerializationFormat object corresponding to the extension used.

Returns
-------
pandas.DataFrame
    A dataframe containing the desired instance of this dataset.
def connect(port=8813, numRetries=10, host="localhost", proc=None):
    last_error = None
    for wait in range(1, numRetries + 2):
        try:
            return Connection(host, port, proc)
        except socket.error as e:
            # 'e' is scoped to the except block in Python 3, so keep a
            # reference for the final error message
            last_error = e
            print("Could not connect to TraCI server at %s:%s" % (host, port), e)
            if wait < numRetries + 1:
                print(" Retrying in %s seconds" % wait)
                time.sleep(wait)
    raise FatalTraCIError(str(last_error))
Establish a connection to a TraCI-Server and return the connection object. The connection is not saved in the pool and not accessible via traci.switch. It should be safe to use different connections established by this method in different threads.
async def ltrim(self, name, start, end):
    return await self.execute_command('LTRIM', name, start, end)
Trim the list ``name``, removing all values not within the slice between ``start`` and ``end`` ``start`` and ``end`` can be negative numbers just like Python slicing notation
def generate(env):
    java_file = SCons.Tool.CreateJavaFileBuilder(env)
    java_class = SCons.Tool.CreateJavaClassFileBuilder(env)
    java_class_dir = SCons.Tool.CreateJavaClassDirBuilder(env)
    java_class.add_emitter(None, emit_java_classes)
    java_class.add_emitter(env.subst('$JAVASUFFIX'), emit_java_classes)
    java_class_dir.emitter = emit_java_classes

    env.AddMethod(Java)

    env['JAVAC'] = 'javac'
    env['JAVACFLAGS'] = SCons.Util.CLVar('')
    env['JAVABOOTCLASSPATH'] = []
    env['JAVACLASSPATH'] = []
    env['JAVASOURCEPATH'] = []
    env['_javapathopt'] = pathopt
    env['_JAVABOOTCLASSPATH'] = '${_javapathopt("-bootclasspath", "JAVABOOTCLASSPATH")} '
    env['_JAVACLASSPATH'] = '${_javapathopt("-classpath", "JAVACLASSPATH")} '
    env['_JAVASOURCEPATH'] = '${_javapathopt("-sourcepath", "JAVASOURCEPATH", "_JAVASOURCEPATHDEFAULT")} '
    env['_JAVASOURCEPATHDEFAULT'] = '${TARGET.attributes.java_sourcedir}'
    env['_JAVACCOM'] = '$JAVAC $JAVACFLAGS $_JAVABOOTCLASSPATH $_JAVACLASSPATH -d ${TARGET.attributes.java_classdir} $_JAVASOURCEPATH $SOURCES'
    env['JAVACCOM'] = "${TEMPFILE('$_JAVACCOM','$JAVACCOMSTR')}"
    env['JAVACLASSSUFFIX'] = '.class'
    env['JAVASUFFIX'] = '.java'
Add Builders and construction variables for javac to an Environment.
def get_post_fields(request):
    fields = dict()
    for field, value in request.form.items():
        fields[field] = value
    return fields
parse through a request, and return fields from post in a dictionary
def nan_circmean(samples, high=2.0*np.pi, low=0.0, axis=None):
    samples = np.asarray(samples)
    # drop NaNs; note this flattens the input, so axis only applies to
    # the already-flattened array
    samples = samples[~np.isnan(samples)]
    if samples.size == 0:
        return np.nan

    # map samples onto [0, 2 pi), average the unit vectors, and map back
    ang = (samples - low) * 2.0 * np.pi / (high - low)
    ssum = np.sin(ang).sum(axis=axis)
    csum = np.cos(ang).sum(axis=axis)
    res = np.arctan2(ssum, csum)

    # wrap negative angles into [0, 2 pi)
    mask = res < 0.0
    if mask.ndim > 0:
        res[mask] += 2.0 * np.pi
    elif mask:
        res += 2.0 * np.pi

    circmean = res * (high - low) / (2.0 * np.pi) + low
    return circmean

NaN insensitive version of scipy's circular mean routine

Parameters
----------
samples : array_like
    Input array
low : float or int
    Lower boundary for the circular mean range (default=0)
high : float or int
    Upper boundary for the circular mean range (default=2 pi)
axis : int or NoneType
    Axis along which the means are computed. The default is to compute
    the mean of the flattened array.

Returns
-------
circmean : float
    Circular mean
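A quick usage sketch showing why the circular mean matters: two angles
just either side of zero average to zero, where a plain NaN-aware mean
would land near pi.

import numpy as np

angles = np.array([0.1, 2.0 * np.pi - 0.1, np.nan])
print(nan_circmean(angles))   # ~0.0 (wrapped mean)
print(np.nanmean(angles))     # ~3.14, misleading for angles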
def node_add_label(node_name, label_name, label_value, **kwargs):
    cfg = _setup_conn(**kwargs)
    try:
        api_instance = kubernetes.client.CoreV1Api()
        body = {
            'metadata': {
                'labels': {
                    label_name: label_value}
            }
        }
        api_response = api_instance.patch_node(node_name, body)
        return api_response
    except (ApiException, HTTPError) as exc:
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        else:
            log.exception('Exception when calling CoreV1Api->patch_node')
            raise CommandExecutionError(exc)
    finally:
        _cleanup(**cfg)

    return None

Set the value of the label identified by `label_name` to `label_value` on
the node identified by the name `node_name`.
Creates the label if not present.

CLI Examples::

    salt '*' kubernetes.node_add_label node_name="minikube" \
        label_name="foo" label_value="bar"
def _set_default_resource_names(self):
    self.ip_config_name = ''.join([
        self.running_instance_id, '-ip-config'
    ])
    self.nic_name = ''.join([self.running_instance_id, '-nic'])
    self.public_ip_name = ''.join([self.running_instance_id, '-public-ip'])
Generate names for resources based on the running_instance_id.
def _set_digraph_b(self, char):
    self.has_digraph_b = True
    self.active_vowel_ro = di_b_lt[char][0]
    self.active_dgr_b_info = di_b_lt[char]
Sets the second part of a digraph.
def build_connection(url):
    username = os.environ.get('ELASTICSEARCH_USERNAME')
    password = os.environ.get('ELASTICSEARCH_PASSWORD')
    if username and password:
        return Elasticsearch(url, http_auth=(username, password))
    return Elasticsearch(url)

Build an Elasticsearch connection with the given url

Elastic.co's Heroku addon doesn't create credentials with access to the
cluster by default so they aren't exposed in the URL they provide either.
This function works around the situation by grabbing our credentials from
the environment via Django settings and building a connection with them.
def _head_length(self, port):
    if not port:
        return 0.
    parent_state_v = self.get_parent_state_v()
    if parent_state_v is port.parent:
        return port.port_size[1]
    return max(port.port_size[1] * 1.5, self._calc_line_width() / 1.3)
Distance from the center of the port to the perpendicular waypoint
def _evalAndDer(self, x):
    m = len(x)
    fx = np.zeros((m, self.funcCount))
    for j in range(self.funcCount):
        fx[:, j] = self.functions[j](x)
    fx[np.isnan(fx)] = np.inf
    i = np.argmin(fx, axis=1)
    y = fx[np.arange(m), i]
    dydx = np.zeros_like(y)
    for j in range(self.funcCount):
        c = i == j
        dydx[c] = self.functions[j].derivative(x[c])
    return y, dydx
Returns the level and first derivative of the function at each value in x. Only called internally by HARKinterpolator1D.eval_and_der.
def _validate_timeout(cls, value, name):
    if value is _Default:
        return cls.DEFAULT_TIMEOUT

    if value is None or value is cls.DEFAULT_TIMEOUT:
        return value

    if isinstance(value, bool):
        raise ValueError("Timeout cannot be a boolean value. It must "
                         "be an int, float or None.")
    try:
        float(value)
    except (TypeError, ValueError):
        raise ValueError("Timeout value %s was %s, but it must be an "
                         "int, float or None." % (name, value))

    try:
        if value <= 0:
            raise ValueError("Attempted to set %s timeout to %s, but the "
                             "timeout cannot be set to a value less "
                             "than or equal to 0." % (name, value))
    except TypeError:
        raise ValueError("Timeout value %s was %s, but it must be an "
                         "int, float or None." % (name, value))

    return value

Check that a timeout attribute is valid.

:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is
    used to specify in error messages.
:return: The validated and casted version of the given value.
:raises ValueError: If it is a numeric value less than or equal to
    zero, or the type is not an integer, float, or None.
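A self-contained sketch of the sentinel pattern this validator relies on;
the class and defaults below are simplified stand-ins, not the real
Timeout implementation:

_Default = object()  # sentinel: "caller passed nothing at all"

class Timeout:
    DEFAULT_TIMEOUT = None  # stand-in default

    @classmethod
    def _validate(cls, value, name):
        if value is _Default:
            return cls.DEFAULT_TIMEOUT
        if value is None:
            return value
        if isinstance(value, bool) or value <= 0:
            raise ValueError("%s must be a positive number or None" % name)
        return value

assert Timeout._validate(_Default, "connect") is None   # falls back
assert Timeout._validate(2.5, "read") == 2.5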
def unit(self, unit):
    allowed_values = ["cm", "inch", "foot"]
    if unit is not None and unit not in allowed_values:
        raise ValueError(
            "Invalid value for `unit` ({0}), must be one of {1}"
            .format(unit, allowed_values)
        )
    self._unit = unit

Sets the unit of this Dimensions.

:param unit: The unit of this Dimensions.
:type: str
def _call(self, method, *args, **kwargs):
    assert self.session
    if not kwargs.get('verify'):
        kwargs['verify'] = self.SSL_VERIFY
    response = self.session.request(method, *args, **kwargs)
    response_json = response.text and response.json() or {}
    if response.status_code < 200 or response.status_code >= 300:
        message = response_json.get('error', response_json.get('message'))
        raise HelpScoutRemoteException(response.status_code, message)
    self.page_current = response_json.get(self.PAGE_CURRENT, 1)
    self.page_total = response_json.get(self.PAGE_TOTAL, 1)
    try:
        return response_json[self.PAGE_DATA_MULTI]
    except KeyError:
        pass
    try:
        return [response_json[self.PAGE_DATA_SINGLE]]
    except KeyError:
        pass
    return None
Call the remote service and return the response data.
def setup_logging(args):
    handler = logging.StreamHandler()
    handler.setLevel(args.log_level)
    formatter = logging.Formatter(('%(asctime)s - '
                                   '%(name)s - '
                                   '%(levelname)s - '
                                   '%(message)s'))
    handler.setFormatter(formatter)
    LOGGER.addHandler(handler)

This sets up the logging.

Needs the args to get the log level supplied

:param args: The command line arguments
def parser(input_file_path='config.json'):
    try:
        with open(input_file_path, 'r') as config_file:
            config_new = json.load(config_file)
    except Exception:  # was a bare except
        raise Exception('Config file "' + input_file_path +
                        '" not loaded properly. Please check it and try again.')

    import copy
    options = update_config(config_new, copy.deepcopy(default_config))
    return options
Parser for the .json file containing the configuration of the method.
def get_config_files():
    apps_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            APPS_DIR)
    custom_apps_dir = os.path.join(os.environ['HOME'], CUSTOM_APPS_DIR)

    config_files = set()
    custom_files = set()

    # Custom configs take precedence over the stock ones.
    if os.path.isdir(custom_apps_dir):
        for filename in os.listdir(custom_apps_dir):
            if filename.endswith('.cfg'):
                config_files.add(os.path.join(custom_apps_dir, filename))
                custom_files.add(filename)

    for filename in os.listdir(apps_dir):
        if filename.endswith('.cfg') and filename not in custom_files:
            config_files.add(os.path.join(apps_dir, filename))

    return config_files

Return the application configuration files.

Return a list of configuration files describing the apps supported by
Mackup. The files returned are absolute full paths to those files.
e.g. /usr/lib/mackup/applications/bash.cfg

Only one config file per application should be returned, custom config
having a priority over stock config.

Returns:
    set of strings.
def usage(path):
    out = __salt__['cmd.run_all']("btrfs filesystem usage {0}".format(path))
    salt.utils.fsutils._verify_run(out)

    ret = {}
    for section in out['stdout'].split("\n\n"):
        if section.startswith("Overall:\n"):
            ret['overall'] = _usage_overall(section)
        elif section.startswith("Unallocated:\n"):
            ret['unallocated'] = _usage_unallocated(section)
        else:
            ret.update(_usage_specific(section))

    return ret

Show in which disk the chunks are allocated.

CLI Example:

.. code-block:: bash

    salt '*' btrfs.usage /your/mountpoint
def multiple_domains(self):
    domains = []
    for cookie in iter(self):
        if cookie.domain is not None and cookie.domain in domains:
            return True
        domains.append(cookie.domain)
    return False

Returns True if there are multiple domains in the jar.
Returns False otherwise.

:rtype: bool
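A tiny self-contained check; since the method only iterates over objects
with a .domain attribute, it can be exercised with stand-ins, treating
the list itself as `self`:

from types import SimpleNamespace

jar = [SimpleNamespace(domain='example.com'),
       SimpleNamespace(domain='example.org'),
       SimpleNamespace(domain='example.com')]
assert multiple_domains(jar) is True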
def _renderBlockDevice(self, block_device, build):
    rendered_block_device = yield build.render(block_device)
    if rendered_block_device['volume_size'] is None:
        source_type = rendered_block_device['source_type']
        source_uuid = rendered_block_device['uuid']
        volume_size = self._determineVolumeSize(source_type, source_uuid)
        rendered_block_device['volume_size'] = volume_size
    return rendered_block_device
Render all of the block device's values.
def get_isolated_cpus():
    # prefer the sysfs file, fall back to parsing the kernel command line
    path = sysfs_path('devices/system/cpu/isolated')
    isolated = read_first_line(path)
    if isolated:
        return parse_cpu_list(isolated)

    cmdline = read_first_line(proc_path('cmdline'))
    if cmdline:
        match = re.search(r'\bisolcpus=([^ ]+)', cmdline)
        if match:
            isolated = match.group(1)
            return parse_cpu_list(isolated)

    return None
Get the list of isolated CPUs. Return a sorted list of CPU identifiers, or return None if no CPU is isolated.
def subscribe(self, topic, callback, qos):
    if topic in self.topics:
        return

    def _message_callback(mqttc, userdata, msg):
        callback(msg.topic, msg.payload.decode('utf-8'), msg.qos)

    self._mqttc.subscribe(topic, qos)
    self._mqttc.message_callback_add(topic, _message_callback)
    self.topics[topic] = callback
Subscribe to an MQTT topic.
def make_flatten(decl_or_decls):
    def proceed_single(decl):
        answer = [decl]
        if not isinstance(decl, scopedef_t):
            return answer
        for elem in decl.declarations:
            if isinstance(elem, scopedef_t):
                answer.extend(proceed_single(elem))
            else:
                answer.append(elem)
        return answer

    decls = []
    if isinstance(decl_or_decls, list):
        decls.extend(decl_or_decls)
    else:
        decls.append(decl_or_decls)
    answer = []
    for decl in decls:
        answer.extend(proceed_single(decl))
    return answer

Converts tree representation of declarations to flatten one.

:param decl_or_decls: reference to list of declaration's or single declaration
:type decl_or_decls: :class:`declaration_t` or [ :class:`declaration_t` ]
:rtype: [ all internal declarations ]
def setup_paths(self, environ, coll, record=False):
    if not coll or not self.warcserver.root_dir:
        return

    if coll != '$root':
        pop_path_info(environ)
        if record:
            pop_path_info(environ)

    paths = [self.warcserver.root_dir]

    if coll != '$root':
        paths.append(coll)

    paths.append(self.templates_dir)

    environ['pywb.templates_dir'] = '/'.join(paths)

Populates the WSGI environment dictionary with the path information
necessary to perform a response for content or record.

:param dict environ: The WSGI environment dictionary for the request
:param str coll: The name of the collection the record is to be served from
:param bool record: Should the content being served be recorded (saved to a warc). Only valid in record mode
def get_wrapper_class(backend_name):
    try:
        return _WRAPPERS[backend_name]
    except KeyError:
        if backend_name == 'ni':
            from .ctwrapper import NIVisaLibrary
            _WRAPPERS['ni'] = NIVisaLibrary
            return NIVisaLibrary
        try:
            pkg = __import__('pyvisa-' + backend_name)
            _WRAPPERS[backend_name] = cls = pkg.WRAPPER_CLASS
            return cls
        except ImportError:
            raise ValueError('Wrapper not found: No package named pyvisa-%s'
                             % backend_name)
Return the WRAPPER_CLASS for a given backend. :rtype: pyvisa.highlevel.VisaLibraryBase
def tokenize_words(self):
    if not self.is_tagged(SENTENCES):
        self.tokenize_sentences()
    tok = self.__word_tokenizer
    text = self.text
    dicts = []
    for sentence in self[SENTENCES]:
        sent_start, sent_end = sentence[START], sentence[END]
        sent_text = text[sent_start:sent_end]
        spans = tok.span_tokenize(sent_text)
        for start, end in spans:
            dicts.append({START: start + sent_start,
                          END: end + sent_start,
                          TEXT: sent_text[start:end]})
    self[WORDS] = dicts
    return self
Apply word tokenization and create ``words`` layer. Automatically creates ``paragraphs`` and ``sentences`` layers.
def checkout(self, revision, options):
    rev = revision.key
    self.repo.git.checkout(rev)

Checkout a specific revision.

:param revision: The revision identifier.
:type revision: :class:`Revision`
:param options: Any additional options.
:type options: ``dict``
def _sort(self, session_groups):
    # Sort by name first so that later, stable sorts keep it as a tiebreak.
    session_groups.sort(key=operator.attrgetter('name'))
    for col_param, extractor in reversed(list(zip(self._request.col_params,
                                                  self._extractors))):
        if col_param.order == api_pb2.ORDER_UNSPECIFIED:
            continue
        if col_param.order == api_pb2.ORDER_ASC:
            session_groups.sort(
                key=_create_key_func(
                    extractor,
                    none_is_largest=not col_param.missing_values_first))
        elif col_param.order == api_pb2.ORDER_DESC:
            session_groups.sort(
                key=_create_key_func(
                    extractor,
                    none_is_largest=col_param.missing_values_first),
                reverse=True)
        else:
            raise error.HParamsError('Unknown col_param.order given: %s' %
                                     col_param)
Sorts 'session_groups' in place according to _request.col_params.
def get_next(self, skip=1):
    if super(Reader, self)._next(skip):
        entry = super(Reader, self)._get_all()
        if entry:
            entry['__REALTIME_TIMESTAMP'] = self._get_realtime()
            entry['__MONOTONIC_TIMESTAMP'] = self._get_monotonic()
            entry['__CURSOR'] = self._get_cursor()
            return self._convert_entry(entry)
    return dict()

Return the next log entry as a dictionary.

Entries will be processed with converters specified during Reader creation.

Optional `skip` value will return the `skip`-th log entry.

Currently a standard dictionary of fields is returned, but in the future
this might be changed to a different mapping type, so the calling code
should not make assumptions about a specific type.
def process_request_thread(self, request, client_address):
    try:
        self.finish_request(request, client_address)
        self.shutdown_request(request)
    except Exception as e:
        self.logger.error(e)
        self.handle_error(request, client_address)
        self.shutdown_request(request)
Process the request.
def current_memory_usage():
    import psutil
    proc = psutil.Process(os.getpid())
    meminfo = proc.memory_info()
    rss = meminfo[0]  # Resident Set Size: memory held in RAM
    vms = meminfo[1]  # Virtual Memory Size (unused here)
    return rss

Returns this program's current memory usage in bytes
def match_date(self, value, strict=False):
    value = stringify(value)
    try:
        parse(value)
    except Exception:
        self.shout('Value %r is not a valid date', strict, value)

Check whether value is a date
def add_data(self, request, pk=None):
    resp = super().add_data(request, pk)

    # Add data to all collections the entity belongs to.
    entity = self.get_object()
    for collection in entity.collections.all():
        collection.data.add(*request.data['ids'])

    return resp

Add data to Entity and its collections.
def word_ends(self):
    if not self.is_tagged(WORDS):
        self.tokenize_words()
    return self.ends(WORDS)
The list of end positions representing ``words`` layer elements.
def _login(self, user, password, restrict_login=None):
    payload = {'login': user, 'password': password}
    if restrict_login:
        payload['restrict_login'] = True
    return self._proxy.User.login(payload)
Backend login method for Bugzilla3
def network_lpf(network, snapshots=None, skip_pre=False):
    _network_prepare_and_run_pf(network, snapshots, skip_pre, linear=True)

Linear power flow for generic network.

Parameters
----------
snapshots : list-like|single snapshot
    A subset or an element of network.snapshots on which to run the
    power flow, defaults to network.snapshots
skip_pre : bool, default False
    Skip the preliminary steps of computing topology, calculating
    dependent values and finding bus controls.

Returns
-------
None
def not_modified(cls, errors=None):
    if cls.expose_status:
        cls.response.content_type = 'application/json'
        cls.response._status_line = '304 Not Modified'
    return cls(304, None, errors).to_json

Shortcut API for HTTP 304 `Not Modified` response.

Args:
    errors (list): Response key/value data.

Returns:
    WSResponse Instance.
def get_indic_syllabic_category_property(value, is_bytes=False):
    obj = unidata.ascii_indic_syllabic_category if is_bytes \
        else unidata.unicode_indic_syllabic_category

    if value.startswith('^'):
        negated = value[1:]
        value = '^' + unidata.unicode_alias['indicsyllabiccategory'].get(negated, negated)
    else:
        value = unidata.unicode_alias['indicsyllabiccategory'].get(value, value)

    return obj[value]
Get `INDIC SYLLABIC CATEGORY` property.
def to_meta(self, md5=None, file=None):
    if not md5:
        if not file:
            raise ValueError('Must specify either file or md5')
        md5 = md5_for_file(file)
        size = os.stat(file).st_size
    else:
        size = None

    return {
        'id': self.id_,
        'identity': json.dumps(self.dict),
        'name': self.sname,
        'fqname': self.fqname,
        'md5': md5,
        'size': size
    }
Return a dictionary of metadata, for use in the Remote api.
def set_working_directory(working_directory):
    logger.debug("starting")

    logger.debug(f"adding {working_directory} to sys.paths")
    sys.path.append(working_directory)

    logger.debug("done")

Add working_directory to sys.paths.

This allows dynamic loading of arbitrary python modules in cwd.

Args:
    working_directory: string. path to add to sys.paths
def read(fobj, **kwargs):
    fsamp, arr = wavfile.read(fobj, **kwargs)
    return TimeSeries(arr, sample_rate=fsamp)

Read a WAV file into a `TimeSeries`

Parameters
----------
fobj : `file`, `str`
    open file-like object or filename to read from

**kwargs
    all keyword arguments are passed onto :func:`scipy.io.wavfile.read`

See also
--------
scipy.io.wavfile.read
    for details on how the WAV file is actually read

Examples
--------
>>> from gwpy.timeseries import TimeSeries
>>> t = TimeSeries.read('test.wav')
def linkify_hostgroups_hosts(self, hosts):
    for hostgroup in self:
        members = hostgroup.get_hosts()
        # The new members identifiers list
        new_members = []
        for member in members:
            member = member.strip()
            if not member:
                continue

            if member == '*':
                # All the hosts are members of this group
                new_members.extend(list(hosts.items.keys()))
            else:
                host = hosts.find_by_name(member)
                if host is not None:
                    new_members.append(host.uuid)
                    if hostgroup.uuid not in host.hostgroups:
                        host.hostgroups.append(hostgroup.uuid)
                else:
                    hostgroup.add_unknown_members(member)

        # Keep each member only once
        new_members = list(set(new_members))
        hostgroup.replace_members(new_members)

We just search for each hostgroup the id of the hosts and replace the
names by the found identifiers

:param hosts: object Hosts
:type hosts: alignak.objects.host.Hosts
:return: None
def encode_int(n):
    global ENCODED_INT_CACHE

    try:
        return ENCODED_INT_CACHE[n]
    except KeyError:
        pass

    if n < MIN_29B_INT or n > MAX_29B_INT:
        raise OverflowError("Out of range")

    if n < 0:
        n += 0x20000000

    bytes = ''
    real_value = None

    if n > 0x1fffff:
        real_value = n
        n >>= 1
        bytes += chr(0x80 | ((n >> 21) & 0xff))

    if n > 0x3fff:
        bytes += chr(0x80 | ((n >> 14) & 0xff))

    if n > 0x7f:
        bytes += chr(0x80 | ((n >> 7) & 0xff))

    if real_value is not None:
        n = real_value

    if n > 0x1fffff:
        bytes += chr(n & 0xff)
    else:
        bytes += chr(n & 0x7f)

    ENCODED_INT_CACHE[n] = bytes

    return bytes

Encodes an int as a variable length signed 29-bit integer as defined by
the spec.

@param n: The integer to be encoded
@return: The encoded string
@rtype: C{str}
@raise OverflowError: Out of range.
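A minimal Python 3 sketch of the same U29 byte layout, for illustration
only (the function above targets Python 2 byte strings). Each of the
first three bytes carries 7 payload bits plus a continuation flag; a
fourth byte, when present, carries a full 8 bits.

def u29_encode(n: int) -> bytes:
    if n < 0x80:
        return bytes([n])
    if n < 0x4000:
        return bytes([0x80 | (n >> 7), n & 0x7f])
    if n < 0x200000:
        return bytes([0x80 | (n >> 14), 0x80 | ((n >> 7) & 0x7f), n & 0x7f])
    return bytes([0x80 | (n >> 22), 0x80 | ((n >> 15) & 0x7f),
                  0x80 | ((n >> 8) & 0x7f), n & 0xff])

assert u29_encode(0x7f) == b'\x7f'            # 1 byte
assert u29_encode(0x80) == b'\x81\x00'        # 2 bytes
assert u29_encode(0x4000) == b'\x81\x80\x00'  # 3 bytes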
def enable_shuffle(self, value=None):
    if value is None:
        value = not self.shuffled
    spotifyconnect.Error.maybe_raise(lib.SpPlaybackEnableShuffle(value))

Enable shuffle mode. If no value is given, the current state is toggled.
def entry_snapshots(self, space_id, environment_id, entry_id):
    return SnapshotsProxy(self, space_id, environment_id, entry_id, 'entries')

Provides access to entry snapshot management methods.

API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/snapshots

:return: :class:`SnapshotsProxy <contentful_management.snapshots_proxy.SnapshotsProxy>` object.
:rtype: contentful.snapshots_proxy.SnapshotsProxy

Usage:

    >>> entry_snapshots_proxy = client.entry_snapshots('cfexampleapi', 'master', 'nyancat')
    <SnapshotsProxy[entries] space_id="cfexampleapi" environment_id="master" parent_resource_id="nyancat">
def job_file(self):
    job_file_name = '%s.job' % (self.name)
    job_file_path = os.path.join(self.initial_dir, job_file_name)
    self._job_file = job_file_path
    return self._job_file
The path to the submit description file representing this job.
def transcribe_to_modern(self, text):
    phoneme_words = self.transcribe(text, as_phonemes=True)
    words = [''.join([self.to_modern[0][phoneme.ipa] for phoneme in word])
             for word in phoneme_words]
    modern_text = ' '.join(words)
    for regexp, replacement in self.to_modern[1]:
        modern_text = re.sub(regexp, replacement, modern_text)
    return modern_text

A very first attempt at transcribing from IPA to some modern orthography.
The method is intended to provide the student with clues to the
pronunciation of old orthographies.
def _treat_devices_removed(self):
    for device in self._removed_ports.copy():
        eventlet.spawn_n(self._process_removed_port, device)
Process the removed devices.
def _dump(f, mesh):
    dae = mesh_to_collada(mesh)
    dae.write(f.name)
Writes a mesh to collada file format.
def list_rules(region=None, key=None, keyid=None, profile=None):
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        ret = []
        NextToken = ''
        while NextToken is not None:
            args = {'NextToken': NextToken} if NextToken else {}
            r = conn.list_rules(**args)
            ret += r.get('Rules', [])
            NextToken = r.get('NextToken')
        return ret
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}

List, with details, all Cloudwatch Event rules visible in the current scope.

CLI example::

    salt myminion boto_cloudwatch_event.list_rules region=us-east-1
def build_indentation_list(parser: str = 'github'):
    indentation_list = list()
    if (parser == 'github' or parser == 'cmark' or parser == 'gitlab'
            or parser == 'commonmarker' or parser == 'redcarpet'):
        for i in range(0, md_parser[parser]['header']['max_levels']):
            indentation_list.append(False)
    return indentation_list

Create a data structure that holds the state of indentations.

:parameter parser: decides the length of the list. Defaults to ``github``.
:type parser: str
:returns: indentation_list, a list that contains the state of
    indentations given a header type.
:rtype: list
:raises: a built-in exception.
def map(self, func):
    return dict((key, func(value)) for key, value in self.iteritems())

Return a dictionary of the results of func applied to each of the
segmentlist objects in self.

Example:

>>> x = segmentlistdict()
>>> x["H1"] = segmentlist([segment(0, 10)])
>>> x["H2"] = segmentlist([segment(5, 15)])
>>> x.map(lambda l: 12 in l)
{'H2': True, 'H1': False}
def local_manager_is_default(self, adm_gid, gid):
    config = self.root['settings']['ugm_localmanager'].attrs
    rule = config[adm_gid]
    if gid not in rule['target']:
        raise Exception(u"group '%s' not managed by '%s'" % (gid, adm_gid))
    return gid in rule['default']
Check whether gid is default group for local manager group.
def terminal_sexy_to_wal(data):
    data["colors"] = {}
    data["special"] = {
        "foreground": data["foreground"],
        "background": data["background"],
        "cursor": data["color"][9]
    }

    for i, color in enumerate(data["color"]):
        data["colors"]["color%s" % i] = color

    return data
Convert terminal.sexy json schema to wal.
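A small usage sketch with a minimal, made-up terminal.sexy payload (real
exports carry 16 colors; two are shown, padded to reach index 9):

scheme = {
    "foreground": "#c5c8c6",
    "background": "#1d1f21",
    "color": ["#1d1f21", "#cc6666"] + ["#000000"] * 8,
}
wal = terminal_sexy_to_wal(scheme)
assert wal["special"]["cursor"] == "#000000"   # color[9]
assert wal["colors"]["color1"] == "#cc6666"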
def initalize(self, physics_dta):
    self.rotation = random.randint(self.rotation_range[0],
                                   self.rotation_range[1])
    self.current_time = 0.0
    self.color = self.start_color
    self.scale = self.start_scale
    self.physics = physics_dta
Prepare our particle for use. physics_dta describes the velocity, coordinates, and acceleration of the particle.
def draw_lines(self, *points):
    point_array = ffi.new('SDL_Point[]', len(points))
    for i, p in enumerate(points):
        point_array[i] = p._ptr[0]
    check_int_err(lib.SDL_RenderDrawLines(self._ptr, point_array, len(points)))

Draw a series of connected lines on the current rendering target.

Args:
    *points (Point): The points along the lines.

Raises:
    SDLError: If an error is encountered.
def my_on_connect(client):
    client.send('You connected from %s\n' % client.addrport())
    if CLIENTS:
        client.send('Also connected are:\n')
        for neighbor in CLIENTS:
            client.send('%s\n' % neighbor.addrport())
    else:
        client.send('Sadly, you are alone.\n')
    CLIENTS.append(client)
Example on_connect handler.
def bods2c(name):
    name = stypes.stringToCharP(name)
    code = ctypes.c_int(0)
    found = ctypes.c_int(0)
    libspice.bods2c_c(name, ctypes.byref(code), ctypes.byref(found))
    return code.value, bool(found.value)

Translate a string containing a body name or ID code to an integer code.

http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bods2c_c.html

:param name: String to be translated to an ID code.
:type name: str
:return: Integer ID code corresponding to name.
:rtype: int
def add_constant(self, stream, value):
    if stream in self.constant_database:
        raise ArgumentError("Attempted to set the same constant twice",
                            stream=stream,
                            old_value=self.constant_database[stream],
                            new_value=value)

    self.constant_database[stream] = value

Store a constant value for use in this sensor graph.

Constant assignments occur after all sensor graph nodes have been
allocated since they must be propagated to all appropriate virtual
stream walkers.

Args:
    stream (DataStream): The constant stream to assign the value to
    value (int): The value to assign.
def transpose(self, trans, scale="C"):
    if not isinstance(trans, int):
        raise TypeError("Expected integers, not {}".format(type(trans)))

    self._root = transpose_note(self._root, trans, scale)
    if self._on:
        self._on = transpose_note(self._on, trans, scale)
    self._reconfigure_chord()

Transpose the chord

:param int trans: Transpose key
:param str scale: key scale
:return:
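A hypothetical usage sketch, assuming a Chord class that exposes this
method and renders its root when printed (the class name and constructor
are assumptions, not confirmed by the snippet itself):

c = Chord("C7")
c.transpose(2)   # shift the root (and slash bass, if any) up 2 semitones
print(c)         # -> D7
c.transpose(1, scale="F")  # spell the result against the F scale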
def create(self, ticket, payload=None, expires=None):
    if not payload:
        payload = True
    self._client.set(str(ticket), payload, expires)
Create a session identifier in memcache associated with ``ticket``.
def _eval_meta_as_summary(meta):
    if meta == '':
        return False
    if len(meta) > 500:
        return False
    if 'login' in meta.lower():
        return False

    return True

Some crude heuristics for now; most are implemented on the bot side with
domain whitelists.
def children(self, unroll=False, skip_not_present=True):
    for child_inst in self.inst.children:
        if skip_not_present:
            # skip children whose 'ispresent' property is False
            if not child_inst.properties.get('ispresent', True):
                continue

        if unroll and isinstance(child_inst, comp.AddressableComponent) and child_inst.is_array:
            # unroll the array of child nodes
            range_list = [range(n) for n in child_inst.array_dimensions]
            for idxs in itertools.product(*range_list):
                N = Node._factory(child_inst, self.env, self)
                N.current_idx = idxs
                yield N
        else:
            yield Node._factory(child_inst, self.env, self)

Returns an iterator that provides nodes for all immediate children of
this component.

Parameters
----------
unroll : bool
    If True, any children that are arrays are unrolled.
skip_not_present : bool
    If True, skips children whose 'ispresent' property is set to False

Yields
------
:class:`~Node`
    All immediate children
def doLayout(self, width):
    self.width = width
    font_sizes = [0] + [frag.get("fontSize", 0) for frag in self]
    self.fontSize = max(font_sizes)
    self.height = self.lineHeight = max(frag * self.LINEHEIGHT
                                        for frag in font_sizes)
    y = (self.lineHeight - self.fontSize)
    for frag in self:
        frag["y"] = y
    return self.height
Align words in previous line.
def _create_adapter_type(network_adapter, adapter_type,
                         network_adapter_label=''):
    log.trace('Configuring virtual machine network '
              'adapter adapter_type=%s', adapter_type)
    if adapter_type in ['vmxnet', 'vmxnet2', 'vmxnet3', 'e1000', 'e1000e']:
        edited_network_adapter = salt.utils.vmware.get_network_adapter_type(
            adapter_type)

        if isinstance(network_adapter, type(edited_network_adapter)):
            edited_network_adapter = network_adapter
        else:
            if network_adapter:
                log.trace('Changing type of \'%s\' from \'%s\' to \'%s\'',
                          network_adapter.deviceInfo.label,
                          type(network_adapter).__name__.rsplit(".", 1)[1][7:].lower(),
                          adapter_type)
    else:
        # If a device is edited and the type is unspecified or unknown,
        # keep the existing adapter; otherwise create the default type.
        if network_adapter:
            if adapter_type:
                log.error(
                    'Cannot change type of \'%s\' to \'%s\'. Not changing type',
                    network_adapter.deviceInfo.label, adapter_type
                )
            edited_network_adapter = network_adapter
        else:
            if not adapter_type:
                log.trace('The type of \'%s\' has not been specified. '
                          'Creating of default type \'vmxnet3\'',
                          network_adapter_label)
            edited_network_adapter = vim.vm.device.VirtualVmxnet3()
    return edited_network_adapter

Returns a vim.vm.device.VirtualEthernetCard object specifying a virtual
ethernet card information

network_adapter
    None or VirtualEthernet object

adapter_type
    String, type of adapter

network_adapter_label
    string, network adapter name