code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def load_from_sens_file(self, filename):
    """Load real and imaginary sensitivity parts from a CRMod sens.dat file.

    Parameters
    ----------
    filename : str
        Path to the sensitivity file (the header row is skipped).

    Returns
    -------
    nid_re : int
        ID of the real part of the sensitivities.
    nid_im : int
        ID of the imaginary part of the sensitivities.
    """
    sens_data = np.loadtxt(filename, skiprows=1)
    # Column 2 holds the real part, column 3 the imaginary part.
    real_part = sens_data[:, 2]
    imag_part = sens_data[:, 3]
    return self.add_data(real_part), self.add_data(imag_part)
Load real and imaginary parts from a sens.dat file generated by CRMod Parameters ---------- filename: string filename of sensitivity file Returns ------- nid_re: int ID of real part of sensitivities nid_im: int ID of imaginary part of sensitivities
def read_wave(path):
    """Read a mono, 16-bit PCM .wav file.

    Only sample rates of 8000, 16000 or 32000 Hz are accepted.

    :param path: path to the .wav file
    :return: tuple of (raw PCM bytes, sample rate in Hz, duration in seconds)
    """
    with contextlib.closing(wave.open(path, 'rb')) as wav_file:
        assert wav_file.getnchannels() == 1
        assert wav_file.getsampwidth() == 2
        rate = wav_file.getframerate()
        assert rate in (8000, 16000, 32000)
        n_frames = wav_file.getnframes()
        pcm = wav_file.readframes(n_frames)
        seconds = n_frames / rate
    return pcm, rate, seconds
Reads a .wav file. Takes the path, and returns (PCM audio data, sample rate, duration in seconds).
def _get_jar(self, command, alts=None, allow_missing=False):
    """Retrieve the jar for running the specified command.

    Searches the GATK and Picard directories (plus their sibling ``gatk``
    directories) for ``command`` or any alternative name in ``alts``.

    :param command: primary jar/command name to look for
    :param alts: optional list of alternative command names
    :param allow_missing: if True, return None instead of raising when absent
    :raises ValueError: when the jar cannot be found and allow_missing is
        False, or when multiple candidate jars are found
    """
    if alts is None:
        alts = []
    search_dirs = []
    for base in (self._gatk_dir, self._picard_ref):
        search_dirs.append(base)
        search_dirs.append(os.path.join(base, os.pardir, "gatk"))
    for name in [command] + alts:
        for candidate_dir in search_dirs:
            try:
                return config_utils.get_jar(name, candidate_dir)
            except ValueError as msg:
                # Re-raise only the "multiple jars" error; plain not-found
                # just moves on to the next candidate.
                if str(msg).find("multiple") > 0:
                    raise
    if allow_missing:
        return None
    raise ValueError("Could not find jar %s in %s:%s"
                     % (command, self._picard_ref, self._gatk_dir))
Retrieve the jar for running the specified command.
def _drop_schema(self, force_drop=False):
    """Drop the tenant's schema (CASCADE) when dropping is permitted.

    The drop only happens when the schema exists and either
    ``auto_drop_schema`` is enabled or ``force_drop`` is True.

    :param force_drop: drop the schema even if auto_drop_schema is False
    :raises Exception: when called from a schema other than the tenant's
        own schema or the public schema
    """
    connection = connections[get_tenant_database_alias()]
    has_schema = hasattr(connection, 'schema_name')
    allowed_schemas = (self.schema_name, get_public_schema_name())
    if has_schema and connection.schema_name not in allowed_schemas:
        raise Exception("Can't delete tenant outside it's own schema or "
                        "the public schema. Current schema is %s."
                        % connection.schema_name)
    should_drop = self.auto_drop_schema or force_drop
    if has_schema and schema_exists(self.schema_name) and should_drop:
        self.pre_drop()
        # NOTE(review): schema_name is interpolated directly into SQL;
        # presumably validated upstream -- confirm before exposing to input.
        cursor = connection.cursor()
        cursor.execute('DROP SCHEMA %s CASCADE' % self.schema_name)
Drops the schema
def MarginalBeta(self, i):
    """Compute the marginal distribution of the ith element.

    See http://en.wikipedia.org/wiki/Dirichlet_distribution
    #Marginal_distributions

    i: int

    Returns: Beta object
    """
    total = self.params.sum()
    alpha_i = self.params[i]
    return Beta(alpha_i, total - alpha_i)
Computes the marginal distribution of the ith element. See http://en.wikipedia.org/wiki/Dirichlet_distribution #Marginal_distributions i: int Returns: Beta object
def get_jpp_revision(via_command='JPrint'):
    """Retrieve the Jpp revision number, or None when unavailable.

    Runs ``via_command -v`` and parses the second token of the first output
    line. Returns None when the command is missing or fails with a return
    code other than 1.
    """
    try:
        output = subprocess.check_output([via_command, '-v'],
                                         stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as error:
        # A return code of 1 still carries the version banner on stdout.
        if error.returncode != 1:
            return None
        output = error.output
    except OSError:
        return None
    first_line = output.decode().split('\n')[0]
    return first_line.split()[1].strip()
Retrieves the Jpp revision number
def histogram(self, name, description, labels=None, **kwargs):
    """Use a Histogram to track the execution time and invocation count
    of the method.

    :param name: the name of the metric
    :param description: the description of the metric
    :param labels: a dictionary of `{labelname: callable_or_value}` for labels
    :param kwargs: additional keyword arguments for creating the Histogram
    """
    def observe_duration(metric, time):
        metric.observe(time)

    return self._track(Histogram, observe_duration, kwargs,
                       name, description, labels, registry=self.registry)
Use a Histogram to track the execution time and invocation count of the method. :param name: the name of the metric :param description: the description of the metric :param labels: a dictionary of `{labelname: callable_or_value}` for labels :param kwargs: additional keyword arguments for creating the Histogram
def _matches_location(price, location): if not price.get('locationGroupId'): return True for group in location['location']['location']['priceGroups']: if group['id'] == price['locationGroupId']: return True return False
Return True if the price object matches the location.
def signup(request):
    """Create a new user with the given credentials.

    GET parameters:
      html
        turn on the HTML version of the API

    POST parameters (JSON):
      username: user's name
      email: user's e-mail
      password: user's password
      password_check: user's password again to check it
      first_name (optional): user's first name
      last_name (optional): user's last name
    """
    if request.method == 'GET':
        return render(request, 'user_signup.html', {},
                      help_text=signup.__doc__)
    elif request.method == 'POST':
        if request.user.is_authenticated() and hasattr(request.user, "userprofile"):
            return render_json(request, {
                'error': _('User already logged in'),
                'error_type': 'username_logged'
            }, template='user_json.html', status=400)
        credentials = json_body(request.body.decode("utf-8"))
        error = _save_user(request, credentials, new=True)
        if error is not None:
            return render_json(request, error,
                               template='user_json.html', status=400)
        else:
            auth.login(request, request.user)
            # Re-dispatch as GET so profile() renders the created user.
            request.method = "GET"
            return profile(request, status=201)
    else:
        # BUG FIX: original used "method %s ...".format(...), mixing %-style
        # placeholders with str.format, so the method name was never
        # interpolated into the message.
        return HttpResponseBadRequest("method %s is not allowed" % request.method)
Create a new user with the given credentials. GET parameters: html turn on the HTML version of the API POST parameters (JSON): username: user's name email: user's e-mail password: user's password password_check: user's password again to check it first_name (optional): user's first name last_name (optional): user's last name
def get_all_eip_addresses(addresses=None, allocation_ids=None, region=None,
                          key=None, keyid=None, profile=None):
    """Get public addresses of some, or all EIPs associated with the
    current account.

    addresses
        (list) - Optional list of addresses; restricts the result set.
    allocation_ids
        (list) - Optional list of allocation IDs; restricts the result set.

    returns
        (list) - A list of the requested EIP addresses

    CLI Example:

    .. code-block:: bash

        salt-call boto_ec2.get_all_eip_addresses

    .. versionadded:: 2016.3.0
    """
    eips = _get_all_eip_addresses(addresses, allocation_ids, region,
                                  key, keyid, profile)
    return [eip.public_ip for eip in eips]
Get public addresses of some, or all EIPs associated with the current account. addresses (list) - Optional list of addresses. If provided, only the addresses associated with those in the list will be returned. allocation_ids (list) - Optional list of allocation IDs. If provided, only the addresses associated with the given allocation IDs will be returned. returns (list) - A list of the requested EIP addresses CLI Example: .. code-block:: bash salt-call boto_ec2.get_all_eip_addresses .. versionadded:: 2016.3.0
async def identity_of(client: Client, search: str) -> dict:
    """GET identity data written in the blockchain.

    :param client: Client to connect to the api
    :param search: UID or public key
    :return: the identity document as a dict (validated against
        IDENTITY_OF_SCHEMA)
    """
    return await client.get(MODULE + '/identity-of/%s' % search, schema=IDENTITY_OF_SCHEMA)
GET Identity data written in the blockchain :param client: Client to connect to the api :param search: UID or public key :return:
def list_databases(self, instance, limit=None, marker=None):
    """Return all databases for the specified instance.

    :param instance: the instance whose databases are listed
    :param limit: optional maximum number of results
    :param marker: optional pagination marker
    """
    pagination = {'limit': limit, 'marker': marker}
    return instance.list_databases(**pagination)
Returns all databases for the specified instance.
def add_properties(entity_proto, property_dict, exclude_from_indexes=None):
    """Add values to the given datastore.Entity proto message.

    Args:
      entity_proto: datastore.Entity proto message.
      property_dict: a dictionary from property name to either a python
        object or datastore.Value.
      exclude_from_indexes: if the value should be excluded from indexes.
        None leaves indexing as is (defaults to False if value is not a
        Value message).

    Usage:
      >>> add_properties(proto, {'foo': u'a', 'bar': [1, 2]})

    Raises:
      TypeError: if a given property value type is not supported.
    """
    # BUG FIX: dict.iteritems() exists only on Python 2; items() works on
    # both Python 2 and 3 with identical iteration semantics here.
    for name, value in property_dict.items():
        set_property(entity_proto.properties, name, value, exclude_from_indexes)
Add values to the given datastore.Entity proto message. Args: entity_proto: datastore.Entity proto message. property_dict: a dictionary from property name to either a python object or datastore.Value. exclude_from_indexes: if the value should be excluded from indexes. None leaves indexing as is (defaults to False if value is not a Value message). Usage: >>> add_properties(proto, {'foo': u'a', 'bar': [1, 2]}) Raises: TypeError: if a given property value type is not supported.
def config(self):
    """Get a listing of mobile client configuration settings."""
    response = self._call(mc_calls.Config)
    body = response.body
    # Missing 'data' or 'entries' keys yield an empty listing.
    return body.get('data', {}).get('entries', [])
Get a listing of mobile client configuration settings.
def create_class_from_element_tree(target_class, tree, namespace=None, tag=None):
    """Instantiate ``target_class`` and populate it from an element tree.

    Note: Only use this function with classes that have ``c_namespace``
    and ``c_tag`` class members.

    :param target_class: The class which will be instantiated and populated
        with the contents of the XML.
    :param tree: An element tree whose contents will be converted into
        members of the new target_class instance.
    :param namespace: The namespace which the XML tree's root node must
        match; defaults to the class's ``c_namespace``.
    :param tag: The tag which the XML tree's root node must match; defaults
        to the class's ``c_tag``.
    :return: An instance of the target class, or None when the root node's
        namespace/tag do not match.
    """
    expected_ns = target_class.c_namespace if namespace is None else namespace
    expected_tag = target_class.c_tag if tag is None else tag
    if tree.tag != '{%s}%s' % (expected_ns, expected_tag):
        return None
    instance = target_class()
    instance.harvest_element_tree(tree)
    return instance
Instantiates the class and populates members according to the tree. Note: Only use this function with classes that have c_namespace and c_tag class members. :param target_class: The class which will be instantiated and populated with the contents of the XML. :param tree: An element tree whose contents will be converted into members of the new target_class instance. :param namespace: The namespace which the XML tree's root node must match. If omitted, the namespace defaults to the c_namespace of the target class. :param tag: The tag which the XML tree's root node must match. If omitted, the tag defaults to the c_tag class member of the target class. :return: An instance of the target class - or None if the tag and namespace of the XML tree's root node did not match the desired namespace and tag.
def get(self, name: str, default: Any = None) -> np.ndarray:
    """Return the value for a named attribute if it exists, else default.

    If default is not given, it defaults to None, so that this method
    never raises a KeyError.
    """
    return self[name] if name in self else default
Return the value for a named attribute if it exists, else default. If default is not given, it defaults to None, so that this method never raises a KeyError.
def list(self, filter=None, type=None, sort=None, limit=None, page=None):
    """Get a list of packages.

    :param filter: (optional) Filters to apply as a string list.
    :param type: (optional) `union` or `inter` as string.
    :param sort: (optional) Sort fields to apply as string list.
    :param limit: (optional) Limit returned list length.
    :param page: (optional) Page to return.
    :return: :class:`packages.Page <packages.Page>` object
    """
    excluded_fields = ('testlist', 'extra_cli_args', 'agent_id',
                       'options', 'note')
    schema = PackageSchema(exclude=excluded_fields)
    response = self.service.list(self.base, filter, type, sort, limit, page)
    packages, links = self.service.decode(schema, response,
                                          many=True, links=True)
    return Page(packages, links)
Get a list of packages. :param filter: (optional) Filters to apply as a string list. :param type: (optional) `union` or `inter` as string. :param sort: (optional) Sort fields to apply as string list. :param limit: (optional) Limit returned list length. :param page: (optional) Page to return. :return: :class:`packages.Page <packages.Page>` object
def get(self, key, env=None):
    """Return the config setting for the specified environment.

    If no environment is specified, the value for the current environment
    is returned. If an unknown key or environment is passed, None is
    returned.
    """
    if env is None:
        env = self.environment
    try:
        value = self._settings[env][key]
    except KeyError:
        value = None
    if value is not None:
        return value
    if key == "identity_class":
        # Fall back to deriving the identity class from the identity_type
        # environment variable.
        env_var = self.env_dct.get("identity_type")
        type_name = os.environ.get(env_var)
        if type_name:
            return _import_identity(type_name)
        return value
    env_var = self.env_dct.get(key)
    if env_var is not None:
        value = os.environ.get(env_var)
    return value
Returns the config setting for the specified environment. If no environment is specified, the value for the current environment is returned. If an unknown key or environment is passed, None is returned.
def decode_argument(self, value: bytes, name: str = None) -> str:
    """Decode an argument from the request.

    The argument has been percent-decoded and is now a byte string. By
    default, this method decodes the argument as utf-8 and returns a
    unicode string, but this may be overridden in subclasses.

    This method is used as a filter for both `get_argument()` and for
    values extracted from the url and passed to `get()`/`post()`/etc.

    The name of the argument is provided if known, but may be None
    (e.g. for unnamed groups in the url regex).
    """
    try:
        return _unicode(value)
    except UnicodeDecodeError:
        where = name or "url"
        # Truncate to 40 bytes so the error message stays short.
        raise HTTPError(400, "Invalid unicode in %s: %r" % (where, value[:40]))
Decodes an argument from the request. The argument has been percent-decoded and is now a byte string. By default, this method decodes the argument as utf-8 and returns a unicode string, but this may be overridden in subclasses. This method is used as a filter for both `get_argument()` and for values extracted from the url and passed to `get()`/`post()`/etc. The name of the argument is provided if known, but may be None (e.g. for unnamed groups in the url regex).
def _find_joliet_record(self, joliet_path):
    """Find a directory record on the ISO given a Joliet path.

    Parameters:
     joliet_path - The Joliet path to lookup.
    Returns:
     The directory record entry representing the entry on the ISO.
    """
    if self.joliet_vd is None:
        raise pycdlibexception.PyCdlibInternalError(
            'Joliet path requested on non-Joliet ISO')
    # Joliet names are stored as big-endian UTF-16 on the ISO.
    return _find_dr_record_by_name(self.joliet_vd, joliet_path, 'utf-16_be')
An internal method to find an directory record on the ISO given a Joliet path. If the entry is found, it returns the directory record object corresponding to that entry. If the entry could not be found, a pycdlibexception.PyCdlibInvalidInput is raised. Parameters: joliet_path - The Joliet path to lookup. Returns: The directory record entry representing the entry on the ISO.
def list_timeline(self, list_id, since_id=None, max_id=None, count=20):
    """List the tweets of the specified list.

    :param list_id: list ID number
    :param since_id: results will have ID greater than specified ID
        (more recent than)
    :param max_id: results will have ID less than specified ID (older than)
    :param count: number of results per page
    :return: list of :class:`~responsebot.models.Tweet` objects
    """
    raw_statuses = self._client.list_timeline(
        list_id=list_id, since_id=since_id, max_id=max_id, count=count)
    return [Tweet(status._json) for status in raw_statuses]
List the tweets of specified list. :param list_id: list ID number :param since_id: results will have ID greater than specified ID (more recent than) :param max_id: results will have ID less than specified ID (older than) :param count: number of results per page :return: list of :class:`~responsebot.models.Tweet` objects
def read_bim(file_name):
    """Read the BIM file to gather marker names on sexual chromosomes.

    :param file_name: the name of the ``bim`` file.
    :type file_name: str

    :returns: a dict mapping marker name to chromosome code for markers on
        the sexual chromosomes (23 for X, 24 for Y, via ``encode_chr``);
        all other markers are skipped.
    """
    markers = {}
    with open(file_name, 'r') as bim_file:
        for line in bim_file:
            fields = line.rstrip("\r\n").split("\t")
            chromosome = encode_chr(fields[0])
            if chromosome in {23, 24}:
                markers[fields[1]] = chromosome
    return markers
Reads the BIM file to gather marker names. :param file_name: the name of the ``bim`` file. :type file_name: str :returns: a :py:class:`dict` containing the chromosomal location of each marker on the sexual chromosomes. It uses the :py:func:`encode_chr` to encode the chromosomes from ``X`` and ``Y`` to ``23`` and ``24``, respectively.
def namespace(self):
    """Get the element's namespace.

    @return: The element's namespace by resolving the prefix, the
        explicit namespace or the inherited namespace.
    @rtype: (I{prefix}, I{name})
    """
    if self.prefix is not None:
        return self.resolvePrefix(self.prefix)
    return self.defaultNamespace()
Get the element's namespace. @return: The element's namespace by resolving the prefix, the explicit namespace or the inherited namespace. @rtype: (I{prefix}, I{name})
def unroll_auth_headers(self, authheaders, exclude_signature=False, sep=",", quote=True):
    """Convert an authorization header dict-like object into a string
    representing the authorization.

    Keyword arguments:
    authheaders -- A string-indexable object which contains the headers
        appropriate for this signature version.
    exclude_signature -- omit the ``signature`` entry entirely.
    sep -- separator placed between ``key=value`` items.
    quote -- wrap values in double quotes when True.
    """
    ordered = collections.OrderedDict(sorted(authheaders.items()))
    template = '{0}=\"{1}\"' if quote else '{0}={1}'
    parts = []
    for key, value in ordered.items():
        if key == 'signature':
            if exclude_signature:
                continue
            # The signature itself is emitted verbatim, never URL-quoted.
            parts.append(template.format(key, str(value)))
        else:
            parts.append(template.format(key, urlquote(str(value), safe='')))
    return sep.join(parts)
Converts an authorization header dict-like object into a string representing the authorization. Keyword arguments: authheaders -- A string-indexable object which contains the headers appropriate for this signature version.
def get_items_of_offer_per_page(self, offer_id, per_page=1000, page=1):
    """Get items of an offer, one page at a time.

    :param offer_id: the offer id
    :param per_page: How many objects per page. Default: 1000
    :param page: Which page. Default: 1
    :return: list
    """
    params = {'offer_id': offer_id}
    return self._get_resource_per_page(resource=OFFER_ITEMS,
                                       per_page=per_page,
                                       page=page,
                                       params=params)
Get items of offer per page :param offer_id: the offer id :param per_page: How many objects per page. Default: 1000 :param page: Which page. Default: 1 :return: list
def peek(init, exposes, debug=False):
    """Default deserializer factory.

    Arguments:
        init (callable): type constructor.
        exposes (iterable): attributes to be peeked and passed to `init`.

    Returns:
        callable: deserializer (`peek` routine).
    """
    def _peek(store, container, _stack=None):
        # Peek each exposed attribute in order, then feed them to init.
        values = [store.peek(attr, container, _stack=_stack)
                  for attr in exposes]
        if debug:
            print(values)
        return init(*values)
    return _peek
Default deserializer factory. Arguments: init (callable): type constructor. exposes (iterable): attributes to be peeked and passed to `init`. Returns: callable: deserializer (`peek` routine).
def _run(name, cmd, exec_driver=None, output=None, stdin=None,
         python_shell=True, output_loglevel='debug',
         ignore_retcode=False, use_vt=False, keep_env=None):
    """Common logic for docker.run functions.

    Delegates to ``container_resource.run``; when a specific ``output``
    key is requested, only that key of the result is returned.
    """
    if exec_driver is None:
        exec_driver = _get_exec_driver()
    result = __salt__['container_resource.run'](
        name,
        cmd,
        container_type=__virtualname__,
        exec_driver=exec_driver,
        output=output,
        stdin=stdin,
        python_shell=python_shell,
        output_loglevel=output_loglevel,
        ignore_retcode=ignore_retcode,
        use_vt=use_vt,
        keep_env=keep_env)
    if output in (None, 'all'):
        return result
    return result[output]
Common logic for docker.run functions
def _get_default(self, obj):
    """Internal implementation of instance attribute access for default
    values.

    Handles bookkeeping around |PropertyContainer| values, etc.
    """
    if self.name in obj._property_values:
        # A set value should have been returned before reaching here;
        # hitting this path indicates an internal inconsistency.
        raise RuntimeError("Bokeh internal error, does not handle the case of self.name already in _property_values")
    is_themed = obj.themed_values() is not None and self.name in obj.themed_values()
    default = self.instance_default(obj)
    # Unstable defaults (e.g. mutable containers) are cached per instance,
    # in separate dicts for themed vs. unthemed values.
    if is_themed:
        unstable_dict = obj._unstable_themed_values
    else:
        unstable_dict = obj._unstable_default_values
    if self.name in unstable_dict:
        return unstable_dict[self.name]
    if self.property._may_have_unstable_default():
        if isinstance(default, PropertyValueContainer):
            # Containers must know their owner so mutations are tracked.
            default._register_owner(obj, self)
        unstable_dict[self.name] = default
    return default
Internal implementation of instance attribute access for default values. Handles bookkeeping around |PropertyContainer| values, etc.
def _pad(input_signal, length, average=10): padded_input_signal = numpy.zeros(length, input_signal.dtype) start_offset = int((len(padded_input_signal) - len(input_signal)) / 2) padded_input_signal[:start_offset] = numpy.average(input_signal[0:average]) padded_input_signal[start_offset:(start_offset + len(input_signal))] = input_signal[:] padded_input_signal[(start_offset + len(input_signal)):] = numpy.average(input_signal[-average:]) return padded_input_signal
Helper function which increases the length of an input signal. The original is inserted at the centre of the new signal and the extra values are set to the average of the first and last parts of the original, respectively. :param input_signal: the signal to be padded :param length: the length of the padded signal :param average: the number of points at the beginning/end of the signal which are averaged to calculate the padded value :return:
def delete_collection(mongo_uri, database_name, collection_name):
    """Delete a mongo document collection using pymongo.

    Mongo daemon assumed to be running.

    Inputs: - mongo_uri: A MongoDB URI.
            - database_name: The mongo database name as a python string.
            - collection_name: The mongo collection as a python string.
    """
    client = pymongo.MongoClient(mongo_uri)
    client[database_name].drop_collection(collection_name)
Delete a mongo document collection using pymongo. Mongo daemon assumed to be running. Inputs: - mongo_uri: A MongoDB URI. - database_name: The mongo database name as a python string. - collection_name: The mongo collection as a python string.
def _get_serializer(output):
    """Helper to return a known serializer based on the passed output
    argument.

    :param output: name of the serializer attribute to look up.
    :raises CommandExecutionError: when ``output`` does not name a known
        serializer.
    """
    serializers = salt.loader.serializers(__opts__)
    try:
        return getattr(serializers, output)
    except AttributeError:
        raise CommandExecutionError(
            "Unknown serializer '{0}' found for output option".format(output)
        )
Helper to return known serializer based on pass output argument
def __add_images_to_manifest(self):
    """Add entries for py3o images into the manifest file."""
    xpath_expr = "//manifest:manifest[1]"
    for content_tree in self.content_trees:
        # Locate the manifest root element in this content tree (if any).
        manifest_e = content_tree.xpath(
            xpath_expr,
            namespaces=self.namespaces
        )
        if not manifest_e:
            continue
        # Register one file-entry per image; the media-type is left empty.
        for identifier in self.images.keys():
            lxml.etree.SubElement(
                manifest_e[0],
                '{%s}file-entry' % self.namespaces['manifest'],
                attrib={
                    '{%s}full-path' % self.namespaces['manifest']: (
                        PY3O_IMAGE_PREFIX + identifier
                    ),
                    '{%s}media-type' % self.namespaces['manifest']: '',
                }
            )
Add entries for py3o images into the manifest file.
def simpleQuery(self, queryType, rawResults=False, **queryArgs):
    """General interface for simple queries.

    queryType can be something like 'all', 'by-example' etc... everything
    is in the arango doc. If rawResults, the query will return dictionaries
    instead of Document objects.
    """
    return SimpleQuery(self, queryType, rawResults, **queryArgs)
General interface for simple queries. queryType can be something like 'all', 'by-example' etc... everything is in the arango doc. If rawResults, the query will return dictionaries instead of Document objects.
def on_connect(client):
    """Sample on_connect function. Handles new connections."""
    # NOTE: Python 2 print statements; this module predates Python 3.
    print "++ Opened connection to %s" % client.addrport()
    # Announce the newcomer to everyone already connected.
    broadcast('%s joins the conversation.\n' % client.addrport() )
    CLIENT_LIST.append(client)
    client.send("Welcome to the Chat Server, %s.\n" % client.addrport() )
Sample on_connect function. Handles new connections.
def _speak_header_always_inherit(self, element):
    """Make the cell headers spoken for every data cell of the element
    and its descendants.

    :param element: The element.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    """
    self._speak_header_once_inherit(element)
    cells = self.html_parser.find(element).find_descendants(
        'td[headers],th[headers]').list_results()
    display = AccessibleDisplayImplementation(self.html_parser,
                                              self.configure)
    for cell in cells:
        display.display_cell_header(cell)
The cells headers will be spoken for every data cell for element and descendants. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
def get_accounts(self, username=None):
    """Get a list of accounts owned by the user, or False on failure.

    Parameters
    ----------
    username : string
        The name of the user. Note: This is only required on the sandbox,
        on production systems your access token will identify you.

    See more:
    http://developer.oanda.com/rest-sandbox/accounts/#-a-name-getaccountsforuser-a-get-accounts-for-a-user
    """
    url = "{0}/{1}/accounts".format(self.domain, self.API_VERSION)
    try:
        return self._Client__call(uri=url,
                                  params={"username": username},
                                  method="get")
    except (RequestException, AssertionError):
        # Best-effort API: any request/validation failure reports False.
        return False
Get a list of accounts owned by the user. Parameters ---------- username : string The name of the user. Note: This is only required on the sandbox, on production systems your access token will identify you. See more: http://developer.oanda.com/rest-sandbox/accounts/#-a-name-getaccountsforuser-a-get-accounts-for-a-user
def validate_object_action(self, action_name, obj=None):
    """Execute validation for actions that are related to a particular
    object.

    Non-detail actions other than update/partial_update/destroy are
    skipped.
    """
    action_method = getattr(self, action_name)
    is_detail = getattr(action_method, 'detail', False)
    mutating = action_name in ('update', 'partial_update', 'destroy')
    if not is_detail and not mutating:
        return
    for validator in getattr(self, action_name + '_validators', []):
        # get_object() is resolved lazily, only when a validator runs.
        validator(obj or self.get_object())
Execute validation for actions that are related to particular object
def is_subdir(base_path, test_path, trailing_slash=False, wildcards=False):
    """Return whether a path is a subpath of another.

    Args:
        base_path: The base path
        test_path: The path which we are testing
        trailing_slash: If True, the trailing slash is treated with
            importance. For example, ``/images/`` is a directory while
            ``/images`` is a file.
        wildcards: If True, globbing wildcards are matched against paths
    """
    if trailing_slash:
        # Keep only the directory component of each path.
        base_path = base_path.rsplit('/', 1)[0] + '/'
        test_path = test_path.rsplit('/', 1)[0] + '/'
    else:
        if not base_path.endswith('/'):
            base_path += '/'
        if not test_path.endswith('/'):
            test_path += '/'
    if wildcards:
        return fnmatch.fnmatchcase(test_path, base_path)
    return test_path.startswith(base_path)
Return whether the a path is a subpath of another. Args: base_path: The base path test_path: The path which we are testing trailing_slash: If True, the trailing slash is treated with importance. For example, ``/images/`` is a directory while ``/images`` is a file. wildcards: If True, globbing wildcards are matched against paths
def deferToGreenletPool(*args, **kwargs):
    """Call a function using a greenlet from the given pool and return
    the result as a Deferred.

    Positional layout: args[0] is the reactor, args[1] the greenlet pool,
    args[2] the function; remaining positional args and all kwargs are
    passed to the function.
    """
    reactor = args[0]
    pool = args[1]
    func = args[2]
    d = defer.Deferred()
    def task():
        try:
            reactor.callFromGreenlet(d.callback, func(*args[3:], **kwargs))
        except:
            # Bare except: any failure is routed to the Deferred's errback.
            # NOTE(review): presumably intentional, but this also swallows
            # GreenletExit -- confirm.
            reactor.callFromGreenlet(d.errback, failure.Failure())
    pool.add(spawn(task))
    return d
Call function using a greenlet from the given pool and return the result as a Deferred
def remove_children(self, id_):
    """Remove all children from an ``Id``.

    arg:    id (osid.id.Id): the ``Id`` of the node
    raise:  NotFound - a node identified by the given ``Id`` was not found
    raise:  NullArgument - ``id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    relationships = self._rls.get_relationships_by_genus_type_for_source(
        id_, self._relationship_type)
    if relationships.available() == 0:
        raise errors.NotFound()
    for relationship in relationships:
        self._ras.delete_relationship(relationship.get_id())
Removes all children from an ``Id``. arg: id (osid.id.Id): the ``Id`` of the node raise: NotFound - a node identified by the given ``Id`` was not found raise: NullArgument - ``id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def add_member_to_list(self, username, listname, member_type="USER"):
    """Add a member to an existing list.

    Args:
        username (str): The username of the user to add
        listname (str): The name of the list to add the user to
        member_type (str): Normally, this should be "USER". If you are
            adding a list as a member of another list, set this to
            "LIST", instead.
    """
    return self.client.service.addMemberToList(
        listname,
        username,
        member_type,
        self.proxy_id,
    )
Add a member to an existing list. Args: username (str): The username of the user to add listname (str): The name of the list to add the user to member_type (str): Normally, this should be "USER". If you are adding a list as a member of another list, set this to "LIST", instead.
def bind(self, callback):
    """Bind a ``callback`` to this event.

    :raises RuntimeError: if the event has already fired.
    """
    if self._self is None:
        raise RuntimeError('%s already fired, cannot add callbacks' % self)
    if self._handlers is None:
        # Lazily create the handler list on first bind.
        self._handlers = []
    self._handlers.append(callback)
Bind a ``callback`` to this event.
def applyconfiguration(targets, conf=None, *args, **kwargs):
    """Apply configuration on input targets.

    If targets are not annotated by a Configurable, a new one is
    instantiated.

    :param Iterable targets: targets to configure.
    :param tuple args: applyconfiguration var args.
    :param dict kwargs: applyconfiguration keywords.
    :return: configured targets.
    :rtype: list
    """
    result = []
    for target in targets:
        configurables = Configurable.get_annotations(target) or [Configurable()]
        for configurable in configurables:
            result.extend(configurable.applyconfiguration(
                targets=[target], conf=conf, *args, **kwargs))
    return result
Apply configuration on input targets. If targets are not annotated by a Configurable, a new one is instantiated. :param Iterable targets: targets to configure. :param tuple args: applyconfiguration var args. :param dict kwargs: applyconfiguration keywords. :return: configured targets. :rtype: list
def get(self, request, *args, **kwargs):
    """Django view get function.

    Add items of extra_context, crumbs and grid to context.

    Args:
        request (): Django's request object.
        *args (): request args.
        **kwargs (): request kwargs.

    Returns:
        response: render to response with context.
    """
    context = self.get_context_data(**kwargs)
    context.update(self.extra_context)
    context['crumbs'] = self.get_crumbs()
    context['title'] = self.title
    context['suit'] = 'suit' in settings.INSTALLED_APPS
    # Only fill in the grid when the view defines one and the context
    # does not already carry it.
    if self.grid and context.get('dashboard_grid', None) is None:
        context['dashboard_grid'] = self.grid
    return self.render_to_response(context)
Django view get function. Add items of extra_context, crumbs and grid to context. Args: request (): Django's request object. *args (): request args. **kwargs (): request kwargs. Returns: response: render to response with context.
def from_status(cls, status_line, msg=None):
    """Return a class method result based on a bottle.HTTPError.status_line
    attribute.

    Useful for patching `bottle.HTTPError` for web services.

    Args:
        status_line (str): bottle.HTTPError.status_line text.
        msg: The message data for response.

    Returns:
        Result of the class method named after status_line.
    """
    # "401 Unauthorized" -> method name "unauthorized"
    method_name = status_line.lower()[4:].replace(' ', '_')
    return getattr(cls, method_name)(msg)
Returns a class method from bottle.HTTPError.status_line attribute. Useful for patching `bottle.HTTPError` for web services. Args: status_line (str): bottle.HTTPError.status_line text. msg: The message data for response. Returns: Class method based on status_line arg. Examples: >>> status_line = '401 Unauthorized' >>> error_msg = 'Get out!' >>> resp = WSResponse.from_status(status_line, error_msg) >>> resp['errors'] ['Get out!'] >>> resp['status_text'] 'Unauthorized'
def cli(env, identifier):
    """Reset connections on a certain service group."""
    manager = SoftLayer.LoadBalancerManager(env.client)
    loadbal_id, group_id = loadbal.parse_id(identifier)
    manager.reset_service_group(loadbal_id, group_id)
    env.fout('Load balancer service group connections are being reset!')
Reset connections on a certain service group.
def _make_pretty_arguments(arguments): if arguments.startswith("\n Arguments:"): arguments = "\n".join(map(lambda u: u[6:], arguments.strip().split("\n")[1:])) return "**Arguments:**\n\n%s\n\n" % arguments
Makes the arguments description pretty and returns a formatted string if `arguments` starts with the argument prefix. Otherwise, returns None. Expected input: Arguments: * arg0 - ... ... * arg0 - ... ... Expected output: **Arguments:** * arg0 - ... ... * arg0 - ... ...
def score_for_task(properties, category, result):
    """Return the possible score of a task, depending on whether the result
    is correct or not; None for non-SV-COMP properties.
    """
    assert result is not None
    if not properties:
        return None
    if Property.create_from_names(properties).is_svcomp:
        return _svcomp_score(category, result)
    return None
Return the possible score of task, depending on whether the result is correct or not.
def kakwani(values, ineq_axis, weights=None):
    """Compute the Kakwani index.

    Measured as the area between the Lorenz curve of ``ineq_axis`` and the
    pseudo-Lorenz curve of ``values``, integrated with Simpson's rule.
    """
    from scipy.integrate import simps
    if weights is None:
        weights = ones(len(values))
    _, pseudo_lorenz_y = pseudo_lorenz(values, ineq_axis, weights)
    lorenz_x, lorenz_y = lorenz(ineq_axis, weights)
    return simps(lorenz_y - pseudo_lorenz_y, lorenz_x)
Computes the Kakwani index
def _fetch_pdb(pdb_code):
    """Load a PDB file from rcsb.org.

    :param pdb_code: four-letter PDB code (case-insensitive)
    :return: tuple of (url, file text or None on failure)
    """
    url = 'http://www.rcsb.org/pdb/files/%s.pdb' % pdb_code.lower()
    txt = None
    try:
        raw = urlopen(url).read()
        # Normalise to text: decode on Python 3, keep ascii bytes on 2.
        if sys.version_info[0] >= 3:
            txt = raw.decode('utf-8')
        else:
            txt = raw.encode('ascii')
    except HTTPError as e:
        print('HTTP Error %s' % e.code)
    except URLError as e:
        print('URL Error %s' % e.args)
    return url, txt
Load PDB file from rcsb.org.
def wrap_search(cls, response):
    """Wrap the response from a game search into instances and return them.

    :param response: The response from searching a game
    :type response: :class:`requests.Response`
    :returns: the new game instances
    :rtype: :class:`list` of :class:`Game`
    :raises: None
    """
    payload = response.json()
    return [cls.wrap_json(game_json) for game_json in payload['games']]
Wrap the response from a game search into instances and return them :param response: The response from searching a game :type response: :class:`requests.Response` :returns: the new game instances :rtype: :class:`list` of :class:`Game` :raises: None
def _get_relative_path(self, full_path): try: rel_path = Path(full_path).relative_to(Path().absolute()) except ValueError: LOG.error("%s: Couldn't find relative path of '%s' from '%s'.", self.name, full_path, Path().absolute()) return full_path return str(rel_path)
Return the relative path from current path.
def _config_chooser_dialog(self, title_text, description):
    """Dialog to select which config shall be exported.

    :param title_text: Title text
    :param description: Description
    :return: the Gtk response id of the closed dialog (ACCEPT or REJECT)
    """
    dialog = Gtk.Dialog(title_text, self.view["preferences_window"],
                        flags=0,
                        buttons=(Gtk.STOCK_CANCEL, Gtk.ResponseType.REJECT,
                                 Gtk.STOCK_OK, Gtk.ResponseType.ACCEPT))
    label = Gtk.Label(label=description)
    label.set_padding(xpad=10, ypad=10)
    dialog.vbox.pack_start(label, True, True, 0)
    label.show()
    # Check-boxes are kept on self so the caller can read their state
    # after the dialog returns.
    self._gui_checkbox = Gtk.CheckButton(label="GUI Config")
    dialog.vbox.pack_start(self._gui_checkbox, True, True, 0)
    self._gui_checkbox.show()
    self._core_checkbox = Gtk.CheckButton(label="Core Config")
    self._core_checkbox.show()
    dialog.vbox.pack_start(self._core_checkbox, True, True, 0)
    response = dialog.run()
    dialog.destroy()
    return response
Dialog to select which config shall be exported :param title_text: Title text :param description: Description
def add_missing_optional_args_with_value_none(args, optional_args):
    """Add missing keys with value None so the dict can afterwards be used
    without needing to check for KeyErrors.

    :args: The dictionary to be completed (modified in place).
    :optional_args: The keys that need to be added, if they are not present.
    :return: The modified dictionary.
    """
    for name in optional_args:
        # Idiom fix: direct membership test instead of `not name in
        # args.keys()` -- same behavior, clearer and avoids the keys() view.
        if name not in args:
            args[name] = None
    return args
Adds key-value pairs to the passed dictionary, so that afterwards, the dictionary can be used without needing to check for KeyErrors. If the keys passed as a second argument are not present, they are added with None as a value. :args: The dictionary to be completed. :optional_args: The keys that need to be added, if they are not present. :return: The modified dictionary.
def bltfrm(frmcls, outCell=None):
    """Return a SPICE set containing the frame IDs of all built-in frames
    of a specified class.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bltfrm_c.html

    :param frmcls: Frame class.
    :type frmcls: int
    :param outCell: Optional SpiceInt Cell that is returned
    :type outCell: spiceypy.utils.support_types.SpiceCell
    :return: Set of ID codes of frames of the specified class.
    :rtype: spiceypy.utils.support_types.SpiceCell
    """
    if not outCell:
        # Default capacity of 1000 frame IDs.
        outCell = stypes.SPICEINT_CELL(1000)
    libspice.bltfrm_c(ctypes.c_int(frmcls), outCell)
    return outCell
Return a SPICE set containing the frame IDs of all built-in frames of a specified class. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bltfrm_c.html :param frmcls: Frame class. :type frmcls: int :param outCell: Optional SpiceInt Cell that is returned :type outCell: spiceypy.utils.support_types.SpiceCell :return: Set of ID codes of frames of the specified class. :rtype: spiceypy.utils.support_types.SpiceCell
def isrchc(value, ndim, lenvals, array):
    """Search for a given value within a character string array.

    Return the index of the first matching array entry, or -1 if the key
    value was not found.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/isrchc_c.html

    :param value: Key value to be found in array.
    :type value: str
    :param ndim: Dimension of array.
    :type ndim: int
    :param lenvals: String length.
    :type lenvals: int
    :param array: Character string array to search.
    :type array: list of str
    :return: The index of the first matching array element or -1 if the
        value is not found.
    :rtype: int
    """
    key = stypes.stringToCharP(value)
    c_array = stypes.listToCharArrayPtr(array, xLen=lenvals, yLen=ndim)
    return libspice.isrchc_c(key, ctypes.c_int(ndim),
                             ctypes.c_int(lenvals), c_array)
Search for a given value within a character string array. Return the index of the first matching array entry, or -1 if the key value was not found. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/isrchc_c.html :param value: Key value to be found in array. :type value: str :param ndim: Dimension of array. :type ndim: int :param lenvals: String length. :type lenvals: int :param array: Character string array to search. :type array: list of str :return: The index of the first matching array element or -1 if the value is not found. :rtype: int
def add_text_content_type(application, content_type, default_encoding, dumps, loads):
    """Add a handler for a text content type.

    :param tornado.web.Application application: the application to modify
    :param str content_type: the content type to add
    :param str default_encoding: encoding to use when one is unspecified
    :param dumps: function that dumps a dictionary to a string.
        ``dumps(dict, encoding:str) -> str``
    :param loads: function that loads a dictionary from a string.
        ``loads(str, encoding:str) -> dict``

    Note that the ``charset`` parameter is stripped from `content_type`
    if it is present.
    """
    parsed = headers.parse_content_type(content_type)
    # Drop any charset parameter; the handler supplies the encoding.
    parsed.parameters.pop('charset', None)
    handler = handlers.TextContentHandler(str(parsed), dumps, loads,
                                          default_encoding)
    add_transcoder(application, handler)
Add handler for a text content type. :param tornado.web.Application application: the application to modify :param str content_type: the content type to add :param str default_encoding: encoding to use when one is unspecified :param dumps: function that dumps a dictionary to a string. ``dumps(dict, encoding:str) -> str`` :param loads: function that loads a dictionary from a string. ``loads(str, encoding:str) -> dict`` Note that the ``charset`` parameter is stripped from `content_type` if it is present.
def match_ref(self, ref):
    """Check if the ref matches one of the concept's aliases.

    If so, record the matched ref so it can be used as the column label.
    """
    if ref not in self.refs:
        return False
    self._matched_ref = ref
    return True
Check if the ref matches one the concept's aliases. If so, mark the matched ref so that we use it as the column label.
def sparkline_display_value_type(self, sparkline_display_value_type):
    """Set the sparkline_display_value_type of this ChartSettings.

    For the single stat view, whether to display the name of the query or
    the value of query  # noqa: E501

    :param sparkline_display_value_type: The sparkline_display_value_type
        of this ChartSettings; either "VALUE" or "LABEL".  # noqa: E501
    :type: str
    :raises ValueError: when the value is not one of the allowed values
    """
    allowed_values = ["VALUE", "LABEL"]
    if sparkline_display_value_type in allowed_values:
        self._sparkline_display_value_type = sparkline_display_value_type
    else:
        raise ValueError(
            "Invalid value for `sparkline_display_value_type` ({0}), must be one of {1}"
            .format(sparkline_display_value_type, allowed_values)
        )
Sets the sparkline_display_value_type of this ChartSettings. For the single stat view, whether to display the name of the query or the value of query # noqa: E501 :param sparkline_display_value_type: The sparkline_display_value_type of this ChartSettings. # noqa: E501 :type: str
def check_bom(file):
    """Determine a file's codec from its BOM record.

    If the file starts with a BOM record encoded as UTF-8 or UTF-16(BE/LE)
    the corresponding encoding name is returned and the file is positioned
    just after the BOM bytes; otherwise None is returned and the position
    is restored to offset 0. The file must be open in binary mode and
    positioned at offset 0.
    """
    lead = file.read(3)
    if lead == codecs.BOM_UTF8:
        return codecs.lookup('utf-8').name
    for bom, encoding in ((codecs.BOM_UTF16_BE, 'utf-16-be'),
                          (codecs.BOM_UTF16_LE, 'utf-16-le')):
        if len(lead) >= 2 and lead[:2] == bom:
            if len(lead) == 3:
                # Give back the byte read past the 2-byte BOM.
                file.seek(-1, os.SEEK_CUR)
            return codecs.lookup(encoding).name
    # No BOM: rewind whatever was read.
    file.seek(-len(lead), os.SEEK_CUR)
    return None
Determines file codec from from its BOM record. If file starts with BOM record encoded with UTF-8 or UTF-16(BE/LE) then corresponding encoding name is returned, otherwise None is returned. In both cases file current position is set to after-BOM bytes. The file must be open in binary mode and positioned at offset 0.
def is_block_device(self):
    """Whether this path is a block device.

    Returns False when the path does not exist; other OS errors propagate.
    """
    try:
        mode = self.stat().st_mode
    except OSError as e:
        if e.errno != ENOENT:
            raise
        return False
    return S_ISBLK(mode)
Whether this path is a block device.
def _get_loader(config): if config.endswith('.yml') or config.endswith('.yaml'): if not yaml: LOGGER.error("pyyaml must be installed to use the YAML loader") return None, None return 'yaml', yaml.load else: return 'json', json.loads
Determine which config file type and loader to use based on a filename. :param config str: filename to config file :return: a tuple of the loader type and callable to load :rtype: (str, Callable)
def _free_sequence(tmp1, tmp2=False): if not tmp1 and not tmp2: return [] output = [] if tmp1 and tmp2: output.append('pop de') output.append('ex (sp), hl') output.append('push de') output.append('call __MEM_FREE') output.append('pop hl') output.append('call __MEM_FREE') else: output.append('ex (sp), hl') output.append('call __MEM_FREE') output.append('pop hl') REQUIRES.add('alloc.asm') return output
Outputs a FREEMEM sequence for 1 or 2 ops
def update_from_object(self, obj, criterion=lambda key: key.isupper()):
    """Update dict from the attributes of a module, class or other object.

    ``obj`` may be the object itself or its absolute dotted name
    (e.g. ``'my_app.settings'``).  Only attributes whose names satisfy
    ``criterion`` — by default, all-uppercase names — are copied.
    """
    log.debug('Loading config from {0}'.format(obj))
    if isinstance(obj, basestring):
        if '.' in obj:
            # Dotted path: import the parent and fetch the final attribute.
            path, name = obj.rsplit('.', 1)
            module = __import__(path, globals(), locals(), [name], 0)
            obj = getattr(module, name)
        else:
            obj = __import__(obj, globals(), locals(), [], 0)
    pairs = ((key, getattr(obj, key)) for key in dir(obj) if criterion(key))
    self.update(pairs)
Update dict from the attributes of a module, class or other object. By default only attributes with all-uppercase names will be retrieved. Use the ``criterion`` argument to modify that behaviour. :arg obj: Either the actual module/object, or its absolute name, e.g. 'my_app.settings'. :arg criterion: Callable that must return True when passed the name of an attribute, if that attribute is to be used. :type criterion: :py:class:`function` .. versionadded:: 1.0
def numeric_columns(self, include_bool=True):
    """Return the names of this manager's numeric columns.

    :param include_bool: when False, boolean columns are excluded.
    :return: list of column names.
    """
    # A or (not A and B) simplifies to A or B.
    return [
        col
        for col, dtype in zip(self.columns, self.dtypes)
        if is_numeric_dtype(dtype) and (include_bool or dtype != np.bool_)
    ]
Returns the numeric columns of the Manager. Returns: List of index names.
def build_funcs(modules): kernel32 = ['kernel32_'] try: kernel32 += remove_dups(modules['kernel32']) except KeyError: if len(modules) and 'LoadLibraryA' not in kernel32: kernel32.insert(1, 'LoadLibraryA') if len(modules) > 1 and 'LoadLibraryA' not in kernel32: kernel32.insert(1, 'LoadLibraryA') if 'GetProcAddress' not in kernel32: kernel32.insert(1, 'GetProcAddress') logging.debug('kernel32: %s', kernel32) for module, funcs in modules.items(): logging.debug('%s: %s', module, funcs) if module != 'kernel32': kernel32.extend([module + '_'] + remove_dups(funcs)) return kernel32
Build a used functions and modules list for later consumption.
def volume(self):
    """Volume of the box Primitive.

    Returns
    -------
    volume : float
        Product of the box extents.
    """
    # np.product is a deprecated alias that was removed in NumPy 2.0;
    # np.prod is the supported spelling with identical semantics.
    volume = float(np.prod(self.primitive.extents))
    return volume
Volume of the box Primitive. Returns -------- volume: float, volume of box
def com_google_fonts_check_family_underline_thickness(ttFonts):
    """Fonts have consistent underline thickness?"""
    thicknesses = {}
    reference = None
    mismatch = False
    for ttfont in ttFonts:
        fontname = ttfont.reader.file.name
        value = ttfont['post'].underlineThickness
        thicknesses[fontname] = value
        # The first font's value becomes the family reference.
        if reference is None:
            reference = value
        if value != reference:
            mismatch = True
    if not mismatch:
        yield PASS, "Fonts have consistent underline thickness."
        return
    msg = ("Thickness of the underline is not"
           " the same accross this family. In order to fix this,"
           " please make sure that the underlineThickness value"
           " is the same in the 'post' table of all of this family"
           " font files.\n"
           "Detected underlineThickness values are:\n")
    for style in thicknesses.keys():
        msg += "\t{}: {}\n".format(style, thicknesses[style])
    yield FAIL, msg
Fonts have consistent underline thickness?
def as_odict(self):
    """Return an odict version of the object, based on its attributes.

    A precomputed ``cust_odict`` wins outright; otherwise the optional
    ``attr_check`` hook runs before the attributes listed in
    ``attrorder`` are collected.
    """
    if hasattr(self, 'cust_odict'):
        return self.cust_odict
    if hasattr(self, 'attr_check'):
        self.attr_check()
    result = odict()
    for name in self.attrorder:
        result[name] = getattr(self, name)
    return result
returns an odict version of the object, based on its attributes
def nl_socket_modify_cb(sk, type_, kind, func, arg):
    """Modify the callback handler associated with the socket.

    Sets a specific callback in the nl_cb instance stored on the socket.

    Positional arguments:
    sk -- Netlink socket (nl_sock class instance).
    type_ -- which type of callback to set (integer).
    kind -- kind of callback (integer).
    func -- callback function.
    arg -- argument passed to the callback function.

    Returns:
    0 on success or a negative error code.
    """
    result = nl_cb_set(sk.s_cb, type_, kind, func, arg)
    return int(result)
Modify the callback handler associated with the socket. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/socket.c#L633 Sets specific callback functions in the existing nl_cb class instance stored in the nl_sock socket. Positional arguments: sk -- Netlink socket (nl_sock class instance). type_ -- which type callback to set (integer). kind -- kind of callback (integer). func -- callback function. arg -- argument to be passed to callback function. Returns: 0 on success or a negative error code.
def load_checkers():
    """Import every checker module found in the ``checkers`` sub-package."""
    import importlib.util
    import sys
    for finder, name, _ in pkgutil.iter_modules([os.path.join(__path__[0], 'checkers')]):
        # finder.find_module()/.load_module() were deprecated since
        # Python 3.4 and removed in 3.12; use the spec-based API instead.
        spec = finder.find_spec(name)
        module = importlib.util.module_from_spec(spec)
        # Mirror load_module()'s sys.modules registration so repeated or
        # recursive imports keep working.
        sys.modules[name] = module
        spec.loader.exec_module(module)
Load the checkers
async def _analog_message(self, data):
    """
    This is a private message handler for analog value report messages.

    Reassembles the reported value, stores it on the pin, fires any
    registered callback and evaluates any latch set for the pin.

    :param data: message data

    :returns: None - but saves the data in the pins structure
    """
    pin = data[0]
    # The analog value arrives split over two 7-bit bytes (LSB/MSB).
    value = (data[PrivateConstants.MSB] << 7) + data[PrivateConstants.LSB]
    self.analog_pins[pin].current_value = value
    message = [pin, value, Constants.ANALOG]
    if self.analog_pins[pin].cb:
        if self.analog_pins[pin].cb_type:
            # cb_type truthy: the callback is awaitable.
            await self.analog_pins[pin].cb(message)
        else:
            # Plain callable: schedule it on the event loop instead.
            loop = self.loop
            loop.call_soon(self.analog_pins[pin].cb, message)
    # Latch keys for analog pins are prefixed with 'A'.
    key = 'A' + str(pin)
    if key in self.latch_map:
        await self._check_latch_data(key, message[1])
This is a private message handler method. It is a message handler for analog messages. :param data: message data :returns: None - but saves the data in the pins structure
def moderate(self, comment, entry, request):
    """Determine whether a new comment must await moderator approval.

    Return True to put the comment into the moderation queue, False to
    let it appear immediately.
    """
    if self.auto_moderate_comments:
        return True
    return bool(check_is_spam(comment, entry, request,
                              self.spam_checker_backends))
Determine if a new comment should be marked as non-public and await approval. Return ``True`` to put the comment into the moderator queue, or ``False`` to allow it to be showed up immediately.
def _sign_simple_signature_fulfillment(cls, input_, message, key_pairs):
    """Signs a Ed25519Fulfillment.

    Args:
        input_ (:class:`~bigchaindb.common.transaction.Input`): The
            input to be signed.
        message (str): The message to be signed.
        key_pairs (dict): Mapping from public key to the private key
            used to sign.

    Returns:
        A signed deep copy of ``input_``; the caller's object is left
        untouched.

    Raises:
        KeypairMismatchException: If ``key_pairs`` contains no private
            key for the input's public key.
    """
    # Operate on a copy so the caller's input is not mutated.
    input_ = deepcopy(input_)
    public_key = input_.owners_before[0]
    message = sha3_256(message.encode())
    if input_.fulfills:
        # Bind the signature to the spent output by folding its txid and
        # output index into the digest.
        message.update('{}{}'.format(
            input_.fulfills.txid, input_.fulfills.output).encode())
    try:
        # NOTE(review): assumes the private keys are base58-encoded
        # strings -- confirm against the caller.
        input_.fulfillment.sign(
            message.digest(),
            base58.b58decode(key_pairs[public_key].encode()))
    except KeyError:
        raise KeypairMismatchException('Public key {} is not a pair to '
                                       'any of the private keys'
                                       .format(public_key))
    return input_
Signs a Ed25519Fulfillment. Args: input_ (:class:`~bigchaindb.common.transaction. Input`) The input to be signed. message (str): The message to be signed key_pairs (dict): The keys to sign the Transaction with.
def run_parallel_with_display(wf, n_threads, display):
    """Run workflow *wf* with *n_threads* parallel workers, forwarding log
    messages to *display*.

    Because messages arrive asynchronously, an extra daemon thread is
    started just for the display routine.

    :param wf: the workflow to run
    :param n_threads: number of worker threads
    :param display: display routine; also supplies the error handler
    :return: result of running the workflow
    """
    LogQ = Queue()
    S = Scheduler(error_handler=display.error_handler)
    # Pump everything arriving on the log queue into the display sink
    # from a daemon thread, so display I/O never blocks the workers.
    threading.Thread(
        target=patch, args=(LogQ.source, sink_map(display)),
        daemon=True).start()
    # Pipeline: log job starts, fan out across the worker pool, then log
    # results -- all tapped into the same log queue.
    W = Queue() \
        >> branch(log_job_start >> LogQ.sink) \
        >> thread_pool(*repeat(worker, n_threads)) \
        >> branch(LogQ.sink)
    result = S.run(W, get_workflow(wf))
    # Drain pending log messages before returning.
    LogQ.wait()
    return result
Adds a display to the parallel runner. Because messages come in asynchronously now, we start an extra thread just for the display routine.
async def expn(
    self, address: str, timeout: DefaultNumType = _default
) -> SMTPResponse:
    """
    Send an SMTP EXPN command, which expands a mailing list.
    Not many servers support this command.

    :param address: mailing-list address to expand
    :param timeout: per-command timeout override
    :raises SMTPResponseException: on unexpected server response code
    """
    # EXPN is only valid once the session has been opened with EHLO/HELO.
    await self._ehlo_or_helo_if_needed()
    parsed_address = parse_address(address)
    async with self._command_lock:
        response = await self.execute_command(
            b"EXPN", parsed_address.encode("ascii"), timeout=timeout
        )
    if response.code != SMTPStatus.completed:
        raise SMTPResponseException(response.code, response.message)
    return response
Send an SMTP EXPN command, which expands a mailing list. Not many servers support this command. :raises SMTPResponseException: on unexpected server response code
def calc_A_hat(A, S):
    """Return the A_hat matrix of A given the skew matrix S: S . A . S^T."""
    S_transposed = np.transpose(S)
    # Same association as S @ (A @ S^T) to keep results bit-identical.
    return np.dot(S, np.dot(A, S_transposed))
Return the A_hat matrix of A given the skew matrix S
def check_corrupted_files_cmd(java_home, files):
    """Build the shell command checking the given files for corruption.

    :param java_home: the JAVA_HOME
    :type java_home: string
    :param files: files to be checked
    :type files: list of string
    """
    check_command = CHECK_COMMAND.format(
        ionice=IONICE,
        java_home=java_home,
        files=",".join(files),
    )
    # Pipe the raw checker output through the output reducer.
    return "{check_command} | {reduce_output}".format(
        check_command=check_command,
        reduce_output=REDUCE_OUTPUT,
    )
Check the file corruption of the specified files. :param java_home: the JAVA_HOME :type java_home: string :param files: list of files to be checked :type files: list of string
def get_exception(self):
    """Return the exception associated with the export.

    :return: An exception tuple, if any
    """
    with self.__lock:
        # A locally stored exception (or a closed export) takes priority
        # over whatever the export reference reports.
        if self.__updateexception or self.__closed:
            return self.__updateexception
        return self.__exportref.get_exception()
Returns the exception associated to the export :return: An exception tuple, if any
def absent(
        name,
        region,
        user=None,
        opts=False):
    """Remove the named SQS queue if it exists.

    name
        Name of the SQS queue.
    region
        Region to remove the queue from.
    user
        Name of the user performing the SQS operations.
    opts
        Include additional arguments and options to the aws command line.
    """
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    if not __salt__['aws_sqs.queue_exists'](name, region, opts, user):
        ret['comment'] = '{0} does not exist in {1}'.format(name, region)
        return ret
    if __opts__['test']:
        # Dry run: report the pending removal without touching the queue.
        ret['result'] = None
        ret['comment'] = 'AWS SQS queue {0} is set to be removed'.format(
            name)
        return ret
    removed = __salt__['aws_sqs.delete_queue'](name, region, opts, user)
    if removed['retcode'] == 0:
        ret['changes']['removed'] = removed['stdout']
    else:
        ret['result'] = False
        ret['comment'] = removed['stderr']
    return ret
Remove the named SQS queue if it exists. name Name of the SQS queue. region Region to remove the queue from user Name of the user performing the SQS operations opts Include additional arguments and options to the aws command line
def wrap_paths(paths):
    """Put quotes around all paths and join them with spaces in-between."""
    if isinstance(paths, string_types):
        raise ValueError(
            "paths cannot be a string. "
            "Use array with one element instead."
        )
    quoted = ('"' + path + '"' for path in paths)
    return ' '.join(quoted)
Put quotes around all paths and join them with space in-between.
def kick_jobs(self, num_jobs):
    """Kick up to *num_jobs* jobs from the buried queue onto the ready queue.

    :param num_jobs: Number of jobs to kick
    :type num_jobs: int

    If fewer jobs are buried, as many as possible are kicked.
    """
    with self._sock_ctx() as socket:
        command = 'kick {0}'.format(num_jobs)
        self._send_message(command, socket)
        return self._receive_id(socket)
Kick some number of jobs from the buried queue onto the ready queue. :param num_jobs: Number of jobs to kick :type num_jobs: int If not that many jobs are in the buried queue, it will kick as many as it can.
def disconnect_all(self):
    """Disconnect every connection attached to this port."""
    with self._mutex:
        for connection in self.connections:
            self.object.disconnect(connection.id)
        self.reparse_connections()
Disconnect all connections to this port.
def salt_extend(extension, name, description, salt_dir, merge):
    """Quickstart for developing on the saltstack installation.

    .. versionadded:: 2016.11.0
    """
    import salt.utils.extend
    salt.utils.extend.run(
        extension=extension,
        name=name,
        description=description,
        salt_dir=salt_dir,
        merge=merge)
Quickstart for developing on the saltstack installation .. versionadded:: 2016.11.0
def room_members(self, stream_id):
    """Get the list of members of the room identified by *stream_id*.

    :return: tuple of (status_code, response)
    """
    req_hook = 'pod/v2/room/' + str(stream_id) + '/membership/list'
    status_code, response = self.__rest__.GET_query(req_hook, None)
    self.logger.debug('%s: %s' % (status_code, response))
    return status_code, response
get list of room members
def cancel(self):
    """Detach the strategy from its sensor (when observing updates) and
    schedule cancellation of its ioloop timeouts."""
    observing = self.OBSERVE_UPDATES
    if observing:
        self.detach()
    self.ioloop.add_callback(self.cancel_timeouts)
Detach strategy from its sensor and cancel ioloop callbacks.
def calc_mean_time_deviation(timepoints, weights, mean_time=None):
    """Return the weighted deviation of the given timepoints from their
    mean time.

    With equal weights this is simply the standard deviation of the
    timepoints.  A precalculated (or alternate) *mean_time* may be
    supplied; otherwise it is derived via :func:`calc_mean_time`.
    Weights must be non-negative and shaped like *timepoints*.
    """
    timepoints = numpy.array(timepoints)
    weights = numpy.array(weights)
    validtools.test_equal_shape(timepoints=timepoints, weights=weights)
    validtools.test_non_negative(weights=weights)
    if mean_time is None:
        mean_time = calc_mean_time(timepoints, weights)
    squared_deviations = (timepoints - mean_time) ** 2
    variance = numpy.dot(weights, squared_deviations) / numpy.sum(weights)
    return numpy.sqrt(variance)
Return the weighted deviation of the given timepoints from their mean time. With equal given weights, the is simply the standard deviation of the given time points: >>> from hydpy import calc_mean_time_deviation >>> calc_mean_time_deviation(timepoints=[3., 7.], ... weights=[2., 2.]) 2.0 One can pass a precalculated or alternate mean time: >>> from hydpy import round_ >>> round_(calc_mean_time_deviation(timepoints=[3., 7.], ... weights=[2., 2.], ... mean_time=4.)) 2.236068 >>> round_(calc_mean_time_deviation(timepoints=[3., 7.], ... weights=[1., 3.])) 1.732051 Or, in the most extreme case: >>> calc_mean_time_deviation(timepoints=[3., 7.], ... weights=[0., 4.]) 0.0 There will be some checks for input plausibility perfomed, e.g.: >>> calc_mean_time_deviation(timepoints=[3., 7.], ... weights=[-2., 2.]) Traceback (most recent call last): ... ValueError: While trying to calculate the weighted time deviation \ from mean time, the following error occurred: For the following objects, \ at least one value is negative: weights.
def get_extra_keywords(self):
    """Obtain extra keywords from the current state of the widgets."""
    extra_keywords = {}
    for key, widgets in list(self.widgets_dict.items()):
        checkbox, editor = widgets[0], widgets[1]
        # Only checked entries contribute a keyword.
        if not checkbox.isChecked():
            continue
        if isinstance(editor, QLineEdit):
            extra_keywords[key] = editor.text()
        elif isinstance(editor, QComboBox):
            extra_keywords[key] = editor.itemData(editor.currentIndex())
        elif isinstance(editor, (QDoubleSpinBox, QSpinBox)):
            extra_keywords[key] = editor.value()
        elif isinstance(editor, QDateTimeEdit):
            extra_keywords[key] = editor.dateTime().toString(Qt.ISODate)
    return extra_keywords
Obtain extra keywords from the current state.
def _select_broker_pair(self, rg_destination, victim_partition): broker_source = self._elect_source_broker(victim_partition) broker_destination = rg_destination._elect_dest_broker(victim_partition) return broker_source, broker_destination
Select best-fit source and destination brokers based on partition count and presence of partition over the broker. * Get overloaded and underloaded brokers Best-fit Selection Criteria: Source broker: Select broker containing the victim-partition with maximum partitions. Destination broker: NOT containing the victim-partition with minimum partitions. If no such broker found, return first broker. This helps in ensuring:- * Topic-partitions are distributed across brokers. * Partition-count is balanced across replication-groups.
def get_revision():
    """Get the current git revision hash."""
    proc = Process("git log", ["git", "log", "-1"])
    try:
        while True:
            line = proc.stdout.pop().strip().decode('utf8')
            # Skip blank lines; the hash follows the "commit " prefix.
            if line and line.startswith("commit "):
                return line[7:]
    finally:
        with suppress_exception:
            proc.join()
Get the current git revision.
def get_collection(self, path, query, **options):
    """Get a collection from a collection endpoint.

    Parses request options and dispatches either a paging iterator or a
    plain GET, depending on the ``iterator_type`` option.
    """
    options = self._merge_options(options)
    iterator_type = options['iterator_type']
    if iterator_type == 'items':
        return CollectionPageIterator(self, path, query, options).items()
    if iterator_type is None:
        return self.get(path, query, **options)
    raise Exception('Unknown value for "iterator_type" option: {}'.format(
        str(iterator_type)))
Get a collection from a collection endpoint. Parses GET request options for a collection endpoint and dispatches a request.
def getValidCertifications(self):
    """Return the certifications that are fully valid today."""
    today = date.today()
    valid = []
    for certification in self.getCertifications():
        valid_from = certification.getValidFrom() if certification else None
        valid_to = certification.getValidTo() if valid_from else None
        # Certifications missing either bound are never considered valid.
        if not valid_from or not valid_to:
            continue
        valid_from = valid_from.asdatetime().date()
        valid_to = valid_to.asdatetime().date()
        if valid_from <= today <= valid_to:
            valid.append(certification)
    return valid
Returns the certifications that are fully valid
def page(
    request,
    slug,
    rev_id=None,
    template_name='wakawaka/page.html',
    extra_context=None,
):
    """
    Displays a wiki page.

    Redirects authenticated users to the edit view when the page does not
    exist; anonymous users get a 404.

    :param request: the current HttpRequest
    :param slug: slug of the wiki page to display
    :param rev_id: optional pk of a specific revision to show instead of
        the current one
    :param template_name: template used to render the page
    :param extra_context: extra variables merged into the template context
    """
    try:
        queryset = WikiPage.objects.all()
        page = queryset.get(slug=slug)
        rev = page.current

        # Display a specific revision when requested, flagging it so the
        # template can tell it apart from the current one.
        if rev_id:
            revision_queryset = Revision.objects.all()
            rev_specific = revision_queryset.get(pk=rev_id)
            if rev.pk != rev_specific.pk:
                rev_specific.is_not_current = True
                rev = rev_specific

    except WikiPage.DoesNotExist:
        # Missing page: let authenticated users create it via the editor.
        if request.user.is_authenticated:
            kwargs = {'slug': slug}
            redirect_to = reverse('wakawaka_edit', kwargs=kwargs)
            return HttpResponseRedirect(redirect_to)
        raise Http404

    template_context = {'page': page, 'rev': rev}
    template_context.update(extra_context or {})
    return render(request, template_name, template_context)
Displays a wiki page. Redirects to the edit view if the page doesn't exist.
def check_vprint(s, vprinter):
    """Checked verbose printing: plain print when *vprinter* is True,
    delegate to *vprinter* when it is callable, otherwise do nothing."""
    if vprinter is True:
        print(s)
    elif callable(vprinter):
        vprinter(s)
checked verbose printing
def build_frame(command, payload):
    """Build the raw frame bytes for *command* and *payload*, with a
    trailing CRC computed over everything before it."""
    packet_length = 2 + len(payload) + 1
    frame = struct.pack("BB", 0, packet_length)
    frame += struct.pack(">H", command.value)
    frame += payload
    # CRC covers the frame up to (but not including) the CRC byte itself.
    return frame + struct.pack("B", calc_crc(frame))
Build raw bytes from command and payload.
def get(self, name=None):
    """Return the plugin class registered under *name*.

    With no name, the complete plugin dictionary is returned; unknown
    names yield None.
    """
    if name is None:
        return self._classes
    # dict.get already returns None for unknown names.
    return self._classes.get(name)
Returns the plugin class object with the given name. Or if a name is not given, the complete plugin dictionary is returned. :param name: Name of a plugin :return: None, single plugin or dictionary of plugins
def deps_status(self):
    """Return a list with the status of each dependency (S_OK when
    there are none)."""
    if not self.deps:
        return [self.S_OK]
    return [dep.status for dep in self.deps]
Returns a list with the status of the dependencies.
def reset_stream(stream):
    """Reset the stream pointer to the first element.

    Raises TabulatorException when the stream is not seekable.
    """
    try:
        position = stream.tell()
    except Exception:
        # tell() unsupported: force the seek attempt below.
        position = True
    if position == 0:
        return
    try:
        stream.seek(0)
    except Exception:
        message = 'It\'s not possible to reset this stream'
        raise exceptions.TabulatorException(message)
Reset stream pointer to the first element. If stream is not seekable raise Exception.
def com_google_fonts_check_smart_dropout(ttFont):
    """Font enables smart dropout control in "prep" table instructions?

    Looks for the PUSHW/SCANCTRL/PUSHB/SCANTYPE byte sequence.
    """
    # PUSHW 0x01FF, SCANCTRL, PUSHB 0x04, SCANTYPE
    INSTRUCTIONS = b"\xb8\x01\xff\x85\xb0\x04\x8d"
    has_instructions = ("prep" in ttFont and
                        INSTRUCTIONS in ttFont["prep"].program.getBytecode())
    if has_instructions:
        yield PASS, ("'prep' table contains instructions"
                     " enabling smart dropout control.")
    else:
        yield FAIL, ("'prep' table does not contain TrueType "
                     " instructions enabling smart dropout control."
                     " To fix, export the font with autohinting enabled,"
                     " or run ttfautohint on the font, or run the "
                     " `gftools fix-nonhinting` script.")
Font enables smart dropout control in "prep" table instructions? B8 01 FF PUSHW 0x01FF 85 SCANCTRL (unconditionally turn on dropout control mode) B0 04 PUSHB 0x04 8D SCANTYPE (enable smart dropout control) Smart dropout control means activating rules 1, 2 and 5: Rule 1: If a pixel's center falls within the glyph outline, that pixel is turned on. Rule 2: If a contour falls exactly on a pixel's center, that pixel is turned on. Rule 5: If a scan line between two adjacent pixel centers (either vertical or horizontal) is intersected by both an on-Transition contour and an off-Transition contour and neither of the pixels was already turned on by rules 1 and 2, turn on the pixel which is closer to the midpoint between the on-Transition contour and off-Transition contour. This is "Smart" dropout control.
def auto_convert_cell_no_flags(cell, units=None, parens_as_neg=True):
    """First-step conversion of *cell*, checking its type or converting it
    when a valid conversion exists.

    This variant neither flags changes nor stores cell units.

    Args:
        cell: the raw cell value to convert.
        units: the dictionary holder for cell units (a fresh dict when None).
        parens_as_neg: converts numerics surrounded by parens to negative
            values.
    """
    # `units != None` invokes __eq__ and can misfire on objects overriding
    # equality; identity comparison is the correct None check.
    if units is None:
        units = {}
    return auto_convert_cell(flagable=Flagable(),
                             cell=cell,
                             position=None,
                             worksheet=0,
                             flags={},
                             units=units,
                             parens_as_neg=parens_as_neg)
Performs a first step conversion of the cell to check it's type or try to convert if a valid conversion exists. This version of conversion doesn't flag changes nor store cell units. Args: units: The dictionary holder for cell units. parens_as_neg: Converts numerics surrounded by parens to negative values