Columns:
    code: string (lengths 51 to 2.38k)
    docstring: string (lengths 4 to 15.2k)
def create_window(self):
    self.undocked_window = window = PluginWindow(self)
    window.setAttribute(Qt.WA_DeleteOnClose)
    icon = self.get_plugin_icon()
    if is_text_string(icon):
        icon = self.get_icon(icon)
    window.setWindowIcon(icon)
    window.setWindowTitle(self.get_plugin_title())
    window.setCentralWidget(self)
    window.resize(self.size())
    self.refresh_plugin()
    self.dockwidget.setFloating(False)
    self.dockwidget.setVisible(False)
    window.show()
Create a QMainWindow instance containing this plugin.
def clear_conditions(self, *conkeys, **noclear):
    offenders = set(conkeys) - set(self.conconf.conditions.keys())
    if offenders:
        raise KeyError(', '.join([off for off in offenders]))
    offenders = set(noclear) - set({'noclear'})
    if offenders:
        raise KeyError(', '.join([off for off in offenders]))
    noclear = noclear.get('noclear', False)
    for ck in self.conconf.conditions:
        if not conkeys:
            self.conconf.reset()
            break
        elif not noclear and ck in conkeys:
            self.conconf.set_condition(ck, None)
        elif noclear and ck not in conkeys:
            self.conconf.set_condition(ck, None)
    if not self.no_auto:
        self.make_mask()
Clear conditions. Clear only the conditions conkeys if specified. Clear only the conditions not specified by conkeys if noclear is True (False default). .. note:: Updates the mask if not no_auto.
def _calculate_day_cost(self, plan, period):
    plan_pricings = plan.planpricing_set.order_by('-pricing__period').select_related('pricing')
    selected_pricing = None
    for plan_pricing in plan_pricings:
        selected_pricing = plan_pricing
        if plan_pricing.pricing.period <= period:
            break
    if selected_pricing:
        return (selected_pricing.price / selected_pricing.pricing.period).quantize(Decimal('1.00'))
    raise ValueError('Plan %s has no pricings.' % plan)
Finds the best-fitting plan pricing for a given period and calculates the day cost.
def encode_offset_commit_request(cls, group, payloads):
    return kafka.protocol.commit.OffsetCommitRequest[0](
        consumer_group=group,
        topics=[(
            topic,
            [(
                partition,
                payload.offset,
                payload.metadata)
                for partition, payload in six.iteritems(topic_payloads)])
            for topic, topic_payloads in six.iteritems(
                group_by_topic_and_partition(payloads))])
Encode an OffsetCommitRequest struct Arguments: group: string, the consumer group you are committing offsets for payloads: list of OffsetCommitRequestPayload
def face_adjacency(self):
    adjacency, edges = graph.face_adjacency(mesh=self, return_edges=True)
    self._cache['face_adjacency_edges'] = edges
    return adjacency
Find faces that share an edge, which we call here 'adjacent'.

Returns
----------
adjacency : (n, 2) int
    Pairs of faces which share an edge

Examples
---------
In [1]: mesh = trimesh.load('models/featuretype.STL')

In [2]: mesh.face_adjacency
Out[2]:
array([[   0,    1],
       [   2,    3],
       [   0,    3],
       ...,
       [1112,  949],
       [3467, 3475],
       [1113, 3475]])

In [3]: mesh.faces[mesh.face_adjacency[0]]
Out[3]:
TrackedArray([[   1,    0,  408],
              [1239,    0,    1]], dtype=int64)

In [4]: import networkx as nx

In [5]: graph = nx.from_edgelist(mesh.face_adjacency)

In [6]: groups = nx.connected_components(graph)
def aggregate_repeated_calls(frame, options):
    if frame is None:
        return None

    children_by_identifier = {}
    for child in frame.children:
        if child.identifier in children_by_identifier:
            aggregate_frame = children_by_identifier[child.identifier]
            aggregate_frame.self_time += child.self_time
            if child.children:
                aggregate_frame.add_children(child.children)
            child.remove_from_parent()
        else:
            children_by_identifier[child.identifier] = child

    for child in frame.children:
        aggregate_repeated_calls(child, options=options)

    frame._children.sort(key=methodcaller('time'), reverse=True)
    return frame
Converts a timeline into a time-aggregate summary. Adds together calls along the same call stack, so that repeated calls appear as the same frame. Removes time-linearity - frames are sorted according to total time spent. Useful for outputs that display a summary of execution (e.g. text and html outputs)
def connected_objects(self, from_obj): return self.to_content_type.get_all_objects_for_this_type(pk__in=self.connected_object_ids(from_obj))
Returns a query set matching all connected objects with the given object as a source.
def get_fullsize(self, kwargs):
    fullsize_args = {}
    if 'absolute' in kwargs:
        fullsize_args['absolute'] = kwargs['absolute']
    for key in ('width', 'height', 'quality', 'format', 'background', 'crop'):
        fsk = 'fullsize_' + key
        if fsk in kwargs:
            fullsize_args[key] = kwargs[fsk]
    img_fullsize, _ = self.get_rendition(1, **fullsize_args)
    return img_fullsize
Get the fullsize rendition URL
def get_instance(self, payload): return InviteInstance( self._version, payload, service_sid=self._solution['service_sid'], channel_sid=self._solution['channel_sid'], )
Build an instance of InviteInstance :param dict payload: Payload response from the API :returns: twilio.rest.chat.v2.service.channel.invite.InviteInstance :rtype: twilio.rest.chat.v2.service.channel.invite.InviteInstance
def subscribe_list(self, list_id): return List(tweepy_list_to_json(self._client.subscribe_list(list_id=list_id)))
Subscribe to a list :param list_id: list ID number :return: :class:`~responsebot.models.List` object
def abbreviate_tab_names_changed(self, settings, key, user_data):
    abbreviate_tab_names = settings.get_boolean('abbreviate-tab-names')
    self.guake.abbreviate = abbreviate_tab_names
    self.guake.recompute_tabs_titles()
If the gconf var abbreviate_tab_names is changed, this method will be called and will update the tab names.
def get_sdb_keys(self, path):
    list_resp = get_with_retry(
        self.cerberus_url + '/v1/secret/' + path + '/?list=true',
        headers=self.HEADERS
    )
    throw_if_bad_response(list_resp)
    return list_resp.json()['data']['keys']
Return the keys for an SDB, which are needed for the full secure data path.
def set_features(self):
    allpsms_str = readers.generate_psms_multiple_fractions_strings(
        self.mergefiles, self.ns)
    allpeps = preparation.merge_peptides(self.mergefiles, self.ns)
    self.features = {'psm': allpsms_str, 'peptide': allpeps}
Merge all psms and peptides
def update_rejection_permissions(portal):
    updated = update_rejection_permissions_for(portal, "bika_ar_workflow",
                                               "Reject Analysis Request")
    if updated:
        brains = api.search(dict(portal_type="AnalysisRequest"),
                            CATALOG_ANALYSIS_REQUEST_LISTING)
        update_rolemappings_for(brains, "bika_ar_workflow")

    updated = update_rejection_permissions_for(portal, "bika_sample_workflow",
                                               "Reject Sample")
    if updated:
        brains = api.search(dict(portal_type="Sample"), "bika_catalog")
        update_rolemappings_for(brains, "bika_sample_workflow")
Adds the permission 'Reject Analysis Request' and updates the permission mappings accordingly.
def _annotate_query(query, generate_dict):
    annotate_key_list = []
    for field_name, annotate_dict in generate_dict.items():
        for annotate_name, annotate_func in annotate_dict["annotate_dict"].items():
            query = annotate_func(query)
            annotate_key_list.append(annotate_name)
    return query, annotate_key_list
Add annotations to the query to retrieve values required by field value generate functions.
def removeSingleCachedFile(self, fileStoreID):
    with self._CacheState.open(self) as cacheInfo:
        cachedFile = self.encodedFileID(fileStoreID)
        cachedFileStats = os.stat(cachedFile)
        assert cachedFileStats.st_nlink <= self.nlinkThreshold, \
            'Attempting to delete a global file that is in use by another job.'
        assert cachedFileStats.st_nlink >= self.nlinkThreshold, \
            'A global file has too FEW links at deletion time. Our link threshold is incorrect!'
        os.remove(cachedFile)
        if self.nlinkThreshold != 2:
            cacheInfo.cached -= cachedFileStats.st_size
        if not cacheInfo.isBalanced():
            self.logToMaster('CACHE: The cache was not balanced on removing single file',
                             logging.WARN)
        self.logToMaster('CACHE: Successfully removed file with ID \'%s\'.' % fileStoreID)
    return None
Removes a single file described by the fileStoreID from the cache forcibly.
def expand(self, msgpos):
    MT = self._tree[msgpos]
    MT.expand(MT.root)
expand message at given position
def _build_url(self, shorten=True): self.url = URL_FORMAT.format(*self._get_url_params(shorten=shorten))
Build the url for a cable ratings page
def status(self):
    hw_type, name, major, minor, patch, status = self.rpc(0x00, 0x04,
                                                          result_format="H6sBBBB")
    status = {
        'hw_type': hw_type,
        'name': name.decode('utf-8'),
        'version': (major, minor, patch),
        'status': status
    }
    return status
Query the status of an IOTile including its name and version
async def find_backwards(self, stream_name, predicate, predicate_label='predicate'):
    logger = self._logger.getChild(predicate_label)
    logger.info('Fetching first matching event')
    uri = self._head_uri
    try:
        page = await self._fetcher.fetch(uri)
    except HttpNotFoundError as e:
        raise StreamNotFoundError() from e
    while True:
        evt = next(page.iter_events_matching(predicate), None)
        if evt is not None:
            return evt
        uri = page.get_link("next")
        if uri is None:
            logger.warning("No matching event found")
            return None
        page = await self._fetcher.fetch(uri)
Return first event matching predicate, or None if none exists. Note: 'backwards', both here and in Event Store, means 'towards the event emitted furthest in the past'.
def disconnect(self, callback):
    try:
        self._callbacks.remove(callback)
    except ValueError:
        self._callbacks.remove(ref(callback))
Disconnects a callback from this signal.

:param callback: The callback to disconnect.
:param weak: A flag that must have the same value as the one specified
    during the call to `connect`.

.. warning:: If the callback is not connected at the time of call, a
    :class:`ValueError` exception is thrown.

.. note:: You may call `disconnect` from a connected callback.
def financial_float(s, scale_factor=1, typ=float,
                    ignore=FINANCIAL_WHITESPACE,
                    percent_str=PERCENT_SYMBOLS,
                    replace=FINANCIAL_MAPPING,
                    normalize_case=str.lower):
    percent_scale_factor = 1
    if isinstance(s, basestring):
        s = normalize_case(s).strip()
        for i in ignore:
            s = s.replace(normalize_case(i), '')
        s = s.strip()
        for old, new in replace:
            s = s.replace(old, new)
        for p in percent_str:
            if s.endswith(p):
                percent_scale_factor *= 0.01
                s = s[:-len(p)]
    try:
        return (scale_factor if scale_factor < 1 else percent_scale_factor) * typ(float(s))
    except (ValueError, TypeError):
        return s
Strip dollar signs and commas from financial numerical string

Also, convert percentages to fractions/factors (generally between 0 and 1.0)

>>> [financial_float(x) for x in ("12k Flat", "12,000 flat", "20%", "$10,000 Flat", "15K flat", "null", "None", "", None)]
[12000.0, 12000.0, 0.2, 10000.0, 15000.0, 'null', 'none', '', None]
def traverse_until_fixpoint(predicate, tree):
    old_tree = None
    tree = simplify(tree)
    while tree and old_tree != tree:
        old_tree = tree
        tree = tree.traverse(predicate)
        if not tree:
            return None
        tree = simplify(tree)
    return tree
Repeatedly traverses the tree until it is no longer modified.
def fill_transaction_defaults(web3, transaction):
    defaults = {}
    for key, default_getter in TRANSACTION_DEFAULTS.items():
        if key not in transaction:
            if callable(default_getter):
                if web3 is not None:
                    default_val = default_getter(web3, transaction)
                else:
                    raise ValueError("You must specify %s in the transaction" % key)
            else:
                default_val = default_getter
            defaults[key] = default_val
    return merge(defaults, transaction)
if web3 is None, fill as much as possible while offline
def upoint2bddpoint(upoint):
    point = dict()
    for uniqid in upoint[0]:
        point[_VARS[uniqid]] = 0
    for uniqid in upoint[1]:
        point[_VARS[uniqid]] = 1
    return point
Convert an untyped point into a BDD point. .. seealso:: For definitions of points and untyped points, see the :mod:`pyeda.boolalg.boolfunc` module.
def _six_fail_hook(modname):
    attribute_of = modname != "six.moves" and modname.startswith("six.moves")
    if modname != "six.moves" and not attribute_of:
        raise AstroidBuildingError(modname=modname)
    module = AstroidBuilder(MANAGER).string_build(_IMPORTS)
    module.name = "six.moves"
    if attribute_of:
        start_index = len(module.name)
        attribute = modname[start_index:].lstrip(".").replace(".", "_")
        try:
            import_attr = module.getattr(attribute)[0]
        except AttributeInferenceError:
            raise AstroidBuildingError(modname=modname)
        if isinstance(import_attr, nodes.Import):
            submodule = MANAGER.ast_from_module_name(import_attr.names[0][0])
            return submodule
    return module
Fix six.moves imports due to the dynamic nature of this class. Construct a pseudo-module which contains all the necessary imports for six :param modname: Name of failed module :type modname: str :return: An astroid module :rtype: nodes.Module
def get_orgs(self):
    orgs = []
    for resource in self._get_orgs()['resources']:
        orgs.append(resource['entity']['name'])
    return orgs
Returns a flat list of the names of the organizations the user belongs to.
def validate_pro():
    cmd = ['python3', 'validate.py', FLAGS.pro_dataset,
           '--use_tpu',
           '--tpu_name={}'.format(TPU_NAME),
           '--work_dir={}'.format(fsdb.working_dir()),
           '--flagfile=rl_loop/distributed_flags',
           '--validate_name=pro']
    mask_flags.run(cmd)
Validate on professional data.
def _check_for_duplicates(durations, events):
    df = pd.DataFrame({"t": durations, "e": events})
    dup_times = df.loc[df["e"] != 0, "t"].duplicated(keep=False)
    dup_events = df.loc[df["e"] != 0, ["t", "e"]].duplicated(keep=False)
    return (dup_times & (~dup_events)).any()
Checks for duplicated event times in the data set. This is narrowed to detecting duplicated event times where the events are of different types
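A self-contained sketch of the check this helper performs, using made-up durations and event codes (0 meaning censored):

import pandas as pd

durations = [1, 2, 2, 3, 2]
events = [1, 1, 2, 0, 1]          # time 2 occurs with event types 1 and 2

df = pd.DataFrame({"t": durations, "e": events})
dup_times = df.loc[df["e"] != 0, "t"].duplicated(keep=False)
dup_events = df.loc[df["e"] != 0, ["t", "e"]].duplicated(keep=False)
print((dup_times & (~dup_events)).any())   # True: duplicated time with differing event types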
def __copy_extracted(self, path, destination):
    unpacked_dir = self.filename + '.unpacked'
    if not os.path.isdir(unpacked_dir):
        LOGGER.warn(
            'Failed to copy extracted file %s, no extracted dir',
            path
        )
        return
    source_path = os.path.join(unpacked_dir, path)
    if not os.path.exists(source_path):
        LOGGER.warn(
            'Failed to copy extracted file %s, does not exist',
            path
        )
        return
    destination_path = os.path.join(destination, path)
    shutil.copyfile(source_path, destination_path)
Copies a file that was already extracted to the destination directory. Args: path (str): Relative (to the root of the archive) of the file to copy. destination (str): Directory to extract the archive to.
def best_assemblyfile(self):
    for sample in self.metadata:
        try:
            filtered_outputfile = os.path.join(self.path, 'raw_assemblies',
                                               '{}.fasta'.format(sample.name))
            if os.path.isfile(sample.general.assemblyfile):
                size = os.path.getsize(sample.general.assemblyfile)
                if size == 0:
                    sample.general.bestassemblyfile = 'NA'
                else:
                    sample.general.bestassemblyfile = sample.general.assemblyfile
                    shutil.copyfile(sample.general.bestassemblyfile, filtered_outputfile)
            else:
                sample.general.bestassemblyfile = 'NA'
            sample.general.filteredfile = filtered_outputfile
        except AttributeError:
            sample.general.assemblyfile = 'NA'
            sample.general.bestassemblyfile = 'NA'
Determine whether the contigs.fasta output file from the assembler is present. If not, set the .bestassemblyfile attribute to 'NA'.
def qgis_version_detailed():
    version = str(Qgis.QGIS_VERSION_INT)
    return [int(version[0]), int(version[1:3]), int(version[3:])]
Get the detailed version of QGIS. :returns: List containing major, minor and patch. :rtype: list
def stopThread(self):
    if self._thread is not None:
        self.performSelector_onThread_withObject_waitUntilDone_(
            'stopPowerNotificationsThread', self._thread, None, objc.YES)
        self._thread = None
Stops spawned NSThread.
def inspect_streamer(self, index):
    if index >= len(self.graph.streamers):
        return [_pack_sgerror(SensorGraphError.STREAMER_NOT_ALLOCATED), b'\0'*14]
    return [Error.NO_ERROR,
            streamer_descriptor.create_binary_descriptor(self.graph.streamers[index])]
Inspect the streamer at the given index.
def augment_excmessage(prefix=None, suffix=None) -> NoReturn:
    exc_old = sys.exc_info()[1]
    message = str(exc_old)
    if prefix is not None:
        message = f'{prefix}, the following error occurred: {message}'
    if suffix is not None:
        message = f'{message} {suffix}'
    try:
        exc_new = type(exc_old)(message)
    except BaseException:
        exc_name = str(type(exc_old)).split("'")[1]
        exc_type = type(exc_name, (BaseException,), {})
        exc_type.__module = exc_old.__module__
        raise exc_type(message) from exc_old
    raise exc_new from exc_old
Augment an exception message with additional information while keeping
the original traceback.

You can prefix and/or suffix text.  If you prefix something (which happens
much more often in the HydPy framework), the sub-clause ', the following
error occurred:' is automatically included:

>>> from hydpy.core import objecttools
>>> import textwrap
>>> try:
...     1 + '1'
... except BaseException:
...     prefix = 'While showing how prefixing works'
...     suffix = '(This is a final remark.)'
...     objecttools.augment_excmessage(prefix, suffix)
Traceback (most recent call last):
...
TypeError: While showing how prefixing works, the following error \
occurred: unsupported operand type(s) for +: 'int' and 'str' \
(This is a final remark.)

Some exceptions derived by site-packages do not support exception chaining
due to requiring multiple initialisation arguments.  In such cases,
|augment_excmessage| generates an exception with the same name on the fly
and raises it afterwards, which is pointed out by the exception name
mentioning to the "objecttools" module:

>>> class WrongError(BaseException):
...     def __init__(self, arg1, arg2):
...         pass
>>> try:
...     raise WrongError('info 1', 'info 2')
... except BaseException:
...     objecttools.augment_excmessage(
...         'While showing how prefixing works')
Traceback (most recent call last):
...
hydpy.core.objecttools.hydpy.core.objecttools.WrongError: While showing \
how prefixing works, the following error occurred: ('info 1', 'info 2')
def _sha1_for_file(filename):
    with open(filename, "rb") as fileobj:
        contents = fileobj.read()
        return hashlib.sha1(contents).hexdigest()
Return sha1 for contents of filename.
def single_device_data_message(self, registration_id=None, condition=None,
                               collapse_key=None, delay_while_idle=False,
                               time_to_live=None, restricted_package_name=None,
                               low_priority=False, dry_run=False, data_message=None,
                               content_available=None, android_channel_id=None,
                               timeout=5, extra_notification_kwargs=None,
                               extra_kwargs={}):
    if registration_id is None:
        raise InvalidDataError('Invalid registration ID')
    payload = self.parse_payload(
        registration_ids=[registration_id],
        condition=condition,
        collapse_key=collapse_key,
        delay_while_idle=delay_while_idle,
        time_to_live=time_to_live,
        restricted_package_name=restricted_package_name,
        low_priority=low_priority,
        dry_run=dry_run,
        data_message=data_message,
        content_available=content_available,
        remove_notification=True,
        android_channel_id=android_channel_id,
        extra_notification_kwargs=extra_notification_kwargs,
        **extra_kwargs
    )
    self.send_request([payload], timeout)
    return self.parse_responses()
Send push message to a single device Args: registration_id (list, optional): FCM device registration ID condition (str, optiona): Topic condition to deliver messages to collapse_key (str, optional): Identifier for a group of messages that can be collapsed so that only the last message gets sent when delivery can be resumed. Defaults to `None`. delay_while_idle (bool, optional): deprecated time_to_live (int, optional): How long (in seconds) the message should be kept in FCM storage if the device is offline. The maximum time to live supported is 4 weeks. Defaults to `None` which uses the FCM default of 4 weeks. restricted_package_name (str, optional): Name of package low_priority (bool, optional): Whether to send notification with the low priority flag. Defaults to `False`. dry_run (bool, optional): If `True` no message will be sent but request will be tested. data_message (dict, optional): Custom key-value pairs content_available (bool, optional): Inactive client app is awoken android_channel_id (str, optional): Starting in Android 8.0 (API level 26), all notifications must be assigned to a channel. For each channel, you can set the visual and auditory behavior that is applied to all notifications in that channel. Then, users can change these settings and decide which notification channels from your app should be intrusive or visible at all. timeout (int, optional): set time limit for the request extra_notification_kwargs (dict, optional): More notification keyword arguments extra_kwargs (dict, optional): More keyword arguments Returns: dict: Response from FCM server (`multicast_id`, `success`, `failure`, `canonical_ids`, `results`) Raises: AuthenticationError: If :attr:`api_key` is not set or provided or there is an error authenticating the sender. FCMServerError: Internal server error or timeout error on Firebase cloud messaging server InvalidDataError: Invalid data provided InternalPackageError: Mostly from changes in the response of FCM, contact the project owner to resolve the issue
def submit(self, command="", blocksize=1, job_name="parsl.auto"):
    instance, name = self.create_instance(command=command)
    self.provisioned_blocks += 1
    self.resources[name] = {"job_id": name,
                            "status": translate_table[instance['status']]}
    return name
The submit method takes the command string to be executed upon
instantiation of a resource, most often to start a pilot.

Args :
     - command (str) : The bash command string to be executed.
     - blocksize (int) : Blocksize to be requested

KWargs:
     - job_name (str) : Human friendly name to be assigned to the job request

Returns:
     - A job identifier, this could be an integer, string etc

Raises:
     - ExecutionProviderException or its subclasses
def fillna(series_or_arr, missing_value=0.0):
    if pandas.notnull(missing_value):
        if isinstance(series_or_arr, (numpy.ndarray)):
            series_or_arr[numpy.isnan(series_or_arr)] = missing_value
        else:
            series_or_arr.fillna(missing_value, inplace=True)
    return series_or_arr
Fill missing values in pandas objects and numpy arrays.

Arguments
---------
series_or_arr : pandas.Series, numpy.ndarray
    The numpy array or pandas series for which the missing values
    need to be replaced.
missing_value : float, int, str
    The value to replace the missing value with. Default 0.0.

Returns
-------
pandas.Series, numpy.ndarray
    The numpy array or pandas series with the missing values filled.
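A small usage sketch, assuming the fillna helper above is importable in the current scope:

import numpy as np
import pandas as pd

arr = np.array([1.0, np.nan, 3.0])
print(fillna(arr, missing_value=0.0))         # [1. 0. 3.]

s = pd.Series([1.0, None, 3.0])
print(fillna(s, missing_value=-1).tolist())   # [1.0, -1.0, 3.0]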
def _effective_view_filter(self):
    if self._effective_view == EFFECTIVE:
        now = datetime.datetime.utcnow()
        # MongoDB comparison operators use a single leading dollar sign.
        return {'startDate': {'$lte': now}, 'endDate': {'$gte': now}}
    return {}
Returns the mongodb relationship filter for effective views
def save(self):
    if not self.is_valid():
        return self._errors
    _new = self.is_new()
    if _new:
        self._initialize_id()
    with Mutex(self):
        self._write(_new)
    return True
Saves the instance to the datastore.
def ustr(obj):
    if sys.version_info[0] == 2:
        if type(obj) in [str, basestring]:
            return unicode(obj, DEFAULT_ENCODING)
        else:
            return unicode(obj)
    else:
        if type(obj) in [bytes]:
            return obj.decode(DEFAULT_ENCODING)
        else:
            return str(obj)
Python 2 and 3 utility method that converts an obj to unicode in python 2 and to a str object in python 3
def delete_resourcegroupitems(scenario_id, item_ids, **kwargs):
    user_id = int(kwargs.get('user_id'))
    _get_scenario(scenario_id, user_id)
    for item_id in item_ids:
        rgi = db.DBSession.query(ResourceGroupItem).\
            filter(ResourceGroupItem.id == item_id).one()
        db.DBSession.delete(rgi)
    db.DBSession.flush()
Delete specified items in a group, in a scenario.
def write_tree_newick(self, filename, hide_rooted_prefix=False):
    if not isinstance(filename, str):
        raise TypeError("filename must be a str")
    treestr = self.newick()
    if hide_rooted_prefix:
        if treestr.startswith('[&R]'):
            treestr = treestr[4:].strip()
        else:
            warn("Specified hide_rooted_prefix, but tree was not rooted")
    if filename.lower().endswith('.gz'):
        f = gopen(expanduser(filename), 'wb', 9)
        f.write(treestr.encode())
        f.close()
    else:
        f = open(expanduser(filename), 'w')
        f.write(treestr)
        f.close()
Write this ``Tree`` to a Newick file

Args:
    ``filename`` (``str``): Path to desired output file (plain-text or gzipped)
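A hedged usage sketch, assuming this method belongs to TreeSwift's Tree class:

import treeswift

tree = treeswift.read_tree_newick("((A,B),C);")
tree.write_tree_newick("out.nwk")       # plain-text Newick
tree.write_tree_newick("out.nwk.gz")    # gzipped, inferred from the .gz extension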
def get_schema_dir(db_version=1):
    v = str(db_version)
    return os.path.join(_top_dir, '..', 'schemata', 'versions', v)
Get path to directory with schemata. :param db_version: Version of the database :type db_version: int :return: Path :rtype: str
def copy_graph(subject, existing_graph):
    new_graph = rdflib.Graph()
    for predicate, object_ in existing_graph.predicate_objects():
        new_graph.add((subject, predicate, object_))
    return new_graph
Function takes a subject and an existing graph, returns a new graph with
all predicates and objects of the existing graph copied to the new_graph
with subject as the new subject.

Args:
    subject (rdflib.URIRef): A URIRef subject
    existing_graph (rdflib.Graph): A rdflib.Graph

Returns:
    rdflib.Graph
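A minimal usage sketch, assuming copy_graph above is in scope and rdflib is installed:

import rdflib

existing = rdflib.Graph()
old_subject = rdflib.URIRef("http://example.org/old")
existing.add((old_subject, rdflib.RDFS.label, rdflib.Literal("example")))

new_subject = rdflib.URIRef("http://example.org/new")
copied = copy_graph(new_subject, existing)
print(len(copied))   # 1 -- same predicate/object, re-attached to the new subject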
def temporal_segmentation(segments, min_time):
    final_segments = []
    for segment in segments:
        final_segments.append([])
        for point in segment:
            if point.dt > min_time:
                final_segments.append([])
            final_segments[-1].append(point)
    return final_segments
Segments based on time distant points

Args:
    segments (:obj:`list` of :obj:`list` of :obj:`Point`): segment points
    min_time (int): minimum required time for segmentation
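A self-contained sketch of the behaviour; the Point type below is a stand-in namedtuple, since the library's own Point class is not shown here:

from collections import namedtuple

# Stand-in for the library's Point; only the .dt attribute (seconds since
# the previous point) matters for this illustration.
Point = namedtuple("Point", "dt")

track = [[Point(0), Point(5), Point(400), Point(3)]]   # one input segment
segments = temporal_segmentation(track, min_time=120)
print([len(seg) for seg in segments])                  # [2, 2] -- split at the 400 s gap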
def xml_entity_escape(data):
    data = data.replace("&", "&amp;")
    data = data.replace(">", "&gt;")
    data = data.replace("<", "&lt;")
    return data
replace special characters with their XML entity versions
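For example, assuming the function above is in scope:

print(xml_entity_escape('a < b & "c"'))   # a &lt; b &amp; "c"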
def _apply_mapping(self, mapping):
    self._POST["P0100LDR__"] = mapping[0]
    self._POST["P0200FMT__"] = mapping[1]
    self._POST["P0300BAS__a"] = mapping[2]
    self._POST["P07022001_b"] = mapping[3]
    self._POST["P1501IST1_a"] = mapping[4]
Map some case specific data to the fields in internal dictionary.
def mount(dev, mountpoint, flags='', log=None):
    ensureDirectory(mountpoint)
    systemCall('mount %s %s %s' % (flags, dev, mountpoint), log=log)
Mount the given dev to the given mountpoint by using the given flags
def unpause(self):
    self._pause_level -= 1
    if not self._pause_level:
        self._offset = self._paused_time - self._clock()
Unpause the animation.
async def read(self) -> bytes:
    if self._read_bytes is None:
        body = bytearray()
        while True:
            chunk = await self._payload.readany()
            body.extend(chunk)
            if self._client_max_size:
                body_size = len(body)
                if body_size >= self._client_max_size:
                    raise HTTPRequestEntityTooLarge(
                        max_size=self._client_max_size,
                        actual_size=body_size
                    )
            if not chunk:
                break
        self._read_bytes = bytes(body)
    return self._read_bytes
Read request body if present. Returns bytes object with full request content.
def to_pandas(self):
    if not self.is_raw():
        raise ValueError('Cannot convert to pandas Index if not evaluated.')
    from pandas import Index as PandasIndex
    return PandasIndex(self.values, self.dtype, name=self.name)
Convert to pandas Index. Returns ------- pandas.base.Index
def is_builtin(text):
    from spyder.py3compat import builtins
    return text in [str(name) for name in dir(builtins)
                    if not name.startswith('_')]
Test if passed string is the name of a Python builtin object
def read_creds_from_environment_variables():
    creds = init_creds()
    if 'AWS_ACCESS_KEY_ID' in os.environ and 'AWS_SECRET_ACCESS_KEY' in os.environ:
        creds['AccessKeyId'] = os.environ['AWS_ACCESS_KEY_ID']
        creds['SecretAccessKey'] = os.environ['AWS_SECRET_ACCESS_KEY']
        if 'AWS_SESSION_TOKEN' in os.environ:
            creds['SessionToken'] = os.environ['AWS_SESSION_TOKEN']
    return creds
Read credentials from environment variables :return:
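A usage sketch, assuming the helper above (and its init_creds dependency) is importable; the credential values below are placeholders:

import os

os.environ["AWS_ACCESS_KEY_ID"] = "AKIAEXAMPLE"
os.environ["AWS_SECRET_ACCESS_KEY"] = "example-secret"
creds = read_creds_from_environment_variables()
print(creds["AccessKeyId"])   # AKIAEXAMPLE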
def set_transform_interface_params(spec, input_features, output_features,
                                   are_optional=False):
    input_features = _fm.process_or_validate_features(input_features)
    output_features = _fm.process_or_validate_features(output_features)
    for (fname, ftype) in input_features:
        input_ = spec.description.input.add()
        input_.name = fname
        datatypes._set_datatype(input_.type, ftype)
        if are_optional:
            input_.type.isOptional = are_optional
    for (fname, ftype) in output_features:
        output_ = spec.description.output.add()
        output_.name = fname
        datatypes._set_datatype(output_.type, ftype)
    return spec
Common utilities to set transform interface params.
def get_specific_nodes(self, node, names):
    nodes = [(x.tagName, x) for x in node.childNodes
             if x.nodeType == x.ELEMENT_NODE and x.tagName in names]
    return dict(nodes)
Given a node and a sequence of strings in `names`, return a dictionary with the names as keys and, as values, the child `ELEMENT_NODE`s whose `tagName` equals that name.
def generate_manifest(self, progressbar=None):
    items = dict()
    if progressbar:
        progressbar.label = "Generating manifest"
    for handle in self._storage_broker.iter_item_handles():
        key = dtoolcore.utils.generate_identifier(handle)
        value = self._storage_broker.item_properties(handle)
        items[key] = value
        if progressbar:
            progressbar.item_show_func = lambda x: handle
            progressbar.update(1)
    manifest = {
        "items": items,
        "dtoolcore_version": __version__,
        "hash_function": self._storage_broker.hasher.name
    }
    return manifest
Return manifest generated from knowledge about contents.
def _get_params(self):
    params = {'accountNumber': self._service.accountNumber}

    for key, val in self.__dict__.iteritems():
        if key in self.field_order:
            if isinstance(val, str,):
                val = val.decode('utf8')
            params[key] = val

    for key in self.field_order:
        if key not in params:
            params[key] = u''

    def order_keys(k):
        if k[0] in self.field_order:
            return self.field_order.index(k[0])
        return len(self.field_order) + 1

    params = OrderedDict(sorted(params.items(), key=order_keys))

    if hasattr(self, 'hash') and self.hash is not None:
        params['hash'] = self.hash

    return params
Generate SOAP parameters.
def _verify_cert(self, sock: ssl.SSLSocket):
    verify_mode = self._ssl_context.verify_mode
    assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL), \
        'Unknown verify mode {}'.format(verify_mode)
    if verify_mode == ssl.CERT_NONE:
        return
    cert = sock.getpeercert()
    if not cert and verify_mode == ssl.CERT_OPTIONAL:
        return
    if not cert:
        raise SSLVerificationError('No SSL certificate given')
    try:
        ssl.match_hostname(cert, self._hostname)
    except ssl.CertificateError as error:
        raise SSLVerificationError('Invalid SSL certificate') from error
Check if certificate matches hostname.
def fetch(self):
    self.retrieveVals()
    for parent_name in self._graphNames:
        graph = self._graphDict[parent_name]
        if self.isMultigraph:
            print "multigraph %s" % self._getMultigraphID(parent_name)
        print self._formatVals(graph.getVals())
        print
    if (self.isMultigraph and self._nestedGraphs
            and self._subgraphDict and self._subgraphNames):
        for (parent_name, subgraph_names) in self._subgraphNames.iteritems():
            for graph_name in subgraph_names:
                graph = self._subgraphDict[parent_name][graph_name]
                print "multigraph %s" % self.getMultigraphID(parent_name, graph_name)
                print self._formatVals(graph.getVals())
                print
    return True
Implements Munin Plugin Fetch Option. Prints out measured values.
def combine_first(self, other):
    new_index = self.index.union(other.index)
    this = self.reindex(new_index, copy=False)
    other = other.reindex(new_index, copy=False)
    if is_datetimelike(this) and not is_datetimelike(other):
        other = to_datetime(other)
    return this.where(notna(this), other)
Combine Series values, choosing the calling Series's values first.

Parameters
----------
other : Series
    The value(s) to be combined with the `Series`.

Returns
-------
Series
    The result of combining the Series with the other object.

See Also
--------
Series.combine : Perform elementwise operation on two Series
    using a given function.

Notes
-----
Result index will be the union of the two indexes.

Examples
--------
>>> s1 = pd.Series([1, np.nan])
>>> s2 = pd.Series([3, 4])
>>> s1.combine_first(s2)
0    1.0
1    4.0
dtype: float64
def batch(self, timelimit=None):
    from .launcher import BatchLauncher
    prev_dir = os.path.join(*self.workdir.split(os.path.sep)[:-1])
    prev_dir = os.path.join(os.path.sep, prev_dir)
    workdir = os.path.join(prev_dir, os.path.basename(self.workdir) + "_batch")
    return BatchLauncher(workdir=workdir, flows=self).submit(timelimit=timelimit)
Run the flow in batch mode, return exit status of the job script. Requires a manager.yml file and a batch_adapter adapter. Args: timelimit: Time limit (int with seconds or string with time given with the slurm convention: "days-hours:minutes:seconds"). If timelimit is None, the default value specified in the `batch_adapter` entry of `manager.yml` is used.
def get_monitor_pos(monitor):
    xpos_value = ctypes.c_int(0)
    xpos = ctypes.pointer(xpos_value)
    ypos_value = ctypes.c_int(0)
    ypos = ctypes.pointer(ypos_value)
    _glfw.glfwGetMonitorPos(monitor, xpos, ypos)
    return xpos_value.value, ypos_value.value
Returns the position of the monitor's viewport on the virtual screen.

Wrapper for:
    void glfwGetMonitorPos(GLFWmonitor* monitor, int* xpos, int* ypos);
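A hedged usage sketch with pyGLFW-style bindings (the exact module that exposes this wrapper may differ):

import glfw

glfw.init()
for monitor in glfw.get_monitors():
    print(glfw.get_monitor_pos(monitor))   # e.g. (0, 0) for the primary monitor
glfw.terminate()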
def _interface_to_service(iface):
    for _service in _get_services():
        service_info = pyconnman.ConnService(os.path.join(SERVICE_PATH, _service))
        if service_info.get_property('Ethernet')['Interface'] == iface:
            return _service
    return None
Returns the corresponding service for the given interface if one exists, otherwise returns None.
def send_mails(cls):
    if settings.CAS_NEW_VERSION_EMAIL_WARNING and settings.ADMINS:
        try:
            obj = cls.objects.get()
        except cls.DoesNotExist:
            obj = NewVersionWarning.objects.create(version=VERSION)
        LAST_VERSION = utils.last_version()
        if LAST_VERSION is not None and LAST_VERSION != obj.version:
            if utils.decode_version(VERSION) < utils.decode_version(LAST_VERSION):
                try:
                    send_mail(
                        (
                            '%sA new version of django-cas-server is available'
                        ) % settings.EMAIL_SUBJECT_PREFIX,
                        # The message body is a multi-line template formatted with
                        # (VERSION, LAST_VERSION); its literal is missing from this snippet.
                        u .strip() % (VERSION, LAST_VERSION),
                        settings.SERVER_EMAIL,
                        ["%s <%s>" % admin for admin in settings.ADMINS],
                        fail_silently=False,
                    )
                    obj.version = LAST_VERSION
                    obj.save()
                except smtplib.SMTPException as error:
                    logger.error("Unable to send new version mail: %s" % error)
For each new django-cas-server version, if the current instance is not up to date send one mail to ``settings.ADMINS``.
def get_logs(self, login=None, **kwargs):
    _login = kwargs.get(
        'login',
        login
    )
    log_events_url = GSA_LOGS_URL.format(login=_login)
    return self._request_api(url=log_events_url).json()
Get a user's logs. :param str login: User's login (Default: self._login) :return: JSON
def inet_ntop(af, addr):
    addr = bytes_encode(addr)
    try:
        return socket.inet_ntop(af, addr)
    except AttributeError:
        try:
            return _INET_NTOP[af](addr)
        except KeyError:
            raise ValueError("unknown address family %d" % af)
Convert an IP address from binary form into text representation.
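A quick sanity check, assuming the wrapper above is in scope:

import socket

print(inet_ntop(socket.AF_INET, b"\x7f\x00\x00\x01"))      # 127.0.0.1
print(inet_ntop(socket.AF_INET6, b"\x00" * 15 + b"\x01"))  # ::1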
def tsv_import(self, xsv_source, encoding="UTF-8", transforms=None,
               row_class=DataObject, **kwargs):
    return self._xsv_import(xsv_source, encoding, transforms=transforms,
                            delimiter="\t", row_class=row_class, **kwargs)
Imports the contents of a tab-separated data file into this table. @param xsv_source: tab-separated data file - if a string is given, the file with that name will be opened, read, and closed; if a file object is given, then that object will be read as-is, and left for the caller to be closed. @type xsv_source: string or file @param transforms: dict of functions by attribute name; if given, each attribute will be transformed using the corresponding transform; if there is no matching transform, the attribute will be read as a string (default); the transform function can also be defined as a (function, default-value) tuple; if there is an Exception raised by the transform function, then the attribute will be set to the given default value @type transforms: dict (optional)
def show(self):
    with_matplotlib = True
    try:
        import matplotlib.pyplot as plt
    except RuntimeError:
        import skimage.io as io
        with_matplotlib = False

    if with_matplotlib:
        equalised_img = self.equalise()
        _, ax = plt.subplots()
        ax.imshow(equalised_img, cmap='gray')

        import random
        for contour_set in self.as_contours.itervalues():
            r, g, b = random.random(), random.random(), random.random()
            [ax.plot(contour[:, 1], contour[:, 0], linewidth=2, color=(r, g, b, 1))
             for contour in contour_set]

        ax.axis('image')
        ax.set_xticks([])
        ax.set_yticks([])
        plt.show()
    else:
        io.imshow(self.equalise())
        io.show()
Display the image
def find_outer_region(im, r=0):
    if r == 0:
        dt = spim.distance_transform_edt(input=im)
        r = int(sp.amax(dt)) * 2
    im_padded = sp.pad(array=im, pad_width=r, mode='constant',
                       constant_values=True)
    dt = spim.distance_transform_edt(input=im_padded)
    seeds = (dt >= r) + get_border(shape=im_padded.shape)
    labels = spim.label(seeds)[0]
    mask = labels == 1
    dt = spim.distance_transform_edt(~mask)
    outer_region = dt < r
    outer_region = extract_subsection(im=outer_region, shape=im.shape)
    return outer_region
Finds regions of the image that are outside of the solid matrix.

This function uses the rolling ball method to define where the outer
region ends and the void space begins.

This function is particularly useful for samples that do not fill the
entire rectangular image, such as cylindrical cores or samples with
non-parallel faces.

Parameters
----------
im : ND-array
    Image of the porous material with 1's for void and 0's for solid
r : scalar
    The radius of the rolling ball to use.  If not specified then a value
    is calculated as twice maximum of the distance transform.  The image
    size is padded by this amount in all directions, so the image can
    become quite large and unwieldy if too large a value is given.

Returns
-------
image : ND-array
    A boolean mask the same shape as ``im``, containing True in all
    voxels identified as *outside* the sample.
def to_valid_state_vector(state_rep: Union[int, np.ndarray],
                          num_qubits: int,
                          dtype: Type[np.number] = np.complex64) -> np.ndarray:
    if isinstance(state_rep, np.ndarray):
        if len(state_rep) != 2 ** num_qubits:
            raise ValueError(
                'initial state was of size {} '
                'but expected state for {} qubits'.format(
                    len(state_rep), num_qubits))
        state = state_rep
    elif isinstance(state_rep, int):
        if state_rep < 0:
            raise ValueError('initial_state must be positive')
        elif state_rep >= 2 ** num_qubits:
            raise ValueError(
                'initial state was {} but expected state for {} qubits'.format(
                    state_rep, num_qubits))
        else:
            state = np.zeros(2 ** num_qubits, dtype=dtype)
            state[state_rep] = 1.0
    else:
        raise TypeError('initial_state was not of type int or ndarray')
    validate_normalized_state(state, num_qubits, dtype)
    return state
Verifies the state_rep is valid and converts it to ndarray form. This method is used to support passing in an integer representing a computational basis state or a full wave function as a representation of a state. Args: state_rep: If an int, the state returned is the state corresponding to a computational basis state. If an numpy array this is the full wave function. Both of these are validated for the given number of qubits, and the state must be properly normalized and of the appropriate dtype. num_qubits: The number of qubits for the state. The state_rep must be valid for this number of qubits. dtype: The numpy dtype of the state, will be used when creating the state for a computational basis state, or validated against if state_rep is a numpy array. Returns: A numpy ndarray corresponding to the state on the given number of qubits. Raises: ValueError if the state is not valid.
def Update(self, attribute=None):
    currently_running = self.Get(self.Schema.CONTENT_LOCK)
    if currently_running:
        flow_obj = aff4.FACTORY.Open(currently_running, token=self.token)
        if flow_obj and flow_obj.GetRunner().IsRunning():
            return

    client_id = self.urn.Path().split("/", 2)[1]
    pathspec = self.Get(self.Schema.STAT).pathspec
    flow_urn = flow.StartAFF4Flow(
        client_id=client_id,
        flow_name="MultiGetFile",
        token=self.token,
        pathspecs=[pathspec])
    self.Set(self.Schema.CONTENT_LOCK(flow_urn))
    self.Close()
    return flow_urn
Update an attribute from the client.
def alter_object(self, obj):
    for attname, field, replacer in self.replacers:
        currentval = getattr(obj, attname)
        replacement = replacer(self, obj, field, currentval)
        setattr(obj, attname, replacement)
Alters all the attributes in an individual object. If it returns False, the object will not be saved
def get_page_square_dpi(pageinfo, options):
    "Get the DPI when we require xres == yres, scaled to physical units"
    xres = pageinfo.xres or 0
    yres = pageinfo.yres or 0
    userunit = pageinfo.userunit or 1
    return float(
        max(
            (xres * userunit) or VECTOR_PAGE_DPI,
            (yres * userunit) or VECTOR_PAGE_DPI,
            VECTOR_PAGE_DPI if pageinfo.has_vector else 0,
            options.oversample or 0,
        )
    )
Get the DPI when we require xres == yres, scaled to physical units
def render(file):
    with file.open() as fp:
        encoding = detect_encoding(fp, default='utf-8')
        result = mistune.markdown(fp.read().decode(encoding))
    return result
Render HTML from Markdown file content.
def one_hot_encoding(labels, num_classes, scope=None):
    with tf.name_scope(scope, 'OneHotEncoding', [labels]):
        batch_size = labels.get_shape()[0]
        indices = tf.expand_dims(tf.range(0, batch_size), 1)
        labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
        concated = tf.concat(axis=1, values=[indices, labels])
        onehot_labels = tf.sparse_to_dense(
            concated, tf.stack([batch_size, num_classes]), 1.0, 0.0)
        onehot_labels.set_shape([batch_size, num_classes])
        return onehot_labels
Transform numeric labels into onehot_labels. Args: labels: [batch_size] target labels. num_classes: total number of classes. scope: Optional scope for name_scope. Returns: one hot encoding of the labels.
def _unpad(self, a, axis, out):
    assert a.shape[axis] == self.N
    Npad = self.N - self.Nin
    if out:
        _Npad, Npad_ = Npad - Npad//2, Npad//2
    else:
        _Npad, Npad_ = Npad//2, Npad - Npad//2
    return np.take(a, range(_Npad, self.N - Npad_), axis=axis)
Undo padding in an array.

Parameters
----------
a : (..., N, ...) ndarray
    array to be trimmed to size `Nin`
axis : int
    axis along which to unpad
out : bool
    trim the output if True, otherwise the input; the two cases have
    their left and right pad sizes reversed
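A standalone sketch of the trimming rule for the out=False case, with made-up sizes:

import numpy as np

N, Nin = 8, 5                     # padded and target lengths
a = np.arange(N)
Npad = N - Nin
left, right = Npad // 2, Npad - Npad // 2
print(np.take(a, range(left, N - right)))   # [1 2 3 4 5]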
def invalidate(self, assoc_handle, dumb):
    if dumb:
        key = self._dumb_key
    else:
        key = self._normal_key
    self.store.removeAssociation(key, assoc_handle)
Invalidates the association with the given handle. @type assoc_handle: str @param dumb: Is this association used with dumb mode? @type dumb: bool
def getOldestRequestTime(self):
    bldrid = yield self.getBuilderId()
    unclaimed = yield self.master.data.get(
        ('builders', bldrid, 'buildrequests'),
        [resultspec.Filter('claimed', 'eq', [False])],
        order=['submitted_at'], limit=1)
    if unclaimed:
        return unclaimed[0]['submitted_at']
Returns the submitted_at of the oldest unclaimed build request for this builder, or None if there are no build requests. @returns: datetime instance or None, via Deferred
def parse_json_feed_file(filename: str) -> JSONFeed:
    with open(filename) as f:
        try:
            root = json.load(f)
        except json.decoder.JSONDecodeError:
            raise FeedJSONError('Not a valid JSON document')
    return parse_json_feed(root)
Parse a JSON feed from a local json file.
def autodiscover():
    url_conf = getattr(settings, 'ROOT_URLCONF', ())
    resolver = urlresolvers.get_resolver(url_conf)
    urlpatterns = resolver.url_patterns
    permissions = generate_permissions(urlpatterns)
    refresh_permissions(permissions)
Autodiscover for urls.py
def evaluate_emb(emb, labels):
    d_mat = get_distance_matrix(emb)
    d_mat = d_mat.asnumpy()
    labels = labels.asnumpy()

    names = []
    accs = []
    for k in [1, 2, 4, 8, 16]:
        names.append('Recall@%d' % k)
        correct, cnt = 0.0, 0.0
        for i in range(emb.shape[0]):
            d_mat[i, i] = 1e10
            nns = argpartition(d_mat[i], k)[:k]
            if any(labels[i] == labels[nn] for nn in nns):
                correct += 1
            cnt += 1
        accs.append(correct/cnt)
    return names, accs
Evaluate embeddings based on Recall@k.
def set_project_dir(self, directory):
    if directory is not None:
        self.treewidget.set_root_path(osp.dirname(directory))
        self.treewidget.set_folder_names([osp.basename(directory)])
    self.treewidget.setup_project_view()
    try:
        self.treewidget.setExpanded(self.treewidget.get_index(directory), True)
    except TypeError:
        pass
Set the project directory
def configure_environ(dsn_env_name='PROM_DSN', connection_class=DsnConnection):
    inters = []
    cs = dsnparse.parse_environs(dsn_env_name, parse_class=connection_class)
    for c in cs:
        inter = c.interface
        set_interface(inter, c.name)
        inters.append(inter)
    return inters
configure interfaces based on environment variables by default, when prom is imported, it will look for PROM_DSN, and PROM_DSN_N (where N is 1 through infinity) in the environment, if it finds them, it will assume they are dsn urls that prom understands and will configure db connections with them. If you don't want this behavior (ie, you want to configure prom manually) then just make sure you don't have any environment variables with matching names The num checks (eg PROM_DSN_1, PROM_DSN_2) go in order, so you can't do PROM_DSN_1, PROM_DSN_3, because it will fail on _2 and move on, so make sure your num dsns are in order (eg, 1, 2, 3, ...) example -- export PROM_DSN_1=some.Interface://host:port/dbname#i1 export PROM_DSN_2=some.Interface://host2:port/dbname2#i2 $ python >>> import prom >>> print prom.interfaces # prints a dict with interfaces i1 and i2 keys :param dsn_env_name: string, the name of the environment variables
def _get_full_path(self, path, environ):
    if path.startswith('/'):
        path = environ.get('SCRIPT_NAME', '') + path
    return path
Return the full path to ``path`` by prepending the SCRIPT_NAME. If ``path`` is a URL, do nothing.
def GetFeeds(client):
    feed_service = client.GetService('FeedService', 'v201809')

    feeds = []
    more_pages = True
    selector = {
        'fields': ['Id', 'Name', 'Attributes'],
        'predicates': [
            {
                'field': 'Origin',
                'operator': 'EQUALS',
                'values': ['USER']
            },
            {
                'field': 'FeedStatus',
                'operator': 'EQUALS',
                'values': ['ENABLED']
            }
        ],
        'paging': {
            'startIndex': 0,
            'numberResults': PAGE_SIZE
        }
    }

    while more_pages:
        page = feed_service.get(selector)
        if 'entries' in page:
            feeds.extend(page['entries'])
        selector['paging']['startIndex'] += PAGE_SIZE
        more_pages = selector['paging']['startIndex'] < int(page['totalNumEntries'])

    return feeds
Returns a list of all enabled Feeds. Args: client: an AdWordsClient instance. Returns: A list containing all enabled Feeds.
def update_volume(self, data):
    self._client['config']['volume'] = data['volume']
    _LOGGER.info('updated volume on %s', self.friendly_name)
    self._server.group(self.group.identifier).callback()
    self.callback()
Update volume.
def getResponse(self, http_request, request):
    response = remoting.Envelope(request.amfVersion)
    for name, message in request:
        http_request.amf_request = message
        processor = self.getProcessor(message)
        response[name] = processor(message, http_request=http_request)
    return response
Processes the AMF request, returning an AMF response. @param http_request: The underlying HTTP Request. @type http_request: U{HTTPRequest<http://docs.djangoproject.com /en/dev/ref/request-response/#httprequest-objects>} @param request: The AMF Request. @type request: L{Envelope<pyamf.remoting.Envelope>} @rtype: L{Envelope<pyamf.remoting.Envelope>}
def sayHello(self, name="Not given", message="nothing"):
    print(
        "Python.sayHello called by: {0} "
        "with message: '{1}'".format(name, message)
    )
    return (
        "PythonSync says: Howdy {0} "
        "that's a nice runtime you got there".format(name)
    )
Synchronous implementation of IHello.sayHello synchronous method. The remote calling thread will be blocked until this is executed and responds.
def _consolidate(blocks):
    gkey = lambda x: x._consolidate_key
    grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)

    new_blocks = []
    for (_can_consolidate, dtype), group_blocks in grouper:
        merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype,
                                      _can_consolidate=_can_consolidate)
        new_blocks = _extend_blocks(merged_blocks, new_blocks)
    return new_blocks
Merge blocks having same dtype, exclude non-consolidating blocks
def add_state_machine(widget, event=None):
    logger.debug("Creating new state-machine...")
    root_state = HierarchyState("new root state")
    state_machine = StateMachine(root_state)
    rafcon.core.singleton.state_machine_manager.add_state_machine(state_machine)
Create a new state-machine when the user clicks on the '+' next to the tabs
def _get_ipv6addrs(self):
    addrs = self._get_addrs()
    ipv6addrs = addrs.get(netifaces.AF_INET6)
    if not ipv6addrs:
        return {}
    return ipv6addrs[0]
Returns the IPv6 addresses associated with this NIC. If no IPv6 addresses are used, empty dict is returned.
def peek_pointers_in_registers(self, peekSize=16, context=None):
    peekable_registers = (
        'Eax', 'Ebx', 'Ecx', 'Edx', 'Esi', 'Edi', 'Ebp'
    )
    if not context:
        context = self.get_context(win32.CONTEXT_CONTROL | \
                                   win32.CONTEXT_INTEGER)
    aProcess = self.get_process()
    data = dict()
    for (reg_name, reg_value) in compat.iteritems(context):
        if reg_name not in peekable_registers:
            continue
        reg_data = aProcess.peek(reg_value, peekSize)
        if reg_data:
            data[reg_name] = reg_data
    return data
Tries to guess which values in the registers are valid pointers, and reads some data from them. @type peekSize: int @param peekSize: Number of bytes to read from each pointer found. @type context: dict( str S{->} int ) @param context: (Optional) Dictionary mapping register names to their values. If not given, the current thread context will be used. @rtype: dict( str S{->} str ) @return: Dictionary mapping register names to the data they point to.
def get_form_kwargs(self, **kwargs):
    kwargs = super(ClassRegistrationView, self).get_form_kwargs(**kwargs)
    kwargs['user'] = self.request.user if hasattr(self.request, 'user') else None

    listing = self.get_listing()
    kwargs.update({
        'openEvents': listing['openEvents'],
        'closedEvents': listing['closedEvents'],
    })
    return kwargs
Tell the form which fields to render
def add_node(self, id, label=None, type='CLASS', meta=None):
    g = self.get_graph()
    if meta is None:
        meta = {}
    g.add_node(id, label=label, type=type, meta=meta)
Add a new node to the ontology
def _serialize(self, value, attr, obj):
    if isinstance(value, arrow.arrow.Arrow):
        value = value.datetime
    return super(ArrowField, self)._serialize(value, attr, obj)
Convert the Arrow object into a string.
def GET_account_record(self, path_info, account_addr, token_type):
    if not check_account_address(account_addr):
        return self._reply_json({'error': 'Invalid address'}, status_code=400)
    if not check_token_type(token_type):
        return self._reply_json({'error': 'Invalid token type'}, status_code=400)

    blockstackd_url = get_blockstackd_url()
    res = blockstackd_client.get_account_record(account_addr, token_type,
                                                hostport=blockstackd_url)
    if json_is_error(res):
        log.error("Failed to get account state for {} {}: {}".format(
            account_addr, token_type, res['error']))
        return self._reply_json(
            {'error': 'Failed to get account record for {} {}: {}'.format(
                token_type, account_addr, res['error'])},
            status_code=res.get('http_status', 500))

    self._reply_json(res)
    return
Get the state of a particular token account Returns the account
def run(files, temp_folder):
    "Check flake8 errors in the code base."
    try:
        import flake8
    except ImportError:
        return NO_FLAKE_MSG

    try:
        from flake8.engine import get_style_guide
    except ImportError:
        from flake8.api.legacy import get_style_guide

    py_files = filter_python_files(files)
    if not py_files:
        return

    DEFAULT_CONFIG = join(temp_folder, get_config_file())
    with change_folder(temp_folder):
        flake8_style = get_style_guide(config_file=DEFAULT_CONFIG)
        out, err = StringIO(), StringIO()
        with redirected(out, err):
            flake8_style.check_files(py_files)
    return out.getvalue().strip() + err.getvalue().strip()
Check flake8 errors in the code base.
def write_bit(self, registeraddress, value, functioncode=5):
    _checkFunctioncode(functioncode, [5, 15])
    _checkInt(value, minvalue=0, maxvalue=1, description='input value')
    self._genericCommand(functioncode, registeraddress, value)
Write one bit to the slave.

Args:
    * registeraddress (int): The slave register address (use decimal numbers, not hex).
    * value (int): 0 or 1
    * functioncode (int): Modbus function code. Can be 5 or 15.

Returns:
    None

Raises:
    ValueError, TypeError, IOError
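A hedged usage sketch with minimalmodbus; the serial port, slave address, and coil number below are placeholders:

import minimalmodbus

instrument = minimalmodbus.Instrument("/dev/ttyUSB0", slaveaddress=1)
instrument.write_bit(71, 1)                    # function code 5 (default)
instrument.write_bit(71, 0, functioncode=15)   # write a single bit via code 15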