def request(self, method, path, params=None, headers=None, body=None):
    if not headers:
        headers = {}
    if not params:
        params = {}
    headers["Accept"] = "application/json"
    headers["Accept-Version"] = "^1.15.0"
    if self.auth_token:
        headers["Authorization"] = "Bearer {0}".format(self.auth_token)
    path = self.url + path
    params = self.flatten_params(params)
    response = requests.request(method, path, params=params, headers=headers, json=body)
    result = response.text
    try:
        result = response.json()
    except Exception:
        pass
    if response.status_code >= 400:
        raise LosantError(response.status_code, result)
    return result
Base method for making a Losant API request
def uord(c):
    if len(c) == 2:
        high, low = [ord(p) for p in c]
        ordinal = (high - 0xD800) * 0x400 + low - 0xDC00 + 0x10000
    else:
        ordinal = ord(c)
    return ordinal
Get Unicode ordinal.
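A doctest-style sketch of the behavior: when an astral character arrives as a two-character surrogate pair, uord recombines the pair into the real code point (the emoji below is illustrative):

>>> uord('A')
65
>>> uord('\ud83d\ude00')  # surrogate pair encoding U+1F600
128512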
def __handle_request(self, request, *args, **kw):
    self._authenticate(request)
    self._check_permission(request)
    method = self._get_method(request)
    data = self._get_input_data(request)
    data = self._clean_input_data(data, request)
    response = self._exec_method(method, request, data, *args, **kw)
    return self._process_response(response, request)
Intercept the request and response. This function lets `HttpStatusCodeError`s fall through. They are caught and transformed into HTTP responses by the caller. :return: ``HttpResponse``
def find(key):
    docs = list(collection.find({KEY_FIELD: key}))
    if not docs:
        return None
    pickled_value = docs[0][VALUE_FIELD]
    return pickle.loads(pickled_value)
Return the value associated with a key. If there is no value with the given key, returns ``None``.
# assumed: the 'with' usage described in the docstring implies a
# @contextmanager-decorated generator
@contextlib.contextmanager
def add_to_path(p):
    old_path = sys.path
    if p not in sys.path:
        sys.path = sys.path[:]
        sys.path.insert(0, p)
    try:
        yield
    finally:
        sys.path = old_path
Adds a path to python paths and removes it after the 'with' block ends
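A usage sketch, assuming the contextmanager wrapping above (the path and module names are hypothetical):

>>> with add_to_path('/opt/extra-libs'):
...     import extra_module  # importable only while the path is active
>>> # sys.path is restored here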
async def sleep(self, sleep_time):
    try:
        await asyncio.sleep(sleep_time)
    except RuntimeError:
        if self.log_output:
            logging.info('sleep exception')
        else:
            print('sleep exception')
        await self.shutdown()
This method is a proxy method for asyncio.sleep :param sleep_time: Sleep interval in seconds :returns: No return value.
def get_subclass_from_module(module, parent_class):
    try:
        r = __recursive_import(module)
        member_dict = dict(inspect.getmembers(r))
        sprinter_class = parent_class
        for v in member_dict.values():
            if inspect.isclass(v) and issubclass(v, parent_class) and v != parent_class:
                if sprinter_class is parent_class:
                    sprinter_class = v
        # sprinter_class is initialized to parent_class, never None, so that
        # sentinel is what must be compared against here
        if sprinter_class is parent_class:
            raise SprinterException("No subclass %s that extends %s exists in classpath!"
                                    % (module, str(parent_class)))
        return sprinter_class
    except ImportError:
        e = sys.exc_info()[1]
        raise e
Get a subclass of parent_class from the module at module get_subclass_from_module performs reflection to find the first class that extends the parent_class in the module path, and returns it.
def _check_load_existing_object(self, object_type, id_field_name, operation='update'):
    self._check_existing_object(object_type, id_field_name)
    if not self._load_from_hdx(object_type, self.data[id_field_name]):
        raise HDXError('No existing %s to %s!' % (object_type, operation))
Check metadata exists and contains HDX object identifier, and if so load
HDX object

Args:
    object_type (str): Description of HDX object type (for messages)
    id_field_name (str): Name of field containing HDX object identifier
    operation (str): Operation to report if error. Defaults to update.

Returns:
    None
def _release(self, lease):
    if lease.exist:
        os.unlink(lease.path)
        LOGGER.debug('Removed subnet lease {}'.format(lease.path))
Free the given lease Args: lease (lago.subnet_lease.Lease): The lease to free
def coarsen(self, dim: Optional[Mapping[Hashable, int]] = None,
            boundary: str = 'exact',
            side: Union[str, Mapping[Hashable, str]] = 'left',
            coord_func: str = 'mean',
            **dim_kwargs: int):
    dim = either_dict_or_kwargs(dim, dim_kwargs, 'coarsen')
    return self._coarsen_cls(
        self, dim, boundary=boundary, side=side, coord_func=coord_func)
Coarsen object.

Parameters
----------
dim : dict, optional
    Mapping from the dimension name to the window size.
boundary : 'exact' | 'trim' | 'pad'
    If 'exact', a ValueError will be raised if dimension size is not a
    multiple of the window size. If 'trim', the excess entries are
    dropped. If 'pad', NA will be padded.
side : 'left' or 'right' or mapping from dimension to 'left' or 'right'
coord_func : function (name) that is applied to the coordinates, or a
    mapping from coordinate name to function (name).

Returns
-------
Coarsen object (core.rolling.DataArrayCoarsen for DataArray,
core.rolling.DatasetCoarsen for Dataset.)

Examples
--------
Coarsen the long time series by averaging over every three days.

>>> da = xr.DataArray(np.linspace(0, 364, num=364),
...                   dims='time',
...                   coords={'time': pd.date_range(
...                       '15/12/1999', periods=364)})
>>> da
<xarray.DataArray (time: 364)>
array([  0.      ,   1.002755,   2.00551 , ..., 361.99449 , 362.997245,
       364.      ])
Coordinates:
  * time     (time) datetime64[ns] 1999-12-15 1999-12-16 ... 2000-12-12
>>> da.coarsen(time=3, boundary='trim').mean()
<xarray.DataArray (time: 121)>
array([  1.002755,   4.011019,   7.019284, ..., 358.986226, 361.99449 ])
Coordinates:
  * time     (time) datetime64[ns] 1999-12-16 1999-12-19 ... 2000-12-10

See Also
--------
core.rolling.DataArrayCoarsen
core.rolling.DatasetCoarsen
def add_version(self, project, version, egg):
    url = self._build_url(constants.ADD_VERSION_ENDPOINT)
    data = {
        'project': project,
        'version': version
    }
    files = {
        'egg': egg
    }
    json = self.client.post(url, data=data, files=files, timeout=self.timeout)
    return json['spiders']
Adds a new project egg to the Scrapyd service. First class, maps to Scrapyd's add version endpoint.
def to_xml(self, root):
    if not len(self.__custom_elements):
        return
    for uri, tags in self.__custom_elements.items():
        prefix, url = uri.split(":", 1)
        for name, value in tags.items():
            self.__createElementNS(root, url, prefix + ":" + name, value)
    return root
Returns a DOM element containing the XML representation of the
ExtensibleXMLiElement

@param root: Element Root XML element.
@return: Element
def get_par_css_dataframe(self):
    assert self.jco is not None
    assert self.pst is not None
    jco = self.jco.to_dataframe()
    weights = self.pst.observation_data.loc[jco.index, "weight"].copy().values
    jco = (jco.T * weights).T
    dss_sum = jco.apply(np.linalg.norm)
    css = (dss_sum / float(self.pst.nnz_obs)).to_frame()
    css.columns = ["pest_css"]
    self.pst.add_transform_columns()
    parval1 = self.pst.parameter_data.loc[dss_sum.index, "parval1_trans"].values
    css.loc[:, "hill_css"] = (dss_sum * parval1) / (float(self.pst.nnz_obs)**2)
    return css
Get a dataframe of composite scaled sensitivities. Includes both
PEST-style and Hill-style.

Returns
-------
css : pandas.DataFrame
def removeSinglePixels(img):
    gx = img.shape[0]
    gy = img.shape[1]
    for i in range(gx):
        for j in range(gy):
            if img[i, j]:
                found_neighbour = False
                for ii in range(max(0, i - 1), min(gx, i + 2)):
                    for jj in range(max(0, j - 1), min(gy, j + 2)):
                        if ii == i and jj == j:
                            continue
                        if img[ii, jj]:
                            found_neighbour = True
                            break
                    if found_neighbour:
                        break
                if not found_neighbour:
                    img[i, j] = 0
img - boolean array. Remove all pixels that have no neighbour.
def get_annotationdefault(self):
    buff = self.get_attribute("AnnotationDefault")
    if buff is None:
        return None
    with unpack(buff) as up:
        (ti, ) = up.unpack_struct(_H)
    return ti
The AnnotationDefault attribute, only present upon fields in an
annotation.

reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.20
def watch_for_events():
    fd = inotify.init()
    try:
        wd = inotify.add_watch(fd, '/tmp', inotify.IN_CLOSE_WRITE)
        while True:
            for event in inotify.get_events(fd):
                print("event:", event.name, event.get_mask_description())
    finally:
        os.close(fd)
Wait for events and print them to stdout.
def get_screen_info(self):
    return GetScreenInfo(
        display=self.display,
        opcode=self.display.get_extension_major(extname),
        window=self,
    )
Retrieve information about the current and available configurations for the screen associated with this window.
def fromstring(cls, string):
    parser = etree.XMLParser(remove_blank_text=True)
    root = etree.fromstring(string, parser)
    tree = root.getroottree()
    return cls.fromtree(tree)
Create a METS by parsing a string. :param str string: String containing a METS document.
def get_public_trades(self, time_frame='hour'):
    self._log('get public trades')
    return self._rest_client.get(
        endpoint='/transactions',
        params={'book': self.name, 'time': time_frame}
    )
Return public trades that were completed recently.

:param time_frame: Time frame. Allowed values are "minute" for trades in
    the last minute, or "hour" for trades in the last hour (default:
    "hour").
:type time_frame: str | unicode
:return: Public trades completed recently.
:rtype: [dict]
def delete(self, qname):
    try:
        q = self.exists(qname)
        if not q:
            return False
        queue = self.show(qname)
        if queue:
            queue.delete()
    # pyrax.exceptions is a module, not an exception class; catching its
    # base exception class (assumed to be PyraxException) is used here
    except pyrax.exceptions.PyraxException as err_msg:
        log.error('RackSpace API got some problems during deletion: %s', err_msg)
        return False
    return True
Delete an existing RackSpace Queue.
def _swap(self):
    self.ref_start, self.qry_start = self.qry_start, self.ref_start
    self.ref_end, self.qry_end = self.qry_end, self.ref_end
    self.hit_length_ref, self.hit_length_qry = self.hit_length_qry, self.hit_length_ref
    self.ref_length, self.qry_length = self.qry_length, self.ref_length
    self.ref_name, self.qry_name = self.qry_name, self.ref_name
Swaps the alignment so that the reference becomes the query and vice-versa. Swaps their names, coordinates etc. The frame is not changed
def CreateCustomizerFeed(client, feed_name):
    ad_customizer_feed_service = client.GetService('AdCustomizerFeedService', 'v201809')
    customizer_feed = {
        'feedName': feed_name,
        'feedAttributes': [
            {'type': 'STRING', 'name': 'Name'},
            {'type': 'STRING', 'name': 'Price'},
            {'type': 'DATE_TIME', 'name': 'Date'}
        ]
    }
    feed_service_operation = {
        'operator': 'ADD',
        'operand': customizer_feed
    }
    response = ad_customizer_feed_service.mutate([feed_service_operation])
    if response and 'value' in response:
        feed = response['value'][0]
        feed_data = {
            'feedId': feed['feedId'],
            'nameId': feed['feedAttributes'][0]['id'],
            'priceId': feed['feedAttributes'][1]['id'],
            'dateId': feed['feedAttributes'][2]['id']
        }
        print('Feed with name "%s" and ID %s was added with:\n'
              '\tName attribute ID %s and price attribute ID %s and date attribute '
              'ID %s' % (feed['feedName'], feed['feedId'], feed_data['nameId'],
                         feed_data['priceId'], feed_data['dateId']))
        return feed
    else:
        raise errors.GoogleAdsError('No feeds were added')
Creates a new AdCustomizerFeed.

Args:
    client: an AdWordsClient instance.
    feed_name: the name for the new AdCustomizerFeed.

Returns:
    The new AdCustomizerFeed.
def _cint(col, _map={base26(i): i - 1 for i in range(1, 257)}):
    try:
        return _map[col.upper()]
    except KeyError:
        raise ValueError(col)
Return zero-based column index from bijective base26 string.

>>> Coordinates._cint('Ab')
27

>>> Coordinates._cint('spam')
Traceback (most recent call last):
    ...
ValueError: spam
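In bijective base 26 each letter carries a value from 1 to 26 and there is no zero digit, so 'AB' encodes 1*26 + 2 = 28, giving zero-based index 27 as in the doctest above. A standalone sketch of the same conversion (a hypothetical helper, not part of the library):

>>> def cint(col):
...     n = 0
...     for ch in col.upper():
...         n = n * 26 + (ord(ch) - ord('A') + 1)
...     return n - 1
>>> cint('Ab')
27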
def parse_plugin_metadata(content):
    if not isinstance(content, bytes):
        raise TypeError('Content type must be bytes')
    result = plugin_data_pb2.PrCurvePluginData.FromString(content)
    if result.version == 0:
        return result
    else:
        logger.warn(
            'Unknown metadata version: %s. The latest version known to '
            'this build of TensorBoard is %s; perhaps a newer build is '
            'available?', result.version, PROTO_VERSION)
        return result
Parse summary metadata to a Python object.

Arguments:
    content: The `content` field of a `SummaryMetadata` proto corresponding
        to the pr_curves plugin.

Returns:
    A `PrCurvePluginData` protobuf object.
def actual_query_range(self):
    a = self.alignment_ranges
    if self.get_strand() == '+':
        return GenomicRange(a[0][1].chr, a[0][1].start, a[-1][1].end, self.get_strand())
    return GenomicRange(a[0][1].chr,
                        self.query_sequence_length - a[-1][1].end + 1,
                        self.query_sequence_length - a[0][1].start + 1,
                        dir=self.strand)
This is the actual query range for the positive strand :returns: Range of query positive strand covered :rtype: GenomicRange
def intersect_range_array(bed1, beds2, payload=None, is_sorted=False):
    if not is_sorted:
        beds2 = sort_ranges(beds2)
    output = []
    for bed2 in beds2:
        cval = bed2.cmp(bed1)
        if cval == -1:
            continue
        elif cval == 0:
            output.append(bed1.intersect(bed2))
            if payload == 1:
                output[-1].set_payload(bed1.payload)
            if payload == 2:
                output[-1].set_payload(bed2.payload)
        elif cval == 1:
            break
    if payload:
        return sort_ranges(output)
    return merge_ranges(output)
Does not do a merge if the payload has been set

:param bed1:
:param beds2:
:param payload: payload=1 return the payload of bed1 on each of the
    intersect set, payload=2 return the payload of bed2 on each of the
    union set, payload=3 return the payload of bed1 and bed2 on each of
    the union set
:param is_sorted:
:type bed1: GenomicRange
:type beds2: list of GenomicRange
:type payload: int
:type is_sorted: bool
def get_waittime(self):
    now = time.time()
    self.sentmessages.appendleft(now)
    if len(self.sentmessages) == self.sentmessages.maxlen:
        oldest = self.sentmessages[-1]
        waittime = self.limitinterval - (now - oldest)
        if waittime > 0:
            return waittime + 1
    return 0
Return the appropriate time to wait, if we sent too many messages :returns: the time to wait in seconds :rtype: :class:`float` :raises: None
def _start_action_for_section(self, section):
    if section == "configuration":
        return
    Global.LOGGER.debug("starting actions for section " + section)
    action_configuration = Global.CONFIG_MANAGER.sections[section]
    if len(action_configuration) == 0:
        Global.LOGGER.warn(f"section {section} has no configuration, skipping")
        return
    action_type = None
    new_managed_input = []
    if "type" in action_configuration:
        action_type = action_configuration["type"]
    if "input" in action_configuration:
        action_input = action_configuration["input"]
        new_managed_input = (item.strip() for item in action_input.split(","))
    my_action = Action.create_action_for_code(action_type,
                                              section,
                                              action_configuration,
                                              list(new_managed_input))
    if not my_action:
        Global.LOGGER.warn(f"can't find a type for action {section}, the action will be skipped")
        return
    self.actions.append(my_action)
    Global.LOGGER.debug("updating the subscriptions table")
    for my_input in my_action.monitored_input:
        self.subscriptions.setdefault(my_input, []).append(my_action)
Start all the actions for a particular section
def annotate_from_changeset(self, changeset):
    if self.annotate_from_changeset_func:
        return self.annotate_from_changeset_func(changeset)
    else:
        return ''.join((changeset.id, '\n'))
Returns full html line for single changeset per annotated line.
def search(self, text, includes=None, doc_type=None, limit=None,
           autocomplete=False, promulgated_only=False, tags=None, sort=None,
           owner=None, series=None):
    queries = self._common_query_parameters(doc_type, includes, owner,
                                            promulgated_only, series, sort)
    if len(text):
        queries.append(('text', text))
    if limit is not None:
        queries.append(('limit', limit))
    if autocomplete:
        queries.append(('autocomplete', 1))
    if tags is not None:
        if type(tags) is list:
            tags = ','.join(tags)
        queries.append(('tags', tags))
    if len(queries):
        url = '{}/search?{}'.format(self.url, urlencode(queries))
    else:
        url = '{}/search'.format(self.url)
    data = self._get(url)
    return data.json()['Results']
Search for entities in the charmstore.

@param text The text to search for.
@param includes What metadata to return in results (e.g. charm-config).
@param doc_type Filter to this type: bundle or charm.
@param limit Maximum number of results to return.
@param autocomplete Whether to prefix/suffix match search terms.
@param promulgated_only Whether to filter to only promulgated charms.
@param tags The tags to filter; can be a list of tags or a single tag.
@param sort Sorting the result based on the sort string provided which can
    be name, author, series and - in front for descending.
@param owner Optional owner. If provided, search results will only include
    entities that owner can view.
@param series The series to filter; can be a list of series or a single
    series.
def propagate_defaults(config_doc):
    for group_name, group_doc in config_doc.items():
        if isinstance(group_doc, dict):
            defaults = group_doc.get('defaults', {})
            for item_name, item_doc in group_doc.items():
                if item_name == 'defaults':
                    continue
                if isinstance(item_doc, dict):
                    group_doc[item_name] = \
                        dict_merge_pair(copy.deepcopy(defaults), item_doc)
    return config_doc
Propagate default values to sections of the doc.
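A sketch of the intended effect, assuming dict_merge_pair performs a recursive merge where the second argument wins (the section names are hypothetical):

>>> doc = {'servers': {'defaults': {'port': 8080},
...                    'web': {'host': 'a'},
...                    'api': {'host': 'b', 'port': 9090}}}
>>> propagate_defaults(doc)['servers']['web'] == {'port': 8080, 'host': 'a'}
True
>>> propagate_defaults(doc)['servers']['api']['port']
9090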
def start(self, use_atexit=True):
    assert not self._process
    _logger.debug('Starting process %s', self._proc_args)
    process_future = asyncio.create_subprocess_exec(
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        *self._proc_args
    )
    self._process = yield from process_future
    # asyncio.async() is a syntax error on modern Python; ensure_future
    # is the equivalent spelling
    self._stderr_reader = asyncio.ensure_future(self._read_stderr())
    self._stdout_reader = asyncio.ensure_future(self._read_stdout())
    if use_atexit:
        atexit.register(self.close)
Start the executable.

Args:
    use_atexit (bool): If True, the process will automatically be
        terminated at exit.
def info(self):
    # dict.update() returns None, so merge first and return the dict
    information = super(QgisComposerComponentsMetadata, self).info
    information.update({
        'orientation': self.orientation,
        'page_dpi': self.page_dpi,
        'page_width': self.page_width,
        'page_height': self.page_height
    })
    return information
Short info of the metadata. :return: Returned dictionary of information about the component. :rtype: dict
def ignore(self, *ignore_lst: str):
    def stream():
        for each in ignore_lst:
            each = ConstStrPool.cast_to_const(each)
            yield id(each), each
    self.ignore_lst.update(stream())
ignore a set of tokens with specific names
def current_version():
    import setuptools
    version = [None]

    def monkey_setup(**settings):
        version[0] = settings['version']

    old_setup = setuptools.setup
    setuptools.setup = monkey_setup
    import setup
    reload(setup)
    setuptools.setup = old_setup
    return version[0]
Get the current version number from setup.py
def inform_student(submission, request, state):
    details_url = request.build_absolute_uri(reverse('details', args=(submission.pk,)))
    if state == submission.TEST_VALIDITY_FAILED:
        subject = STUDENT_FAILED_SUB
        message = STUDENT_FAILED_MSG
        message = message % (submission.assignment, submission.assignment.course, details_url)
    elif state == submission.CLOSED:
        if submission.assignment.is_graded():
            subject = STUDENT_GRADED_SUB
            message = STUDENT_GRADED_MSG
        else:
            subject = STUDENT_PASSED_SUB
            message = STUDENT_PASSED_MSG
        message = message % (submission.assignment, submission.assignment.course, details_url)
    else:
        return
    subject = "[%s] %s" % (submission.assignment.course, subject)
    from_email = submission.assignment.course.owner.email
    recipients = submission.authors.values_list(
        'email', flat=True).distinct().order_by('email')
    email = EmailMessage(subject, message, from_email, recipients)
    email.send(fail_silently=True)
Create an email message for the student, based on the given submission
state.

Sending emails on validation completion does not work, since this may have
been triggered by the admin.
def nondegenerate(triangles, areas=None, height=None):
    triangles = np.asanyarray(triangles, dtype=np.float64)
    if not util.is_shape(triangles, (-1, 3, 3)):
        raise ValueError('Triangles must be (n,3,3)!')
    if height is None:
        height = tol.merge
    ok = (extents(triangles=triangles, areas=areas) > height).all(axis=1)
    return ok
Find all triangles which have an oriented bounding box where both of the
two sides are larger than a specified height.

Degenerate triangles can arise when:
1) Two of the three vertices are colocated
2) All three vertices are unique but colinear

Parameters
----------
triangles : (n, 3, 3) float
    Triangles in space
height : float
    Minimum edge length of a triangle to keep

Returns
----------
nondegenerate : (n,) bool
    True if a triangle meets required minimum height
def detached_signature_for(plaintext_str, keys):
    ctx = gpg.core.Context(armor=True)
    ctx.signers = keys
    (sigblob, sign_result) = ctx.sign(plaintext_str, mode=gpg.constants.SIG_MODE_DETACH)
    return sign_result.signatures, sigblob
Signs the given plaintext string and returns the detached signature.

A detached signature in GPG speak is a separate blob of data containing a
signature for the specified plaintext.

:param bytes plaintext_str: bytestring to sign
:param keys: list of one or more keys to sign with.
:type keys: list[gpg.gpgme._gpgme_key]
:returns: A list of signatures and the signed blob of data
:rtype: tuple[list[gpg.results.NewSignature], str]
def find_vcs_root(cls, path):
    if cls.search_parents_for_root():
        valid_dirs = walk_up_dirs(path)
    else:
        valid_dirs = [path]
    for i, current_path in enumerate(valid_dirs):
        if cls.is_valid_root(current_path):
            return current_path, i
    return None
Try to find a version control root directory of this type for the given
path.

If successful, returns (vcs_root, levels_up), where vcs_root is the path
to the version control root directory it found, and levels_up is an
integer indicating how many parent directories it had to search through
to find it, where 0 means it was found in the indicated path, 1 means it
was found in that path's parent, etc. If not successful, returns None.
def read_config_file(self, file_name):
    with open(os.path.join(self.__path(), os.path.basename(file_name)), 'rt') as file_config:
        return self._parser.parseString(file_config.read())
Reads a CWR grammar config file. :param file_name: name of the text file :return: the file's contents
def emit_only(self, event: str, func_names: Union[str, List[str]], *args, **kwargs) -> None:
    if isinstance(func_names, str):
        func_names = [func_names]
    for func in self._event_funcs(event):
        if func.__name__ in func_names:
            func(*args, **kwargs)
Specifically only emits certain subscribed events. :param event: Name of the event. :type event: str :param func_names: Function(s) to emit. :type func_names: Union[ str | List[str] ]
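A usage sketch, assuming a bus instance with handlers already subscribed under these names (all names here are hypothetical):

>>> bus.emit_only('user_created', 'send_welcome_email', user_id=42)
>>> bus.emit_only('user_created', ['send_welcome_email', 'update_stats'])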
def add_env(self, name, val):
    if name in self.env_vars:
        raise KeyError(name)
    self.env_vars[name] = val
Add an environment variable to the docker run invocation
def iscm_md_append_array(self, arraypath, member):
    array_path = arraypath.split(".")
    array_key = array_path.pop()
    current = self.metadata
    for k in array_path:
        if k not in current:
            current[k] = {}
        current = current[k]
    if array_key not in current:
        current[array_key] = []
    if not type(current[array_key]) == list:
        raise KeyError("%s doesn't point to an array" % arraypath)
    current[array_key].append(member)
Append a member to a metadata array entry
def get_vocabulary(preprocess_output_dir, name):
    vocab_file = os.path.join(preprocess_output_dir, CATEGORICAL_ANALYSIS % name)
    if not file_io.file_exists(vocab_file):
        raise ValueError('File %s not found in %s' %
                         (CATEGORICAL_ANALYSIS % name, preprocess_output_dir))
    labels = python_portable_string(
        file_io.read_file_to_string(vocab_file)).split('\n')
    label_values = [x for x in labels if x]
    return label_values
Loads the vocabulary file as a list of strings.

Args:
    preprocess_output_dir: Should contain the file
        CATEGORICAL_ANALYSIS % name.
    name: name of the csv column.

Returns:
    List of strings.

Raises:
    ValueError: if file is missing.
def remove_video_for_course(course_id, edx_video_id):
    course_video = CourseVideo.objects.get(course_id=course_id,
                                           video__edx_video_id=edx_video_id)
    course_video.is_hidden = True
    course_video.save()
Soft deletes video for particular course. Arguments: course_id (str): id of the course edx_video_id (str): id of the video to be hidden
def rerender_options(options):
    args = []
    for name, value in options.items():
        name = name.replace("_", "-")
        if value is None:
            pass
        elif isinstance(value, bool):
            if value:
                args.append("--%s" % (name,))
        elif isinstance(value, list):
            for item in value:
                args.append("--%s=%s" % (name, item))
        else:
            args.append("--%s=%s" % (name, value))
    return " ".join(args)
Helper function to re-render command-line options. This assumes that command-line options use the same name as their key in the options dictionary.
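A doctest-style sketch of the rendering (single-key dicts keep the output order deterministic):

>>> rerender_options({'dry_run': True})
'--dry-run'
>>> rerender_options({'exclude': ['a', 'b']})
'--exclude=a --exclude=b'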
def _get_interpreter_info(interpreter=None):
    if interpreter is None:
        major, minor = sys.version_info[:2]
        executable = sys.executable
    else:
        args = [interpreter, '-c', SHOW_VERSION_CMD]
        try:
            requested_interpreter_info = logged_exec(args)
        except Exception as error:
            logger.error("Error getting requested interpreter version: %s", error)
            raise FadesError("Could not get interpreter version")
        requested_interpreter_info = json.loads(requested_interpreter_info[0])
        executable = requested_interpreter_info['path']
        major = requested_interpreter_info['major']
        minor = requested_interpreter_info['minor']
    if executable[-1].isdigit():
        executable = executable.split(".")[0][:-1]
    interpreter = "{}{}.{}".format(executable, major, minor)
    return interpreter
Return the interpreter's full path using pythonX.Y format.
def check_directory_path(self, path):
    if not os.path.isdir(path):
        msg = "Directory Does Not Exist {}".format(path)
        raise OSError(msg)
Ensure directory exists at the provided path :type path: string :param path: path to directory to check
def cursor_position(self, value):
    assert isinstance(value, int)
    assert value <= len(self.text)
    changed = self._set_cursor_position(value)
    if changed:
        self._cursor_position_changed()
Setting cursor position.
def get_parameters_as_dictionary(self, query_string):
    pairs = (x.split('=', 1) for x in query_string.split('&'))
    return dict((k, unquote(v)) for k, v in pairs)
Returns query string parameters as a dictionary.
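A quick sketch of the parsing, assuming unquote is urllib's percent-decoder (the handler instance is hypothetical):

>>> handler.get_parameters_as_dictionary('q=hello%20world&page=2')
{'q': 'hello world', 'page': '2'}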
def _parse_mut(subs):
    if subs != "0":
        subs = [[subs.replace(subs[-2:], ""), subs[-2], subs[-1]]]
    return subs
Parse mutation tag from miraligner output
def snap_remove(packages, *flags):
    if type(packages) is not list:
        packages = [packages]
    flags = list(flags)
    message = 'Removing snap(s) "%s"' % ', '.join(packages)
    if flags:
        message += ' with options "%s"' % ', '.join(flags)
    log(message, level='INFO')
    return _snap_exec(['remove'] + flags + packages)
Remove a snap package. :param packages: String or List String package name :param flags: List String flags to pass to remove command :return: Integer return code from snap
def get_older_backup(self, encrypted=None, compressed=None, content_type=None,
                     database=None, servername=None):
    files = self.list_backups(encrypted=encrypted, compressed=compressed,
                              content_type=content_type, database=database,
                              servername=servername)
    if not files:
        raise FileNotFound("There's no backup file available.")
    return min(files, key=utils.filename_to_date)
Return the older backup's file name.

:param encrypted: Filter by encrypted or not
:type encrypted: ``bool`` or ``None``
:param compressed: Filter by compressed or not
:type compressed: ``bool`` or ``None``
:param content_type: Filter by media or database backup, must be ``'db'``
    or ``'media'``
:type content_type: ``str`` or ``None``
:param database: Filter by source database's name
:type database: ``str`` or ``None``
:param servername: Filter by source server's name
:type servername: ``str`` or ``None``
:returns: Older file
:rtype: ``str``
:raises FileNotFound: If no backup file is found
def _InitializeURL(self, upload_url, current_content_length):
    if current_content_length != 0:
        return upload_url
    headers = {
        'Content-Type': 'application/xml',
        'Content-Length': 0,
        'x-goog-resumable': 'start'
    }
    req = urllib2.Request(upload_url, data={}, headers=headers)
    resp = self._url_opener.open(req)
    return resp.headers['location']
Ensures that the URL used to upload operations is properly initialized.

Args:
    upload_url: a string url.
    current_content_length: an integer identifying the current content
        length of data uploaded to the Batch Job.

Returns:
    An initialized string URL, or the provided string URL if the URL has
    already been initialized.
def mols_to_file(mols, path):
    with open(path, 'w') as f:
        f.write(mols_to_text(mols))
Save molecules to the SDFile format file Args: mols: list of molecule objects path: file path to save
def snake(s):
    if len(s) < 2:
        return s.lower()
    out = s[0].lower()
    for c in s[1:]:
        if c.isupper():
            out += "_"
            c = c.lower()
        out += c
    return out
Convert from title or camelCase to snake_case.
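A doctest-style illustration of the conversion, including the edge case with runs of capitals:

>>> snake('camelCase')
'camel_case'
>>> snake('HTTPResponse')  # consecutive capitals split letter by letter
'h_t_t_p_response'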
def get_standard(self):
    try:
        res = urlopen(PARSELY_PAGE_SCHEMA)
    except Exception:  # network failures yield an empty list
        return []
    text = res.read()
    if isinstance(text, bytes):
        text = text.decode('utf-8')
    tree = etree.parse(StringIO(text))
    stdref = tree.xpath("//div/@about")
    return [a.split(':')[1] for a in stdref]
get list of allowed parameters
def clone(self) -> "Event":
    return self.__class__(copy.deepcopy(self.event), copy.deepcopy(self.metadata))
Clone the event Returns: :class:`slack.events.Event`
def add_localedir_translations(self, localedir):
    global _localedirs
    if localedir in self.localedirs:
        return
    self.localedirs.append(localedir)
    full_localedir = os.path.join(localedir, 'locale')
    if os.path.exists(full_localedir):
        translation = self._new_gnu_trans(full_localedir)
        self.merge(translation)
Merge translations from localedir.
def tostring(node, indent=4, nsmap=None):
    out = io.BytesIO()
    writer = StreamingXMLWriter(out, indent, nsmap=nsmap)
    writer.serialize(node)
    return out.getvalue()
Convert a node into an XML string by using the StreamingXMLWriter. This is useful for testing purposes. :param node: a node object (typically an ElementTree object) :param indent: the indentation to use in the XML (default 4 spaces)
def add_clients(session, verbose):
    for ctype in ['Genuine', 'Impostor']:
        for cdid in userid_clients:
            cid = ctype + '_%d' % cdid
            if verbose > 1:
                print("  Adding user '%s' of type '%s'..." % (cid, ctype))
            session.add(Client(cid, ctype, cdid))
Add clients to the ATVS Keystroke database.
def QueryValueEx(key, value_name):
    regqueryvalueex = advapi32["RegQueryValueExW"]
    regqueryvalueex.restype = ctypes.c_long
    regqueryvalueex.argtypes = [
        ctypes.c_void_p, ctypes.c_wchar_p, LPDWORD, LPDWORD, LPBYTE, LPDWORD
    ]
    size = 256
    data_type = ctypes.wintypes.DWORD()
    while True:
        tmp_size = ctypes.wintypes.DWORD(size)
        buf = ctypes.create_string_buffer(size)
        rc = regqueryvalueex(key.handle, value_name, LPDWORD(),
                             ctypes.byref(data_type), ctypes.cast(buf, LPBYTE),
                             ctypes.byref(tmp_size))
        if rc != ERROR_MORE_DATA:
            break
        if size > 10 * 1024 * 1024:
            raise OSError("Value too big to be read by GRR.")
        size *= 2
    if rc != ERROR_SUCCESS:
        raise ctypes.WinError(2)
    return _Reg2Py(buf, tmp_size.value, data_type.value), data_type.value
This calls the Windows QueryValueEx function in a Unicode safe way.
def _get_conditions_list(table, conds, archive=True):
    if conds is None:
        conds = []
    all_conditions = []
    for cond in conds:
        if len(cond) != len(table.version_columns):
            raise ValueError('Conditions must specify all unique constraints.')
        conditions = []
        t = table.ArchiveTable if archive else table
        for col_name, value in cond.items():
            if col_name not in table.version_columns:
                raise ValueError('{} is not one of the unique columns <{}>'.format(
                    col_name, ','.join(table.version_columns)
                ))
            conditions.append(getattr(t, col_name) == value)
        all_conditions.append(conditions)
    return all_conditions
This function returns a list of lists of == conditions on sqlalchemy
columns given conds. This should be treated as an OR of ANDs.

:param table: the user table model class which inherits from
    savage.models.SavageModelMixin
:param conds: a list of dictionaries of key value pairs where keys are
    column names and values are conditions to be placed on the column.
:param archive: If true, the condition is with columns from the archive
    table. Else its from the user table.
def _parse_param(key, val):
    regex = re.compile(r'fields\[([A-Za-z]+)\]')
    match = regex.match(key)
    if match:
        if not isinstance(val, list):
            val = val.split(',')
        fields = [field.lower() for field in val]
        rtype = match.groups()[0].lower()
        return rtype, fields
Parse the query param looking for sparse fields params

Ensure the `val` or what will become the sparse fields is always an array.
If the query param is not a sparse fields query param then return None.

:param key: the query parameter key in the request (left of =)
:param val: the query parameter val in the request (right of =)
:return: tuple of resource type to implement the sparse fields on & an
    array of the fields.
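A doctest-style sketch of both branches, using an illustrative resource name:

>>> _parse_param('fields[article]', 'title,Body')
('article', ['title', 'body'])
>>> _parse_param('page[size]', '10') is None
True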
def split_markers_from_line(line):
    if not any(line.startswith(uri_prefix) for uri_prefix in SCHEME_LIST):
        marker_sep = ";"
    else:
        marker_sep = "; "
    markers = None
    if marker_sep in line:
        line, markers = line.split(marker_sep, 1)
        markers = markers.strip() if markers else None
    return line, markers
Split markers from a dependency
def transfer_list(request, detailed=True, search_opts=None):
    c_client = cinderclient(request)
    try:
        return [VolumeTransfer(v) for v in c_client.transfers.list(
            detailed=detailed, search_opts=search_opts)]
    except cinder_exception.Forbidden as error:
        LOG.error(error)
        return []
List volume transfers. To see all volumes transfers as an admin pass in a special search option: {'all_tenants': 1}
def dict2kvlist(o):
    return chain.from_iterable((k, v) for k, v in o.items())
Serializes a dict-like object into a generator of the flatten list of
repeating key-value pairs. It is useful when using HMSET method in Redis.

Example:
>>> list(dict2kvlist({'a': 1, 'b': 2}))
['a', 1, 'b', 2]
def next_version(self, object, relations_as_of='end'):
    if object.version_end_date is None:
        next = object
    else:
        next = self.filter(
            Q(identity=object.identity),
            Q(version_start_date__gte=object.version_end_date)
        ).order_by('version_start_date').first()
    if not next:
        raise ObjectDoesNotExist(
            "next_version couldn't find a next version of object " +
            str(object.identity))
    return self.adjust_version_as_of(next, relations_as_of)
Return the next version of the given object.

In case there is no next object existing, meaning the given object is the
current version, the function returns this version.

Note that if object's version_end_date is None, this does not check the
database to see if there is a newer version (perhaps created by some other
code), it simply returns the passed object.

``relations_as_of`` is used to fix the point in time for the version; this
affects which related objects are returned when querying for object
relations. See ``VersionManager.version_as_of`` for details on valid
``relations_as_of`` values.

:param Versionable object: object whose next version will be returned.
:param mixed relations_as_of: determines point in time used to access
    relations. 'start'|'end'|datetime|None
:return: Versionable
def mark_as_read(self):
    if self.object_id is None or self.__is_draft:
        raise RuntimeError('Attempting to mark as read an unsaved Message')
    data = {self._cc('isRead'): True}
    url = self.build_url(
        self._endpoints.get('get_message').format(id=self.object_id))
    response = self.con.patch(url, data=data)
    if not response:
        return False
    self.__is_read = True
    return True
Marks this message as read in the cloud :return: Success / Failure :rtype: bool
def push(self, undoObj):
    if not isinstance(undoObj, QtmacsUndoCommand):
        raise QtmacsArgumentError('undoObj', 'QtmacsUndoCommand',
                                  inspect.stack()[0][3])
    self._wasUndo = False
    self._push(undoObj)
Add ``undoObj`` command to stack and run its ``commit`` method.

|Args|

* ``undoObj`` (**QtmacsUndoCommand**): the new command object.

|Returns|

* **None**

|Raises|

* **QtmacsArgumentError** if at least one argument has an invalid type.
def make_directory(directory):
    if not os.path.isdir(directory):
        os.mkdir(directory)
        logger.info('Path {} not found, I will create it.'.format(directory))
Makes directory if it does not exist.

Parameters
----------
directory : :obj:`str`
    Directory path
def get_item_children(item):
    children = [item.child(index) for index in range(item.childCount())]
    for child in children[:]:
        others = get_item_children(child)
        if others is not None:
            children += others
    return sorted(children, key=lambda child: child.line)
Return a sorted list of all the children items of 'item'.
def read_file(*relative_path_elements):
    file_path = path.join(path.dirname(__file__), *relative_path_elements)
    return io.open(file_path, encoding='utf8').read().strip()
Return content of a file relative to this ``setup.py``.
def _acquire_media_transport(self, path, access_type):
    transport = BTMediaTransport(path=path)
    (fd, read_mtu, write_mtu) = transport.acquire(access_type)
    self.fd = fd.take()
    self.write_mtu = write_mtu
    self.read_mtu = read_mtu
    self.access_type = access_type
    self.path = path
    self._install_transport_ready()
Should be called by subclass when it is ready to acquire the media transport file descriptor
def convert(self, json="", table_attributes='border="1"', clubbing=True, encode=False, escape=True): self.table_init_markup = "<table %s>" % table_attributes self.clubbing = clubbing self.escape = escape json_input = None if not json: json_input = {} elif type(json) in text_types: try: json_input = json_parser.loads(json, object_pairs_hook=OrderedDict) except ValueError as e: if u"Expecting property name" in text(e): raise e json_input = json else: json_input = json converted = self.convert_json_node(json_input) if encode: return converted.encode('ascii', 'xmlcharrefreplace') return converted
Convert JSON to HTML Table format
def _expr2code(self, arg_list, expr):
    code = lambdastr(arg_list, expr)
    function_code = code.split(':')[1].strip()
    return function_code
Convert the given symbolic expression into code.
def _load_json(self, filename):
    with open(filename, 'r') as file_handle:
        self._sensors.update(json.load(file_handle, cls=MySensorsJSONDecoder))
Load sensors from json file.
def _read_miraligner(fn):
    reads = defaultdict(realign)
    with open(fn) as in_handle:
        next(in_handle)  # skip header line
        for line in in_handle:
            cols = line.strip().split("\t")
            iso = isomir()
            query_name, seq = cols[1], cols[0]
            chrom, reference_start = cols[-2], cols[3]
            iso.mirna = cols[3]
            subs, add, iso.t5, iso.t3 = cols[6:10]
            if query_name not in reads:
                reads[query_name].sequence = seq
            iso.align = line
            iso.start = reference_start
            iso.subs, iso.add = _parse_mut(subs), add
            logger.debug("%s %s %s %s %s" % (query_name, reference_start,
                                             chrom, iso.subs, iso.add))
            reads[query_name].set_precursor(chrom, iso)
    return reads
Read output of miraligner and create compatible output.
def parse_singular_string(t, tag_name):
    pos = t.getElementsByTagName(tag_name)
    assert(len(pos) == 1)
    pos = pos[0]
    assert(len(pos.childNodes) == 1)
    return pos.childNodes[0].data
Parses the sole string value with name tag_name in tag t. Heavy-handed with the asserts.
def unique(objects, key=None):
    dupl = []
    for obj, group in itertools.groupby(sorted(objects), key):
        if sum(1 for _ in group) > 1:
            dupl.append(obj)
    if dupl:
        raise ValueError('Found duplicates %s' % dupl)
    return objects
Raise a ValueError if there is a duplicated object, otherwise returns the objects as they are.
def _realValue_to_float(value_str):
    if REAL_VALUE.match(value_str):
        value = float(value_str)
    else:
        value = None
    return value
Convert a value string that conforms to DSP0004 `realValue`, into the
corresponding float and return it.

The special values 'INF', '-INF', and 'NAN' are supported.

Note that the Python `float()` function supports a superset of input
formats compared to the `realValue` definition in DSP0004. For example,
"1." is allowed for `float()` but not for `realValue`. In addition, it has
the same support for Unicode decimal digits as `int()`. Therefore, the
match patterns explicitly check for US-ASCII digits, and the `float()`
function should never raise `ValueError`.

Returns None if the value string does not conform to `realValue`.
def load(cls, path):
    assert os.path.exists(path), "No such file: %r" % path
    (folder, filename) = os.path.split(path)
    (name, extension) = os.path.splitext(filename)
    image = Image(None)
    image._path = path
    image._format = Image.image_format(extension)
    return image
Load image from file.
def controlprompt_cmd(self, cmd):
    data = tags.string_tag('cmbe', cmd) + tags.uint8_tag('cmcc', 0)
    return self.daap.post(_CTRL_PROMPT_CMD, data=data)
Perform a "controlpromptentry" command.
def toints(self):
    def grouper(iterable, n, fillvalue=None):
        "Collect data into fixed-length chunks or blocks"
        return zip_longest(*[iter(iterable)] * n, fillvalue=fillvalue)
    return [int(''.join(map(str, group)), 2) for group in grouper(self._data, 8, 0)]
Returns an iterable of integers interpreting the content of `seq` as
sequence of binary numbers of length 8.
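A standalone sketch of the same grouping logic applied to a raw bit list, assuming zip_longest from itertools (the bit pattern is illustrative):

>>> from itertools import zip_longest
>>> bits = [0, 1, 0, 0, 0, 0, 0, 1, 1, 1]  # 'A' plus two leftover bits
>>> [int(''.join(map(str, g)), 2)
...  for g in zip_longest(*[iter(bits)] * 8, fillvalue=0)]
[65, 192]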
def from_path(cls, conn, path):
    path = path.strip(SEP)
    full_path = os.path.join(conn.abs_root, path)
    return cls(conn, path, 0, os.path.getsize(full_path))
Create container from path.
def convert_elementwise_mul_scalar(net, node, module, builder):
    import numpy
    input_name, output_name = _get_input_output_name(net, node)
    name = node['name']
    param = _get_attr(node)
    mult = literal_eval(param['scalar'])
    builder.add_scale(name=name, W=numpy.array([mult]), b=0, has_bias=False,
                      input_name=input_name, output_name=output_name)
Convert a scalar multiplication from mxnet to coreml.

Parameters
----------
net: network
    A mxnet network object.
node: layer
    Node to convert.
module: module
    A module for MXNet
builder: NeuralNetworkBuilder
    A neural network builder object.
def crc16_ccitt(data, crc=0):
    tab = CRC16_CCITT_TAB
    for byte in six.iterbytes(data):
        crc = ((crc << 8) & 0xff00) ^ tab[((crc >> 8) & 0xff) ^ byte]
    return crc & 0xffff
Calculate the crc16 ccitt checksum of some data

A starting crc value may be specified if desired. The input data is
expected to be a sequence of bytes (string) and the output is an integer
in the range (0, 0xFFFF). No packing is done to the resultant crc value.

To check a checksum, just pass in the data bytes and checksum value. If
the data matches the checksum, then the resultant checksum from this
function should be 0.
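A usage sketch of the self-check property described above; struct packs the checksum big-endian to match the MSB-first shift direction (CRC16_CCITT_TAB is assumed to be the standard 0x1021 lookup table):

>>> import struct
>>> payload = b'123456789'
>>> checksum = crc16_ccitt(payload)
>>> crc16_ccitt(payload + struct.pack('>H', checksum))
0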
def first(dmap_data, *path):
    if not (path and isinstance(dmap_data, list)):
        return dmap_data
    for key in dmap_data:
        if path[0] in key:
            return first(key[path[0]], *path[1:])
    return None
Look up a value given a path in some parsed DMAP data.
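A doctest-style sketch with hand-built data shaped like parsed DMAP (the tag names are illustrative):

>>> data = [{'cmst': [{'mstt': 200}, {'cmsr': 42}]}]
>>> first(data, 'cmst', 'cmsr')
42
>>> first(data, 'cmst', 'missing') is None
True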
def _continue_params(self):
    if not self.data.get('continue'):
        return
    params = []
    for item in self.data['continue']:
        params.append("&%s=%s" % (item, self.data['continue'][item]))
    return ''.join(params)
Returns query string fragment continue parameters
def get_state_all(self):
    state_dict = {}
    for device in self.get_device_names().keys():
        state_dict[device] = self.get_state(device)
    return state_dict
Returns all device states
def block(bdaddr):
    if not salt.utils.validate.net.mac(bdaddr):
        raise CommandExecutionError(
            'Invalid BD address passed to bluetooth.block'
        )
    cmd = 'hciconfig {0} block'.format(bdaddr)
    __salt__['cmd.run'](cmd).splitlines()
Block a specific bluetooth device by BD Address

CLI Example:

.. code-block:: bash

    salt '*' bluetooth.block DE:AD:BE:EF:CA:FE
def is_connected(self, attempts=3):
    if self.gce is None:
        while attempts > 0:
            self.logger.info("Attempting to connect ...")
            try:
                self.connect()
            except ComputeEngineManagerException:
                attempts -= 1
                continue
            self.logger.info("Connection established.")
            return True
        self.logger.error("Unable to connect to Google Compute Engine.")
        return False
    return True
Try to reconnect if necessary.

:param attempts: The amount of tries to reconnect if necessary.
:type attempts: ``int``
def pixels_from_softmax(frame_logits, pure_sampling=False, temperature=1.0,
                        gumbel_noise_factor=0.2):
    if pure_sampling or temperature == 0.0:
        return common_layers.sample_with_temperature(frame_logits, temperature)
    pixel_range = tf.to_float(tf.range(256))
    for _ in range(len(frame_logits.get_shape().as_list()) - 1):
        pixel_range = tf.expand_dims(pixel_range, axis=0)
    frame_logits = tf.nn.log_softmax(frame_logits)
    gumbel_samples = discretization.gumbel_sample(
        common_layers.shape_list(frame_logits)) * gumbel_noise_factor
    frame = tf.nn.softmax((frame_logits + gumbel_samples) / temperature, axis=-1)
    result = tf.reduce_sum(frame * pixel_range, axis=-1)
    # straight-through rounding: forward pass rounds, gradient passes through
    return result + tf.stop_gradient(tf.round(result) - result)
Given frame_logits from a per-pixel softmax, generate colors.
def add_vertex(self, x, y, z, name):
    self.vertices[name] = Vertex(x, y, z, name)
    return self.vertices[name]
Add a vertex by coordinates and a unique name.

x, y, z are the coordinates of the vertex; name is a unique name used to
refer to the vertex. Returns the Vertex object which was added.
def _is_pingable(ip):
    ping_cmd = ['ping', '-c', '5', '-W', '1', '-i', '0.2', ip]
    try:
        linux_utils.execute(ping_cmd, check_exit_code=True)
        return True
    except RuntimeError:
        LOG.warning("Cannot ping ip address: %s", ip)
        return False
Checks whether an IP address is reachable by pinging.

Use linux utils to execute the ping (ICMP ECHO) command. Sends 5 packets
with an interval of 0.2 seconds and timeout of 1 second. Runtime error
implies unreachability else IP is pingable.

:param ip: IP to check
:return: bool - True or False depending on pingability.
def get_video_transcript_storage():
    if hasattr(settings, 'VIDEO_TRANSCRIPTS_SETTINGS'):
        return get_storage_class(
            settings.VIDEO_TRANSCRIPTS_SETTINGS.get('STORAGE_CLASS'),
        )(**settings.VIDEO_TRANSCRIPTS_SETTINGS.get('STORAGE_KWARGS', {}))
    else:
        return get_storage_class()()
Return the configured django storage backend for video transcripts.
def from_json(cls, filename):
    with open(filename) as fp:
        raw = json.load(fp)
    return cls(raw['stimuli'], raw['inhibitors'], raw['readouts'])
Creates an experimental setup from a JSON file

Parameters
----------
filename : str
    Absolute path to JSON file

Returns
-------
caspo.core.setup.Setup
    Created object instance
def isOverlayVisible(self, ulOverlayHandle):
    fn = self.function_table.isOverlayVisible
    result = fn(ulOverlayHandle)
    return result
Returns true if the overlay is visible.
def cfloat64_array_to_numpy(cptr, length):
    if isinstance(cptr, ctypes.POINTER(ctypes.c_double)):
        return np.fromiter(cptr, dtype=np.float64, count=length)
    else:
        raise RuntimeError('Expected double pointer')
Convert a ctypes double pointer array to a numpy array.
def get_k8s_model(model_type, model_dict):
    model_dict = copy.deepcopy(model_dict)
    if isinstance(model_dict, model_type):
        return model_dict
    elif isinstance(model_dict, dict):
        model_dict = _map_dict_keys_to_model_attributes(model_type, model_dict)
        return model_type(**model_dict)
    else:
        raise AttributeError("Expected object of type 'dict' (or '{}') but got '{}'."
                             .format(model_type.__name__, type(model_dict).__name__))
Returns an instance of type specified model_type from a model instance or
representative dictionary.