Columns: "Unnamed: 0" (int64, 0 to 389k), "code" (string, lengths 26 to 79.6k), "docstring" (string, lengths 1 to 46.9k)
382,300
def child(self, name=None, **attrs): sub_query = build_query(name, **attrs) query = ('/', (self.query, sub_query)) obj = UIObjectProxy(self.poco) obj.query = query return obj
Select the direct child(ren) from the UI element(s) given by the query expression, see ``QueryCondition`` for more details about the selectors. Args: name: query expression of attribute "name", i.e. the UI elements with ``name`` name will be selected attrs: other query expression except for the ``name`` Returns: :py:class:`UIObjectProxy <poco.proxy.UIObjectProxy>`: a new UI proxy object representing the child(ren) of current UI element(s)
382,301
def _post(self, route, data, headers=None, failure_message=None): headers = self._get_headers(headers) response_lambda = ( lambda: requests.post( self._get_qualified_route(route), headers=headers, data=data, verify=False, proxies=self.proxies ) ) response = check_for_rate_limiting(response_lambda(), response_lambda) return self._handle_response(response, failure_message)
Execute a POST request against the given route and return the handled response. :param route: the API route to post to :param data: the request payload :param headers: optional request headers :param failure_message: message to use if the request fails :return: the handled response
382,302
def selectAssemblies(pth, manifest=None): rv = [] if not os.path.isfile(pth): pth = check_extract_from_egg(pth)[0][0] if manifest: _depNames = set([dep.name for dep in manifest.dependentAssemblies]) for assembly in getAssemblies(pth): if seen.get(assembly.getid().upper(), 0): continue if manifest and not assembly.name in _depNames: logger.info("Adding %s to dependent assemblies " "of final executable", assembly.name) manifest.dependentAssemblies.append(assembly) _depNames.add(assembly.name) if not dylib.include_library(assembly.name): logger.debug("Skipping assembly %s", assembly.getid()) continue if assembly.optional: logger.debug("Skipping optional assembly %s", assembly.getid()) continue files = assembly.find_files() if files: seen[assembly.getid().upper()] = 1 for fn in files: fname, fext = os.path.splitext(fn) if fext.lower() == ".manifest": nm = assembly.name + fext else: nm = os.path.basename(fn) ftocnm = nm if assembly.language not in (None, "", "*", "neutral"): ftocnm = os.path.join(assembly.getlanguage(), ftocnm) nm, ftocnm, fn = [item.encode(sys.getfilesystemencoding()) for item in (nm, ftocnm, fn)] if not seen.get(fn.upper(), 0): logger.debug("Adding %s", ftocnm) seen[nm.upper()] = 1 seen[fn.upper()] = 1 rv.append((ftocnm, fn)) else: pass else: logger.error("Assembly %s not found", assembly.getid()) return rv
Return a binary's dependent assembly files that should be included, as a list of (name, fullpath) pairs.
382,303
def add_uid(fastq, cores): uids = partial(append_uids) p = multiprocessing.Pool(cores) chunks = tz.partition_all(10000, read_fastq(fastq)) bigchunks = tz.partition_all(cores, chunks) for bigchunk in bigchunks: for chunk in p.map(uids, list(bigchunk)): for read in chunk: sys.stdout.write(read)
Adds UID:[samplebc cellbc umi] to the read name for umi-tools deduplication. Expects formatted fastq files with correct sample and cell barcodes.
382,304
def cublasSsbmv(handle, uplo, n, k, alpha, A, lda, x, incx, beta, y, incy): status = _libcublas.cublasSsbmv_v2(handle, _CUBLAS_FILL_MODE[uplo], n, k, ctypes.byref(ctypes.c_float(alpha)), int(A), lda, int(x), incx, ctypes.byref(ctypes.c_float(beta)), int(y), incy) cublasCheckStatus(status)
Matrix-vector product for real symmetric-banded matrix.
382,305
def conflict(self, key, **kwargs): try: msg = self.error_messages[key] except KeyError: class_name = self.__class__.__name__ msg = MISSING_ERROR_MESSAGE.format(class_name=class_name, key=key) raise AssertionError(msg) message_string = msg.format(**kwargs) raise Conflict(message_string)
A helper method that formats an error message and raises a ``Conflict`` error.
382,306
def correlation_model(prediction, fm): (_, r_x) = calc_resize_factor(prediction, fm.image_size) fdm = compute_fdm(fm, scale_factor = r_x) return np.corrcoef(fdm.flatten(), prediction.flatten())[0,1]
wraps numpy.corrcoef functionality for model evaluation input: prediction: 2D Matrix the model salience map fm: fixmat Used to compute a FDM to which the prediction is compared.
382,307
def pretty_print(self, as_list=False, show_datetime=True): ppl = [entry.pretty_print(show_datetime) for entry in self.entries] if as_list: return ppl return u"\n".join(ppl)
Return a Unicode string pretty print of the log entries. :param bool as_list: if ``True``, return a list of Unicode strings, one for each entry, instead of a Unicode string :param bool show_datetime: if ``True``, show the date and time of the entries :rtype: string or list of strings
382,308
def random_combination(iterable, nquartets): pool = tuple(iterable) size = len(pool) indices = random.sample(xrange(size), nquartets) return tuple(pool[i] for i in indices)
Random selection from itertools.combinations(iterable, r). Use this if not sampling all possible quartets.
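A quick, self-contained illustration of the same sampling idea (Python 3 range in place of xrange; the pool contents are arbitrary):

import random

pool = tuple("ABCDEFGH")
nquartets = 3
indices = random.sample(range(len(pool)), nquartets)  # nquartets distinct positions
print(tuple(pool[i] for i in indices))                 # e.g. ('C', 'A', 'F')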
382,309
def to_dict(self): d = {: self.name} if self.visible != True: d[RTS_EXT_NS_YAML + ] = self.visible if self.comment: d[RTS_EXT_NS_YAML + ] = self.comment props = [] for name in self.properties: p = {: name} if self.properties[name]: p[] = str(self.properties[name]) props.append(p) if props: d[RTS_EXT_NS_YAML + ] = props return d
Save this service port into a dictionary.
382,310
def set_typ(self, refobj, typ): try: enum = JB_ReftrackNode.types.index(typ) except ValueError: raise ValueError("The given type %s could not be found in available types: %s" % (typ, JB_ReftrackNode.types)) cmds.setAttr("%s.type" % refobj, enum)
Set the type of the given refobj :param refobj: the reftrack node to edit :type refobj: refobj :param typ: the entity type :type typ: str :returns: None :rtype: None :raises: ValueError
382,311
def luns(self): lun_list, smp_list = [], [] if self.ioclass_luns: lun_list = map(lambda l: VNXLun(lun_id=l.lun_id, name=l.name, cli=self._cli), self.ioclass_luns) if self.ioclass_snapshots: smp_list = map(lambda smp: VNXLun(name=smp.name, cli=self._cli), self.ioclass_snapshots) return list(lun_list) + list(smp_list)
Aggregator for ioclass_luns and ioclass_snapshots.
382,312
def process_alt(header, ref, alt_str): if "]" in alt_str or "[" in alt_str: return record.BreakEnd(*parse_breakend(alt_str)) elif alt_str[0] == "." and len(alt_str) > 0: return record.SingleBreakEnd(record.FORWARD, alt_str[1:]) elif alt_str[-1] == "." and len(alt_str) > 0: return record.SingleBreakEnd(record.REVERSE, alt_str[:-1]) elif alt_str[0] == "<" and alt_str[-1] == ">": inner = alt_str[1:-1] return record.SymbolicAllele(inner) else: return process_sub(ref, alt_str)
Process an alternative value using the Header in ``header``.
382,313
def select_tasks(self, nids=None, wslice=None, task_class=None): if nids is not None: assert wslice is None tasks = self.tasks_from_nids(nids) elif wslice is not None: tasks = [] for work in self[wslice]: tasks.extend([t for t in work]) else: tasks = list(self.iflat_tasks()) if task_class is not None: tasks = [t for t in tasks if t.isinstance(task_class)] return tasks
Return a list with a subset of tasks. Args: nids: List of node identifiers. wslice: Slice object used to select works. task_class: String or class used to select tasks. Ignored if None. .. note:: nids and wslice are mutually exclusive. If no argument is provided, the full list of tasks is returned.
382,314
def stream(self, start_date=values.unset, end_date=values.unset, identity=values.unset, tag=values.unset, limit=None, page_size=None): limits = self._version.read_limits(limit, page_size) page = self.page( start_date=start_date, end_date=end_date, identity=identity, tag=tag, page_size=limits['page_size'], ) return self._version.stream(page, limits['limit'], limits['page_size'])
Streams BindingInstance records from the API as a generator stream. This operation lazily loads records as efficiently as possible until the limit is reached. The results are returned as a generator, so this operation is memory efficient. :param date start_date: Only include usage that has occurred on or after this date :param date end_date: Only include usage that occurred on or before this date :param unicode identity: The `identity` value of the resources to read :param unicode tag: Only list Bindings that have all of the specified Tags :param int limit: Upper limit for the number of records to return. stream() guarantees to never return more than limit. Default is no limit :param int page_size: Number of records to fetch per request, when not set will use the default value of 50 records. If no page_size is defined but a limit is defined, stream() will attempt to read the limit with the most efficient page size, i.e. min(limit, 1000) :returns: Generator that will yield up to limit results :rtype: list[twilio.rest.notify.v1.service.binding.BindingInstance]
382,315
def collect_modules(self): coverage_dir = os.path.join(self.root, ) for name in fnmatch.filter(os.listdir(coverage_dir), "*.html"): if name == : continue with open(os.path.join(coverage_dir, name)) as cover_file: src_file, line_ranges = self.determine_coverage(cover_file) if not src_file: continue src_file = os.path.abspath(os.path.join(self.root, src_file)) if os.path.isfile(src_file): yield (src_file, line_ranges) else: raise SourceNotFound( "Source file %(file)s not found at %(area)s" % {: os.path.basename(src_file), : os.path.dirname(src_file)})
Generator to obtain lines of interest from coverage report files. Will verify that the source file is within the project tree, relative to the coverage directory.
382,316
def prepare_delete_monarchy(self, node, position=None, save=True): first = None for child in node.children.all(): if first is None: first = child first.move(node.parent, position, save) else: child.move(first, position, save)
Prepares a given :class:`CTENode` `node` for deletion, by executing the :const:`DELETE_METHOD_MONARCHY` semantics. Descendant nodes, if present, will be moved; in this case the optional `position` can be a ``callable`` which is invoked prior to each move operation (see :meth:`move` for details). By default, after each move operation, sub-tree nodes which were moved will be saved through a call to :meth:`Model.save` unless `save` is ``False``. This method delegates move operations to :meth:`move`. :param node: the :class:`CTENode` to prepare for deletion. :param position: optionally, a ``callable`` to invoke prior to each move operation. :param save: flag indicating whether to save after each move operation, ``True`` by default.
382,317
def lookup_job_tasks(self, statuses, user_ids=None, job_ids=None, job_names=None, task_ids=None, task_attempts=None, labels=None, create_time_min=None, create_time_max=None, max_tasks=0, page_size=0): ops_filter = self._build_query_filter( statuses, user_ids, job_ids, job_names, task_ids, task_attempts, labels, create_time_min, create_time_max) page_token = None tasks_yielded = 0 while True: max_to_fetch = None if max_tasks: max_to_fetch = max_tasks - tasks_yielded ops, page_token = self._operations_list(ops_filter, max_to_fetch, page_size, page_token) for op in ops: yield op tasks_yielded += 1 assert (max_tasks >= tasks_yielded or not max_tasks) if not page_token or 0 < max_tasks <= tasks_yielded: break
Yields operations based on the input criteria. If any of the filters are empty or {'*'}, then no filtering is performed on that field. Filtering by both a job id list and job name list is unsupported. Args: statuses: {'*'}, or a list of job status strings to return. Valid status strings are 'RUNNING', 'SUCCESS', 'FAILURE', or 'CANCELED'. user_ids: a list of ids for the user(s) who launched the job. job_ids: a list of job ids to return. job_names: a list of job names to return. task_ids: a list of specific tasks within the specified job(s) to return. task_attempts: a list of specific attempts within the specified task(s) to return. labels: a list of LabelParam with user-added labels. All labels must match the task being fetched. create_time_min: a timezone-aware datetime value for the earliest create time of a task, inclusive. create_time_max: a timezone-aware datetime value for the most recent create time of a task, inclusive. max_tasks: the maximum number of job tasks to return or 0 for no limit. page_size: the page size to use for each query to the pipelines API. Raises: ValueError: if both a job id list and a job name list are provided Yields: Genomics API Operations objects.
382,318
async def listCronJobs(self): crons = [] for iden, cron in self.cell.agenda.list(): useriden = cron[] if not (self.user.admin or useriden == self.user.iden): continue user = self.cell.auth.user(useriden) cron[] = if user is None else user.name crons.append((iden, cron)) return crons
Get information about all the cron jobs accessible to the current user
382,319
def set_headers(context): safe_add_http_request_context_to_behave_context(context) headers = dict() for row in context.table: headers[row["header_name"]] = row["header_value"] context.http_request_context.headers = headers
Parameters: +--------------+---------------+ | header_name | header_value | +==============+===============+ | header1 | value1 | +--------------+---------------+ | header2 | value2 | +--------------+---------------+
382,320
def plot_spectrogram(self, node_idx=None): from pygsp.plotting import _plot_spectrogram _plot_spectrogram(self, node_idx=node_idx)
r"""Docstring overloaded at import time.
382,321
def view(self, tempname=): import os if os.system() == 0: six.print_("Starting casaviewer in the background ...") self.unlock() if self.ispersistent(): os.system( + self.name() + ) elif len(tempname) > 0: six.print_(" making a persistent copy in " + tempname) six.print_(" which should be deleted after the viewer has ended") self.saveas(tempname) os.system( + tempname + ) else: six.print_("Cannot view because the image is in memory only.") six.print_("You can browse a persistent copy of the image like:") six.print_(" t.view()") else: six.print_("casaviewer cannot be found")
Display the image using casaviewer. If the image is not persistent, a copy will be made that the user has to delete once viewing has finished. The name of the copy can be given in argument `tempname`. Default is '/tmp/tempimage'.
382,322
async def startlisten(self, vhost = None): servers = self.getservers(vhost) for s in servers: await s.startlisten() return len(servers)
Start listen on current servers :param vhost: return only servers of vhost if specified. '' to return only default servers. None for all servers.
382,323
def srandmember(self, name, number=None): with self.pipe as pipe: f = Future() res = pipe.srandmember(self.redis_key(name), number=number) def cb(): if number is None: f.set(self.valueparse.decode(res.result)) else: f.set([self.valueparse.decode(v) for v in res.result]) pipe.on_execute(cb) return f
Return a random member of the set, or a list of ``number`` members if ``number`` is given. :param name: str the name of the redis key :param number: optional int, the number of members to return :return: Future()
382,324
def fill(self, doc_contents): for key, content in doc_contents.items(): doc_contents[key] = replace_chars_for_svg_code(content) return super(SVGDocument, self).fill(doc_contents=doc_contents)
Fill the content of the document with the information in doc_contents. This is different from the TextDocument fill function, because this will check for symbols in the values of `doc_content` and replace them with valid XML codes before filling the template. Parameters ---------- doc_contents: dict Set of values to fill the template document with. Returns ------- filled_doc: str The content of the document with the template information filled.
382,325
def market_open(self, session, mins) -> Session: if session not in self.exch: return SessNA start_time = self.exch[session][0] return Session(start_time, shift_time(start_time, int(mins)))
Time intervals for market open Args: session: [allday, day, am, pm, night] mins: minutes after open Returns: Session of start_time and end_time
382,326
def make_csv(api_key, api_secret, path_to_csv=None, result_limit=1000, **kwargs): path_to_csv = path_to_csv or os.path.join(os.getcwd(), ) timeout_in_seconds = 2 max_retries = 3 retries = 0 offset = 0 videos = list() jwplatform_client = jwplatform.Client(api_key, api_secret) logging.info("Querying for video list.") while True: try: response = jwplatform_client.videos.list(result_limit=result_limit, result_offset=offset, **kwargs) except jwplatform.errors.JWPlatformRateLimitExceededError: logging.error("Encountered rate limiting error. Backing off on request time.") if retries == max_retries: raise jwplatform.errors.JWPlatformRateLimitExceededError() timeout_in_seconds *= timeout_in_seconds retries += 1 time.sleep(timeout_in_seconds) continue except jwplatform.errors.JWPlatformError as e: logging.error("Encountered an error querying for videos list.\n{}".format(e)) raise e retries = 0 timeout_in_seconds = 2 next_videos = response.get(, []) last_query_total = response.get(, 0) videos.extend(next_videos) offset += len(next_videos) logging.info("Accumulated {} videos.".format(offset)) if offset >= last_query_total: if should_write_header: writer.writeheader() writer.writerows(videos)
Function which fetches a video library and writes each video object's metadata to CSV. Useful for CMS systems. :param api_key: <string> JWPlatform api-key :param api_secret: <string> JWPlatform shared-secret :param path_to_csv: <string> Local system path to desired CSV. Default will be within current working directory. :param result_limit: <int> Number of video results returned in response. (Suggested to leave at default of 1000) :param kwargs: Arguments conforming to standards found @ https://developer.jwplayer.com/jw-platform/reference/v1/methods/videos/list.html :return: <dict> Dict which represents the JSON response.
382,327
def nvmlDeviceGetInforomImageVersion(handle): c_version = create_string_buffer(NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE) fn = _nvmlGetFunctionPointer("nvmlDeviceGetInforomImageVersion") ret = fn(handle, c_version, c_uint(NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE)) _nvmlCheckReturn(ret) return bytes_to_str(c_version.value)
r""" /** * Retrieves the global infoROM image version * * For all products with an inforom. * * Image version just like VBIOS version uniquely describes the exact version of the infoROM flashed on the board * in contrast to infoROM object version which is only an indicator of supported features. * Version string will not exceed 16 characters in length (including the NULL terminator). * See \ref nvmlConstants::NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE. * * @param device The identifier of the target device * @param version Reference in which to return the infoROM image version * @param length The maximum allowed length of the string returned in \a version * * @return * - \ref NVML_SUCCESS if \a version has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error * * @see nvmlDeviceGetInforomVersion */ nvmlReturn_t DECLDIR nvmlDeviceGetInforomImageVersion
382,328
def favorites(self): return bind_api( api=self, path='/favorites/list.json', payload_type='status', payload_list=True, allowed_param=['screen_name', 'user_id', 'max_id', 'count', 'since_id', 'max_id'] )
:reference: https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/get-favorites-list :allowed_param:'screen_name', 'user_id', 'max_id', 'count', 'since_id', 'max_id'
382,329
def set_font(self, font): self.shellwidget._control.setFont(font) self.shellwidget.font = font
Set IPython widget's font
382,330
def post(self, url, data, charset=CHARSET_UTF8, headers={}): if not in headers: headers[] = if not in headers: headers[] = "application/x-www-form-urlencoded;charset=" + charset rsp = requests.post(url, data, headers=headers, timeout=(int(self.conf(HTTP_CONN_TIMEOUT, )), int(self.conf(HTTP_SO_TIMEOUT, )))) return json.loads(rsp.text)
Send an HTTP POST request with the given data and headers and return the parsed JSON response.
382,331
def storeByteArray(self, context, page, len, data, returnError): returnError.contents.value = self.IllegalStateError raise NotImplementedError("You must override this method.")
please override
382,332
def delete(self): result = yield gen.Task(RedisSession._redis_client.delete, self._key) LOGGER.debug(, self.id, result) self.clear() raise gen.Return(result)
Delete the item from storage :param method callback: The callback method to invoke when done
382,333
def numRef_xml(self, wksht_ref, number_format, values): pt_xml = self.pt_xml(values) return ( ).format(**{ : wksht_ref, : number_format, : pt_xml, })
Return the ``<c:numRef>`` element specified by the parameters as unicode text.
382,334
def check_effective(func): def wrapper(*args, **kwargs): if ( in kwargs or args and in args[1].get_identifier_namespace()): try: assessment_section_id = kwargs[] except KeyError: assessment_section_id = args[1] if (not args[0].has_assessment_section_begun(assessment_section_id) or args[0].is_assessment_section_over(assessment_section_id)): raise IllegalState() else: if in kwargs: assessment_taken_id = kwargs[] else: assessment_taken_id = args[1] assessment_taken = args[0]._get_assessment_taken(assessment_taken_id) if not assessment_taken.has_started() or assessment_taken.has_ended(): raise IllegalState() return func(*args, **kwargs) return wrapper
Decorator that tests whether an Assessment or Section is effective and raises an error if not. Side benefit: raises NotFound on AssessmentSections and AssessmentTakens.
382,335
def create_2d_gaussian(dim, sigma): if dim % 2 == 0: raise ValueError("Kernel dimension should be odd") kernel = np.zeros((dim, dim), dtype=np.float16) center = dim/2 variance = sigma ** 2 coeff = 1. / (2 * variance) for x in range(0, dim): for y in range(0, dim): x_val = abs(x - center) y_val = abs(y - center) numerator = x_val**2 + y_val**2 denom = 2*variance kernel[x,y] = coeff * np.exp(-1. * numerator/denom) return kernel/sum(sum(kernel))
This function creates a 2d gaussian kernel with the standard deviation denoted by sigma :param dim: integer denoting a side (1-d) of gaussian kernel :param sigma: floating point indicating the standard deviation :returns: a numpy 2d array
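A minimal, self-contained sketch of the same kernel construction, vectorized with numpy's meshgrid instead of the nested loops above; it builds a 5x5 kernel with sigma 1.0 and checks that the weights sum to 1:

import numpy as np

dim, sigma = 5, 1.0                                   # odd kernel size, standard deviation
center = dim // 2
xs, ys = np.meshgrid(np.arange(dim), np.arange(dim), indexing="ij")
kernel = np.exp(-((xs - center) ** 2 + (ys - center) ** 2) / (2 * sigma ** 2))
kernel /= kernel.sum()                                # normalize so the weights sum to 1
print(kernel.shape, round(float(kernel.sum()), 6))    # (5, 5) 1.0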
382,336
def _regex_strings(self): domain = 0 if domain not in self.domains: self.register_domain(domain=domain) return self.domains[domain]._regex_strings
A property to link into IntentEngine's _regex_strings. Warning: this is only for backwards compatibility and should not be used if you intend on using domains. Returns: the domain's _regex_strings from its IntentEngine
382,337
def _any(objs, query): for obj in objs: if isinstance(obj, Document): if _any(obj.roots, query): return True else: if any(query(ref) for ref in obj.references()): return True else: return False
Whether any of a collection of objects satisfies a given query predicate Args: objs (seq[Model or Document]) : query (callable) Returns: True, if ``query(obj)`` is True for some object in ``objs``, else False
382,338
def unpack(tokens): logger.log_tag("unpack", tokens) if use_computation_graph: tokens = evaluate_tokens(tokens) if isinstance(tokens, ParseResults) and len(tokens) == 1: tokens = tokens[0] return tokens
Evaluate and unpack the given computation graph.
382,339
def atlas_find_missing_zonefile_availability( peer_table=None, con=None, path=None, missing_zonefile_info=None ): bit_offset = 0 bit_count = 10000 missing = [] ret = {} if missing_zonefile_info is None: while True: zfinfo = atlasdb_zonefile_find_missing( bit_offset, bit_count, con=con, path=path ) if len(zfinfo) == 0: break missing += zfinfo bit_offset += len(zfinfo) if len(missing) > 0: log.debug("Missing %s zonefiles" % len(missing)) else: missing = missing_zonefile_info if len(missing) == 0: return ret with AtlasPeerTableLocked(peer_table) as ptbl: for zfinfo in missing: popularity = 0 byte_index = (zfinfo[] - 1) / 8 bit_index = 7 - ((zfinfo[] - 1) % 8) peers = [] if not ret.has_key(zfinfo[]): ret[zfinfo[]] = { : [], : zfinfo[], : [], : [], : 0, : [], : False } for peer_hostport in ptbl.keys(): peer_inv = atlas_peer_get_zonefile_inventory( peer_hostport, peer_table=ptbl ) if len(peer_inv) <= byte_index: continue if (ord(peer_inv[byte_index]) & (1 << bit_index)) == 0: return ret
Find the set of missing zonefiles, as well as their popularity amongst our neighbors. Only consider zonefiles that are known by at least one peer; otherwise they're missing from our clique (and we'll re-sync our neighbors' inventories every so often to make sure we detect when zonefiles become available). Return a dict, structured as: { 'zonefile hash': { 'names': [names], 'txid': first txid that set it, 'indexes': [...], 'popularity': ..., 'peers': [...], 'tried_storage': True|False } }
382,340
def protect_shorthand(text, split_locations): word_matches = list(re.finditer(word_with_period, text)) total_words = len(word_matches) for i, match in enumerate(word_matches): match_start = match.start() match_end = match.end() for char_pos in range(match_start, match_end): if split_locations[char_pos] == SHOULD_SPLIT and match_end - char_pos > 1: match_start = char_pos word = text[match_start:match_end] if not word.endswith(): if (not word[0].isdigit() and split_locations[match_start] == UNDECIDED): split_locations[match_start] = SHOULD_SPLIT continue period_pos = match_end - 1 word_is_in_abbr = word[:-1].lower() in ABBR is_abbr_like = ( word_is_in_abbr or one_letter_long_or_repeating.match(word[:-1]) is not None ) is_digit = False if is_abbr_like else word[:-1].isdigit() is_last_word = i == (total_words - 1) is_ending = is_last_word and (match_end == len(text) or text[match_end:].isspace()) is_not_ending = not is_ending abbreviation_and_not_end = ( len(word) > 1 and is_abbr_like and is_not_ending ) if abbreviation_and_not_end and ( (not is_last_word and word_matches[i+1].group(0)[0].islower()) or (not is_last_word and word_matches[i+1].group(0) in PUNCT_SYMBOLS) or word[0].isupper() or word_is_in_abbr or len(word) == 2): if split_locations[period_pos] == SHOULD_SPLIT and period_pos + 1 < len(split_locations): split_locations[period_pos + 1] = SHOULD_SPLIT split_locations[period_pos] = SHOULD_NOT_SPLIT elif (is_digit and len(word[:-1]) <= 2 and not is_last_word and word_matches[i+1].group(0).lower() in MONTHS): if split_locations[period_pos] == SHOULD_SPLIT and period_pos + 1 < len(split_locations): split_locations[period_pos + 1] = SHOULD_SPLIT split_locations[period_pos] = SHOULD_NOT_SPLIT elif split_locations[period_pos] == UNDECIDED: split_locations[period_pos] = SHOULD_SPLIT
Annotate locations in a string that contain periods as being true periods or periods that are a part of shorthand (and thus should not be treated as punctuation marks). Arguments: ---------- text : str split_locations : list<int>, same length as text.
382,341
def put(self, request): edx_video_id = request.data['edx_video_id'] profile = request.data['profile'] encode_data = request.data['encode_data'] video = Video.objects.get(edx_video_id=edx_video_id) profile = Profile.objects.get(profile_name=profile) EncodedVideo.objects.filter(video=video, profile=profile).delete() EncodedVideo.objects.create(video=video, profile=profile, **encode_data) return Response(status=status.HTTP_200_OK)
Update a single profile for a given video. Example request data: { 'edx_video_id': '1234' 'profile': 'hls', 'encode_data': { 'url': 'foo.com/qwe.m3u8' 'file_size': 34 'bitrate': 12 } }
382,342
def layer_from_combo(combo): index = combo.currentIndex() if index < 0: return None layer_id = combo.itemData(index, Qt.UserRole) layer = QgsProject.instance().mapLayer(layer_id) return layer
Get the QgsMapLayer currently selected in a combo. Obtain the QgsMapLayer id from the user role of the combo and return it as a QgsMapLayer. :returns: The currently selected map layer in the combo. :rtype: QgsMapLayer
382,343
def api_request(self, method, path): if not hasattr(self, ): ValueError() if method.lower() == : _request = requests.get elif method.lower() == : _request = requests.post domain = self.api_settings[] uri = .format(domain, self.api_settings[], path) if self.api_settings.get() and self.api_settings.get(): auth = (self.api_settings[], self.api_settings[]) else: auth = () req = _request(uri, auth=auth) return req
Query Sensu api for information.
382,344
def _unpack_object_array(inp, source, prescatter): raise NotImplementedError("Currently not used with record/struct/object improvements") base_rec, attr = source.rsplit(".", 1) new_name = "%s_%s_unpack" % (inp["name"], base_rec.replace(".", "_")) prescatter[base_rec].append((new_name, attr, _to_variable_type(inp["type"]["items"]))) return new_name, prescatter
Unpack Array[Object] with a scatter for referencing in input calls. There is no shorthand syntax for referencing all items in an array, so we explicitly unpack them with a scatter.
382,345
def unpack_frame(message): body = [] returned = dict(cmd=, headers={}, body=) breakdown = message.split() returned[] = breakdown[0] breakdown = breakdown[1:] def headD(field): index = field.find() if index: header = field[:index].strip() data = field[index+1:].strip() returned[][header.strip()] = data.strip() def bodyD(field): field = field.strip() if field: body.append(field) handler = headD for field in breakdown: if field.strip() == : handler = bodyD continue handler(field) body = "".join(body) returned[] = body.replace(, ) return returned
Called to unpack a STOMP message into a dictionary. returned = { # STOMP Command: 'cmd' : '...', # Headers e.g. 'headers' : { 'destination' : 'xyz', 'message-id' : 'some event', : etc, } # Body: 'body' : '...1234...\x00', }
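A hedged, standalone sketch of the frame-unpacking idea described above (illustrative names, not the library's exact implementation): split the frame on the blank line, take the first line as the command, parse "key:value" headers, and strip the trailing NUL from the body.

def unpack_stomp_frame(message):
    head, _, body = message.partition("\n\n")          # headers block vs. body
    lines = head.split("\n")
    headers = {}
    for line in lines[1:]:
        key, sep, value = line.partition(":")
        if sep:
            headers[key.strip()] = value.strip()
    return {"cmd": lines[0].strip(), "headers": headers, "body": body.replace("\x00", "")}

frame = "MESSAGE\ndestination:/queue/a\nmessage-id:007\n\nhello queue a\x00"
print(unpack_stomp_frame(frame))
# {'cmd': 'MESSAGE', 'headers': {'destination': '/queue/a', 'message-id': '007'}, 'body': 'hello queue a'}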
382,346
def handle(self, url, method): if not self.serve: return HTTPError(503, "Server stopped") handler, args = self.match_url(url, method) if not handler: return HTTPError(404, "Not found:" + url) try: return handler(**args) except HTTPResponse, e: return e except Exception, e: if isinstance(e, (KeyboardInterrupt, SystemExit, MemoryError))\ or not self.catchall: raise return HTTPError(500, , e, format_exc(10))
Execute the handler bound to the specified url and method and return its output. If catchall is true, exceptions are caught and returned as HTTPError(500) objects.
382,347
def get_raw_data_from_buffer(self, filter_func=None, converter_func=None): if self._is_running: raise RuntimeError() if not self.fill_buffer: logging.warning() return [convert_data_array(data_array_from_data_iterable(data_iterable), filter_func=filter_func, converter_func=converter_func) for data_iterable in self._data_buffer]
Reads local data buffer and returns raw data array. Returns ------- data : np.array An array containing data words from the local data buffer.
382,348
def send(self, tid, company_code, session, **kwargs): request = TOPRequest() request[] = tid request[] = company_code for k, v in kwargs.iteritems(): if k not in (, , , ) and v==None: continue request[k] = v self.create(self.execute(request, session), fields=[, ], models={:Shipping}) return self.shipping
taobao.logistics.online.send Online order shipment processing (supports cash on delivery) - Calling this interface lets the user ship an online order (supports cash on delivery) - When this interface is used to ship an order placed online, there are two cases: - If no waybill number is supplied: the transaction status does not change; taobao.logistics.online.confirm must be called to confirm shipment before the transaction status changes to "seller has shipped". - If a waybill number is supplied when shipping: the transaction status changes directly to "seller has shipped".
382,349
def say(*words): * cmd = .format(.join(words)) call = __salt__[]( cmd, output_loglevel=, python_shell=False ) _check_cmd(call) return True
Say some words. words The words to execute the say command with. CLI Example: .. code-block:: bash salt '*' desktop.say <word0> <word1> ... <wordN>
382,350
def maximum_impact_estimation(membership_matrix, max_iters=1000): pr_0 = np.sum(membership_matrix, axis=0) / np.sum(membership_matrix) pr_1 = _update_probabilities(pr_0, membership_matrix) epsilon = np.linalg.norm(pr_1 - pr_0)/100. pr_old = pr_1 check_for_convergence = epsilon count = 0 while epsilon > NEAR_ZERO and check_for_convergence >= epsilon: count += 1 if count > max_iters: print("Reached the maximum number of iterations {0}".format( max_iters)) break pr_new = _update_probabilities(pr_old, membership_matrix) check_for_convergence = np.linalg.norm(pr_new - pr_old) pr_old = pr_new pr_final = pr_old corrected_pathway_definitions = {} n, k = membership_matrix.shape for gene_index in range(n): gene_membership = membership_matrix[gene_index] denominator = np.dot(gene_membership, pr_final) if denominator < NEAR_ZERO: denominator = NEAR_ZERO conditional_pathway_pr = (np.multiply(gene_membership, pr_final) / denominator) all_pathways_at_max = np.where( conditional_pathway_pr == conditional_pathway_pr.max())[0] gene_in_pathways = np.where(gene_membership == 1)[0] all_pathways_at_max = np.intersect1d( all_pathways_at_max, gene_in_pathways) pathway_index = np.random.choice(all_pathways_at_max) if pathway_index not in corrected_pathway_definitions: corrected_pathway_definitions[pathway_index] = set() corrected_pathway_definitions[pathway_index].add(gene_index) return corrected_pathway_definitions
An expectation maximization technique that produces pathway definitions devoid of crosstalk. That is, each gene is mapped to the pathway in which it has the greatest predicted impact; this removes any overlap between pathway definitions. Parameters ----------- membership_matrix : numpy.array(float), shape = [n, k] The observed gene-to-pathway membership matrix, where n is the number of genes and k is the number of pathways we are interested in. max_iters : int (default=1000) The maximum number of expectation-maximization steps to take. Returns ----------- dict(int -> set(int)), a dictionary mapping a pathway to a set of genes. These are the pathway definitions after the maximum impact estimation procedure has been applied to remove crosstalk. - The keys are ints corresponding to the pathway column indices in the membership matrix. - The values are sets of ints corresponding to gene row indices in the membership matrix.
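A toy numpy sketch of the assignment step the docstring describes (not the library's implementation): once pathway priors are estimated, each gene is mapped to the member pathway with the highest prior-weighted impact.

import numpy as np

membership = np.array([[1, 1, 0],
                       [1, 0, 1],
                       [0, 1, 1]], dtype=float)       # 3 genes x 3 pathways
prior = membership.sum(axis=0) / membership.sum()     # initial pathway probabilities
definitions = {}
for gene, row in enumerate(membership):
    scores = row * prior                              # impact restricted to member pathways
    best = int(np.argmax(scores))
    definitions.setdefault(best, set()).add(gene)
print(definitions)                                    # each gene now belongs to exactly one pathway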
382,351
def handle_page_location_changed(self, timeout=None): s latest location. t track redirect! No idea what to do!") last_message = messages[-1] self.log.info("Probably a redirect! New content url: ", last_message[][]) resp = self.transport.recv_filtered(filter_funcs.network_response_recieved_for_url(last_message[][], last_message[][]), tab_key=self.tab_id) resp = resp[] ctype = resp_response = resp[] if in resp_response: ctype = resp_response[] if in resp_response and in resp_response[]: ctype = resp_response[][].split(";")[0] return self.get_unpacked_response_body(last_message[][], mimetype=ctype)
If the chrome tab has internally redirected (generally because jerberscript), this will walk the page navigation responses and attempt to fetch the response body for the tab's latest location.
382,352
def values(prev, *keys, **kw): d = next(prev) if isinstance(d, dict): yield [d[k] for k in keys if k in d] for d in prev: yield [d[k] for k in keys if k in d] else: yield [d[i] for i in keys if 0 <= i < len(d)] for d in prev: yield [d[i] for i in keys if 0 <= i < len(d)]
values pipe extracts values from the previous pipe. If the previous pipe sends a dictionary to the values pipe, keys should contain the dictionary keys you want to get. If the previous pipe sends a list or tuple, keys should contain the indices of the items you want. :param prev: The previous iterator of pipe. :type prev: Pipe :returns: generator
382,353
def render_request(self, sort=True): if not sort: return ("; ".join( cookie.render_request() for cookie in self.values())) return ("; ".join(sorted( cookie.render_request() for cookie in self.values())))
Render the dict's Cookie objects into a string formatted for HTTP request headers (simple 'Cookie: ' style).
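For illustration, the sorted request-header form the method produces looks like this (the cookie values are made up):

cookies = {"theme": "dark", "session": "abc123"}
print("Cookie: " + "; ".join(sorted("{}={}".format(k, v) for k, v in cookies.items())))
# Cookie: session=abc123; theme=dark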
382,354
def retrieve(pdb_id, cache_dir = None): pdb_contents = None xml_contents = None pdb_id = pdb_id.upper() if cache_dir: filename = os.path.join(cache_dir, "%s.pdb" % pdb_id) if os.path.exists(filename): pdb_contents = read_file(filename) filename = os.path.join(cache_dir, "%s.xml" % pdb_id) if os.path.exists(filename): xml_contents = read_file(filename) if not pdb_contents: pdb_contents = rcsb.retrieve_pdb(pdb_id) if cache_dir: write_file(os.path.join(cache_dir, "%s.pdb" % pdb_id), pdb_contents) if not xml_contents: xml_contents = rcsb.retrieve_xml(pdb_id) if cache_dir: write_file(os.path.join(cache_dir, "%s.xml" % pdb_id), xml_contents) return PDBML_slow(xml_contents, pdb_contents)
Creates a PDBML object by using a cached copy of the files if they exist or by retrieving the files from the RCSB.
382,355
def delete_tag(self, tag_name, **kwargs): resp = self._delete(self._u(self._TAG_ENDPOINT_SUFFIX, tag_name), **kwargs) resp.raise_for_status() return resp
delete a tag by name Args: tag_name (string): name of tag to delete
382,356
def usable_id(cls, id, datacenter=None): try: qry_id = int(id) except Exception: qry_id = cls.from_sysdisk(id) or cls.from_label(id, datacenter) if not qry_id: msg = % id cls.error(msg) return qry_id
Retrieve id from input which can be label or id.
382,357
def _random_ipv4_address_from_subnet(self, subnet, network=False): address = str( subnet[self.generator.random.randint( 0, subnet.num_addresses - 1, )], ) if network: address += '/' + str(self.generator.random.randint( subnet.prefixlen, subnet.max_prefixlen, )) address = str(ip_network(address, strict=False)) return address
Produces a random IPv4 address or network with a valid CIDR from within a given subnet. :param subnet: IPv4Network to choose from within :param network: Return a network address, and not an IP address
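A standalone sketch of the same behavior using only the standard-library ipaddress module (the subnet is an arbitrary example):

import random
from ipaddress import ip_network

subnet = ip_network("10.0.0.0/24")
addr = str(subnet[random.randint(0, subnet.num_addresses - 1)])
prefix = random.randint(subnet.prefixlen, subnet.max_prefixlen)
net = str(ip_network(addr + "/" + str(prefix), strict=False))   # network form with a valid CIDR
print(addr, net)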
382,358
def read_table(self, table_type): if table_type == : entry_class = MPQHashTableEntry elif table_type == : entry_class = MPQBlockTableEntry else: raise ValueError("Invalid table type.") table_offset = self.header[ % table_type] table_entries = self.header[ % table_type] key = self._hash( % table_type, ) self.file.seek(table_offset + self.header[]) data = self.file.read(table_entries * 16) data = self._decrypt(data, key) def unpack_entry(position): entry_data = data[position*16:position*16+16] return entry_class._make( struct.unpack(entry_class.struct_format, entry_data)) return [unpack_entry(i) for i in range(table_entries)]
Read either the hash or block table of a MPQ archive.
382,359
def mark_sacrificed(self,request,queryset): rows_updated = queryset.update(Alive=False, Death=datetime.date.today(), Cause_of_Death=) if rows_updated == 1: message_bit = "1 animal was" else: message_bit = "%s animals were" % rows_updated self.message_user(request, "%s successfully marked as sacrificed." % message_bit)
An admin action for marking several animals as sacrificed. This action sets the selected animals as Alive=False, Death=today and Cause_of_Death as sacrificed. To use other parameters, mice must be individually marked as sacrificed. This admin action also reports the number of mice sacrificed.
382,360
def _get_path_for_type(type_): if type_.lower() in CORE_TYPES: return Path( % type_.lower()) elif in type_: namespace, name = type_.split() return Path(, namespace, _get_file_name(name)) else: return Path(, _get_file_name(type_))
Similar to `_get_path_for` but only for type names.
382,361
def begin(self): if self.in_transaction: if self._auto_transaction: self._auto_transaction = False return self.commit() self.in_transaction = True for collection, store in self.stores.items(): store.begin() indexes = self.indexes[collection] for index in indexes.values(): index.begin()
Start a new transaction.
382,362
def create(graph, verbose=True): from turicreate._cython.cy_server import QuietProgress if not isinstance(graph, _SGraph): raise TypeError() with QuietProgress(verbose): params = _tc.extensions._toolkits.graph.degree_count.create( {: graph.__proxy__}) return DegreeCountingModel(params[])
Compute the in degree, out degree and total degree of each vertex. Parameters ---------- graph : SGraph The graph on which to compute degree counts. verbose : bool, optional If True, print progress updates. Returns ------- out : DegreeCountingModel Examples -------- If given an :class:`~turicreate.SGraph` ``g``, we can create a :class:`~turicreate.degree_counting.DegreeCountingModel` as follows: >>> g = turicreate.load_sgraph('http://snap.stanford.edu/data/web-Google.txt.gz', ... format='snap') >>> m = turicreate.degree_counting.create(g) >>> g2 = m['graph'] >>> g2 SGraph({'num_edges': 5105039, 'num_vertices': 875713}) Vertex Fields:['__id', 'in_degree', 'out_degree', 'total_degree'] Edge Fields:['__src_id', '__dst_id'] >>> g2.vertices.head(5) Columns: __id int in_degree int out_degree int total_degree int <BLANKLINE> Rows: 5 <BLANKLINE> Data: +------+-----------+------------+--------------+ | __id | in_degree | out_degree | total_degree | +------+-----------+------------+--------------+ | 5 | 15 | 7 | 22 | | 7 | 3 | 16 | 19 | | 8 | 1 | 2 | 3 | | 10 | 13 | 11 | 24 | | 27 | 19 | 16 | 35 | +------+-----------+------------+--------------+ See Also -------- DegreeCountingModel
382,363
def make_pdb(self): pdb_str = write_pdb( [self], if not self.ampal_parent else self.ampal_parent.id) return pdb_str
Generates a PDB string for the `Monomer`.
382,364
def checkGradient(self,h=1e-6,verbose=True): grad_an = self.LMLgrad() grad_num = {} params0 = self.params.copy() for key in list(self.params.keys()): paramsL = params0.copy() paramsR = params0.copy() grad_num[key] = SP.zeros_like(self.params[key]) e = SP.zeros(self.params[key].shape[0]) for i in range(self.params[key].shape[0]): e[i] = 1 paramsL[key]=params0[key]-h*e paramsR[key]=params0[key]+h*e lml_L = self.LML(paramsL) lml_R = self.LML(paramsR) grad_num[key][i] = (lml_R-lml_L)/(2*h) e[i] = 0 if verbose: print((%key)) print((abs(grad_an[key]-grad_num[key]))) print() self.setParams(params0)
utility function to check the gradient of the gp
382,365
def p_function_call_parameter(p): if len(p) == 2: p[0] = ast.Parameter(p[1], False, lineno=p.lineno(1)) else: p[0] = ast.Parameter(p[2], True, lineno=p.lineno(1))
function_call_parameter : expr | AND variable
382,366
def parent(idx, dim, axis=None): idxm = multi_index(idx, dim) if axis is None: axis = dim - numpy.argmin(1*(numpy.array(idxm)[::-1] == 0))-1 if not idx: return idx, axis if idxm[axis] == 0: idxi = parent(parent(idx, dim)[0], dim)[0] while child(idxi+1, dim, axis) < idx: idxi += 1 return idxi, axis out = numpy.array(idxm) - 1*(numpy.eye(dim)[axis]) return single_index(out), axis
Parent node according to Bertran's notation. Parameters ---------- idx : int Index of the child node. dim : int Dimensionality of the problem. axis : int Assume axis direction. Returns ------- out : int Index of parent node with `j<=i`, and `j==i` iff `i==0`. axis : int Dimension direction the parent was found.
382,367
def createListRecursively(self,args): resultList = [] dirDict = self.getDirectoryDictionary(args) for key in dirDict: for path,dirs,files in os.walk(key): for d in dirs: resultList.append(os.path.join(path,d)) for f in files: pattern = dirDict[key].split() if in pattern or os.path.splitext(f)[1] in pattern: resultList.append(os.path.join(path,f)) return list(set(resultList))
This is an internal method to create the list of input files (or directories) recursively, starting at the provided directory or directories.
382,368
def check_R_package(self, package): test_package = not bool(launch_R_script("{}/R_templates/test_import.R".format(os.path.dirname(os.path.realpath(__file__))), {"{package}": package}, verbose=True)) return test_package
Execute a subprocess to check the package's availability. Args: package (str): Name of the package to be tested. Returns: bool: `True` if the package is available, `False` otherwise
382,369
def extract_angular(fileobj, keywords, comment_tags, options): parser = AngularGettextHTMLParser() for line in fileobj: parser.feed(encodeutils.safe_decode(line)) for string in parser.strings: yield(string)
Extract messages from angular template (HTML) files. It extracts messages from angular template (HTML) files that use the angular-gettext translate directive as per https://angular-gettext.rocketeer.be/ :param fileobj: the file-like object the messages should be extracted from :param keywords: This is a standard parameter so it is accepted but ignored. :param comment_tags: This is a standard parameter so it is accepted but ignored. :param options: Another standard parameter that is accepted but ignored. :return: an iterator over ``(lineno, funcname, message, comments)`` tuples :rtype: ``iterator``
382,370
def right_press(self, event): self.set_current() current = self.canvas.find_withtag("current") if current and current[0] in self.canvas.find_withtag("item"): self.current = current[0] self.item_menu.tk_popup(event.x_root, event.y_root) else: self.frame_menu.tk_popup(event.x_root, event.y_root)
Callback for the right mouse button event to pop up the correct menu. :param event: Tkinter event
382,371
def write(p_file, p_string): if not config().colors(p_file.isatty()): p_string = escape_ansi(p_string) if p_string: p_file.write(p_string + "\n")
Write p_string to file p_file, trailed by a newline character. ANSI codes are removed when the file is not a TTY (and colors are automatically determined).
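A self-contained sketch of the same idea under simplified assumptions: a regex for SGR escape codes stands in for the project's config() and escape_ansi helpers.

import re
import sys

ANSI = re.compile(r"\x1b\[[0-9;]*m")                  # SGR color/style escape sequences

def write_line(stream, text):
    if not stream.isatty():                           # plain files and pipes get color-free text
        text = ANSI.sub("", text)
    if text:
        stream.write(text + "\n")

write_line(sys.stdout, "\x1b[32mdone\x1b[0m")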
382,372
def collect(self): if not os.access(self.PROC, os.R_OK): return False file = open(self.PROC, ) for line in file: if not line.startswith(): continue data = line.split() metric_name = metric_value = int(data[1]) metric_value = int(self.derivative( metric_name, long(metric_value), counter)) self.publish(metric_name, metric_value) for i in range(2, len(data)): metric_name = str(i - 2) metric_value = int(data[i]) metric_value = int(self.derivative( metric_name, long(metric_value), counter)) self.publish(metric_name, metric_value) file.close()
Collect interrupt data
382,373
def question_detail(request, topic_slug, slug): extra_context = { : Topic.objects.published().get(slug=topic_slug), } return object_detail(request, queryset=Question.objects.published(), extra_context=extra_context, template_object_name=, slug=slug)
A detail view of a Question. Templates: :template:`faq/question_detail.html` Context: question A :model:`faq.Question`. topic The :model:`faq.Topic` object related to ``question``.
382,374
def grep(query, directory): dir_contents = os.listdir(directory) results = [] for item in dir_contents: path = os.path.join(directory, item) if os.path.isdir(path): build_and_start(query, path) else: if item.endswith('.py'): results.extend(grep_file(query, path)) return results
This function will search through the directory structure of the application and for each directory it finds it launches an Async task to run itself. For each .py file it finds, it actually greps the file and then returns the found output.
382,375
def plot_structures(self, structures, fontsize=6, **kwargs): import matplotlib.pyplot as plt nrows = len(structures) fig, axes = plt.subplots(nrows=nrows, ncols=1, sharex=True, squeeze=False) for i, (ax, structure) in enumerate(zip(axes.ravel(), structures)): self.get_plot(structure, fontsize=fontsize, ax=ax, with_labels=i == nrows - 1, **kwargs) spg_symbol, spg_number = structure.get_space_group_info() ax.set_title("{} {} ({}) ".format(structure.formula, spg_symbol, spg_number)) return fig
Plot diffraction patterns for multiple structures on the same figure. Args: structures (Structure): List of structures two_theta_range ([float of length 2]): Tuple for range of two_thetas to calculate in degrees. Defaults to (0, 90). Set to None if you want all diffracted beams within the limiting sphere of radius 2 / wavelength. annotate_peaks (bool): Whether to annotate the peaks with plane information. fontsize: (int) fontsize for peak labels.
382,376
def view_task_durations(token, dstore): task = token.split()[1] array = dstore[ + task][] return .join(map(str, array))
Display the raw task durations. Here is an example of usage:: $ oq show task_durations:classical
382,377
def range_interleave(ranges, sizes={}, empty=False): from jcvi.utils.iter import pairwise ranges = range_merge(ranges) interleaved_ranges = [] for ch, cranges in groupby(ranges, key=lambda x: x[0]): cranges = list(cranges) size = sizes.get(ch, None) if size: ch, astart, aend = cranges[0] if astart > 1: interleaved_ranges.append((ch, 1, astart - 1)) elif empty: interleaved_ranges.append(None) for a, b in pairwise(cranges): ch, astart, aend = a ch, bstart, bend = b istart, iend = aend + 1, bstart - 1 if istart <= iend: interleaved_ranges.append((ch, istart, iend)) elif empty: interleaved_ranges.append(None) if size: ch, astart, aend = cranges[-1] if aend < size: interleaved_ranges.append((ch, aend + 1, size)) elif empty: interleaved_ranges.append(None) return interleaved_ranges
Returns the ranges in between the given ranges. >>> ranges = [("1", 30, 40), ("1", 45, 50), ("1", 10, 30)] >>> range_interleave(ranges) [('1', 41, 44)] >>> ranges = [("1", 30, 40), ("1", 42, 50)] >>> range_interleave(ranges) [('1', 41, 41)] >>> range_interleave(ranges, sizes={"1": 70}) [('1', 1, 29), ('1', 41, 41), ('1', 51, 70)]
382,378
def body_block_paragraph_render(p_tag, html_flag=True, base_url=None): convert = lambda xml_string: xml_to_html(html_flag, xml_string, base_url) block_content_list = [] tag_content_content = [] nodenames = body_block_nodenames() paragraph_content = u for child_tag in p_tag: if child_tag.name is None or body_block_content(child_tag) == {}: paragraph_content = paragraph_content + unicode_value(child_tag) else: if paragraph_content.strip() != : tag_content_content.append(body_block_paragraph_content(convert(paragraph_content))) paragraph_content = u if child_tag.name is not None and body_block_content(child_tag) != {}: for block_content in body_block_content_render(child_tag, base_url=base_url): if block_content != {}: tag_content_content.append(block_content) if paragraph_content.strip() != : tag_content_content.append(body_block_paragraph_content(convert(paragraph_content))) if len(tag_content_content) > 0: for block_content in tag_content_content: block_content_list.append(block_content) return block_content_list
Paragraphs may wrap some other body block content; this is separated out so it can be called from more than one place.
382,379
def _inherit_parent_kwargs(self, kwargs): if not self.parent or not self._is_dynamic: return kwargs if not in kwargs: return kwargs
Extract any necessary attributes from parent serializer to propagate down to child serializer.
382,380
def googlenet(pretrained=False, **kwargs): if pretrained: if 'transform_input' not in kwargs: kwargs['transform_input'] = True if 'aux_logits' not in kwargs: kwargs['aux_logits'] = False if kwargs['aux_logits']: warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, so make sure to train them') original_aux_logits = kwargs['aux_logits'] kwargs['aux_logits'] = True kwargs['init_weights'] = False model = GoogLeNet(**kwargs) model.load_state_dict(model_zoo.load_url(model_urls['googlenet'])) if not original_aux_logits: model.aux_logits = False del model.aux1, model.aux2 return model return GoogLeNet(**kwargs)
r"""GoogLeNet (Inception v1) model architecture from `"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet aux_logits (bool): If True, adds two auxiliary branches that can improve training. Default: *False* when pretrained is True otherwise *True* transform_input (bool): If True, preprocesses the input according to the method with which it was trained on ImageNet. Default: *False*
382,381
def _sort_text(definition): prefix = if definition.name.startswith() else return prefix.format(definition.name)
Ensure builtins appear at the bottom. Description is of format <type>: <module>.<item>
382,382
def check_html(html_file, begin): sig = False for html_line in open(html_file).readlines(): uuu = pack_str(html_line).find() if uuu > 0: f_tmpl = html_line.strip().split()[-2].strip() curpath, curfile = os.path.split(html_file) ff_tmpl = os.path.abspath(os.path.join(curpath, f_tmpl)) if os.path.isfile(ff_tmpl): pass else: print( *10 + + *10) print() print( * 4 + html_file) print() print( * 4 +ff_tmpl) print() sys.exit(1) sig = True if sig: pass else: continue vvv = pack_str(html_line).find() if vvv > 0: test_fig = False for the_line in open(ff_tmpl).readlines(): if the_line.find(pack_str(html_line)) > 0: test_fig = True fff = ff_tmpl[begin:] sss = html_file[begin:] tmplsig = [fff, sss] if tmplsig in RELS_UNIQ_ARR: pass else: RELS_UNIQ_ARR.append(tmplsig) DOT_OBJ.edge(fff, sss) if test_fig: pass else: pass
Checking the HTML
382,383
def get_transformation(self, struct1, struct2): if self._primitive_cell: raise ValueError("get_transformation cannot be used with the " "primitive cell option") struct1, struct2 = self._process_species((struct1, struct2)) s1, s2, fu, s1_supercell = self._preprocess(struct1, struct2, False) ratio = fu if s1_supercell else 1/fu if s1_supercell and fu > 1: raise ValueError("Struct1 must be the supercell, " "not the other way around") if len(s1) * ratio >= len(s2): match = self._strict_match(s1, s2, fu=fu, s1_supercell=False, use_rms=True, break_on_match=False) if match is None: return None mapping = [list(match[4]).index(i) if i in match[4] else None for i in range(len(s1))] return match[2], match[3], mapping else: match = self._strict_match(s2, s1, fu=fu, s1_supercell=True, use_rms=True, break_on_match=False) if match is None: return None not_included = list(range(len(s2) * fu)) for i in match[4]: not_included.remove(i) mapping = list(match[4]) + not_included return match[2], -match[3], mapping
Returns the supercell transformation, fractional translation vector, and a mapping to transform struct2 to be similar to struct1. Args: struct1 (Structure): Reference structure struct2 (Structure): Structure to transform. Returns: supercell (numpy.ndarray(3, 3)): supercell matrix vector (numpy.ndarray(3)): fractional translation vector mapping (list(int or None)): The first len(struct1) items of the mapping vector are the indices of struct1's corresponding sites in struct2 (or None if there is no corresponding site), and the other items are the remaining site indices of struct2.
382,384
def compile_file(self, infile, outfile, outdated=False, force=False): myfile = codecs.open(outfile, , ) if settings.DEBUG: myfile.write(sass.compile(filename=infile)) else: myfile.write(sass.compile(filename=infile, output_style=)) return myfile.close()
Process sass file.
382,385
def get_task_runs(self, json_file=None): if self.project is None: raise ProjectError loader = create_task_runs_loader(self.project.id, self.tasks, json_file, self.all) self.task_runs, self.task_runs_file = loader.load() self._check_project_has_taskruns() self.task_runs_df = dataframer.create_task_run_data_frames(self.tasks, self.task_runs)
Load all project Task Runs from Tasks.
382,386
def count(y_true, y_score=None, countna=False): if not countna: return (~np.isnan(to_float(y_true))).sum() else: return len(y_true)
Counts the number of examples. If countna is False then only count labeled examples, i.e. those with y_true not NaN
382,387
def _update_secrets(self): token = self._required_get_and_update('SREGISTRY_DROPBOX_TOKEN') self.dbx = Dropbox(token) try: self.account = self.dbx.users_get_current_account() except AuthError as err: bot.error() sys.exit(1)
update secrets will look for a dropbox token in the environment at SREGISTRY_DROPBOX_TOKEN and if found, create a client. If not, an error message is returned and the client exits.
382,388
def translate_to_international_phonetic_alphabet(self, hide_stress_mark=False): translations = self.stress.mark_ipa() if (not hide_stress_mark) and self.have_vowel else "" for phoneme in self._phoneme_list: translations += phoneme.ipa return translations
Translate to the International Phonetic Alphabet. The stress mark needs to be hidden when there is only one vowel. :param hide_stress_mark: :return:
382,389
def setXr(self, Xr): self.Xr = Xr self.gp_block.covar.G = Xr
set genotype data of the set component
382,390
def _get_free_gpu(max_gpu_utilization=40, min_free_memory=0.5, num_gpu=1): def get_gpu_info(): gpu_info = subprocess.check_output(["nvidia-smi", "--format=csv,noheader,nounits", "--query-gpu=index,memory.total,memory.free,memory.used,utilization.gpu"]).decode() gpu_info = gpu_info.split() gpu_info_array = [] for line in gpu_info: if len(line) > 0: gpu_id, total_memory, free_memory, used_memory, gpu_util = line.split() gpu_memory_util = float(used_memory) / float(total_memory) gpu_info_array.append((float(gpu_util), gpu_memory_util, gpu_id)) return(gpu_info_array) num_times_to_average = 5 current_array = [] for ind in range(num_times_to_average): current_array.append(get_gpu_info()) time.sleep(1) num_gpus = len(current_array[0]) avg_array = [(0, 0, str(x)) for x in range(num_gpus)] for ind in range(num_times_to_average): for gpu_ind in range(num_gpus): avg_array[gpu_ind] = (avg_array[gpu_ind][0] + current_array[ind][gpu_ind][0], avg_array[gpu_ind][1] + current_array[ind][gpu_ind][1], avg_array[gpu_ind][2]) for gpu_ind in range(num_gpus): avg_array[gpu_ind] = (float(avg_array[gpu_ind][0]) / num_times_to_average, float(avg_array[gpu_ind][1]) / num_times_to_average, avg_array[gpu_ind][2]) avg_array.sort() gpus_found = 0 gpus_to_use = "" free_memory = 1.0 for current_gpu in avg_array: if current_gpu[0] < max_gpu_utilization and (1 - current_gpu[1]) > min_free_memory: if gpus_found == 0: gpus_to_use = current_gpu[2] free_memory = 1 - current_gpu[1] else: gpus_to_use = gpus_to_use + "," + current_gpu[2] free_memory = min(free_memory, 1 - current_gpu[1]) gpus_found = gpus_found + 1 if gpus_found == num_gpu: break return gpus_to_use, free_memory
Get available GPUs according to utilization thresholds. Args: :max_gpu_utilization: percent utilization threshold to consider a GPU "free" :min_free_memory: percent free memory to consider a GPU "free" :num_gpu: number of requested GPUs Returns: A tuple of (available_gpus, minimum_free_memory), where available_gpus is a comma-delimited string of GPU ids, and minimum_free_memory is the lowest amount of free memory available on the available_gpus.
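A typical way to consume the result, reserving two mostly idle GPUs before creating a deep-learning session; this assumes nvidia-smi is on the PATH.
import os

gpus, free_mem = _get_free_gpu(max_gpu_utilization=40, min_free_memory=0.5, num_gpu=2)
if gpus:
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus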
382,391
def _validate_client(self, request, data):
    # The lookup key and OAuth error fields were reconstructed from the
    # standard OAuth2 error format.
    client = self.get_client(data.get('client_id'))
    if client is None:
        raise OAuthError({
            'error': 'unauthorized_client',
            'error_description': _("An unauthorized client tried to access"
                                   " your resources.")
        })
    form = self.get_request_form(client, data)
    if not form.is_valid():
        raise OAuthError(form.errors)
    return client, form.cleaned_data
:return: ``tuple`` - ``(client or False, data or error)``
382,392
def blog_likes(self, blogname, **kwargs):
    url = "/v2/blog/{}/likes".format(blogname)
    # The list of accepted query parameters matches the docstring below.
    return self.send_api_request("get", url, kwargs, ['limit', 'offset', 'before', 'after'], True)
Gets the current given user's likes :param limit: an int, the number of likes you want returned (DEPRECATED) :param offset: an int, the like you want to start at, for pagination. :param before: an int, the timestamp for likes you want before. :param after: an int, the timestamp for likes you want after. # Start at the 20th like and get 20 more likes. client.blog_likes('blogname', offset=20, limit=20) :returns: A dict created from the JSON response
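A hedged pytumblr-style call; the OAuth credential strings and blog name are placeholders.
import pytumblr

client = pytumblr.TumblrRestClient('consumer_key', 'consumer_secret',
                                   'oauth_token', 'oauth_secret')
likes = client.blog_likes('example.tumblr.com', limit=20, offset=20)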
382,393
def update_preference_communication_channel_id(self, notification, communication_channel_id, notification_preferences_frequency):
    path = {}
    data = {}
    params = {}
    path["communication_channel_id"] = communication_channel_id
    path["notification"] = notification
    data["notification_preferences[frequency]"] = notification_preferences_frequency
    self.logger.debug("PUT /api/v1/users/self/communication_channels/{communication_channel_id}/notification_preferences/{notification} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("PUT", "/api/v1/users/self/communication_channels/{communication_channel_id}/notification_preferences/{notification}".format(**path), data=data, params=params, no_data=True)
Update a preference. Change the preference for a single notification for a single communication channel
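A hedged example call against a client object exposing this method; 'new_announcement' and 'immediately' are standard Canvas notification and frequency values, but the client variable and channel id are placeholders.
client.update_preference_communication_channel_id(
    notification='new_announcement',
    communication_channel_id=12345,
    notification_preferences_frequency='immediately')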
382,394
def _FormatSizeInUnitsOf1024(self, size):
    # The format strings were reconstructed from the docstring's intent:
    # one decimal place plus the matching unit, or plain bytes as a fallback.
    magnitude_1024 = 0
    used_memory_1024 = float(size)
    while used_memory_1024 >= 1024:
        used_memory_1024 /= 1024
        magnitude_1024 += 1
    if 0 < magnitude_1024 <= 7:
        return '{0:.1f} {1:s}'.format(
            used_memory_1024, self._UNITS_1024[magnitude_1024])
    return '{0:d} B'.format(size)
Represents a number of bytes in units of 1024. Args: size (int): size in bytes. Returns: str: human readable string of the size.
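A stand-alone sketch of the same conversion, assuming binary-prefix unit names for the _UNITS_1024 table; format_size(1536) gives '1.5 KiB'.
UNITS_1024 = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB']

def format_size(size):
    magnitude, value = 0, float(size)
    while value >= 1024:
        value /= 1024
        magnitude += 1
    if 0 < magnitude <= 7:
        return '{0:.1f} {1:s}'.format(value, UNITS_1024[magnitude])
    return '{0:d} B'.format(size)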
382,395
def offset(requestContext, seriesList, factor):
    for series in seriesList:
        series.name = "offset(%s,%g)" % (series.name, float(factor))
        series.pathExpression = series.name
        for i, value in enumerate(series):
            if value is not None:
                series[i] = value + factor
    return seriesList
Takes one metric or a wildcard seriesList followed by a constant, and adds the constant to each datapoint. Example:: &target=offset(Server.instance01.threads.busy,10)
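A rough illustration of what offset() does to each datapoint (a Graphite TimeSeries behaves like a list of values with None for gaps); the numbers are made up.
values = [1.0, None, 3.0]
factor = 10
shifted = [v + factor if v is not None else None for v in values]  # [11.0, None, 13.0]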
382,396
def read_reporting_revisions_get(self, project=None, fields=None, types=None, continuation_token=None, start_date_time=None, include_identity_ref=None, include_deleted=None, include_tag_ref=None, include_latest_only=None, expand=None, include_discussion_changes_only=None, max_page_size=None):
    # Dictionary keys and serializer type names were reconstructed from the
    # Azure DevOps REST query parameters this method maps onto; the REST
    # location_id literal was lost and is left blank.
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    query_parameters = {}
    if fields is not None:
        fields = ",".join(fields)
        query_parameters['fields'] = self._serialize.query('fields', fields, 'str')
    if types is not None:
        types = ",".join(types)
        query_parameters['types'] = self._serialize.query('types', types, 'str')
    if continuation_token is not None:
        query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
    if start_date_time is not None:
        query_parameters['startDateTime'] = self._serialize.query('start_date_time', start_date_time, 'iso-8601')
    if include_identity_ref is not None:
        query_parameters['includeIdentityRef'] = self._serialize.query('include_identity_ref', include_identity_ref, 'bool')
    if include_deleted is not None:
        query_parameters['includeDeleted'] = self._serialize.query('include_deleted', include_deleted, 'bool')
    if include_tag_ref is not None:
        query_parameters['includeTagRef'] = self._serialize.query('include_tag_ref', include_tag_ref, 'bool')
    if include_latest_only is not None:
        query_parameters['includeLatestOnly'] = self._serialize.query('include_latest_only', include_latest_only, 'bool')
    if expand is not None:
        query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
    if include_discussion_changes_only is not None:
        query_parameters['includeDiscussionChangesOnly'] = self._serialize.query('include_discussion_changes_only', include_discussion_changes_only, 'bool')
    if max_page_size is not None:
        query_parameters['$maxPageSize'] = self._serialize.query('max_page_size', max_page_size, 'int')
    response = self._send(http_method='GET',
                          location_id=,
                          version='5.0',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('ReportingWorkItemRevisionsBatch', response)
ReadReportingRevisionsGet. Get a batch of work item revisions with the option of including deleted items :param str project: Project ID or project name :param [str] fields: A list of fields to return in work item revisions. Omit this parameter to get all reportable fields. :param [str] types: A list of types to filter the results to specific work item types. Omit this parameter to get work item revisions of all work item types. :param str continuation_token: Specifies the watermark to start the batch from. Omit this parameter to get the first batch of revisions. :param datetime start_date_time: Date/time to use as a starting point for revisions, all revisions will occur after this date/time. Cannot be used in conjunction with 'watermark' parameter. :param bool include_identity_ref: Return an identity reference instead of a string value for identity fields. :param bool include_deleted: Specify if the deleted item should be returned. :param bool include_tag_ref: Specify if the tag objects should be returned for System.Tags field. :param bool include_latest_only: Return only the latest revisions of work items, skipping all historical revisions :param str expand: Return all the fields in work item revisions, including long text fields which are not returned by default :param bool include_discussion_changes_only: Return only the those revisions of work items, where only history field was changed :param int max_page_size: The maximum number of results to return in this batch :rtype: :class:`<ReportingWorkItemRevisionsBatch> <azure.devops.v5_0.work_item_tracking.models.ReportingWorkItemRevisionsBatch>`
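A hedged end-to-end call through the azure-devops client factory; the organization URL, personal access token, and project name are placeholders.
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication

connection = Connection(base_url='https://dev.azure.com/myorg',
                        creds=BasicAuthentication('', 'personal_access_token'))
wit_client = connection.clients.get_work_item_tracking_client()
batch = wit_client.read_reporting_revisions_get(project='MyProject',
                                                include_latest_only=True,
                                                max_page_size=200)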
382,397
def set_options(self, options):
    for key, val in options.items():
        key = key.lstrip()
        if hasattr(self, key):
            setattr(self, key, val)
Sets the given options as instance attributes (only if they are known). :parameters: options : Dict All known instance attributes, plus any extra attributes a child class has defined before this call. :rtype: None
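A minimal illustration of the filtering behaviour (unknown keys are silently ignored); the Emitter class here is hypothetical.
class Emitter:
    debug = False

    def set_options(self, options):
        for key, val in options.items():
            key = key.lstrip()
            if hasattr(self, key):
                setattr(self, key, val)

e = Emitter()
e.set_options({'debug': True, 'unknown': 1})
# e.debug is now True; 'unknown' was not set as an attribute.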
382,398
def create_notification(self, notification_type, label=None, name=None, details=None):
    return self._notification_manager.create(notification_type, label=label, name=name, details=details)
Defines a notification for handling an alarm.
382,399
def num_equal(result, operator, comparator):
    # Operator strings follow the docstring: '-' counts results below the
    # comparator, '+' counts results above it, '*' counts exact matches.
    if operator == '-':
        return len([x for x in result if x < comparator])
    elif operator == '+':
        return len([x for x in result if x > comparator])
    elif operator == '*':
        return len([x for x in result if x == comparator])
    else:
        raise ValueError
Returns the number of elements in a list that pass a comparison :param result: The list of results of a dice roll :param operator: The comparison operator, as a string: either '+', '-', or '*' :param comparator: The value to compare against :return:
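A usage sketch that assumes the operator mapping reconstructed above ('+' greater-than, '-' less-than, '*' equal); the rolls are made up.
rolls = [2, 5, 6, 3, 4]
num_equal(rolls, '+', 4)  # 2 rolls are greater than 4
num_equal(rolls, '*', 4)  # 1 roll is exactly 4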