Dataset columns: Unnamed: 0 (int64, values 0 to 389k); code (string, lengths 26 to 79.6k); docstring (string, lengths 1 to 46.9k)
15,400
def open(self, options): if self.opened: return self.opened = True log.debug(, self.id, self.ns[1], self.location ) result = self.locate() if result is None: if self.location is None: log.debug(, self.ns[1]) else: result = self.download(options) log.debug(, result) return result
Open and import the referenced schema. @param options: An options dictionary. @type options: L{options.Options} @return: The referenced schema. @rtype: L{Schema}
15,401
def nodeInLanguageStem(_: Context, n: Node, s: ShExJ.LanguageStem) -> bool:
    return isinstance(s, ShExJ.Wildcard) or \
        (isinstance(n, Literal) and n.language is not None and str(n.language).startswith(str(s)))
http://shex.io/shex-semantics/#values **nodeIn**: asserts that an RDF node n is equal to an RDF term s or is in a set defined by a :py:class:`ShExJ.IriStem`, :py:class:`LiteralStem` or :py:class:`LanguageStem`. The expression `nodeInLanguageStem(n, s)` is satisfied iff: #) `s` is a :py:class:`ShExJ.WildCard` or #) `n` is a language-tagged string and fn:starts-with(`n.language`, `s`)
15,402
def handle_option_error(error): if in error: raise exc.UnknownOption(error) elif in error: raise exc.InvalidOption(error) elif in error: raise exc.AmbiguousOption(error) else: raise exc.OptionError(error)
Raises exception if error in option command found. Purpose: As of tmux 2.4, there are now 3 different types of option errors: - unknown option - invalid option - ambiguous option Before 2.4, unknown option was the only error raised. All errors raised will have the base error of :exc:`exc.OptionError`. So to catch any option error, use ``except exc.OptionError``. Parameters ---------- error : str Error response from subprocess call. Raises ------ :exc:`exc.OptionError`, :exc:`exc.UnknownOption`, :exc:`exc.InvalidOption`, :exc:`exc.AmbiguousOption`
15,403
def _factorize_array(values, na_sentinel=-1, size_hint=None, na_value=None):
    (hash_klass, _), values = _get_data_algo(values, _hashtables)
    table = hash_klass(size_hint or len(values))
    uniques, labels = table.factorize(values, na_sentinel=na_sentinel, na_value=na_value)
    labels = ensure_platform_int(labels)
    return labels, uniques
Factorize an array-like to labels and uniques. This doesn't do any coercion of types or unboxing before factorization. Parameters ---------- values : ndarray na_sentinel : int, default -1 size_hint : int, optional Passed through to the hashtable's 'get_labels' method na_value : object, optional A value in `values` to consider missing. Note: only use this parameter when you know that you don't have any values pandas would consider missing in the array (NaN for float data, iNaT for datetimes, etc.). Returns ------- labels, uniques : ndarray
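For orientation, here is a minimal sketch of what factorization yields, shown through the public pandas.factorize wrapper (which delegates to internal helpers like the one above); the sample values are illustrative only.

import numpy as np
import pandas as pd

values = np.array(["b", "a", "b", np.nan], dtype=object)
labels, uniques = pd.factorize(values)
# labels  -> [0, 1, 0, -1]; missing values receive the na_sentinel (-1)
# uniques -> ['b', 'a'], in order of first appearance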
15,404
def igetattr(self, name, context=None):
    try:
        return bases._infer_stmts(self.getattr(name, context), context, frame=self)
    except exceptions.AttributeInferenceError as error:
        raise exceptions.InferenceError(
            error.message, target=self, attribute=name, context=context
        ) from error
Inferred getattr, which returns an iterator of inferred statements.
15,405
def deserialize(data): try: module = import_module(data.get().get()) cls = getattr(module, data.get().get()) except ImportError: raise ImportError("No module named: %r" % data.get().get()) except AttributeError: raise ImportError("module %r does not contain class %r" % ( data.get().get(), data.get().get() )) class_params = cls.class_params(hidden=True) params = dict( (name, class_params[name].deserialize(value)) for (name, value) in data.get().items() ) return cls(**params)
Create instance from serial data
15,406
def setEventCallback(self, event, callback): if event not in EVENT_CALLBACK_SET: raise ValueError( % (event, )) self.__event_callback_dict[event] = callback
Set a function to call for a given event. event must be one of: TRANSFER_COMPLETED TRANSFER_ERROR TRANSFER_TIMED_OUT TRANSFER_CANCELLED TRANSFER_STALL TRANSFER_NO_DEVICE TRANSFER_OVERFLOW
15,407
def uuid4(self, cast_to=str):
    return cast_to(uuid.UUID(int=self.generator.random.getrandbits(128), version=4))
Generates a random UUID4 string. :param cast_to: Specify what type the UUID should be cast to. Default is `str` :type cast_to: callable
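For context, a brief usage sketch; this provider method appears to come from the Faker library, and the cast_to behaviour follows the code shown above.

from faker import Faker

fake = Faker()
print(fake.uuid4())                        # rendered as str by default
as_obj = fake.uuid4(cast_to=lambda u: u)   # keep the uuid.UUID instance itself
print(as_obj.version)                      # 4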
15,408
def normalize_linefeeds(self, a_string):
    newline = re.compile("(\r\r\r\n|\r\r\n|\r\n|\n\r)")
    a_string = newline.sub(self.RESPONSE_RETURN, a_string)
    if self.RESPONSE_RETURN == "\n":
        # Convert any remaining \r sequences to \n
        return re.sub("\r", self.RESPONSE_RETURN, a_string)
    # For other RESPONSE_RETURN values, return the partially normalized string
    return a_string
Convert `\r\r\n`, `\r\n`, `\n\r` to `\n`. :param a_string: A string that may have non-normalized line feeds i.e. output returned from device, or a device prompt :type a_string: str
15,409
def get_data(self, query, fields_convert_map, encoding=, auto_convert=True, include_hidden=False, header=None): fields_convert_map = fields_convert_map or {} d = self.fields_convert_map.copy() d.update(fields_convert_map) if isinstance(query, Select): query = do_(query) for record in query: self._cal_sum(record) row = [] record = self._get_record(record) if self.before_record_render: self.before_record_render(record) if isinstance(record, orm.Model): model = record.__class__ else: model = None for i, x in enumerate(self.table_info[]): field = get_field(x[], model) if not field: field = {:x[]} else: field = {:x[], :field} if not include_hidden and x.get(): continue if isinstance(record, orm.Model): v = make_view_field(field, record, fields_convert_map=d, auto_convert=auto_convert) else: v = make_view_field(field, record, fields_convert_map=d, auto_convert=auto_convert, value=record[x[]]) value = v[] row.append(value) if header: ret = dict(zip(header, row)) else: ret = row yield ret total = self._get_sum() if total: row = [] for x in total: v = x if isinstance(x, str): v = safe_unicode(x, encoding) row.append(v) if header: ret = dict(zip(header, row)) else: ret = row yield ret
If auto_convert=True, field values will be converted according to fields_convert_map
15,410
def mcmc_emcee(self, n_walkers, n_run, n_burn, mean_start, sigma_start):
    sampler = emcee.EnsembleSampler(n_walkers, self.cosmoParam.numParam, self.chain.likelihood)
    p0 = emcee.utils.sample_ball(mean_start, sigma_start, n_walkers)
    new_pos, _, _, _ = sampler.run_mcmc(p0, n_burn)
    sampler.reset()
    store = InMemoryStorageUtil()
    for pos, prob, _, _ in sampler.sample(new_pos, iterations=n_run):
        store.persistSamplingValues(pos, prob, None)
    return store.samples
returns the mcmc analysis of the parameter space
15,411
def delete_pool(hostname, username, password, name): ret = {: name, : {}, : False, : } if __opts__[]: return _test_output(ret, , params={ : hostname, : username, : password, : name, } ) existing = __salt__[](hostname, username, password, name) if existing[] == 200: deleted = __salt__[](hostname, username, password, name) if deleted[] == 200: ret[] = True ret[] = ret[][] = existing[] ret[][] = {} else: ret = _load_result(deleted, ret) elif existing[] == 404: ret[] = True ret[] = ret[][] = {} ret[][] = {} else: ret = _load_result(existing, ret) return ret
Delete an existing pool. hostname The host/address of the bigip device username The iControl REST username password The iControl REST password name The name of the pool which will be deleted
15,412
def get_ansible_by_id(self, ansible_id):
    for elem in self.ansible_hosts:
        if elem.id == ansible_id:
            return elem
    return None
Return an ansible host with that id, or None.
15,413
def rightsibling(node):
    if node.parent:
        pchildren = node.parent.children
        idx = pchildren.index(node)
        try:
            return pchildren[idx + 1]
        except IndexError:
            return None
    else:
        return None
Return Right Sibling of `node`. >>> from anytree import Node >>> dan = Node("Dan") >>> jet = Node("Jet", parent=dan) >>> jan = Node("Jan", parent=dan) >>> joe = Node("Joe", parent=dan) >>> rightsibling(dan) >>> rightsibling(jet) Node('/Dan/Jan') >>> rightsibling(jan) Node('/Dan/Joe') >>> rightsibling(joe)
15,414
def _process_element(self, pos, e): tag, class_attr = _tag_and_class_attr(e) start_of_message = tag == and class_attr == and pos == end_of_thread = tag == and in class_attr and pos == if start_of_message and not self.messages_started: self.messages_started = True elif tag == "span" and pos == "end": if "user" in class_attr: self.current_sender = self.name_resolver.resolve(e.text) elif "meta" in class_attr: self.current_timestamp =\ parse_timestamp(e.text, self.use_utc, self.timezone_hints) elif tag == and pos == : if not self.current_text: self.current_text = e.text.strip() if e.text else elif tag == and pos == : self.current_text = .format(e.attrib[]) elif (start_of_message or end_of_thread) and self.messages_started: if not self.current_timestamp: raise UnsuitableParserError if not self.current_sender: if not self.no_sender_warning_status: sys.stderr.write( "\rWARNING: The sender was missing in one or more parsed messages. " "This is an error on FacebookUnknown', seq_num=self.seq_num) self.messages += [cm] self.seq_num -= 1 self.current_sender, self.current_timestamp, self.current_text = None, None, None return end_of_thread
Parses an incoming HTML element/node for data. pos -- the part of the element being parsed (start/end) e -- the element being parsed
15,415
def getWithPrompt(self): if self.prompt: pstring = self.prompt.split("\n")[0].strip() else: pstring = self.name if self.choice: schoice = list(map(self.toString, self.choice)) pstring = pstring + " (" + "|".join(schoice) + ")" elif self.min not in [None, INDEF] or \ self.max not in [None, INDEF]: pstring = pstring + " (" if self.min not in [None, INDEF]: pstring = pstring + self.toString(self.min) pstring = pstring + ":" if self.max not in [None, INDEF]: pstring = pstring + self.toString(self.max) pstring = pstring + ")" if self.value is not None: pstring = pstring + " (" + self.toString(self.value,quoted=1) + ")" pstring = pstring + ": " stdout = sys.__stdout__ try: if sys.stdout.isatty() or not stdout.isatty(): stdout = sys.stdout except AttributeError: pass stdin = sys.__stdin__ try: if sys.stdin.isatty() or not stdin.isatty(): stdin = sys.stdin except AttributeError: pass stdout.write(pstring) stdout.flush() ovalue = irafutils.tkreadline(stdin) value = ovalue.strip() while (1): try: if value == "": value = self._nullPrompt() self.set(value) if self.value is not None: return if ovalue == "": stdout.flush() raise EOFError("EOF on parameter prompt") print("Error: specify a value for the parameter") except ValueError as e: print(str(e)) stdout.write(pstring) stdout.flush() ovalue = irafutils.tkreadline(stdin) value = ovalue.strip()
Interactively prompt for parameter value
15,416
def set_rendering_intent(self, rendering_intent):
    if rendering_intent not in (None, PERCEPTUAL, RELATIVE_COLORIMETRIC,
                                SATURATION, ABSOLUTE_COLORIMETRIC):
        raise FormatError()
    self.rendering_intent = rendering_intent
Set rendering intent variant for sRGB chunk
15,417
def get_position(self, dt):
    return self.sx + self.dx * dt, self.sy + self.dy * dt
Given dt in [0, 1], return the current position of the tile.
15,418
def porttree_matches(name):
    matches = []
    for category in _porttree().dbapi.categories:
        if _porttree().dbapi.cp_list(category + "/" + name):
            matches.append(category + "/" + name)
    return matches
Returns a list containing the matches for a given package name from the portage tree. Note that the specific version of the package will not be provided for packages that have several versions in the portage tree, but rather the name of the package (i.e. "dev-python/paramiko").
15,419
def render(value): if not value: return r if value[0] != beginning: value = beginning + value if value[-1] != end: value += end return value
This function finishes the URL pattern creation by adding the starting character ^ and possibly an end character at the end :param value: naive URL value :return: raw string
15,420
def selected(self, request, tag): if self.option.selected: tag(selected=) return tag
Render a selected attribute on the given tag if the wrapped L{Option} instance is selected.
15,421
def create_groups(iam_client, groups): groups_data = [] if type(groups) != list: groups = [ groups ] for group in groups: errors = [] try: printInfo( % group) iam_client.create_group(GroupName = group) except Exception as e: if e.response[][] != : printException(e) errors.append() groups_data.append({: group, : errors}) return groups_data
Create a number of IAM groups, silently handling exceptions when the entity already exists. :param iam_client: AWS API client for IAM :param groups: Names of the IAM groups to be created. :return: None
15,422
def _adjusted_script_code(self, script):
    script_code = ByteData()
    if script[0] == len(script) - 1:
        return script
    script_code += VarInt(len(script))
    script_code += script
    return script_code
Checks if the script code passed in to the sighash function is already length-prepended. This will break if there's a redeem script that's just a pushdata. That won't happen in practice. Args: script (bytes): the spend script Returns: (bytes): the length-prepended script (if necessary)
15,423
def parse_model_group(path, group): context = FilePathContext(path) for reaction_id in group.get(, []): yield reaction_id for reaction_id in parse_model_group_list( context, group.get(, [])): yield reaction_id
Parse a structured model group as obtained from a YAML file. Path can be given as a string or a context.
15,424
def _Decode(self, codec_name, data):
    try:
        return data.decode(codec_name, "replace")
    except LookupError:
        raise RuntimeError("Codec could not be found.")
    except AssertionError:
        raise RuntimeError("Codec failed to decode")
Decode data with the given codec name.
15,425
def writeList(self, register, data):
    self._idle()
    self._transaction_start()
    self._i2c_start()
    self._i2c_write_bytes([self._address_byte(False), register] + data)
    self._i2c_stop()
    response = self._transaction_end()
    self._verify_acks(response)
Write bytes to the specified register.
15,426
def dev(): env.roledefs = { : [], : [], } env.user = env.backends = env.roledefs[] env.server_name = env.short_server_name = env.static_folder = env.server_ip = env.no_shared_sessions = False env.server_ssl_on = False env.goal = env.socket_port = env.map_settings = {} execute(build_env)
Define dev stage
15,427
def backup_db( aws_access_key_id, aws_secret_access_key, bucket_name, s3_folder, database, mysql_host, mysql_port, db_user, db_pass, db_backups_dir, backup_aging_time): bucket = s3_bucket(aws_access_key_id, aws_secret_access_key, bucket_name) key = boto.s3.key.Key(bucket) bucketlist = bucket.list() pat = "[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]-[0-9][0-9]-[0-9][0-9]-[0-9][0-9]-%s.sql.bz2" % database sql_file = % (dt.now().strftime(TIMESTAMP_FORMAT), database) print( % (database, sql_file)) sql_full_target = os.path.join(db_backups_dir, sql_file) f = open(sql_full_target, "wb") cmd = % (mysql_host, mysql_port, db_user, db_pass, database) print(cmd) subprocess.call(cmd.split(), stdout=f) cmd = % sql_full_target print(cmd) subprocess.call(cmd.split()) sql_local_full_target = sql_full_target key.key = os.path.join(s3_folder, % sql_file) print( % (sql_file, key.key, dt.now())) try: key.set_contents_from_filename( % os.path.join(db_backups_dir, sql_full_target)) print( % (sql_local_full_target, dt.now())) finally: delete_expired_backups_in_bucket(bucket, bucketlist, pat, backup_aging_time=backup_aging_time) delete_local_db_backups(pat, db_backups_dir, backup_aging_time)
dumps databases into /backups, uploads to s3, deletes backups older than a month fab -f ./fabfile.py backup_dbs :param aws_access_key_id: :param aws_secret_access_key: :param bucket_name: :param database: :param mysql_host: :param mysql_port: :param db_pass: :param db_backups_dir: :param backup_aging_time: :return:
15,428
def breakRankTies(self, oldsym, newsym):
    stableSort = map(None, oldsym, newsym, range(len(oldsym)))
    stableSort.sort()
    lastOld, lastNew = None, None
    x = -1
    for old, new, index in stableSort:
        if old != lastOld:
            x += 1
            lastOld = old
            lastNew = new
        elif new != lastNew:
            x += 1
            lastNew = new
        newsym[index] = x
Break ties to form a new list with the same integer ordering from high to low. Example: old = [4, 2, 4, 7, 8] (two ties, 4 and 4), new = [60, 2, 61, 90, 99], res = [4, 0, 3, 1, 2] (the tie between the two 4s is broken in this case).
15,429
def getAngle(self, mode=): if self.refresh is True: self.getMatrix() try: if self.mflag: if mode == : return self.bangle / np.pi * 180 else: return self.bangle else: return 0 except AttributeError: print("Please execute getMatrix() first.")
Return the bend angle. :param mode: 'deg' or 'rad' :return: deflecting angle in degrees or radians, according to `mode`
15,430
def _ConvertDictToObject(self, json_dict): class_type = json_dict.get(, None) if class_type not in self._CLASS_TYPES: raise TypeError() del json_dict[] type_indicator = json_dict.get(, None) if type_indicator: del json_dict[] if in json_dict: json_dict[] = tuple(json_dict[]) return path_spec_factory.Factory.NewPathSpec(type_indicator, **json_dict)
Converts a JSON dict into a path specification object. The dictionary of the JSON serialized objects consists of: { '__type__': 'PathSpec' 'type_indicator': 'OS' 'parent': { ... } ... } Here '__type__' indicates the object base type in this case this should be 'PathSpec'. The rest of the elements of the dictionary make up the path specification object properties. Note that json_dict is a dict of dicts and the _ConvertDictToObject method will be called for every dict. That is how the path specification parent objects are created. Args: json_dict (dict[str, object]): JSON serialized objects. Returns: PathSpec: a path specification. Raises: TypeError: if the JSON serialized object does not contain a '__type__' attribute that contains 'PathSpec'.
15,431
def most_similar(self, keyword, num): try: result = self.model.most_similar(keyword, topn = num) return {:keyword, :result, :1} except KeyError as e: kemKeyword = self.kemNgram.find(keyword) if kemKeyword: result = self.model.most_similar(kemKeyword, topn = num) return {:kemKeyword, :result, :self.kemNgram.compare(kemKeyword, keyword)} return {:keyword, :[], :0}
input: keyword term of top n output: keyword result in json format
15,432
def get_asset_notification_session(self, asset_receiver, proxy): if asset_receiver is None: raise NullArgument() if not self.supports_asset_notification(): raise Unimplemented() try: from . import sessions except ImportError: raise proxy = self._convert_proxy(proxy) try: session = sessions.AssetNotificationSession(asset_receiver, proxy, runtime=self._runtime) except AttributeError: raise return session
Gets the notification session for notifications pertaining to asset changes. arg: asset_receiver (osid.repository.AssetReceiver): the notification callback arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetNotificationSession) - an AssetNotificationSession raise: NullArgument - asset_receiver is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_asset_notification() is false compliance: optional - This method must be implemented if supports_asset_notification() is true.
15,433
def get_set(self, flag, new):
    old = self._is_set(flag)
    if new is True:
        self._set(flag)
    elif new is False:
        self._clear(flag)
    return old
Return the boolean value of 'flag'. If 'new' is set, the flag is updated, and the value before update is returned.
15,434
def remove_trailing(needle, haystack):
    if haystack[-len(needle):] == needle:
        return haystack[:-len(needle)]
    return haystack
Remove trailing needle string (if exists). >>> remove_trailing('Test', 'ThisAndThatTest') 'ThisAndThat' >>> remove_trailing('Test', 'ArbitraryName') 'ArbitraryName'
15,435
def bibitems(self): bibitems = [] lines = self.text.split() for i, line in enumerate(lines): if line.lstrip().startswith(u): j = 1 while True: try: if (lines[i + j].startswith(u) is False) \ and (lines[i + j] != ): line += lines[i + j] elif "\end{document}" in lines[i + j]: break else: break except IndexError: break else: print line j += 1 print "finished", line bibitems.append(line) return bibitems
List of bibitem strings appearing in the document.
15,436
def ExtractEvents(self, parser_mediator, registry_key, **kwargs): values_dict = {} for registry_value in registry_key.GetValues(): if not registry_value.name or not registry_value.data: continue if registry_value.name == : self._ParseUpdateKeyValue( parser_mediator, registry_value, registry_key.path) else: values_dict[registry_value.name] = registry_value.GetDataAsObject() event_data = windows_events.WindowsRegistryEventData() event_data.key_path = registry_key.path event_data.offset = registry_key.offset event_data.regvalue = values_dict event_data.source_append = self._SOURCE_APPEND event_data.urls = self.URLS event = time_events.DateTimeValuesEvent( registry_key.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN) parser_mediator.ProduceEventWithEventData(event, event_data)
Extracts events from a Windows Registry key. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
15,437
def _set_access_log(self, config, level): access_handler = self._get_param( , , config, , ) syslog_formatter = logging.Formatter( "ldapcherry[%(process)d]: %(message)s" ) if access_handler == : cherrypy.log.access_log.handlers = [] handler = logging.handlers.SysLogHandler( address=, facility=, ) handler.setFormatter(syslog_formatter) cherrypy.log.access_log.addHandler(handler) elif access_handler == : cherrypy.log.access_log.handlers = [] handler = logging.StreamHandler(sys.stdout) formatter = logging.Formatter( ) handler.setFormatter(formatter) cherrypy.log.access_log.addHandler(handler) elif access_handler == : pass elif access_handler == : cherrypy.log.access_log.handlers = [] handler = logging.NullHandler() cherrypy.log.access_log.addHandler(handler) cherrypy.log.access_log.setLevel(level)
Configure access logs
15,438
def classify(self, dataset, missing_value_action=): if (missing_value_action == ): missing_value_action = select_default_missing_value_policy(self, ) if isinstance(dataset, list): return self.__proxy__.fast_classify(dataset, missing_value_action) if isinstance(dataset, dict): return self.__proxy__.fast_classify([dataset], missing_value_action) _raise_error_if_not_sframe(dataset, "dataset") return self.__proxy__.classify(dataset, missing_value_action)
Return predictions for ``dataset``, using the trained supervised_learning model. Predictions are generated as class labels (0 or 1). Parameters ---------- dataset: SFrame Dataset of new observations. Must include columns with the same names as the features used for model training, but does not require a target column. Additional columns are ignored. missing_value_action: str, optional Action to perform when missing values are encountered. This can be one of: - 'auto': Choose model dependent missing value action - 'impute': Proceed with evaluation by filling in the missing values with the mean of the training data. Missing values are also imputed if an entire column of data is missing during evaluation. - 'error': Do not proceed with prediction and terminate with an error message. Returns ------- out : SFrame An SFrame with model predictions.
15,439
def integrate(self, rate, timestep):
    self._fast_normalise()
    rate = self._validate_number_sequence(rate, 3)
    rotation_vector = rate * timestep
    rotation_norm = np.linalg.norm(rotation_vector)
    if rotation_norm > 0:
        axis = rotation_vector / rotation_norm
        angle = rotation_norm
        q2 = Quaternion(axis=axis, angle=angle)
        self.q = (self * q2).q
        self._fast_normalise()
Advance a time varying quaternion to its value at a time `timestep` in the future. The Quaternion object will be modified to its future value. It is guaranteed to remain a unit quaternion. Params: rate: numpy 3-array (or array-like) describing rotation rates about the global x, y and z axes respectively. timestep: interval over which to integrate into the future. Assuming *now* is `T=0`, the integration occurs over the interval `T=0` to `T=timestep`. Smaller intervals are more accurate when `rate` changes over time. Note: The solution is closed form given the assumption that `rate` is constant over the interval of length `timestep`.
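A short usage sketch, assuming this is pyquaternion's Quaternion.integrate (which the signature and docstring suggest); the rotation rate and step size are arbitrary.

import numpy as np
from pyquaternion import Quaternion

q = Quaternion()                     # identity orientation
rate = np.array([0.0, 0.0, np.pi])   # spin about the global z axis, in rad/s
for _ in range(100):
    q.integrate(rate, 0.005)         # advance 0.5 s in total, in small steps
print(q.degrees)                     # roughly 90 degrees accumulated about z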
15,440
def tree_to_nodes(tree, context=None, metadata=None): nodes = [] for item in tree[]: if in item: sub_nodes = tree_to_nodes(item, context=context, metadata=metadata) if metadata is None: metadata = {} else: metadata = metadata.copy() for key in (, , , , ): if key in metadata: metadata.pop(key) for key in (, , ): if item.get(key): metadata[key] = item[key] if item[key] != : if key == : metadata[] = item[key] elif key == : metadata[] = item[key] titles = _title_overrides_from_tree(item) if item.get() is not None: tbinder = cnxepub.Binder(item.get(), sub_nodes, metadata=metadata, title_overrides=titles) else: tbinder = cnxepub.TranslucentBinder(sub_nodes, metadata=metadata, title_overrides=titles) nodes.append(tbinder) else: doc = document_factory(item[], context=context) for key in (, , ): if item.get(key): doc.metadata[key] = item[key] if key == : doc.metadata[] = item[key] elif key == : doc.metadata[] = item[key] nodes.append(doc) return nodes
Assembles ``tree`` nodes into object models. If ``context`` is supplied, it will be used to contextualize the contents of the nodes. Metadata will pass non-node identifying values down to child nodes, if not overridden (license, timestamps, etc)
15,441
def _grid_widgets(self):
    if self.__label:
        self._header_label.grid(row=0, column=1, columnspan=3, sticky="nw", padx=5, pady=(5, 0))
    self._bold_button.grid(row=1, column=1, sticky="nswe", padx=5, pady=2)
    self._italic_button.grid(row=1, column=2, sticky="nswe", padx=(0, 5), pady=2)
    self._underline_button.grid(row=1, column=3, sticky="nswe", padx=(0, 5), pady=2)
    self._overstrike_button.grid(row=1, column=4, sticky="nswe", padx=(0, 5), pady=2)
Place the widgets in the correct positions :return: None
15,442
def set_mem_per_proc(self, mem_mb):
    super().set_mem_per_proc(mem_mb)
    self.qparams["mem_per_cpu"] = self.mem_per_proc
Set the memory per process in megabytes
15,443
def setup_seq_signals(self):
    log.debug("Setting up sequence page signals.")
    self.seq_prj_view_pb.clicked.connect(self.seq_view_prj)
    self.seq_shot_view_pb.clicked.connect(self.seq_view_shot)
    self.seq_shot_create_pb.clicked.connect(self.seq_create_shot)
    self.seq_desc_pte.textChanged.connect(self.seq_save)
Setup the signals for the sequence page :returns: None :rtype: None :raises: None
15,444
def get_file_from_s3(job, s3_url, encryption_key=None, per_file_encryption=True, write_to_jobstore=True): work_dir = job.fileStore.getLocalTempDir() parsed_url = urlparse(s3_url) if parsed_url.scheme == : download_url = + parsed_url.path elif parsed_url.scheme in (, ): download_url = s3_url else: raise RuntimeError( % s3_url) filename = .join([work_dir, os.path.basename(s3_url)]) download_call = [, , , ] if encryption_key: download_call.extend([, encryption_key]) if per_file_encryption: download_call.append() download_call.extend([download_url, filename]) attempt = 0 exception = while True: try: with open(work_dir + , ) as stderr_file: subprocess.check_call(download_call, stderr=stderr_file) except subprocess.CalledProcessError: with open(stderr_file.name) as stderr_file: for line in stderr_file: line = line.strip() if line: exception = line if exception.startswith(): exception = exception.split() if exception[-1].startswith(): raise RuntimeError( % s3_url) elif exception[-1].startswith(): raise RuntimeError( % s3_url) else: raise RuntimeError( % (.join(exception), s3_url)) elif exception.startswith(): exception = exception.split() if exception[-1].startswith(""): raise RuntimeError( % s3_url) else: raise RuntimeError( % (.join(exception), s3_url)) else: if attempt < 3: attempt += 1 continue else: raise RuntimeError( % s3_url) except OSError: raise RuntimeError() else: break finally: os.remove(stderr_file.name) assert os.path.exists(filename) if write_to_jobstore: filename = job.fileStore.writeGlobalFile(filename) return filename
Download a supplied URL that points to a file on Amazon S3. If the file is encrypted using sse-c (with the user-provided key or with a hash of the user-provided master key) then the encryption keys will be used when downloading. The file is downloaded and written to the jobstore if requested. :param str s3_url: URL for the file (can be s3, S3 or https) :param str encryption_key: Path to the master key :param bool per_file_encryption: If encrypted, was the file encrypted using the per-file method? :param bool write_to_jobstore: Should the file be written to the job store? :return: Path to the downloaded file or fsID (if write_to_jobstore was True) :rtype: str|toil.fileStore.FileID
15,445
def gdal2np_dtype(b):
    dt_dict = gdal_array.codes
    if isinstance(b, str):
        b = gdal.Open(b)
    if isinstance(b, gdal.Dataset):
        b = b.GetRasterBand(1)
    if isinstance(b, gdal.Band):
        b = b.DataType
    if isinstance(b, int):
        np_dtype = dt_dict[b]
    else:
        np_dtype = None
        print("Input must be GDAL Dataset or RasterBand object")
    return np_dtype
Get NumPy datatype that corresponds with GDAL RasterBand datatype Input can be filename, GDAL Dataset, GDAL RasterBand, or GDAL integer dtype
15,446
def cluster_coincs(stat, time1, time2, timeslide_id, slide, window, argmax=numpy.argmax): logging.info( % window) if len(time1) == 0 or len(time2) == 0: logging.info() return numpy.array([]) if numpy.isfinite(slide): time = (time1 + time2 + timeslide_id * slide) / 2 else: time = 0.5 * (time2 + time1) tslide = timeslide_id.astype(numpy.float128) time = time.astype(numpy.float128) span = (time.max() - time.min()) + window * 10 time = time + span * tslide cidx = cluster_over_time(stat, time, window, argmax) return cidx
Cluster coincident events for each timeslide separately, across templates, based on the ranking statistic Parameters ---------- stat: numpy.ndarray vector of ranking values to maximize time1: numpy.ndarray first time vector time2: numpy.ndarray second time vector timeslide_id: numpy.ndarray vector that determines the timeslide offset slide: float length of the timeslides offset interval window: float length to cluster over Returns ------- cindex: numpy.ndarray The set of indices corresponding to the surviving coincidences.
15,447
def compat_string(value): if isinstance(value, bytes): return value.decode(encoding=) return str(value)
Provide a python2/3 compatible string representation of the value. :param value: the value to represent (bytes are decoded) :rtype: str
15,448
def chat_post_message(self, channel, text, **params): method = params.update({ : channel, : text, }) return self._make_request(method, params)
chat.postMessage This method posts a message to a channel. https://api.slack.com/methods/chat.postMessage
15,449
def redirect_legacy_content(request): routing_args = request.matchdict objid = routing_args[] objver = routing_args.get() filename = routing_args.get() id, version = _convert_legacy_id(objid, objver) if not id: raise httpexceptions.HTTPNotFound() headers=[("Cache-Control", "max-age=60, public")])
Redirect from legacy /content/id/version to new /contents/uuid@version. Handles collection context (book) as well.
15,450
def _transschema(x):
    if isinstance(x, tuple):
        return x.__class__(_transschema(x[0]), *x[1:])
    elif isinstance(x, dict):
        return dict((_qualify_map(key, _transschema(val)) for key, val in x.iteritems()))
    elif isinstance(x, list):
        return map(_transschema, x)
    else:
        return x
Transform a schema, once loaded from its YAML representation, to its final internal representation
15,451
def make_context(self, info_name, args, parent=None, **extra):
    for key, value in iteritems(self.context_settings):
        if key not in extra:
            extra[key] = value
    ctx = Context(self, info_name=info_name, parent=parent, **extra)
    self.parse_args(ctx, args)
    return ctx
This function, when given an info name and arguments, will kick off the parsing and create a new :class:`Context`. It does not invoke the actual command callback though. :param info_name: the info name for this invocation. Generally this is the most descriptive name for the script or command. For the toplevel script it's usually the name of the script, for commands below it it's the name of the script. :param args: the arguments to parse as list of strings. :param parent: the parent context if available. :param extra: extra keyword arguments forwarded to the context constructor.
15,452
def H3(self):
    "Correlation."
    multiplied = np.dot(self.levels[:, np.newaxis] + 1, self.levels[np.newaxis] + 1)
    repeated = np.tile(multiplied[np.newaxis], (self.nobjects, 1, 1))
    summed = (repeated * self.P).sum(2).sum(1)
    h3 = (summed - self.mux * self.muy) / (self.sigmax * self.sigmay)
    h3[np.isinf(h3)] = 0
    return h3
Correlation.
15,453
def errors_to_json(errors):
    out = []
    for e in errors:
        out.append({
            "check": e[0],
            "message": e[1],
            "line": 1 + e[2],
            "column": 1 + e[3],
            "start": 1 + e[4],
            "end": 1 + e[5],
            "extent": e[6],
            "severity": e[7],
            "replacements": e[8],
        })
    return json.dumps(
        dict(status="success", data={"errors": out}), sort_keys=True)
Convert the errors to JSON.
15,454
def normalize(symbol_string, strict=False): if isinstance(symbol_string, string_types): if not PY3: try: symbol_string = symbol_string.encode() except UnicodeEncodeError: raise ValueError("string should only contain ASCII characters") else: raise TypeError("string is of invalid type %s" % symbol_string.__class__.__name__) norm_string = symbol_string.replace(, ).translate(normalize_symbols).upper() if not valid_symbols.match(norm_string): raise ValueError("string contains invalid characters" % norm_string) if strict and norm_string != symbol_string: raise ValueError("string requires normalization" % symbol_string) return norm_string
Normalize an encoded symbol string. Normalization provides error correction and prepares the string for decoding. These transformations are applied: 1. Hyphens are removed 2. 'I', 'i', 'L' or 'l' are converted to '1' 3. 'O' or 'o' are converted to '0' 4. All characters are converted to uppercase A TypeError is raised if an invalid string type is provided. A ValueError is raised if the normalized string contains invalid characters. If the strict parameter is set to True, a ValueError is raised if any of the above transformations are applied. The normalized string is returned.
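A small illustration of the listed transformations, assuming the normalize function above is importable; the sample strings are made up.

print(normalize("i0-lo-BX"))   # hyphens dropped, i/l -> 1, o -> 0, uppercased: '1010BX'

try:
    normalize("i0-lo-BX", strict=True)
except ValueError:
    print("strict mode rejects strings that required normalization")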
15,455
def decompress(obj, return_type="bytes"):
    if isinstance(obj, binary_type):
        b = zlib.decompress(obj)
    elif isinstance(obj, string_types):
        b = zlib.decompress(base64.b64decode(obj.encode("utf-8")))
    else:
        raise TypeError("input cannot be anything other than str and bytes!")
    if return_type == "bytes":
        return b
    elif return_type == "str":
        return b.decode("utf-8")
    elif return_type == "obj":
        return pickle.loads(b)
    else:
        raise ValueError(
            "'return_type' has to be one of 'bytes', 'str' or 'obj'!")
Decompress it to its original form. :param obj: Compressed object, could be bytes or str. :param return_type: if 'bytes', return bytes; if 'str', decode the bytes to str; if 'obj', unpickle the bytes and return an object.
15,456
def status(self,verbose=0): self._update_status() self._group_report(self.running,) self._group_report(self.completed,) self._group_report(self.dead,) self._comp_report[:] = [] self._dead_report[:] = []
Print a status of all jobs currently being managed.
15,457
def created_by_column(self, obj):
    try:
        first_addition_logentry = admin.models.LogEntry.objects.filter(
            object_id=obj.pk,
            content_type_id=self._get_obj_ct(obj).pk,
            action_flag=admin.models.ADDITION,
        ).get()
        return first_addition_logentry.user
    except admin.models.LogEntry.DoesNotExist:
        return None
Return user who first created an item in Django admin
15,458
def delete_zone(server, token, domain): method = uri = + server + + domain connect.tonicdns_client(uri, method, token, data=False)
Delete specific zone. Argument: server: TonicDNS API server token: TonicDNS API authentication token domain: Specify domain name x-authentication-token: token
15,459
def how_many(self): if self.linkdates != []: if max(self.linkdates) <= list(time.localtime()): currentdate = max(self.linkdates) else: currentdate = list(time.localtime()) print(("This entry has its date set in the future. " "I will use your current local time as its date " "instead."), file=sys.stderr, flush=True) stop = sys.maxsize else: currentdate = [1, 1, 1, 0, 0] firstsync = self.retrieve_config(, ) if firstsync == : stop = sys.maxsize else: stop = int(firstsync) return currentdate, stop
Ascertain where to start downloading, and how many entries.
15,460
def common_vector_root(vec1, vec2):
    root = []
    for v1, v2 in zip(vec1, vec2):
        if v1 == v2:
            root.append(v1)
        else:
            return root
    return root
Return common root of the two vectors. Args: vec1 (list/tuple): First vector. vec2 (list/tuple): Second vector. Usage example:: >>> common_vector_root([1, 2, 3, 4, 5], [1, 2, 8, 9, 0]) [1, 2] Returns: list: Common part of two vectors or blank list.
15,461
def radec2azel(ra_deg: float, dec_deg: float, lat_deg: float, lon_deg: float, time: datetime, usevallado: bool = False) -> Tuple[float, float]: if usevallado or Time is None: return vradec2azel(ra_deg, dec_deg, lat_deg, lon_deg, time) lat = np.atleast_1d(lat_deg) lon = np.atleast_1d(lon_deg) ra = np.atleast_1d(ra_deg) dec = np.atleast_1d(dec_deg) obs = EarthLocation(lat=lat * u.deg, lon=lon * u.deg) points = SkyCoord(Angle(ra, unit=u.deg), Angle(dec, unit=u.deg), equinox=) altaz = points.transform_to(AltAz(location=obs, obstime=Time(str2dt(time)))) return altaz.az.degree, altaz.alt.degree
sky coordinates (ra, dec) to viewing angle (az, el) Parameters ---------- ra_deg : float or numpy.ndarray of float ecliptic right ascension (degrees) dec_deg : float or numpy.ndarray of float ecliptic declination (degrees) lat_deg : float observer latitude [-90, 90] lon_deg : float observer longitude [-180, 180] (degrees) time : datetime.datetime time of observation usevallado : bool, optional default use astropy. If true, use Vallado algorithm Returns ------- az_deg : float or numpy.ndarray of float azimuth [degrees clockwise from North] el_deg : float or numpy.ndarray of float elevation [degrees above horizon (neglecting aberration)]
15,462
def _func_addrs_from_prologues(self): regexes = list() for ins_regex in self.project.arch.function_prologs: r = re.compile(ins_regex) regexes.append(r) thumb_regexes = list() if hasattr(self.project.arch, ): for ins_regex in self.project.arch.thumb_prologs: r = re.compile(ins_regex) thumb_regexes.append(r) unassured_functions = [ ] for start_, bytes_ in self._binary.memory.backers(): for regex in regexes: for mo in regex.finditer(bytes_): position = mo.start() + start_ if position % self.project.arch.instruction_alignment == 0: mapped_position = AT.from_rva(position, self._binary).to_mva() if self._addr_in_exec_memory_regions(mapped_position): unassured_functions.append(mapped_position) for regex in thumb_regexes: for mo in regex.finditer(bytes_): position = mo.start() + start_ if position % self.project.arch.instruction_alignment == 0: mapped_position = AT.from_rva(position, self._binary).to_mva() if self._addr_in_exec_memory_regions(mapped_position): unassured_functions.append(mapped_position+1) l.info("Found %d functions with prologue scanning.", len(unassured_functions)) return unassured_functions
Scan the entire program image for function prologues, and start code scanning at those positions :return: A list of possible function addresses
15,463
def group_singles2array(input, **params): PARAM_FIELD_KEY = PARAM_FIELD_ARRAY = PARAM_FIELD_SINGLE = field_key = params.get(PARAM_FIELD_KEY) if PARAM_FIELD_KEY in params else None field_array = params.get(PARAM_FIELD_ARRAY) field_single = params.get(PARAM_FIELD_SINGLE) if not field_key: res = [] for item in input: res.append(item[field_single]) return {field_array: res} else: tdict = {} for row in input: if not row[field_key] in tdict: tdict.update({row[field_key]: [row[field_single]]}) else: tdict[row[field_key]].append(row[field_single]) res = [] for key, value in tdict.items(): res.append({field_key: key, field_array: value}) return res
Creates array of strings or ints from objects' fields :param input: list of objects :param params: :return: list
15,464
async def _on_receive_array(self, array): if array[0] == : pass else: wrapper = json.loads(array[0][])
Parse channel array and call the appropriate events.
15,465
def parse_object_type_definition(lexer: Lexer) -> ObjectTypeDefinitionNode:
    start = lexer.token
    description = parse_description(lexer)
    expect_keyword(lexer, "type")
    name = parse_name(lexer)
    interfaces = parse_implements_interfaces(lexer)
    directives = parse_directives(lexer, True)
    fields = parse_fields_definition(lexer)
    return ObjectTypeDefinitionNode(
        description=description,
        name=name,
        interfaces=interfaces,
        directives=directives,
        fields=fields,
        loc=loc(lexer, start),
    )
ObjectTypeDefinition
15,466
def migrate_case(adapter: MongoAdapter, scout_case: dict, archive_data: dict): collaborators = list(set(scout_case[] + archive_data[])) if collaborators != scout_case[]: LOG.info(f"set collaborators: {.join(collaborators)}") scout_case[] = collaborators if len(scout_case.get(, [])) == 0: scout_user = adapter.user(archive_data[]) if scout_user: scout_case[] = [archive_data[]] else: LOG.warning(f"{archive_data[]}: unable to find assigned user") for key in [, ]: scout_case[key] = scout_case.get(key, []) for archive_variant in archive_data[key]: variant_id = get_variantid(archive_variant, scout_case[]) scout_variant = adapter.variant(variant_id) if scout_variant: if scout_variant[] in scout_case[key]: LOG.info(f"{scout_variant[]}: variant already in {key}") else: LOG.info(f"{scout_variant[]}: add to {key}") scout_variant[key].append(scout_variant[]) else: LOG.warning(f"{scout_variant[]}: unable to find variant ({key})") scout_variant[key].append(variant_id) if not scout_case.get(): scout_case[] = archive_data[] scout_case[] = True adapter.case_collection.find_one_and_replace( {: scout_case[]}, scout_case, ) scout_institute = adapter.institute(scout_case[]) scout_user = adapter.user() for key in [, ]: for archive_term in archive_data[key]: adapter.add_phenotype( institute=scout_institute, case=scout_case, user=scout_user, link=f"/{scout_case[]}/{scout_case[]}", hpo_term=archive_term[], is_group=key == , )
Migrate case information from archive.
15,467
def select_dict(conn, query: str, params=None, name=None, itersize=5000):
    with conn.cursor(name, cursor_factory=RealDictCursor) as cursor:
        cursor.itersize = itersize
        cursor.execute(query, params)
        for result in cursor:
            yield result
Return a select statement's results as dictionary. Parameters ---------- conn : database connection query : select query string params : query parameters. name : server side cursor name. defaults to client side. itersize : number of records fetched by server.
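A hypothetical usage sketch, assuming a psycopg2 connection and a users table (both the DSN and the table are illustrative, not part of the source).

import psycopg2

conn = psycopg2.connect("dbname=example")   # illustrative DSN
query = "SELECT id, name FROM users WHERE active = %s"
for row in select_dict(conn, query, params=(True,)):
    print(row["id"], row["name"])            # each row is a dict keyed by column name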
15,468
def VGGFace(include_top=True, model=, weights=, input_tensor=None, input_shape=None, pooling=None, classes=None): if weights not in {, None}: raise ValueError( ) if model == : if classes is None: classes = 2622 if weights == and include_top and classes != 2622: raise ValueError( ) return VGG16(include_top=include_top, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, weights=weights, classes=classes) if model == : if classes is None: classes = 8631 if weights == and include_top and classes != 8631: raise ValueError( ) return RESNET50(include_top=include_top, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, weights=weights, classes=classes) if model == : if classes is None: classes = 8631 if weights == and include_top and classes != 8631: raise ValueError( ) return SENET50(include_top=include_top, input_tensor=input_tensor, input_shape=input_shape, pooling=pooling, weights=weights, classes=classes)
Instantiates the VGGFace architectures. Optionally loads weights pre-trained on VGGFace datasets. Note that when using TensorFlow, for best performance you should set `image_data_format="channels_last"` in your Keras config at ~/.keras/keras.json. The model and the weights are compatible with both TensorFlow and Theano. The data format convention used by the model is the one specified in your Keras config file. # Arguments include_top: whether to include the 3 fully-connected layers at the top of the network. weights: one of `None` (random initialization) or "vggface" (pre-training on VGGFACE datasets). input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) to use as image input for the model. model: selects the one of the available architectures vgg16, resnet50 or senet50 default is vgg16. input_shape: optional shape tuple, only to be specified if `include_top` is False (otherwise the input shape has to be `(224, 224, 3)` (with `channels_last` data format) or `(3, 224, 244)` (with `channels_first` data format). It should have exactly 3 inputs channels, and width and height should be no smaller than 48. E.g. `(200, 200, 3)` would be one valid value. pooling: Optional pooling mode for feature extraction when `include_top` is `False`. - `None` means that the output of the model will be the 4D tensor output of the last convolutional layer. - `avg` means that global average pooling will be applied to the output of the last convolutional layer, and thus the output of the model will be a 2D tensor. - `max` means that global max pooling will be applied. classes: optional number of classes to classify images into, only to be specified if `include_top` is True, and if no `weights` argument is specified. # Returns A Keras model instance. # Raises ValueError: in case of invalid argument for `weights`, or invalid input shape.
15,469
def commit_and_quit(self):
    try:
        self.dev.rpc.commit_configuration()
        self.close_config()
    except Exception as err:
        print err
Commits and closes the currently open configuration. Saves a step by not needing to manually close the config. Example: .. code-block:: python from pyJunosManager import JunosDevice dev = JunosDevice(host="1.2.3.4",username="root",password="Juniper") dev.open() dev.load_config_template("system{ host-name {{ hostname }};}",hostname="foo") dev.commit_and_quit() dev.close()
15,470
def handle(self, line_info): normal_handler = self.prefilter_manager.get_handler_by_name() line = line_info.line except: raise return normal_handler.handle(line_info) else: return normal_handler.handle(line_info)
Try to get some help for the object. obj? or ?obj -> basic information. obj?? or ??obj -> more details.
15,471
def getServiceDependencies(self):
    calc = self.getCalculation()
    if calc:
        return calc.getCalculationDependencies(flat=True)
    return []
This methods returns a list with the analyses services dependencies. :return: a list of analysis services objects.
15,472
def list_plugins(path, user): * ret = [] resp = __salt__[](( ).format(path), runas=user) for line in resp.split()[1:]: ret.append(line.split()) return [plugin.__dict__ for plugin in map(_get_plugins, ret)]
List plugins in an installed wordpress path path path to wordpress install location user user to run the command as CLI Example: .. code-block:: bash salt '*' wordpress.list_plugins /var/www/html apache
15,473
def get_server_setting(settings, server=_DEFAULT_SERVER): *MaxRecipients ret = dict() if not settings: _LOG.warning() return ret with salt.utils.winapi.Com(): try: connection = wmi.WMI(namespace=_WMI_NAMESPACE) objs = connection.IIsSmtpServerSetting(settings, Name=server)[0] for setting in settings: ret[setting] = six.text_type(getattr(objs, setting)) except wmi.x_wmi as error: _LOG.error(, error.com_error) except (AttributeError, IndexError) as error: _LOG.error(, error) return ret
Get the value of the setting for the SMTP virtual server. :param str settings: A list of the setting names. :param str server: The SMTP server name. :return: A dictionary of the provided settings and their values. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_smtp_server.get_server_setting settings="['MaxRecipients']"
15,474
def init():
    global ORG
    global LEXER
    global MEMORY
    global INITS
    global AUTORUN_ADDR
    global NAMESPACE

    ORG = 0
    INITS = []
    MEMORY = None
    AUTORUN_ADDR = None
    NAMESPACE = GLOBAL_NAMESPACE
    gl.has_errors = 0
    gl.error_msg_cache.clear()
Initializes this module
15,475
def rule_operation(self, **kwargs): config = ET.Element("config") rule = ET.SubElement(config, "rule", xmlns="urn:brocade.com:mgmt:brocade-aaa") index_key = ET.SubElement(rule, "index") index_key.text = kwargs.pop() operation = ET.SubElement(rule, "operation") operation.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
15,476
def deferral():
    deferred = []
    defer = lambda f, *a, **k: deferred.append((f, a, k))
    try:
        yield defer
    finally:
        while deferred:
            f, a, k = deferred.pop()
            f(*a, **k)
Defers a function call when it is being required like Go. :: with deferral() as defer: sys.setprofile(f) defer(sys.setprofile, None) # do something.
15,477
def url(self, name): key = blobstore.create_gs_key( + name) return images.get_serving_url(key)
Ask blobstore api for an url to directly serve the file
15,478
def append(self, name, value):
    with self.pipe as pipe:
        return pipe.append(self.redis_key(name), self.valueparse.encode(value))
Appends the string ``value`` to the value at ``key``. If ``key`` doesn't already exist, create it with a value of ``value``. Returns the new length of the value at ``key``. :param name: str the name of the redis key :param value: str :return: Future()
15,479
def addworkdays(self, date, offset):
    date = parsefun(date)
    if offset == 0:
        return date
    if offset > 0:
        direction = 1
        idx_offset = Calendar._idx_offsetnext
        idx_next = Calendar._idx_nextworkday
        idx_offset_other = Calendar._idx_offsetprev
        idx_next_other = Calendar._idx_prevworkday
    else:
        direction = -1
        idx_offset = Calendar._idx_offsetprev
        idx_next = Calendar._idx_prevworkday
        idx_offset_other = Calendar._idx_offsetnext
        idx_next_other = Calendar._idx_nextworkday
    weekdaymap = self.weekdaymap
    datewk = date.weekday()
    if not weekdaymap[datewk].isworkday:
        date += datetime.timedelta(days=weekdaymap[datewk][idx_offset_other])
        datewk = weekdaymap[datewk][idx_next_other]
    nw, nd = divmod(abs(offset), len(self.workdays))
    ndays = nw * 7
    while nd > 0:
        ndays += abs(weekdaymap[datewk][idx_offset])
        datewk = weekdaymap[datewk][idx_next]
        nd -= 1
    date += datetime.timedelta(days=ndays * direction)
    return date
Add work days to a given date, ignoring holidays. Note: By definition, a zero offset causes the function to return the initial date, even it is not a work date. An offset of 1 represents the next work date, regardless of date being a work date or not. Args: date (date, datetime or str): Date to be incremented. offset (integer): Number of work days to add. Positive values move the date forward and negative values move the date back. Returns: datetime: New incremented date.
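A hypothetical usage sketch; cal is assumed to be an instance of the surrounding Calendar class configured with a Monday-to-Friday work week (its construction is not shown here).

import datetime

friday = datetime.date(2019, 3, 1)      # a Friday
print(cal.addworkdays(friday, 0))       # 2019-03-01: zero offset returns the date unchanged
print(cal.addworkdays(friday, 1))       # 2019-03-04: the following Monday
print(cal.addworkdays(friday, -5))      # 2019-02-22: five work days back, one full week earlier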
15,480
def write_patch_file(self, patch_file, lines_to_write): with open(patch_file, ) as f: f.writelines(lines_to_write)
Write lines_to_write to the file called patch_file :param patch_file: file name of the patch to generate :param lines_to_write: lines to write to the file - they should be \n terminated :type lines_to_write: list[str] :return: None
15,481
def backwardeuler(dfun, xzero, timerange, timestep):
    return zip(*list(BackwardEuler(dfun, xzero, timerange, timestep)))
Backward Euler method integration. This function wraps BackwardEuler. :param dfun: Derivative function of the system. The differential system arranged as a series of first-order equations: \dot{X} = dfun(t, x) :param xzero: The initial condition of the system. :param vzero: The initial condition of first derivative of the system. :param timerange: The start and end times as (starttime, endtime). :param timestep: The timestep. :param convergencethreshold: Each step requires an iterative solution of an implicit equation. This is the threshold of convergence. :param maxiterations: Maximum iterations of the implicit equation before raising an exception. :returns: t, x: as lists.
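A brief usage sketch following the docstring's conventions; the derivative function and step size are illustrative, and it assumes the module's BackwardEuler iterator is available.

# dx/dt = -x with x(0) = 1, integrated from t = 0 to t = 2 in steps of 0.1
t, x = backwardeuler(lambda t, x: [-x[0]], [1.0], (0.0, 2.0), 0.1)
print(t[-1], x[-1])   # the final state should be close to exp(-2) ~= 0.135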
15,482
async def call_async(self, method_name: str, *args, rpc_timeout: float = None, **kwargs):
    if rpc_timeout is None:
        rpc_timeout = self.rpc_timeout
    if rpc_timeout:
        try:
            return await asyncio.wait_for(self._call_async(method_name, *args, **kwargs), timeout=rpc_timeout)
        except asyncio.TimeoutError:
            raise TimeoutError(f"Timeout on client {self.endpoint}, method name {method_name}, class info: {self}")
    else:
        return await self._call_async(method_name, *args, **kwargs)
Send JSON RPC request to a backend socket and receive reply (asynchronously) :param method_name: Method name :param args: Args that will be passed to the remote function :param float rpc_timeout: Timeout in seconds for Server response, set to None to disable the timeout :param kwargs: Keyword args that will be passed to the remote function
15,483
def set_rotation(self, rotation):
    self._rotation = np.radians(rotation)
    self._polar.set_theta_offset(self._rotation + np.pi / 2.0)
    self.transData.invalidate()
    self.transAxes.invalidate()
    self._set_lim_and_transforms()
Set the rotation of the stereonet in degrees clockwise from North.
15,484
def get_features_by_ids(self, ids=None, threshold=0.0001, func=np.mean, get_weights=False):
    weights = self.data.ix[ids].apply(func, 0)
    above_thresh = weights[weights >= threshold]
    return above_thresh if get_weights else list(above_thresh.index)
Returns features for which the mean loading across all specified studies (in ids) is >= threshold.
15,485
def sigPerms(s): codes = if not s: yield elif s[0] in codes: start = codes.index(s[0]) for x in codes[start:]: for y in sigPerms(s[1:]): yield x + y elif s[0] == : for y in sigPerms(s[1:]): yield + y else: yield s
Generate all possible signatures derived by upcasting the given signature.
15,486
def locateChild(self, context, segments): shortcut = getattr(self, + segments[0], None) if shortcut: res = shortcut(context) if res is not None: return res, segments[1:] req = IRequest(context) for plg in self.siteStore.powerupsFor(ISessionlessSiteRootPlugin): spr = getattr(plg, , None) if spr is not None: childAndSegments = spr(req, segments) else: childAndSegments = plg.resourceFactory(segments) if childAndSegments is not None: return childAndSegments return self.guardedRoot.locateChild(context, segments)
Return a statically defined child or a child defined by a sessionless site root plugin or an avatar from guard.
15,487
def stubs_clustering(network,use_reduced_coordinates=True, line_length_factor=1.0): busmap = busmap_by_stubs(network) if use_reduced_coordinates: network.buses.loc[busmap.index,[,]] = network.buses.loc[busmap,[,]].values return get_clustering_from_busmap(network, busmap, line_length_factor=line_length_factor)
Cluster network by reducing stubs and stubby trees (i.e. sequentially reducing dead-ends). Parameters ---------- network : pypsa.Network use_reduced_coordinates : boolean If True, do not average clusters, but take from busmap. line_length_factor : float Factor to multiply the crow-flies distance between new buses in order to get new line lengths. Returns ------- Clustering : named tuple A named tuple containing network, busmap and linemap
15,488
def _findAssociatedConfigSpecFile(self, cfgFileName): retval = "."+os.sep+self.__taskName+".cfgspc" if os.path.isfile(retval): return retval retval = os.path.dirname(cfgFileName)+os.sep+self.__taskName+".cfgspc" if os.path.isfile(retval): return retval retval = self.getDefaultSaveFilename()+ if os.path.isfile(retval): return retval if self.__assocPkg is not None: x, theFile = findCfgFileForPkg(None, , pkgObj = self.__assocPkg, taskName = self.__taskName) return theFile x, theFile = findCfgFileForPkg(self.__taskName, , taskName = self.__taskName) if os.path.exists(theFile): return theFile raise NoCfgFileError(+ \ self.__taskName+)
Given a config file, find its associated config-spec file, and return the full pathname of the file.
15,489
def importaccount(ctx, account, role): from peerplaysbase.account import PasswordKey password = click.prompt("Account Passphrase", hide_input=True) account = Account(account, peerplays_instance=ctx.peerplays) imported = False if role == "owner": owner_key = PasswordKey(account["name"], password, role="owner") owner_pubkey = format( owner_key.get_public_key(), ctx.peerplays.rpc.chain_params["prefix"] ) if owner_pubkey in [x[0] for x in account["owner"]["key_auths"]]: click.echo("Importing owner key!") owner_privkey = owner_key.get_private_key() ctx.peerplays.wallet.addPrivateKey(owner_privkey) imported = True if role == "active": active_key = PasswordKey(account["name"], password, role="active") active_pubkey = format( active_key.get_public_key(), ctx.peerplays.rpc.chain_params["prefix"] ) if active_pubkey in [x[0] for x in account["active"]["key_auths"]]: click.echo("Importing active key!") active_privkey = active_key.get_private_key() ctx.peerplays.wallet.addPrivateKey(active_privkey) imported = True if role == "memo": memo_key = PasswordKey(account["name"], password, role=role) memo_pubkey = format( memo_key.get_public_key(), ctx.peerplays.rpc.chain_params["prefix"] ) if memo_pubkey == account["memo_key"]: click.echo("Importing memo key!") memo_privkey = memo_key.get_private_key() ctx.peerplays.wallet.addPrivateKey(memo_privkey) imported = True if not imported: click.echo("No matching key(s) found. Password correct?")
Import an account using an account password
15,490
def load_batch(self, fn_batch): inverse = list(zip(*fn_batch)) feat_fn_batch = inverse[0] target_fn_batch = inverse[1] batch_inputs, batch_inputs_lens = utils.load_batch_x(feat_fn_batch, flatten=False) batch_targets_list = [] for targets_path in target_fn_batch: with open(targets_path, encoding=ENCODING) as targets_f: target_indices = self.corpus.labels_to_indices(targets_f.readline().split()) batch_targets_list.append(target_indices) batch_targets = utils.target_list_to_sparse_tensor(batch_targets_list) return batch_inputs, batch_inputs_lens, batch_targets
Loads a batch with the given prefixes. The prefixes is the full path to the training example minus the extension.
15,491
def check_req(req): if not isinstance(req, Requirement): return None info = get_package_info(req.name) newest_version = _get_newest_version(info) if _is_pinned(req) and _is_version_range(req): return None current_spec = next(iter(req.specifier)) if req.specifier else None current_version = current_spec.version if current_spec else None if current_version != newest_version: return req.name, current_version, newest_version
Checks if a given req is the latest version available.
15,492
def detect_c3_function_shadowing(contract): results = {} for i in range(0, len(contract.immediate_inheritance) - 1): inherited_contract1 = contract.immediate_inheritance[i] for function1 in inherited_contract1.functions_and_modifiers: if function1.full_name in results or function1.is_constructor or not function1.is_implemented: continue functions_matching = [(inherited_contract1, function1)] already_processed = set([function1]) for x in range(i + 1, len(contract.immediate_inheritance)): inherited_contract2 = contract.immediate_inheritance[x] for function2 in inherited_contract2.functions_and_modifiers: if function2 in already_processed or function2.is_constructor or not function2.is_implemented: continue if function1.full_name == function2.full_name: functions_matching.append((inherited_contract2, function2)) already_processed.add(function2) if len(functions_matching) > 1: results[function1.full_name] = functions_matching return list(results.values())
Detects and obtains functions which are indirectly shadowed via multiple inheritance by C3 linearization properties, despite not directly inheriting from each other. :param contract: The contract to check for potential C3 linearization shadowing within. :return: A list of list of tuples: (contract, function), where each inner list describes colliding functions. The later elements in the inner list overshadow the earlier ones. The contract-function pair's function does not need to be defined in its paired contract, it may have been inherited within it.
15,493
def request_permissions(self, permissions): f = self.create_future() if self.api_level < 23: f.set_result({p: True for p in permissions}) return f w = self.widget request_code = self._permission_code self._permission_code += 1 if request_code == 0: w.setPermissionResultListener(w.getId()) w.onRequestPermissionsResult.connect(self._on_permission_result) def on_results(code, perms, results): f.set_result({p: r == Activity.PERMISSION_GRANTED for (p, r) in zip(perms, results)}) self._permission_requests[request_code] = on_results self.widget.requestPermissions(permissions, request_code) return f
Return a future that resolves with the results of the permission requests
15,494
def matrix(mat): import ROOT if isinstance(mat, (ROOT.TMatrixD, ROOT.TMatrixDSym)): return _librootnumpy.matrix_d(ROOT.AsCObject(mat)) elif isinstance(mat, (ROOT.TMatrixF, ROOT.TMatrixFSym)): return _librootnumpy.matrix_f(ROOT.AsCObject(mat)) raise TypeError( "unable to convert object of type {0} " "into a numpy matrix".format(type(mat)))
Convert a ROOT TMatrix into a NumPy matrix. Parameters ---------- mat : ROOT TMatrixT A ROOT TMatrixD or TMatrixF Returns ------- mat : numpy.matrix A NumPy matrix Examples -------- >>> from root_numpy import matrix >>> from ROOT import TMatrixD >>> a = TMatrixD(4, 4) >>> a[1][2] = 2 >>> matrix(a) matrix([[ 0., 0., 0., 0.], [ 0., 0., 2., 0.], [ 0., 0., 0., 0.], [ 0., 0., 0., 0.]])
15,495
def build_query_fragment(query): root = etree.Element(, nsmap={None: }) text = etree.SubElement(root, ) text.text = etree.CDATA(query.strip()) return root
<query xmlns="http://basex.org/rest"> <text><![CDATA[ (//city/name)[position() <= 5] ]]></text> </query>
15,496
def add_before(self, pipeline): if not isinstance(pipeline, Pipeline): pipeline = Pipeline(pipeline) self.pipes = pipeline.pipes[:] + self.pipes[:] return self
Add a Pipeline to be applied before this processing pipeline. Arguments: pipeline: The Pipeline or callable to apply before this Pipeline.
15,497
def send_sms(self, text, **kw): params = { : self._user, : self._passwd, : text } kw.setdefault("verify", False) if not kw["verify"]: requests.packages.urllib3.disable_warnings(InsecureRequestWarning) res = requests.get(FreeClient.BASE_URL, params=params, **kw) return FreeResponse(res.status_code)
Send an SMS. Since Free only allows us to send SMSes to ourselves you don't have to provide your phone number.
15,498
def add_uid(self, uid, selfsign=True, **prefs): uid._parent = self if selfsign: uid |= self.certify(uid, SignatureType.Positive_Cert, **prefs) self |= uid
Add a User ID to this key. :param uid: The user id to add :type uid: :py:obj:`~pgpy.PGPUID` :param selfsign: Whether or not to self-sign the user id before adding it :type selfsign: ``bool`` Valid optional keyword arguments are identical to those of self-signatures for :py:meth:`PGPKey.certify`. Any such keyword arguments are ignored if selfsign is ``False``
15,499
def Hakim_Steinberg_Stiel(T, Tc, Pc, omega, StielPolar=0): r Q = (0.1574 + 0.359*omega - 1.769*StielPolar - 13.69*StielPolar**2 - 0.510*omega**2 + 1.298*StielPolar*omega) m = (1.210 + 0.5385*omega - 14.61*StielPolar - 32.07*StielPolar**2 - 1.656*omega**2 + 22.03*StielPolar*omega) Tr = T/Tc Pc = Pc/101325. sigma = Pc**(2/3.)*Tc**(1/3.)*Q*((1 - Tr)/0.4)**m sigma = sigma/1000. return sigma
r'''Calculates air-water surface tension using the reference fluids methods of [1]_. .. math:: \sigma = 4.60104\times 10^{-7} P_c^{2/3}T_c^{1/3}Q_p \left(\frac{1-T_r}{0.4}\right)^m Q_p = 0.1574+0.359\omega-1.769\chi-13.69\chi^2-0.51\omega^2+1.298\omega\chi m = 1.21+0.5385\omega-14.61\chi-32.07\chi^2-1.65\omega^2+22.03\omega\chi Parameters ---------- T : float Temperature of fluid [K] Tc : float Critical temperature of fluid [K] Pc : float Critical pressure of fluid [Pa] omega : float Acentric factor for fluid, [-] StielPolar : float, optional Stiel Polar Factor, [-] Returns ------- sigma : float Liquid surface tension, N/m Notes ----- Original equation for m and Q are used. Internal units are atm and mN/m. Examples -------- 1-butanol, as compared to value in CRC Handbook of 0.02493. >>> Hakim_Steinberg_Stiel(298.15, 563.0, 4414000.0, 0.59, StielPolar=-0.07872) 0.021907902575190447 References ---------- .. [1] Hakim, D. I., David Steinberg, and L. I. Stiel. "Generalized Relationship for the Surface Tension of Polar Fluids." Industrial & Engineering Chemistry Fundamentals 10, no. 1 (February 1, 1971): 174-75. doi:10.1021/i160037a032.