Columns: Unnamed: 0 (int64, values 0 to ~389k); code (string, lengths 26 to ~79.6k); docstring (string, lengths 1 to ~46.9k).
379,500
def process(self, batch, *args, **kwargs):
    if self.postprocessing is not None:
        batch = self.postprocessing(batch)
    return batch
Process a list of examples to create a batch. Postprocess the batch with user-provided Pipeline. Args: batch (list(object)): A list of object from a batch of examples. Returns: object: Processed object given the input and custom postprocessing Pipeline.
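The pattern above, an optional user-supplied callable applied to an already-assembled batch, can be exercised standalone. A minimal sketch with hypothetical names (this is not the library's API):

def make_processor(postprocessing=None):
    def process(batch):
        if postprocessing is not None:
            batch = postprocessing(batch)
        return batch
    return process

lowercase = make_processor(lambda batch: [s.lower() for s in batch])
print(lowercase(["Hello", "World"]))  # -> ['hello', 'world']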
379,501
def bench(client, n):
    pairs = [(x, x + 1) for x in range(n)]
    started = time.time()
    for pair in pairs:
        # the RPC method-name literal was lost in extraction; 'add' is a guess
        res, err = client.call('add', *pair)
    duration = time.time() - started
    print()  # the original format string was lost in extraction
    util.print_stats(n, duration)
Benchmark n requests
379,502
def rvs(self, size=1, **kwargs):
    size = int(size)
    dtype = [(p, float) for p in self.params]
    arr = numpy.zeros(size, dtype=dtype)
    remaining = size
    keepidx = 0
    while remaining:
        draws = self._draw(size=remaining, **kwargs)
        mask = self._constraints(draws)
        addpts = mask.sum()
        arr[keepidx:keepidx + addpts] = draws[mask]
        keepidx += addpts
        remaining = size - keepidx
    return arr
Returns random values for all of the parameters.
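The loop above is plain rejection sampling: draw candidates, keep the ones that satisfy the constraints, and repeat until the output array is full. A self-contained sketch of the same technique with a concrete constraint; all names here are illustrative, not the library's:

import numpy as np

def rejection_sample(size, draw, accept):
    out = np.empty(size)
    kept = 0
    while kept < size:
        cand = draw(size - kept)      # draw only as many as are still needed
        good = cand[accept(cand)]     # keep candidates passing the constraint
        out[kept:kept + len(good)] = good
        kept += len(good)
    return out

# e.g. standard normals restricted to the positive half-line
samples = rejection_sample(1000, lambda n: np.random.normal(size=n), lambda x: x > 0)
assert (samples > 0).all()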
379,503
def is_valid_pid_for_create(did):
    if not d1_gmn.app.did.is_valid_pid_for_create(did):
        raise d1_common.types.exceptions.IdentifierNotUnique(
            0,
            # the message format string was lost in extraction; reconstructed from context
            'Identifier is already in use as {}. did="{}"'.format(
                d1_gmn.app.did.classify_identifier(did), did
            ),
            identifier=did,
        )
Assert that ``did`` can be used as a PID for creating a new object with MNStorage.create() or MNStorage.update().
379,504
def maybe_convert_objects(values, convert_dates=True, convert_numeric=True,
                          convert_timedeltas=True, copy=True):
    # if we have passed in a list or scalar
    if isinstance(values, (list, tuple)):
        values = np.array(values, dtype=np.object_)
    if not hasattr(values, 'dtype'):
        values = np.array([values], dtype=np.object_)

    # convert dates
    if convert_dates and values.dtype == np.object_:
        if convert_dates == 'coerce':
            new_values = maybe_cast_to_datetime(
                values, 'M8[ns]', errors='coerce')
            # if we are all nans then leave the values as object dtype
            if not isna(new_values).all():
                values = new_values
        else:
            values = lib.maybe_convert_objects(values,
                                               convert_datetime=convert_dates)

    # convert timedeltas
    if convert_timedeltas and values.dtype == np.object_:
        if convert_timedeltas == 'coerce':
            from pandas.core.tools.timedeltas import to_timedelta
            new_values = to_timedelta(values, errors='coerce')
            if not isna(new_values).all():
                values = new_values
        else:
            values = lib.maybe_convert_objects(
                values, convert_timedelta=convert_timedeltas)

    # convert to numeric
    if values.dtype == np.object_:
        if convert_numeric:
            try:
                new_values = lib.maybe_convert_numeric(values, set(),
                                                       coerce_numeric=True)
                if not isna(new_values).all():
                    values = new_values
            except Exception:
                pass
        else:
            # soft conversion
            values = lib.maybe_convert_objects(values)

    values = values.copy() if copy else values
    return values
if we have an object dtype, try to coerce dates and/or numbers
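The function above is pandas-internal, but the same 'coerce' behaviour is reachable through stable public APIs. A small demonstration of the idea:

import numpy as np
import pandas as pd

vals = np.array(["2021-01-01", "not a date"], dtype=object)
print(pd.to_datetime(vals, errors="coerce"))  # second entry becomes NaT

nums = np.array(["1.5", "x"], dtype=object)
print(pd.to_numeric(nums, errors="coerce"))   # second entry becomes NaN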
379,505
def gps2_raw_encode(self, time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible, dgps_numch, dgps_age): return MAVLink_gps2_raw_message(time_usec, fix_type, lat, lon, alt, eph, epv, vel, cog, satellites_visible, dgps_numch, dgps_age)
Second GPS data. Coordinate frame is right-handed, Z-axis up (GPS frame). time_usec : Timestamp (microseconds since UNIX epoch or microseconds since system boot) (uint64_t) fix_type : See the GPS_FIX_TYPE enum. (uint8_t) lat : Latitude (WGS84), in degrees * 1E7 (int32_t) lon : Longitude (WGS84), in degrees * 1E7 (int32_t) alt : Altitude (AMSL, not WGS84), in meters * 1000 (positive for up) (int32_t) eph : GPS HDOP horizontal dilution of position in cm (m*100). If unknown, set to: UINT16_MAX (uint16_t) epv : GPS VDOP vertical dilution of position in cm (m*100). If unknown, set to: UINT16_MAX (uint16_t) vel : GPS ground speed (m/s * 100). If unknown, set to: UINT16_MAX (uint16_t) cog : Course over ground (NOT heading, but direction of movement) in degrees * 100, 0.0..359.99 degrees. If unknown, set to: UINT16_MAX (uint16_t) satellites_visible : Number of satellites visible. If unknown, set to 255 (uint8_t) dgps_numch : Number of DGPS satellites (uint8_t) dgps_age : Age of DGPS info (uint32_t)
379,506
def _from_binary_ea_info(cls, binary_stream): return cls(cls._REPR.unpack(binary_stream[:cls._REPR.size]))
See base class.
379,507
def update_cer(self, symbol, cer, account=None):
    assert isinstance(cer, Price), \
        "cer needs to be instance of `bitshares.price.Price`!"
    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account, blockchain_instance=self)
    asset = Asset(symbol, blockchain_instance=self, full=True)
    assert (
        asset["id"] == cer["base"]["asset"]["id"]
        or asset["id"] == cer["quote"]["asset"]["id"]
    ), "Price needs to contain the asset of the symbol you want to update"  # message partly lost in extraction; reconstructed
    options = asset["options"]
    options.update({"core_exchange_rate": cer.as_base(symbol).json()})
    op = operations.Asset_update(
        **{
            "fee": {"amount": 0, "asset_id": "1.3.0"},
            "issuer": account["id"],
            "asset_to_update": asset["id"],
            "new_options": options,
            "extensions": [],
            "prefix": self.prefix,
        }
    )
    return self.finalizeOp(op, account["name"], "active")
Update the Core Exchange Rate (CER) of an asset :param str symbol: Symbol of the asset to publish feed for :param bitshares.price.Price cer: Core exchange Rate :param str account: (optional) the account to allow access to (defaults to ``default_account``)
379,508
def has_tag(self, model):
    for tag in model.tags:
        if self.is_tag(tag):
            return True
    return False
Does the given port have this tag?
379,509
def load_fasta_file_as_dict_of_seqrecords(filename):
    results = {}
    records = load_fasta_file(filename)
    for r in records:
        results[r.id] = r
    return results
Load a FASTA file and return the sequences as a dict of {ID: SeqRecord} Args: filename (str): Path to the FASTA file to load Returns: dict: Dictionary of IDs to their SeqRecords
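If the loader is Biopython-based, as the SeqRecord return type suggests, the same dictionary is available in one call through the public Bio.SeqIO API; the file name below is hypothetical:

from Bio import SeqIO

# keyed by record.id; raises ValueError on duplicate IDs
records = SeqIO.to_dict(SeqIO.parse("example.fasta", "fasta"))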
379,510
def _create_buffer_control(self, editor_buffer):
    @Condition
    def preview_search():
        return self.editor.incsearch

    input_processors = [
        # (the first processor in this list was lost in extraction)
        ReportingProcessor(editor_buffer),
        HighlightSelectionProcessor(),
        ConditionalProcessor(
            HighlightSearchProcessor(),
            Condition(lambda: self.editor.highlight_search)),
        ConditionalProcessor(
            HighlightIncrementalSearchProcessor(),
            Condition(lambda: self.editor.highlight_search) & preview_search),
        HighlightMatchingBracketProcessor(),
        DisplayMultipleCursors(),
    ]

    return BufferControl(
        lexer=DocumentLexer(editor_buffer),
        include_default_input_processors=False,
        input_processors=input_processors,
        buffer=editor_buffer.buffer,
        preview_search=preview_search,
        search_buffer_control=self.search_control,
        focus_on_click=True)
Create a new BufferControl for a given location.
379,511
def overlap_summary(self):
    olaps = self.compute_overlaps()
    table = [["5%: ", np.percentile(olaps, 5)],
             ["25%: ", np.percentile(olaps, 25)],
             ["50%: ", np.percentile(olaps, 50)],
             ["75%: ", np.percentile(olaps, 75)],
             ["95%: ", np.percentile(olaps, 95)],
             [" ", " "],
             ["Min: ", np.min(olaps)],
             ["Mean: ", np.mean(olaps)],
             ["Max: ", np.max(olaps)]]
    header = ["Percentile", "Overlap"]
    print(tabulate(table, header, tablefmt="rst"))  # print() for Python 3; was a Python 2 print statement
print summary of reconstruction overlaps
379,512
def get_names(file_dir, files):
    total_list = []
    name_list = []
    get_sub = False
    for path, subdir, dir_files in os.walk(file_dir):
        if not get_sub:
            total_list = subdir[:]
            get_sub = True
        else:
            break
    for user in total_list:
        has_file = True
        for f in files:
            file_path = file_dir + user + "/" + f + ".txt"
            if not os.path.exists(file_path):
                has_file = False
                break
        if has_file:
            name_list.append(user)
    if len(name_list) == 0:
        print("********Error: Cannot find any user who completes the files*************",
              file=ERROR_LOG)
    return name_list
Get the annotator name list based on a list of files Args: file_dir: AMR file folder files: a list of AMR names, e.g. nw_wsj_0001_1 Returns: a list of user names who annotate all the files
379,513
def _get_depthsr(self, goobj):
    # attribute-name literals were lost in extraction; 'reldepth'/'flds' assumed from context
    if 'reldepth' in self.gosubdag.prt_attr['flds']:
        return "R{R:02}".format(R=goobj.reldepth)
    return "D{D:02}".format(D=goobj.depth)
Return DNN or RNN depending on if relationships are loaded.
379,514
def read_from(self, provider, **options):
    for item in iter(self):
        if is_mixin(item):
            item.read_from(provider, **options)
All :class:`Pointer` fields in the `Sequence` read the necessary number of bytes from the data :class:`Provider` for their referenced :attr:`~Pointer.data` object. Null pointers are ignored. :param Provider provider: data :class:`Provider`. :keyword bool nested: if ``True`` all :class:`Pointer` fields in the :attr:`~Pointer.data` objects of all :class:`Pointer` fields in the `Sequence` read their referenced :attr:`~Pointer.data` object as well (chained method call). Each :class:`Pointer` field stores the bytes for its referenced :attr:`~Pointer.data` object in its :attr:`~Pointer.bytestream`.
379,515
def build_columns(self, X, verbose=False): return sp.sparse.csc_matrix(np.ones((len(X), 1)))
construct the model matrix columns for the term Parameters ---------- X : array-like Input dataset with n rows verbose : bool whether to show warnings Returns ------- scipy sparse array with n rows
379,516
def cli(env, identifier, sortby, cpu, domain, hostname, memory, tag, columns):
    mgr = SoftLayer.DedicatedHostManager(env.client)
    guests = mgr.list_guests(host_id=identifier,
                             cpus=cpu,
                             hostname=hostname,
                             domain=domain,
                             memory=memory,
                             tags=tag,
                             mask=columns.mask())
    table = formatting.Table(columns.columns)
    table.sortby = sortby
    for guest in guests:
        table.add_row([value or formatting.blank()
                       for value in columns.row(guest)])
    env.fout(table)
List guests which are in a dedicated host server.
379,517
def _generate_struct_class_h(self, struct):
    # NOTE: the string arguments originally passed to several emit() calls,
    # to block_h_from_data_type(), and to format() below were lost in
    # extraction; bare emit() emits a blank line, and the reconstructed
    # literals are marked as assumptions.
    self._generate_init_imports_h(struct)
    self._generate_imports_h(self._get_imports_h(struct))
    self.emit()
    self.emit()
    self.emit()
    self.emit()
    self.emit()
    self._generate_class_comment(struct)
    struct_name = fmt_class_prefix(struct)
    with self.block_h_from_data_type(struct, protocol=['DBSerializable', 'NSCopying']):  # protocol names assumed
        self.emit()
        self.emit()
        self._generate_struct_properties(struct.fields)
        self.emit()
        self.emit()
        self._generate_struct_cstor_signature(struct)
        self._generate_struct_cstor_signature_default(struct)
        self._generate_init_unavailable_signature(struct)
        self.emit()
        self.emit()
        self.emit()
    self.emit()
    self.emit(comment_prefix)
    self.emit_wrapped_text(
        'The `{}` struct serializer.'.format(fmt_class(struct.name)),  # original comment text lost; approximate
        prefix=comment_prefix)
    self.emit(comment_prefix)
    with self.block_h(fmt_serial_class(struct_name)):
        self._generate_serializer_signatures(struct_name)
        self.emit()
    self.emit()
    self.emit()
Defines an Obj C header file that represents a struct in Stone.
379,518
def next(self):
    if self.tokens:
        other = self.copy()
        tok = other.tokens.pop()
        other.tokens.append(tok.next())
        return other
    else:
        return Version.inf
Return 'next' version. Eg, next(1.2) is 1.2_
379,519
def split_once(self, horizontal: bool, position: int) -> None:
    cdata = self._as_cdata()
    lib.TCOD_bsp_split_once(cdata, horizontal, position)
    self._unpack_bsp_tree(cdata)
Split this partition into 2 sub-partitions. Args: horizontal (bool): position (int):
379,520
def get_mor_by_property(service_instance, object_type, property_value,
                        property_name='name', container_ref=None):
    # several string literals and the matching lines were lost in extraction;
    # reconstructed from the surviving fragments
    object_list = get_mors_with_properties(service_instance,
                                           object_type,
                                           property_list=[property_name],
                                           container_ref=container_ref)
    for obj in object_list:
        obj_id = six.text_type(obj.get('object', '')).strip('\'"')
        if obj[property_name] == property_value or property_value == obj_id:
            return obj['object']
    return None
Returns the first managed object reference having the specified property value. service_instance The Service Instance from which to obtain managed object references. object_type The type of content for which to obtain managed object references. property_value The name of the property for which to obtain the managed object reference. property_name An object property used to return the specified object reference results. Defaults to ``name``. container_ref An optional reference to the managed object to search under. Can either be an object of type Folder, Datacenter, ComputeResource, Resource Pool or HostSystem. If not specified, default behaviour is to search under the inventory rootFolder.
379,521
def check_type_compatibility(type_1_id, type_2_id):
    errors = []
    # the joinedload_all() target literal was lost in extraction; 'typeattrs' assumed
    type_1 = db.DBSession.query(TemplateType).filter(
        TemplateType.id == type_1_id).options(joinedload_all('typeattrs')).one()
    type_2 = db.DBSession.query(TemplateType).filter(
        TemplateType.id == type_2_id).options(joinedload_all('typeattrs')).one()
    template_1_name = type_1.template.name
    template_2_name = type_2.template.name

    type_1_attrs = set([t.attr_id for t in type_1.typeattrs])
    type_2_attrs = set([t.attr_id for t in type_2.typeattrs])
    shared_attrs = type_1_attrs.intersection(type_2_attrs)
    if len(shared_attrs) == 0:
        return []

    type_1_dict = {}
    for t in type_1.typeattrs:
        if t.attr_id in shared_attrs:
            type_1_dict[t.attr_id] = t

    for ta in type_2.typeattrs:
        if ta.attr_id not in shared_attrs:
            continue  # guard added; indexing type_1_dict with a non-shared attr would raise KeyError
        type_2_unit_id = ta.unit_id
        type_1_unit_id = type_1_dict[ta.attr_id].unit_id
        # dict-key literals were lost in extraction; reconstructed from the
        # %(...)s placeholders in the messages below
        fmt_dict = {
            'template_1_name': template_1_name,
            'template_2_name': template_2_name,
            'attr_name': ta.attr.name,
            'type_1_unit_id': type_1_unit_id,
            'type_2_unit_id': type_2_unit_id,
            'type_name': type_1.name,
        }
        if type_1_unit_id is None and type_2_unit_id is not None:
            errors.append("Type %(type_name)s in template %(template_1_name)s"
                          " stores %(attr_name)s with no units, while template"
                          " %(template_2_name)s stores it with unit %(type_2_unit_id)s" % fmt_dict)
        elif type_1_unit_id is not None and type_2_unit_id is None:
            errors.append("Type %(type_name)s in template %(template_1_name)s"
                          " stores %(attr_name)s in %(type_1_unit_id)s."
                          " Template %(template_2_name)s stores it with no unit." % fmt_dict)
        elif type_1_unit_id != type_2_unit_id:
            errors.append("Type %(type_name)s in template %(template_1_name)s"
                          " stores %(attr_name)s in %(type_1_unit_id)s, while"
                          " template %(template_2_name)s stores it in %(type_2_unit_id)s" % fmt_dict)
    return errors
When applying a type to a resource, it may be the case that the resource already has an attribute specified in the new type, but the template which defines this pre-existing attribute has a different unit specification to the new template. This function checks for any situations where different types specify the same attributes, but with different units.
379,522
def declareProvisioner(self, *args, **kwargs): return self._makeApiCall(self.funcinfo["declareProvisioner"], *args, **kwargs)
Update a provisioner Declare a provisioner, supplying some details about it. `declareProvisioner` allows updating one or more properties of a provisioner as long as the required scopes are possessed. For example, a request to update the `aws-provisioner-v1` provisioner with a body `{description: 'This provisioner is great'}` would require you to have the scope `queue:declare-provisioner:aws-provisioner-v1#description`. The term "provisioner" is taken broadly to mean anything with a provisionerId. This does not necessarily mean there is an associated service performing any provisioning activity. This method takes input: ``v1/update-provisioner-request.json#`` This method gives output: ``v1/provisioner-response.json#`` This method is ``experimental``
379,523
def compile(self, scss_string=None, scss_file=None, source_files=None,
            super_selector=None, filename=None, is_sass=None,
            line_numbers=True, import_static_css=False):
    # (several option-key string literals below were lost in extraction and
    # are reconstructed from the pyScss sources this appears to come from)
    self.scss_vars = _default_scss_vars.copy()
    if self._scss_vars is not None:
        self.scss_vars.update(self._scss_vars)

    root_namespace = Namespace(
        variables=self.scss_vars,
        functions=self._library,
    )

    search_paths = []
    if self._search_paths is not None:
        assert not isinstance(self._search_paths, six.string_types), \
            "`search_paths` should be an iterable, not a string"
        search_paths.extend(self._search_paths)
    else:
        if config.LOAD_PATHS:
            if isinstance(config.LOAD_PATHS, six.string_types):
                # back-compat: allow comma-delimited load paths
                search_paths.extend(config.LOAD_PATHS.split(','))
            else:
                search_paths.extend(config.LOAD_PATHS)
        search_paths.extend(self._scss_opts.get('load_paths', []))

    output_style = self._scss_opts.get('style', config.STYLE)
    if output_style is True:
        output_style = 'compressed'
    elif output_style is False:
        output_style = 'legacy'

    fixed_search_path = []
    for path in search_paths:
        if isinstance(path, six.string_types):
            fixed_search_path.append(Path(path))
        else:
            fixed_search_path.append(path)

    compiler = Compiler(
        namespace=root_namespace,
        extensions=[
            CoreExtension,
            ExtraExtension,
            FontsExtension,
            CompassExtension,
            BootstrapExtension,
        ],
        search_path=fixed_search_path,
        import_static_css=import_static_css,
        live_errors=self.live_errors,
        generate_source_map=self._scss_opts.get('debug_info', False),
        output_style=output_style,
        warn_unused_imports=self._scss_opts.get('warn_unused', False),
        ignore_parse_errors=config.DEBUG,
        loops_have_own_scopes=config.CONTROL_SCOPING,
        undefined_variables_fatal=config.FATAL_UNDEFINED,
        super_selector=super_selector or self.super_selector,
    )
    compilation = compiler.make_compilation()

    if source_files is not None:
        for source in source_files:
            compilation.add_source(source)
    elif scss_string is not None:
        source = SourceFile.from_string(
            scss_string,
            relpath=filename,
            is_sass=is_sass,
        )
        compilation.add_source(source)
    elif scss_file is not None:
        with open(scss_file, 'rb') as f:
            source = SourceFile.from_file(
                f,
                relpath=filename or scss_file,
                is_sass=is_sass,
            )
        compilation.add_source(source)

    if self._scss_files:
        for name, contents in list(self._scss_files.items()):
            source = SourceFile.from_string(contents, relpath=name)
            compilation.add_source(source)

    compiled = compiler.call_and_catch_errors(compilation.run)
    self.source_files = list(SourceFileTuple(*os.path.split(s.path))
                             for s in compilation.source_index.values())
    return compiled
Compile Sass to CSS. Returns a single CSS string. This method is DEPRECATED; see :mod:`scss.compiler` instead.
379,524
def save(self, directory_path):
    if self.material:
        directory_path = os.path.expanduser(directory_path)
        file_path = os.path.join(directory_path, '%s.pem' % self.name)
        if os.path.exists(file_path):
            raise BotoClientError(
                '%s already exists, it will not be overwritten' % file_path)
        fp = open(file_path, 'wb')
        fp.write(self.material)
        fp.close()
        os.chmod(file_path, 0o600)  # was the Python 2 octal literal 0600
        return True
    else:
        raise BotoClientError('KeyPair contains no material')
Save the material (the unencrypted PEM encoded RSA private key) of a newly created KeyPair to a local file. :type directory_path: string :param directory_path: The fully qualified path to the directory in which the keypair will be saved. The keypair file will be named using the name of the keypair as the base name and .pem for the file extension. If a file of that name already exists in the directory, an exception will be raised and the old file will not be overwritten. :rtype: bool :return: True if successful.
379,525
def setupFeatures(self):
    if self.featureWriters:
        featureFile = parseLayoutFeatures(self.ufo)
        for writer in self.featureWriters:
            writer.write(self.ufo, featureFile, compiler=self)
        self.features = featureFile.asFea()
    else:
        # no featureWriters, simply read the existing features' text
        self.features = tounicode(self.ufo.features.text or "", "utf-8")
Make the features source. **This should not be called externally.** Subclasses may override this method to handle the file creation in a different way if desired.
379,526
def from_gff3(path, attributes=None, region=None, score_fill=-1,
              phase_fill=-1, attributes_fill='.', dtype=None):
    # the attributes_fill default literal was lost in extraction; '.' assumed
    # per the GFF3 missing-value convention
    a = gff3_to_recarray(path, attributes=attributes, region=region,
                         score_fill=score_fill, phase_fill=phase_fill,
                         attributes_fill=attributes_fill, dtype=dtype)
    if a is None:
        return None
    else:
        return FeatureTable(a, copy=False)
Read a feature table from a GFF3 format file. Parameters ---------- path : string File path. attributes : list of strings, optional List of columns to extract from the "attributes" field. region : string, optional Genome region to extract. If given, file must be position sorted, bgzipped and tabix indexed. Tabix must also be installed and on the system path. score_fill : int, optional Value to use where score field has a missing value. phase_fill : int, optional Value to use where phase field has a missing value. attributes_fill : object or list of objects, optional Value(s) to use where attribute field(s) have a missing value. dtype : numpy dtype, optional Manually specify a dtype. Returns ------- ft : FeatureTable
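Assuming this is scikit-allel's FeatureTable (which exposes a classmethod of this name), usage looks like the following; the path and attribute names are illustrative:

import allel

ft = allel.FeatureTable.from_gff3('annotations.gff3',
                                  attributes=['ID', 'Parent'])
if ft is not None:
    print(ft.n_features)  # number of parsed feature rows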
379,527
def get_platform_settings():
    s = settings.PLATFORMS
    if hasattr(settings, 'FACEBOOK') and settings.FACEBOOK:
        s.append({
            # dict keys assumed from context; the 'class' value literal
            # (a dotted platform class path) was lost in extraction
            'class': '...',
            'settings': settings.FACEBOOK,
        })
    return s
Returns the content of `settings.PLATFORMS` with a twist. The platforms settings was created to stay compatible with the old way of declaring the FB configuration, in order not to break production bots. This function will convert the legacy configuration into the new configuration if required. As a result, it should be the only used way to access the platform configuration.
379,528
def colorbar(fig, ax, im,
             width=0.05, height=1.0,
             hoffset=0.01, voffset=0.0,
             orientation='vertical'):  # default literal lost in extraction; 'horizontal' or 'vertical' per the docstring
    rect = np.array(ax.get_position().bounds)
    caxrect = [0] * 4
    caxrect[0] = rect[0] + rect[2] + hoffset * rect[2]
    caxrect[1] = rect[1] + voffset * rect[3]
    caxrect[2] = rect[2] * width
    caxrect[3] = rect[3] * height
    cax = fig.add_axes(caxrect)
    cb = fig.colorbar(im, cax=cax, orientation=orientation)
    return cb
draw colorbar without resizing the axes object to make room kwargs: :: fig : matplotlib.figure.Figure ax : matplotlib.axes.AxesSubplot im : matplotlib.image.AxesImage width : float, colorbar width in fraction of ax width height : float, colorbar height in fraction of ax height hoffset : float, horizontal spacing to main axes in fraction of width voffset : float, vertical spacing to main axis in fraction of height orientation : str, 'horizontal' or 'vertical' return: :: object : colorbar handle
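A runnable demonstration of the helper's idea, placing the colorbar on a manually added axes so the main axes keeps its size, using only standard matplotlib:

import numpy as np
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
im = ax.imshow(np.random.rand(10, 10))

# carve out a thin axes immediately to the right of `ax`, as the helper does
l, b, w, h = ax.get_position().bounds
cax = fig.add_axes([l + w + 0.01 * w, b, 0.05 * w, h])
fig.colorbar(im, cax=cax, orientation='vertical')
plt.show()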
379,529
def GET(self):
    todos = model.get_todos()
    form = self.form()
    return render.index(todos, form)
Show page
379,530
def insert_func(self, index, func, *args, **kwargs):
    wrapped_func = partial(func, *args, **kwargs)  # typo fixed: was 'wraped_func'
    self.insert(index, wrapped_func)
insert func with given arguments and keywords.
379,531
def _generate_response_head_bytes(status_code, headers):
    head_string = str(status_code) + _DELIMITER_NEWLINE
    header_tuples = sorted((k, headers[k]) for k in headers)
    for name, value in header_tuples:
        name = _get_header_correctly_cased(name)
        if _should_sign_response_header(name):
            head_string += _FORMAT_HEADER_STRING.format(name, value)
    return (head_string + _DELIMITER_NEWLINE).encode()
:type status_code: int :type headers: dict[str, str] :rtype: bytes
379,532
def add_properties(props, mol):
    if not props:
        return
    for _, atom in mol.atoms_iter():
        atom.charge = 0
        atom.multi = 1
        atom.mass = None
    for prop in props.get("CHG", []):
        mol.atom(prop[0]).charge = prop[1]
    for prop in props.get("RAD", []):
        mol.atom(prop[0]).multi = prop[1]
    for prop in props.get("ISO", []):
        mol.atom(prop[0]).mass = prop[1]
apply properties to the molecule object Returns: None (alter molecule object directly)
379,533
def setedgeval(delta, is_multigraph, graph, orig, dest, idx, key, value):
    # the dict-key literal was lost in extraction; 'edge_val' assumed from context
    if is_multigraph(graph):
        if (
            graph in delta and 'edge_val' in delta[graph]
            and orig in delta[graph]['edge_val']
            and dest in delta[graph]['edge_val'][orig]
            and idx in delta[graph]['edge_val'][orig][dest]
            and not delta[graph]['edge_val'][orig][dest][idx]
        ):
            return
        delta.setdefault(graph, {}).setdefault('edge_val', {})\
            .setdefault(orig, {}).setdefault(dest, {})\
            .setdefault(idx, {})[key] = value
    else:
        if (
            graph in delta and 'edge_val' in delta[graph]
            and orig in delta[graph]['edge_val']
            and dest in delta[graph]['edge_val'][orig]
            and not delta[graph]['edge_val'][orig][dest]
        ):
            return
        delta.setdefault(graph, {}).setdefault('edge_val', {})\
            .setdefault(orig, {}).setdefault(dest, {})[key] = value
Change a delta to say that an edge stat was set to a certain value
379,534
def retry(*r_args, **r_kwargs):
    def decorate(func):
        @functools.wraps(func)
        def wrapper(*f_args, **f_kwargs):
            bound = functools.partial(func, *f_args, **f_kwargs)
            return retry_call(bound, *r_args, **r_kwargs)
        return wrapper
    return decorate
Decorator wrapper for retry_call. Accepts arguments to retry_call except func and then returns a decorator for the decorated function. Ex: >>> @retry(retries=3) ... def my_func(a, b): ... "this is my funk" ... print(a, b) >>> my_func.__doc__ 'this is my funk'
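The decorator defers the actual retrying to retry_call. A minimal sketch of a compatible retry_call, only to make the flow concrete; the real function in the library may use different keyword names, so treat `retries`, `cleanup` and `trap` as assumptions:

def retry_call(func, retries=0, cleanup=None, trap=Exception):
    # call func up to retries+1 times, re-raising the last failure
    for attempt in range(retries + 1):
        try:
            return func()
        except trap:
            if attempt == retries:
                raise
            if cleanup is not None:
                cleanup()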
379,535
def read_input_file(self, fn):
    # (several interior lines and string literals were lost in extraction;
    # reconstructed here following the behaviour described in the docstring)
    fnfull = os.path.realpath(os.path.join(self.tex_input_directory, fn))
    if self.strict_input:
        # ensure the file is strictly within the reference input directory,
        # after resolving symlinks
        dirfull = os.path.realpath(self.tex_input_directory)
        if not fnfull.startswith(dirfull):
            logger.warning(
                u"Can't access path '%s' leading outside of the mandated "
                u"directory [strict input mode]", fn)
            return ''
    if not os.path.exists(fnfull) and os.path.exists(fnfull + '.tex'):
        fnfull = fnfull + '.tex'
    if not os.path.exists(fnfull) and os.path.exists(fnfull + '.latex'):
        fnfull = fnfull + '.latex'
    if not os.path.isfile(fnfull):
        logger.warning(u"Error, file doesn't exist: '%s'", fn)
        return ''
    try:
        with open(fnfull) as f:
            return f.read()
    except IOError as e:
        logger.warning(u"Error, can't access file '%s': %s", fn, e)
        return ''
This method may be overridden to implement a custom lookup mechanism when encountering ``\\input`` or ``\\include`` directives. The default implementation looks for a file of the given name relative to the directory set by :py:meth:`set_tex_input_directory()`. If `strict_input=True` was set, we ensure strictly that the file resides in a subtree of the reference input directory (after canonicalizing the paths and resolving all symlinks). You may override this method to obtain the input data in however way you see fit. (In that case, a call to `set_tex_input_directory()` may not be needed as that function simply sets properties which are used by the default implementation of `read_input_file()`.) This function accepts the referred filename as argument (the argument to the ``\\input`` macro), and should return a string with the file contents (or generate a warning or raise an error).
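Since the docstring invites overriding, here is a hedged sketch of a subclass that serves \input files from an in-memory mapping instead of disk; the class and import path follow pylatexenc's latex2text module, which this function appears to come from:

from pylatexenc.latex2text import LatexNodes2Text

class InMemoryL2T(LatexNodes2Text):
    def __init__(self, sources, **kwargs):
        super().__init__(**kwargs)
        self.sources = sources  # dict mapping file name -> file contents

    def read_input_file(self, fn):
        # return the stored contents, or an empty string if unknown
        return self.sources.get(fn, '')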
379,536
def file_can_be_read(path):
    if path is None:
        return False
    try:
        with io.open(path, "rb") as test_file:
            pass
        return True
    except (IOError, OSError):
        pass
    return False
Return ``True`` if the file at the given ``path`` can be read. :param string path: the file path :rtype: bool .. versionadded:: 1.4.0
379,537
def visitLexerRuleSpec(self, ctx: jsgParser.LexerRuleSpecContext): self._context.grammarelts[as_token(ctx)] = JSGLexerRuleBlock(self._context, ctx.lexerRuleBlock())
lexerRuleSpec: LEXER_ID COLON lexerRuleBlock SEMI
379,538
def translations_link(self):
    translation_type = ContentType.objects.get_for_model(Translation)
    # URL-pattern and query-string literals were lost in extraction;
    # reconstructed following standard Django admin conventions
    link = urlresolvers.reverse('admin:%s_%s_changelist' % (
        translation_type.app_label, translation_type.model))
    object_type = ContentType.objects.get_for_model(self)
    link += '?content_type=%s&object_id=%s' % (object_type.id, self.id)
    return '<a href="%s">translations</a>' % link
Print on admin change list the link to see all translations for this object @type text: string @param text: a string with the html to link to the translations admin interface
379,539
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    C = self.COEFFS_SINTER[imt]

    # cap magnitude at 8.5
    mag = rup.mag
    if mag > 8.5:
        mag = 8.5

    # compute PGA on rock (needed for the site amplification term)
    G = 10 ** (1.2 - 0.18 * mag)
    pga_rock = self._compute_mean(self.COEFFS_SINTER[PGA()], G, mag,
                                  rup.hypo_depth, dists.rrup, sites.vs30,
                                  np.zeros_like(sites.vs30) + 600,
                                  PGA())
    pga_rock = 10 ** (pga_rock)

    # periods 0.2 s and 0.4 s use a weighted combination of coefficients
    if imt.period in (0.2, 0.4):
        C04 = self.COEFFS_SINTER[SA(period=0.4, damping=5.0)]
        C02 = self.COEFFS_SINTER[SA(period=0.2, damping=5.0)]
        mean04 = self._compute_mean(C04, G, mag, rup.hypo_depth, dists.rrup,
                                    sites.vs30, pga_rock, imt)
        mean02 = self._compute_mean(C02, G, mag, rup.hypo_depth, dists.rrup,
                                    sites.vs30, pga_rock, imt)
        if imt.period == 0.2:
            mean = 0.333 * mean02 + 0.667 * mean04
        else:
            mean = 0.333 * mean04 + 0.667 * mean02
    else:
        mean = self._compute_mean(C, G, mag, rup.hypo_depth, dists.rrup,
                                  sites.vs30, pga_rock, imt)

    # convert from log10 to ln, and from cm/s**2 to g
    mean = np.log((10 ** mean) * 1e-2 / g)

    if imt.period == 4.0:
        mean /= 0.550

    stddevs = self._get_stddevs(C, stddev_types, sites.vs30.shape[0])
    return mean, stddevs
See :meth:`superclass method <.base.GroundShakingIntensityModel.get_mean_and_stddevs>` for spec of input and result values.
379,540
def disabledColor(self):
    palette = self.palette()
    return palette.color(palette.Disabled, palette.NodeBackground)
Returns the color this node should render when it is disabled. :return <QColor>
379,541
def is_sqlatype_text_of_length_at_least(
        coltype: Union[TypeEngine, VisitableType],
        min_length: int = 1000) -> bool:
    coltype = _coltype_to_typeengine(coltype)
    if not isinstance(coltype, sqltypes.String):
        return False  # not a string/text type at all
    if coltype.length is None:
        return True  # string of unlimited length
    return coltype.length >= min_length
Is the SQLAlchemy column type a string type that's at least the specified length?
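Usage is straightforward with SQLAlchemy's built-in types; a quick illustration, assuming the helper accepts type instances as shown above:

from sqlalchemy import Integer, String, Text

print(is_sqlatype_text_of_length_at_least(String(100)))   # False: too short
print(is_sqlatype_text_of_length_at_least(String(2000)))  # True
print(is_sqlatype_text_of_length_at_least(Text()))        # True: unlimited length
print(is_sqlatype_text_of_length_at_least(Integer()))     # False: not a string type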
379,542
def add_bundle(name, scripts=[], files=[], scriptsdir=SCRIPTSDIR,
               filesdir=FILESDIR):
    scriptmap = makemap(scripts, join(PATH, scriptsdir))
    filemap = dict(zip(files, [join(PATH, filesdir, os.path.basename(f))
                               for f in files]))
    new_bundle(name, scriptmap, filemap)
High level, simplified interface for creating a bundle which takes the bundle name, a list of script file names in a common scripts directory, and a list of absolute target file paths, of which the basename is also located in a common files directory. It converts those lists into maps and then calls new_bundle() to actually create the Bundle and add it to BUNDLEMAP
379,543
def path_completer(text, expected=None, classes=None, perm_level=None,
                   include_current_proj=False, typespec=None, visibility=None):
    # (the opening lines of this function, which compute the delimiter
    # positions and initialize the match list, were lost in extraction along
    # with several string literals; reconstructed here and marked as assumed)
    colon_pos = get_last_pos_of_char(':', text)
    slash_pos = get_last_pos_of_char('/', text)
    delim_pos = max(colon_pos, slash_pos)
    matches = []

    if colon_pos < 0 and slash_pos < 0:
        # might be tab-completing a project name, but don't ever include
        # whole project IDs
        if text != "" or expected == 'project':
            results = dxpy.find_projects(describe=True, level=perm_level)
            if not include_current_proj:
                results = [r for r in results if r['id'] != dxpy.WORKSPACE_ID]
            matches += [escape_colon(r['describe']['name']) + ':'
                        for r in results
                        if r['describe']['name'].startswith(text)]

    if expected == 'project':
        return matches

    if colon_pos < 0 and slash_pos >= 0:
        # work within the current project
        if dxpy.WORKSPACE_ID is not None:
            try:
                dxproj = dxpy.get_handler(dxpy.WORKSPACE_ID)
                folderpath, entity_name = clean_folder_path(text)
                matches += get_folder_matches(text, slash_pos, dxproj, folderpath)
                if expected != 'folder':
                    if classes is not None:
                        for classname in classes:
                            matches += get_data_matches(text, slash_pos, dxproj,
                                                        folderpath, classname=classname,
                                                        typespec=typespec,
                                                        visibility=visibility)
                    else:
                        matches += get_data_matches(text, slash_pos, dxproj,
                                                    folderpath, typespec=typespec,
                                                    visibility=visibility)
            except:
                pass
    else:
        try:
            proj_ids, folderpath, entity_name = resolve_path(text, multi_projects=True)
        except ResolutionError as details:
            sys.stderr.write("\n" + fill(str(details)))
            return matches
        for proj in proj_ids:
            try:
                dxproj = dxpy.get_handler(proj)
                matches += get_folder_matches(text, delim_pos, dxproj, folderpath)
                if expected != 'folder':
                    if classes is not None:
                        for classname in classes:
                            matches += get_data_matches(text, delim_pos, dxproj,
                                                        folderpath, classname=classname,
                                                        typespec=typespec,
                                                        visibility=visibility)
                    else:
                        matches += get_data_matches(text, delim_pos, dxproj,
                                                    folderpath, typespec=typespec,
                                                    visibility=visibility)
            except:
                pass
    return matches
:param text: String to tab-complete to a path matching the syntax project-name:folder/entity_or_folder_name :type text: string :param expected: "folder", "entity", "project", or None (no restriction) as to the types of answers to look for :type expected: string :param classes: if expected="entity", the possible data object classes that are acceptable :type classes: list of strings :param perm_level: the minimum permissions level required, e.g. "VIEW" or "CONTRIBUTE" :type perm_level: string :param include_current_proj: Indicate whether the current project's name should be a potential result :type include_current_proj: boolean :param visibility: Visibility with which to restrict the completion (one of "either", "visible", or "hidden") (default behavior is dependent on *text*) Returns a list of matches to the text and restricted by the requested parameters.
379,544
@classmethod
def fromPattern(cls, datetime_string, datetime_pattern, time_zone, require_hour=True):
    # (string literals below were lost in extraction; the directive groups
    # and messages are reconstructed, requiring year, month, day and hour)
    title = 'labDT.fromPattern'  # original title literal lost; assumed
    dT_req = [['%Y', '%y'], ['%m', '%b', '%B'], ['%d'], ['%H', '%I']]
    req_counter = 0
    for i in dT_req:
        for j in i:
            if j in datetime_pattern:
                req_counter += 1
    if not req_counter == 4 and require_hour:
        raise Exception(
            '%s requires a datetime pattern with year, month, day and hour.' % title)
    try:
        get_tz = tz.gettz(time_zone)
    except:
        raise ValueError('%s requires a valid time zone.' % title)
    python_datetime = datetime.strptime(datetime_string, datetime_pattern)
    python_datetime = python_datetime.replace(tzinfo=tz.gettz(time_zone))
    dT = python_datetime.astimezone(pytz.utc)
    dt_kwargs = {
        'year': dT.year,
        'month': dT.month,
        'day': dT.day,
        'hour': dT.hour,
        'minute': dT.minute,
        'second': dT.second,
        'microsecond': dT.microsecond,
        'tzinfo': dT.tzinfo,
    }
    return labDT(**dt_kwargs)
a method for constructing labDT from a strptime pattern in a string https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior iso_pattern: '%Y-%m-%dT%H:%M:%S.%f%z' human_friendly_pattern: '%A, %B %d, %Y %I:%M:%S.%f%p' :param datetime_string: string with date and time info :param datetime_pattern: string with python formatted pattern :param time_zone: string with timezone info :param require_hour: [optional] boolean to disable hour requirement :return: labDT object with datetime
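The two patterns named in the docstring map directly onto standard strptime directives. A small, library-free check of the human-friendly pattern (June 5, 2017 really was a Monday):

from datetime import datetime

pattern = '%A, %B %d, %Y %I:%M:%S.%f%p'
dt = datetime.strptime('Monday, June 05, 2017 10:15:30.000000AM', pattern)
print(dt)  # 2017-06-05 10:15:30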
379,545
def asDictionary(self):
    if self._wkid is None and self._wkt is not None:  # 'is None' instead of '== None'
        return {"wkt": self._wkt}
    else:
        return {"wkid": self._wkid}
returns the wkid id for use in json calls
379,546
def create_from_name_and_dictionary(self, name, datas):
    if "type" not in datas:
        str_type = "any"
    else:
        str_type = str(datas["type"]).lower()

    if str_type not in ObjectRaw.Types:
        type = ObjectRaw.Types("type")
    else:
        type = ObjectRaw.Types(str_type)

    if type is ObjectRaw.Types.object:
        object = ObjectObject()
        if "properties" in datas:
            object.properties = self.create_dictionary_of_element_from_dictionary("properties", datas)
        if "patternProperties" in datas:
            object.pattern_properties = self.create_dictionary_of_element_from_dictionary("patternProperties", datas)
        if "additionalProperties" in datas:
            if isinstance(datas["additionalProperties"], dict):
                object.additional_properties = self.create_from_name_and_dictionary("additionalProperties", datas["additionalProperties"])
            elif not to_boolean(datas["additionalProperties"]):
                object.additional_properties = None
            else:
                raise ValueError("AdditionalProperties does not allow empty value (yet)")
    elif type is ObjectRaw.Types.array:
        object = ObjectArray()
        if "items" in datas:
            object.items = self.create_from_name_and_dictionary("items", datas["items"])
        else:
            object.items = ObjectObject()
        if "sample_count" in datas:
            object.sample_count = int(datas["sample_count"])
    elif type is ObjectRaw.Types.number:
        object = ObjectNumber()
    elif type is ObjectRaw.Types.integer:
        object = ObjectInteger()
    elif type is ObjectRaw.Types.string:
        object = ObjectString()
    elif type is ObjectRaw.Types.boolean:
        object = ObjectBoolean()
        if "sample" in datas:
            object.sample = to_boolean(datas["sample"])
    elif type is ObjectRaw.Types.reference:
        object = ObjectReference()
        if "reference" in datas:
            object.reference_name = str(datas["reference"])
    elif type is ObjectRaw.Types.type:
        object = ObjectType()
        object.type_name = str(datas["type"])
    elif type is ObjectRaw.Types.none:
        object = ObjectNone()
    elif type is ObjectRaw.Types.dynamic:
        object = ObjectDynamic()
        if "items" in datas:
            object.items = self.create_from_name_and_dictionary("items", datas["items"])
        if "sample" in datas:
            if isinstance(datas["sample"], dict):
                object.sample = {}
                for k, v in datas["sample"].items():
                    object.sample[str(k)] = str(v)
            else:
                raise ValueError("A dictionary is expected for dynamic object in \"%s\"" % name)
    elif type is ObjectRaw.Types.const:
        object = ObjectConst()
        if "const_type" in datas:
            const_type = str(datas["const_type"])
            if const_type not in ObjectConst.Types:
                raise ValueError("Const type \"%s\" unknown" % const_type)
        else:
            const_type = ObjectConst.Types.string
        object.const_type = const_type
        if "value" not in datas:
            raise ValueError("Missing const value")
        object.value = datas["value"]
    elif type is ObjectRaw.Types.enum:
        object = ObjectEnum()
        if "values" not in datas or not isinstance(datas["values"], list):
            raise ValueError("Missing enum values")
        object.values = [str(value) for value in datas["values"]]
        if "descriptions" in datas and isinstance(datas["descriptions"], dict):
            for (value_name, value_description) in datas["descriptions"].items():
                value = EnumValue()
                value.name = value_name
                value.description = value_description
                object.descriptions.append(value)
        descriptions = [description.name for description in object.descriptions]
        for value_name in [x for x in object.values if x not in descriptions]:
            value = EnumValue()
            value.name = value_name
            object.descriptions.append(value)
    else:
        object = ObjectRaw()

    self.set_common_datas(object, name, datas)
    if isinstance(object, Constraintable):
        self.set_constraints(object, datas)

    object.type = type
    if "optional" in datas:
        object.optional = to_boolean(datas["optional"])

    return object
Return a populated Object instance built from the dictionary datas.
379,547
def _find_child(self, tag):
    tag = self._get_namespace_tag(tag)
    children = self._root.findall(tag)
    if len(children) > 1:
        raise WSDLParseError("Duplicate tag '%s'" % tag)
    if len(children) == 0:
        return None
    return children[0]
Find the child C{etree.Element} with the matching C{tag}. @raises L{WSDLParseError}: If more than one such elements are found.
379,548
def query(starttime, endtime, output=None, *filenames):
    # (mode and extension string literals were lost in extraction and are
    # reconstructed; `open` here is the pcap module's own open(), which
    # yields (header, packet) pairs and supports write(..., header=...))
    if not output:
        output = (filenames[0].replace('.pcap', '') + starttime.isoformat() +
                  '-' + endtime.isoformat() + '.pcap')
    else:
        output = output
    with open(output, 'w') as outfile:
        for filename in filenames:
            log.info("pcap.query: processing %s..." % filename)
            with open(filename, 'r') as stream:
                for header, packet in stream:
                    if packet is not None:
                        if header.timestamp >= starttime and header.timestamp <= endtime:
                            outfile.write(packet, header=header)
Given a time range and input file, query creates a new file with only that subset of data. If no outfile name is given, the new file name is the old file name with the time range appended. Args: starttime: The datetime of the beginning time range to be extracted from the files. endtime: The datetime of the end of the time range to be extracted from the files. output: Optional: The output file name. Defaults to [first filename in filenames][starttime]-[endtime].pcap filenames: A tuple of one or more file names to extract data from.
379,549
def get_features_all(self):
    features = {}
    all_vars = vars(self)
    for name in all_vars.keys():
        if name in feature_names_list_all:
            features[name] = all_vars[name]
    features = OrderedDict(sorted(features.items(), key=lambda t: t[0]))
    return features
Return all features with its names. Regardless of being used for train and prediction. Sorted by the names. Returns ------- all_features : OrderedDict Features dictionary.
379,550
def show_instance(name, call=None):
    if call != 'action':
        raise SaltCloudSystemExit(
            'The show_instance action must be called with -a or --action.'
        )
    try:
        node = list_nodes_full()[name]
    except KeyError:
        log.debug('Failed to get data for node \'%s\'', name)
        node = {}
    __utils__['cloud.cache_node'](node, __active_provider_name__, __opts__)
    return node
Show the details from AzureARM concerning an instance
379,551
def _deriv_logaddexp2(x1, x2):
    y1 = np.exp2(x1)
    y2 = np.exp2(x2)
    df_dx1 = y1 / (y1 + y2)
    df_dx2 = y2 / (y1 + y2)
    return np.vstack([df_dx1, df_dx2]).T
The derivative of f(x, y) = log2(2^x + 2^y)
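Since f(x1, x2) = log2(2^x1 + 2^x2), each partial derivative reduces to a softmax-like weight, which can be spot-checked numerically against numpy's logaddexp2:

import numpy as np

x1, x2, eps = 0.3, 1.7, 1e-6
f = np.logaddexp2
num_dx1 = (f(x1 + eps, x2) - f(x1 - eps, x2)) / (2 * eps)
ana_dx1 = np.exp2(x1) / (np.exp2(x1) + np.exp2(x2))
assert np.isclose(num_dx1, ana_dx1)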
379,552
def velocity_graph(adata, basis=None, vkey='velocity', which_graph='velocity',
                   n_neighbors=10, alpha=.8, perc=90, edge_width=.2,
                   edge_color='grey', color=None, use_raw=None, layer=None,
                   color_map=None, colorbar=True, palette=None, size=None,
                   sort_order=True, groups=None, components=None,
                   projection='2d', legend_loc='none', legend_fontsize=None,
                   legend_fontweight=None, right_margin=None, left_margin=None,
                   xlabel=None, ylabel=None, title=None, fontsize=None,
                   figsize=None, dpi=None, frameon=None, show=True, save=None,
                   ax=None):
    # (default-value and key string literals were lost in extraction and are
    # reconstructed from context; treat them as assumptions)
    basis = default_basis(adata) if basis is None else basis
    title = which_graph + ' graph' if title is None else title
    scatter_kwargs = {"basis": basis, "perc": perc, "use_raw": use_raw,
                      "sort_order": sort_order, "alpha": alpha,
                      "components": components, "projection": projection,
                      "legend_loc": legend_loc, "groups": groups,
                      "legend_fontsize": legend_fontsize,
                      "legend_fontweight": legend_fontweight,
                      "palette": palette, "color_map": color_map,
                      "frameon": frameon, "title": title,
                      "xlabel": xlabel, "ylabel": ylabel,
                      "right_margin": right_margin, "left_margin": left_margin,
                      "colorbar": colorbar, "dpi": dpi, "fontsize": fontsize,
                      "show": False, "save": None, "figsize": figsize}
    ax = scatter(adata, layer=layer, color=color, size=size, ax=ax, zorder=0,
                 **scatter_kwargs)

    from networkx import Graph, draw_networkx_edges
    if which_graph == 'neighbors':
        T = adata.uns['neighbors']['connectivities']
        if perc is not None:
            threshold = np.percentile(T.data, perc)
            T.data[T.data < threshold] = 0
            T.eliminate_zeros()
    else:
        T = transition_matrix(adata, vkey=vkey, weight_indirect_neighbors=0,
                              n_neighbors=n_neighbors, perc=perc)

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        edges = draw_networkx_edges(Graph(T), adata.obsm['X_' + basis],
                                    width=edge_width, edge_color=edge_color,
                                    ax=ax)
        edges.set_zorder(-2)
        edges.set_rasterized(settings._vector_friendly)

    savefig_or_show('velocity_graph' if basis is None else basis,
                    dpi=dpi, save=save, show=show)
    if not show:
        return ax
\ Plot of the velocity graph. Arguments --------- adata: :class:`~anndata.AnnData` Annotated data matrix. vkey: `str` or `None` (default: `None`) Key for annotations of observations/cells or variables/genes. which_graph: `'velocity'` or `'neighbors'` (default: `'velocity'`) Whether to show transitions from velocity graph or connectivities from neighbors graph. {scatter} Returns ------- `matplotlib.Axis` if `show==False`
379,553
def _register_custom_filters(self):
    # (setting-key, attribute-name and message literals were lost in
    # extraction; reconstructed from context)
    custom_filters = self.settings.get('CUSTOM_FILTERS', [])
    if not isinstance(custom_filters, list):
        raise KeyError("`CUSTOM_FILTERS` setting must be a list.")
    for filter_module_name in custom_filters:
        try:
            filter_module = import_module(filter_module_name)
        except ImportError as error:
            raise ImproperlyConfigured(
                "Failed to load custom filter module {}.\n"
                "Error was: {}".format(filter_module_name, error)
            )
        try:
            filter_map = getattr(filter_module, 'FILTERS')
            if not isinstance(filter_map, dict):
                raise TypeError
        except (AttributeError, TypeError):
            raise ImproperlyConfigured(
                "Filter module {} does not define a `FILTERS` dictionary".format(filter_module_name)
            )
        self._environment.filters.update(filter_map)
Register any custom filter modules.
379,554
async def _download_photo(self, photo, file, date, thumb, progress_callback):
    if isinstance(photo, types.MessageMediaPhoto):
        photo = photo.photo
    if not isinstance(photo, types.Photo):
        return
    size = self._get_thumb(photo.sizes, thumb)
    if not size or isinstance(size, types.PhotoSizeEmpty):
        return
    file = self._get_proper_filename(file, 'photo', '.jpg', date=date)
    if isinstance(size, (types.PhotoCachedSize, types.PhotoStrippedSize)):
        return self._download_cached_photo_size(size, file)
    result = await self.download_file(
        types.InputPhotoFileLocation(
            id=photo.id,
            access_hash=photo.access_hash,
            file_reference=photo.file_reference,
            thumb_size=size.type
        ),
        file,
        file_size=size.size,
        progress_callback=progress_callback
    )
    return result if file is bytes else file
Specialized version of .download_media() for photos
379,555
def absent(name, vname=None, use_32bit_registry=False):
    # (this function body was corrupted in extraction; reconstructed from the
    # surviving string fragments and the docstring below)
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': ''}

    hive, key = _parse_key(name)

    reg_check = __utils__['reg.read_value'](hive=hive,
                                            key=key,
                                            vname=vname,
                                            use_32bit_registry=use_32bit_registry)
    if not reg_check['success'] or reg_check['vdata'] == '(value not set)':
        ret['comment'] = '{0} is already absent'.format(name)
        return ret

    remove_change = {'Key': r'{0}\{1}'.format(hive, key),
                     'Entry': '{0}'.format(vname if vname else '(Default)')}

    if __opts__['test']:
        ret['result'] = None
        ret['changes'] = {'reg': {'Will remove': remove_change}}
        return ret

    ret['result'] = __utils__['reg.delete_value'](hive=hive,
                                                  key=key,
                                                  vname=vname,
                                                  use_32bit_registry=use_32bit_registry)
    if not ret['result']:
        ret['changes'] = {}
        ret['comment'] = r'Failed to remove {0} from {1}'.format(key, hive)
    else:
        ret['changes'] = {'reg': {'Removed': remove_change}}
        ret['comment'] = r'Removed {0} from {1}'.format(key, hive)

    return ret
r''' Ensure a registry value is removed. To remove a key use key_absent. Args: name (str): A string value representing the full path of the key to include the HIVE, Key, and all Subkeys. For example: ``HKEY_LOCAL_MACHINE\\SOFTWARE\\Salt`` Valid hive values include: - HKEY_CURRENT_USER or HKCU - HKEY_LOCAL_MACHINE or HKLM - HKEY_USERS or HKU vname (str): The name of the value you'd like to create beneath the Key. If this parameter is not passed it will assume you want to set the ``(Default)`` value use_32bit_registry (bool): Use the 32bit portion of the registry. Applies only to 64bit windows. 32bit Windows will ignore this parameter. Default is False. Returns: dict: A dictionary showing the results of the registry operation. CLI Example: .. code-block:: yaml 'HKEY_CURRENT_USER\\SOFTWARE\\Salt': reg.absent - vname: version In the above example the value named ``version`` will be removed from the SOFTWARE\\Salt key in the HKEY_CURRENT_USER hive. If ``vname`` was not passed, the ``(Default)`` value would be deleted.
379,556
def tomask(self, pores=None, throats=None):
    if (pores is not None) and (throats is None):
        mask = self._tomask(element='pore', indices=pores)
    elif (throats is not None) and (pores is None):
        mask = self._tomask(element='throat', indices=throats)
    else:
        raise Exception('Cannot specify both pores and throats')
    return mask
r""" Convert a list of pore or throat indices into a boolean mask of the correct length Parameters ---------- pores or throats : array_like List of pore or throat indices. Only one of these can be specified at a time, and the returned result will be of the corresponding length. Returns ------- A boolean mask of length Np or Nt with True in the specified pore or throat locations. See Also -------- toindices Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[5, 5, 5]) >>> mask = pn.tomask(pores=[0, 10, 20]) >>> sum(mask) # 3 non-zero elements exist in the mask (0, 10 and 20) 3 >>> len(mask) # Mask size is equal to the number of pores in network 125 >>> mask = pn.tomask(throats=[0, 10, 20]) >>> len(mask) # Mask is now equal to number of throats in network 300
379,557
def make_call_types(f, globals_d):
    arg_spec = getargspec(f)
    args = [k for k in arg_spec.args if k != "self"]
    defaults = {}
    if arg_spec.defaults:
        default_args = args[-len(arg_spec.defaults):]
        for a, default in zip(default_args, arg_spec.defaults):
            defaults[a] = default
    if not getattr(f, "__annotations__", None):
        # make annotations from the type comment if there is one
        annotations = make_annotations(f, globals_d)
    else:
        annotations = f.__annotations__
    call_types = OrderedDict()
    for a in args:
        anno = anno_with_default(annotations[a], defaults.get(a, NO_DEFAULT))
        assert isinstance(anno, Anno), \
            "Argument %r has type %r which is not an Anno" % (a, anno)
        call_types[a] = anno
    return_type = anno_with_default(annotations.get("return", None))
    if return_type is Any:
        return_type = Anno("Any return value", Any, "return")
    assert return_type is None or isinstance(return_type, Anno), \
        "Return has type %r which is not an Anno" % (return_type,)
    return call_types, return_type
Make a call_types dictionary that describes what arguments to pass to f Args: f: The function to inspect for argument names (without self) globals_d: A dictionary of globals to lookup annotation definitions in
379,558
def _ensure_device_active(self, device):
    act = device.tm.cm.devices.device.load(
        name=get_device_info(device).name,
        partition=self.partition
    )
    if act.failoverState != 'active':  # literal lost in extraction; 'active' per the error message
        msg = "A device in the cluster was not in the 'active' state."
        raise UnexpectedDeviceGroupState(msg)
Ensure a single device is in an active state :param device: ManagementRoot object -- device to inspect :raises: UnexpectedClusterState
379,559
def init(host='0.0.0.0', port=1338):  # default host literal lost in extraction; '0.0.0.0' assumed
    CONTROLLER.host = host
    CONTROLLER.port = port
    CONTROLLER.setDaemon(True)
    CONTROLLER.start()
Initialize PyMLGame. This creates a controller thread that listens for game controllers and events. :param host: Bind to this address :param port: Bind to this port :type host: str :type port: int
379,560
async def main():
    client = Client(BMAS_ENDPOINT)
    tasks = []

    print("\nCall bma.node.summary:")
    task = asyncio.ensure_future(client(bma.node.summary))
    tasks.append(task)

    print("\nCall bma.blockchain.parameters:")
    task = asyncio.ensure_future(client(bma.blockchain.parameters))
    tasks.append(task)

    responses = await asyncio.gather(*tasks)

    print("\nResponses:")
    print(responses)

    await client.close()
Main code (asynchronous requests) You can send one million requests with aiohttp: https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html But don't do that against a single server, that's a DDoS!
379,561
def dedent_description(pkg_info):
    description = pkg_info['Description']
    surrogates = False
    if not isinstance(description, str):
        surrogates = True
        description = pkginfo_unicode(pkg_info, 'Description')
    description_lines = description.splitlines()
    description_dedent = '\n'.join(
        # if the first line of the description is not indented, the rest may be
        (description_lines[0].lstrip(),
         textwrap.dedent('\n'.join(description_lines[1:])),
         ))
    if surrogates:
        description_dedent = description_dedent \
            .encode("utf8") \
            .decode("ascii", "surrogateescape")
    return description_dedent
Dedent and convert pkg_info['Description'] to Unicode.
379,562
def improvise_step(oracle, i, lrs=0, weight=None, prune=False):
    if prune:
        prune_list = range(i % prune, oracle.n_states - 1, prune)
        trn_link = [s + 1 for s in oracle.latent[oracle.data[i]]
                    if (oracle.lrs[s] >= lrs and (s + 1) < oracle.n_states)
                    and s in prune_list]
    else:
        trn_link = [s + 1 for s in oracle.latent[oracle.data[i]]
                    if (oracle.lrs[s] >= lrs and (s + 1) < oracle.n_states)]
    if not trn_link:
        if i == oracle.n_states - 1:
            n = 1
        else:
            n = i + 1
    else:
        if weight == 'lrs':  # literal lost in extraction; 'lrs' per the docstring
            lrs_link = [oracle.lrs[s] for s in oracle.latent[oracle.data[i]]
                        if (oracle.lrs[s] >= lrs and (s + 1) < oracle.n_states)]
            lrs_pop = list(itertools.chain.from_iterable(itertools.chain.from_iterable(
                [[[i] * _x for (i, _x) in zip(trn_link, lrs_link)]])))
            n = np.random.choice(lrs_pop)
        else:
            n = trn_link[int(np.floor(random.random() * len(trn_link)))]
    return n
Given the current time step, improvise (generate) the next time step based on the oracle structure. :param oracle: an indexed vmo object :param i: current improvisation time step :param lrs: the length of minimum longest repeated suffixes allowed to jump :param weight: if None, jump to possible candidate time step uniformly, if "lrs", the probability is proportional to the LRS of each candidate time step :param prune: whether to prune improvisation steps based on regular beat structure or not :return: the next time step
379,563
def find_closing_parenthesis(sql, startpos):
    pattern = re.compile(r'[()]')  # regex literal lost in extraction; matches either parenthesis
    level = 0
    opening = []
    for match in pattern.finditer(sql, startpos):
        par = match.group()
        if par == '(':
            if level == 0:
                opening = match.start()
            level += 1
        if par == ')':
            assert level > 0, "missing '(' before ')'"
            level -= 1
            if level == 0:
                closing = match.end()
                return opening, closing
Find the pair of opening and closing parentheses. Starts search at the position startpos. Returns tuple of positions (opening, closing) if search succeeds, otherwise None.
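Assuming the reconstruction above, a quick check of the returned (opening, closing) positions; closing is one past the closing parenthesis because it comes from match.end():

sql = "CREATE TABLE t (a INT, b TEXT)"
print(find_closing_parenthesis(sql, 0))   # (15, 30)
print(sql[15:30])                         # '(a INT, b TEXT)'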
379,564
def initial(self, request, *args, **kwargs):
    super(FlatMultipleModelMixin, self).initial(request, *args, **kwargs)
    # assertion and warning message strings were lost in extraction; reconstructed
    assert not (self.sorting_field and self.sorting_fields), \
        '{} cannot have both `sorting_field` and `sorting_fields` set'\
        .format(self.__class__.__name__)
    if self.sorting_field:
        warnings.warn(
            '`sorting_field` is deprecated; use `sorting_fields` instead',
            DeprecationWarning
        )
        self.sorting_fields = [self.sorting_field]
    self._sorting_fields = self.sorting_fields
Overrides DRF's `initial` in order to set the `_sorting_field` from corresponding property in view. Protected property is required in order to support overriding of `sorting_field` via `@property`, we do this after original `initial` has been ran in order to make sure that view has all its properties set up.
379,565
def get_courses(self, **kwargs):
    return PaginatedList(
        Course,
        self.__requester,
        'GET',
        'courses',
        _kwargs=combine_kwargs(**kwargs)
    )
Return a list of active courses for the current user. :calls: `GET /api/v1/courses \ <https://canvas.instructure.com/doc/api/courses.html#method.courses.index>`_ :rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.course.Course`
379,566
def _add(self, name, *args, **kw):
    argname = list(self.argdict)[self._argno]
    if argname != name:
        # format string lost in extraction; reconstructed
        raise NameError('Setting name=%r, but it should be %r' % (name, argname))
    self._group.add_argument(*args, **kw)
    self.all_arguments.append((args, kw))
    self.names.append(name)
    self._argno += 1
Add an argument to the underlying parser and grow the list .all_arguments and the set .names
379,567
def format_file_params(files):
    files_payload = {}
    if files:
        for idx, filename in enumerate(files):
            files_payload["file[" + str(idx) + "]"] = open(filename, 'rb')
    return files_payload
Utility method for formatting file parameters for transmission
379,568
def database(self, name=None):
    if name == self.current_database or name is None:
        return self.database_class(self.current_database, self)
    else:
        client_class = type(self)
        new_client = client_class(
            uri=self.uri,
            user=self.user,
            password=self.password,
            host=self.host,
            port=self.port,
            database=name,
            protocol=self.protocol,
            execution_type=self.execution_type,
        )
        return self.database_class(name, new_client)
Connect to a database called `name`. Parameters ---------- name : str, optional The name of the database to connect to. If ``None``, return the database named ``self.current_database``. Returns ------- db : Database An :class:`ibis.client.Database` instance. Notes ----- This creates a new connection if `name` is both not ``None`` and not equal to the current database.
379,569
def _load_libcrypto():
    # (library-name and platform-prefix string literals were lost in
    # extraction; the reconstructions below are assumptions based on the
    # Salt sources this function appears to come from)
    if sys.platform.startswith('win'):
        return cdll.LoadLibrary(str('libeay32'))
    elif getattr(sys, 'frozen', False) and salt.utils.platform.is_smartos():
        return cdll.LoadLibrary(glob.glob(os.path.join(
            os.path.dirname(sys.executable),
            'libcrypto.so*'))[0])
    else:
        lib = find_library('crypto')
        if not lib and sys.platform.startswith('sunos'):
            # the two-argument find_library appears to be a local helper,
            # not ctypes.util.find_library; platform prefix assumed
            lib = find_library('crypto', sys.maxsize > 2 ** 32)
        if not lib and salt.utils.platform.is_sunos():
            # Solaris-like distributions using pkgsrc
            lib = glob.glob('/opt/local/lib/libcrypto.so*') + \
                glob.glob('/opt/tools/lib/libcrypto.so*')
            lib = lib[0] if lib else None
        if lib:
            return cdll.LoadLibrary(lib)
        raise OSError('Cannot locate OpenSSL libcrypto')
Load OpenSSL libcrypto
379,570
def delete(self, eid, token):
    final_headers = self.header
    final_headers['Authorization'] = "Bearer {}".format(token)
    r = requests.delete(self.apiurl + "/library-entries/{}".format(eid),
                        headers=final_headers)
    if r.status_code != 204:
        print(r.status_code)
        raise ConnectionError(r.text)
    return True
Delete a library entry. :param eid str: Entry ID :param token str: OAuth Token :return: True or ServerError :rtype: Bool or Exception
379,571
def _available_services():
    available_services = dict()
    for launch_dir in _launchd_paths():
        for root, dirs, files in salt.utils.path.os_walk(launch_dir):
            for filename in files:
                file_path = os.path.join(root, filename)
                # follow symlinks and skip broken ones
                true_path = os.path.realpath(file_path)
                if not os.path.exists(true_path):
                    continue
                try:
                    with salt.utils.files.fopen(file_path):
                        plist = plistlib.readPlist(
                            salt.utils.data.decode(true_path)
                        )
                except Exception:
                    # (the fallback block here was lost in extraction; in the
                    # Salt source it shells out to plutil to convert the
                    # plist to XML before re-parsing)
                    continue
                try:
                    available_services[plist.Label.lower()] = {
                        'filename': filename,
                        'file_path': true_path,
                        'plist': plist,
                    }
                except AttributeError:
                    # handle malformed plist files
                    pass
    return available_services
Return a dictionary of all available services on the system
379,572
def str2et(time):
    if isinstance(time, list):
        return numpy.array([str2et(t) for t in time])
    time = stypes.stringToCharP(time)
    et = ctypes.c_double()
    libspice.str2et_c(time, ctypes.byref(et))
    return et.value
Convert a string representing an epoch to a double precision value representing the number of TDB seconds past the J2000 epoch corresponding to the input epoch. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/str2et_c.html :param time: A string representing an epoch. :type time: str :return: The equivalent value in seconds past J2000, TDB. :rtype: float
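In SpiceyPy, whose str2et this wrapper matches, a leapseconds kernel must be loaded first. A hedged usage sketch; the kernel file name is illustrative:

import spiceypy as spice

spice.furnsh('naif0012.tls')                  # leapseconds kernel (path is illustrative)
et = spice.str2et('2000 JAN 01 12:00:00 TDB')
print(et)                                     # ~0.0 seconds past J2000 TDB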
379,573
def send_command_response(self, source: list, command: str, *args, **kwargs):
    args = _json.dumps(args).encode()
    kwargs = _json.dumps(kwargs).encode()
    if isinstance(source, list):
        # the delimiter-frame literal was lost in extraction; an empty
        # frame b'' (the usual ZMQ ROUTER convention) is assumed
        frame = (*source, b'', command.encode(), args, kwargs)
    else:
        frame = (b'', command.encode(), args, kwargs)
    if self._run_control_loop:
        self.add_callback(self.command_socket.send_multipart, frame)
    else:
        self.command_socket.send_multipart(frame)
Used in bot observer `on_next` method
379,574
def put_resource(self, resource):
    rtracker = self._get_tracker(resource)
    try:
        self._put(rtracker)
    except PoolFullError:
        self._remove(rtracker)
Adds a resource back to the pool or discards it if the pool is full. :param resource: A resource object. :raises UnknownResourceError: If resource was not made by the pool.
379,575
def _setup_ipc(self):
    # (log-message and option-key literals were lost in extraction; reconstructed)
    log.debug('Setting up the listener IPC pusher')
    self.ctx = zmq.Context()
    self.pub = self.ctx.socket(zmq.PUSH)
    self.pub.connect(LST_IPC_URL)
    log.debug('Setting HWM to %d', self.opts['hwm'])
    try:
        self.pub.setsockopt(zmq.HWM, self.opts['hwm'])
        # zmq 2
    except AttributeError:
        # zmq 3
        self.pub.setsockopt(zmq.SNDHWM, self.opts['hwm'])
Set up the listener IPC pusher.
379,576
def create_revert(self, revert_to_create, project, repository_id):
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if repository_id is not None:
        route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
    content = self._serialize.body(revert_to_create, 'GitAsyncRefOperationParameters')
    response = self._send(http_method='POST',
                          location_id='...',  # the endpoint GUID literal was lost in extraction
                          version='5.1-preview.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('GitRevert', response)
CreateRevert. [Preview API] Starts the operation to create a new branch which reverts changes introduced by either a specific commit or commits that are associated to a pull request. :param :class:`<GitAsyncRefOperationParameters> <azure.devops.v5_1.git.models.GitAsyncRefOperationParameters>` revert_to_create: :param str project: Project ID or project name :param str repository_id: ID of the repository. :rtype: :class:`<GitRevert> <azure.devops.v5_1.git.models.GitRevert>`
379,577
def fast_hash(self):
    fast = sum(i.fast_hash() for i in self.data.values())
    return fast
Get a CRC32 or xxhash.xxh64 reflecting the DataStore.

Returns
-------
hashed: int, checksum of data
379,578
def train_batch(self, batch_info, data, target):
    batch_info.optimizer.zero_grad()
    loss = self.feed_batch(batch_info, data, target)
    loss.backward()
    if self.max_grad_norm is not None:
        # stripped key reconstructed; it most likely records the pre-clipping norm
        batch_info['grad_norm'] = torch.nn.utils.clip_grad_norm_(
            filter(lambda p: p.requires_grad, self.model.parameters()),
            max_norm=self.max_grad_norm
        )
    batch_info.optimizer.step()
Train single batch of data
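A minimal standalone sketch of the same step pattern — zero grads, backprop, clip, step; the model, optimizer, and loss here are stand-ins, not from the source.

import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
data, target = torch.randn(8, 4), torch.randn(8, 1)

optimizer.zero_grad()
loss = torch.nn.functional.mse_loss(model(data), target)
loss.backward()
# clip_grad_norm_ returns the total gradient norm measured *before* clipping
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=0.5)
optimizer.step()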
379,579
def surface_normal(self, param):
    if self.ndim == 1 and self.space_ndim == 2:
        return -perpendicular_vector(self.surface_deriv(param))
    elif self.ndim == 2 and self.space_ndim == 3:
        deriv = self.surface_deriv(param)
        if deriv.ndim > 2:
            # Vectorized case: move the derivative axis to the front
            deriv = moveaxis(deriv, -2, 0)
        normal = np.cross(*deriv, axis=-1)
        normal /= np.linalg.norm(normal, axis=-1, keepdims=True)
        return normal
    else:
        # error message reconstructed around the two stripped format slots
        raise NotImplementedError(
            'no default implementation for `ndim={}` and `space_ndim={}`'
            .format(self.ndim, self.space_ndim))
Unit vector perpendicular to the detector surface at ``param``.

The orientation is chosen as follows:

- In 2D, the system ``(normal, tangent)`` should be right-handed.
- In 3D, the system ``(tangent[0], tangent[1], normal)`` should be
  right-handed.

Here, ``tangent`` is the return value of `surface_deriv` at ``param``.

Parameters
----------
param : `array-like` or sequence
    Parameter value(s) at which to evaluate. If ``ndim >= 2``, a
    sequence of length `ndim` must be provided.

Returns
-------
normal : `numpy.ndarray`
    Unit vector(s) perpendicular to the detector surface at ``param``.
    If ``param`` is a single parameter, an array of shape
    ``(space_ndim,)`` representing a single vector is returned.
    Otherwise the shape of the returned array is

    - ``param.shape + (space_ndim,)`` if `ndim` is 1,
    - ``param.shape[:-1] + (space_ndim,)`` otherwise.
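A hedged sketch of the 2D branch: rotating the tangent by +90 degrees and negating gives a normal such that (normal, tangent) is right-handed. The perpendicular_vector implementation below is an assumption, not the library's code.

import numpy as np

def perpendicular_vector(v):
    # Rotate v by +90 degrees: (x, y) -> (-y, x)
    return np.stack([-v[..., 1], v[..., 0]], axis=-1)

tangent = np.array([1.0, 0.0])
normal = -perpendicular_vector(tangent)   # -> [0., -1.]
# Right-handedness check: det([normal, tangent]) > 0
print(np.linalg.det(np.stack([normal, tangent])))  # 1.0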
379,580
def get_tunnel_info_input_filter_type_filter_by_site_site_name(self, **kwargs):
    config = ET.Element("config")
    get_tunnel_info = ET.Element("get_tunnel_info")
    config = get_tunnel_info
    input = ET.SubElement(get_tunnel_info, "input")
    filter_type = ET.SubElement(input, "filter-type")
    filter_by_site = ET.SubElement(filter_type, "filter-by-site")
    site_name = ET.SubElement(filter_by_site, "site-name")
    site_name.text = kwargs.pop('site_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
379,581
def needsEncoding(self, s):
    if isinstance(s, str):
        for c in self.special:
            if c in s:
                return True
    return False
Get whether string I{s} contains special characters.

@param s: A string to check.
@type s: str
@return: True if needs encoding.
@rtype: boolean
379,582
def dump_to_path(self, cnf, filepath, **kwargs):
    with self.wopen(filepath) as out:
        out.write(self.dump_to_string(cnf, **kwargs))
Dump config 'cnf' to a file 'filepath'.

:param cnf: Configuration data to dump
:param filepath: Config file path
:param kwargs: optional keyword parameters to be sanitized :: dict
379,583
def delete(self, file_, delete_file=True):
    image_file = ImageFile(file_)
    if delete_file:
        image_file.delete()
    default.kvstore.delete(image_file)
Deletes file_ references in the key-value store and optionally the file_ itself.
379,584
def determine_types(self):
    from nefertari.elasticsearch import ES
    collections = self.get_collections()
    resources = self.get_resources(collections)
    models = set([res.view.Model for res in resources])
    # attribute name reconstructed: nefertari models flag ES indexing via
    # an `_index_enabled` attribute
    es_models = [mdl for mdl in models
                 if mdl and getattr(mdl, '_index_enabled', False)]
    types = [ES.src2type(mdl.__name__) for mdl in es_models]
    return types
Determine ES type names from request data.

In particular `request.matchdict['collections']` is used to determine
type names. Its value is a comma-separated sequence of collection names
under which views have been registered.
379,585
# Imports inferred from the underscore-prefixed aliases used below; the
# original import block is not in the source.
import numpy as _np
import pycuda.autoinit  # noqa: F401  (initializes the CUDA context)
from pycuda import gpuarray as _gpuarray
from skcuda.fft import Plan as _Plan, fft as _fft

def calc_fft_with_PyCUDA(Signal):
    print("starting fft")
    Signal = Signal.astype(_np.float32)
    Signal_gpu = _gpuarray.to_gpu(Signal)
    Signalfft_gpu = _gpuarray.empty(len(Signal)//2 + 1, _np.complex64)
    plan = _Plan(Signal.shape, _np.float32, _np.complex64)
    _fft(Signal_gpu, Signalfft_gpu, plan)
    Signalfft = Signalfft_gpu.get()
    # Rebuild the full two-sided spectrum from the one-sided R2C output
    Signalfft = _np.hstack((Signalfft,
                            _np.conj(_np.flipud(Signalfft[1:len(Signal)//2]))))
    print("fft done")
    return Signalfft
Calculates the FFT of the passed signal by using the scikit-cuda library,
which relies on PyCUDA.

Parameters
----------
Signal : ndarray
    Signal to be transformed into Fourier space

Returns
-------
Signalfft : ndarray
    Array containing the signal's FFT
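A CPU-only sketch of the same full-spectrum reconstruction using numpy's real FFT — handy for verifying GPU results; the even signal length mirrors the slicing above.

import numpy as np

signal = np.random.randn(8).astype(np.float32)  # even length, as the slicing assumes
half = np.fft.rfft(signal)                      # one-sided spectrum: len(signal)//2 + 1 bins
full = np.hstack((half, np.conj(np.flipud(half[1:len(signal)//2]))))
assert np.allclose(full, np.fft.fft(signal), atol=1e-4)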
379,586
def makeStylesheetResource(self, path, registry):
    return StylesheetRewritingResourceWrapper(
        File(path), self.installedOfferingNames, self.rootURL)
Return a resource for the css at the given path with its urls rewritten based on self.rootURL.
379,587
def update(self):
    if self._owner_changed:
        self.update_owner(self.owner)
    self._resources = [res.name for res in self.resources]
    return self.parent.update(self)
Update this function. :return: None
379,588
def _add_task(self, tile_address, coroutine):
    self.verify_calling_thread(True, "_add_task is not thread safe")
    if tile_address not in self._tasks:
        self._tasks[tile_address] = []
    task = self._loop.create_task(coroutine)
    self._tasks[tile_address].append(task)
Add a task from within the event loop. All tasks are associated with a tile so that they can be cleanly stopped when that tile is reset.
379,589
def check(self, line_info):
    if not self.shell.automagic or not self.shell.find_magic(line_info.ifun):
        return None
    if line_info.continue_prompt and not self.prefilter_manager.multi_line_specials:
        return None
    # literals reconstructed: take the part before the first dot, and
    # dispatch to the handler registered under the name 'magic'
    head = line_info.ifun.split('.', 1)[0]
    if is_shadowed(head, self.shell):
        return None
    return self.prefilter_manager.get_handler_by_name('magic')
If the ifun is magic, and automagic is on, run it. Note: normal, non-auto magic would already have been triggered via '%' in check_esc_chars. This just checks for automagic. Also, before triggering the magic handler, make sure that there is nothing in the user namespace which could shadow it.
379,590
def restore(delta, which):
    try:
        tag = {1: "- ", 2: "+ "}[int(which)]
    except KeyError:
        raise ValueError, ('unknown delta choice (must be 1 or 2): %r'
                           % which)
    prefixes = ("  ", tag)
    for line in delta:
        if line[:2] in prefixes:
            yield line[2:]
r""" Generate one of the two sequences that generated a delta. Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract lines originating from file 1 or 2 (parameter `which`), stripping off line prefixes. Examples: >>> diff = ndiff('one\ntwo\nthree\n'.splitlines(1), ... 'ore\ntree\nemu\n'.splitlines(1)) >>> diff = list(diff) >>> print ''.join(restore(diff, 1)), one two three >>> print ''.join(restore(diff, 2)), ore tree emu
379,591
def process_mutect_vcf(job, mutect_vcf, work_dir, univ_options):
    mutect_vcf = job.fileStore.readGlobalFile(mutect_vcf)

    # file modes and output suffix reconstructed; the suffix is an assumption
    with open(mutect_vcf, 'r') as infile, \
            open(mutect_vcf + '.processed', 'w') as outfile:
        for line in infile:
            line = line.strip()
            if line.startswith('#'):
                print(line, file=outfile)
                continue
            line = line.split('\t')
            # MuTect writes 'REJECT' in the FILTER column (index 6) for
            # filtered-out calls; keep everything else
            if line[6] != 'REJECT':
                print('\t'.join(line), file=outfile)
    return outfile.name
Process the MuTect vcf for accepted calls.

:param toil.fileStore.FileID mutect_vcf: fsID for a MuTect generated chromosome vcf
:param str work_dir: Working directory
:param dict univ_options: Dict of universal options used by almost all tools
:return: Path to the processed vcf
:rtype: str
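A self-contained sketch of the filtering rule on a toy two-record VCF body (the column values are invented for illustration):

records = [
    'chr1\t100\t.\tA\tT\t50\tPASS\tSOMATIC',
    'chr1\t200\t.\tG\tC\t10\tREJECT\tSOMATIC',
]
kept = [r for r in records if r.split('\t')[6] != 'REJECT']
print(len(kept))  # 1 -- only the PASS record survives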
379,592
def notebook_executed(pth):
    nb = nbformat.read(pth, as_version=4)
    for n in range(len(nb['cells'])):
        if nb['cells'][n].cell_type == 'code' and \
                nb['cells'][n].execution_count is None:
            return False
    return True
Determine whether the notebook at `pth` has been executed.
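Usage sketch (hedged): the notebook path is a stand-in.

import nbformat  # notebook_executed depends on this import

if not notebook_executed('examples/demo.ipynb'):  # path is a placeholder
    print('notebook still has unexecuted code cells')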
379,593
def get_instance(self, payload):
    return ThisMonthInstance(
        self._version,
        payload,
        account_sid=self._solution['account_sid'],
    )
Build an instance of ThisMonthInstance

:param dict payload: Payload response from the API

:returns: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance
:rtype: twilio.rest.api.v2010.account.usage.record.this_month.ThisMonthInstance
379,594
def add_child_bin(self, bin_id, child_id):
    if self._catalog_session is not None:
        return self._catalog_session.add_child_catalog(catalog_id=bin_id,
                                                       child_id=child_id)
    return self._hierarchy_session.add_child(id_=bin_id, child_id=child_id)
Adds a child to a bin.

arg:    bin_id (osid.id.Id): the ``Id`` of a bin
arg:    child_id (osid.id.Id): the ``Id`` of the new child
raise:  AlreadyExists - ``bin_id`` is already a parent of ``child_id``
raise:  NotFound - ``bin_id`` or ``child_id`` not found
raise:  NullArgument - ``bin_id`` or ``child_id`` is ``null``
raise:  OperationFailed - unable to complete request
raise:  PermissionDenied - authorization failure

*compliance: mandatory -- This method must be implemented.*
379,595
def _hosts_in_unenumerated_pattern(self, pattern):
    hosts = {}
    # negative/intersection operators are handled elsewhere
    pattern = pattern.replace("!", "").replace("&", "")
    groups = self.get_groups()
    for group in groups:
        for host in group.get_hosts():
            # stripped literal reconstructed: 'all' matches every host
            if pattern == 'all' or self._match(group.name, pattern) \
                    or self._match(host.name, pattern):
                hosts[host.name] = host
    return sorted(hosts.values(), key=lambda x: x.name)
Get all host names matching the pattern
379,596
def _set_notification(self, conn, char, enabled, timeout=1.0):
    # stripped keys and messages reconstructed from context; the exact key
    # names and wording are assumptions
    if 'client_configuration' not in char:
        return False, {'reason': 'Characteristic has no client configuration descriptor'}
    props = char['properties']
    if not props.notify:
        return False, {'reason': 'Characteristic does not support notifications'}
    value = char['client_configuration']['current_value']
    # (the remainder of the implementation is not present in the source)
Enable/disable notifications on a GATT characteristic

Args:
    conn (int): The connection handle for the device we should interact with
    char (dict): The characteristic we should modify
    enabled (bool): Should we enable or disable notifications
    timeout (float): How long to wait before failing
379,597
def gmst(utc_time):
    ut1 = jdays2000(utc_time) / 36525.0
    theta = 67310.54841 + ut1 * (876600 * 3600 + 8640184.812866 +
                                 ut1 * (0.093104 - ut1 * 6.2 * 10e-6))
    return np.deg2rad(theta / 240.0) % (2 * np.pi)
Greenwich mean sidereal time at ``utc_time``, in radians.

As defined in the AIAA 2006 implementation:
http://www.celestrak.com/publications/AIAA/2006-6753/
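A quick sanity-check sketch: GMST at the J2000 epoch is about 280.46 degrees, i.e. roughly 4.895 radians (gmst and its jdays2000 helper come from the same module).

from datetime import datetime

print(gmst(datetime(2000, 1, 1, 12)))  # ~4.895 rad, i.e. ~280.46 deg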
379,598
def set_connection(connection=defaults.sqlalchemy_connection_string_default):
    config_path = defaults.config_file_path
    config = RawConfigParser()

    if not os.path.exists(config_path):
        with open(config_path, 'w') as config_file:
            # section/option names reconstructed; the exact literals are assumptions
            config['database'] = {'sqlalchemy_connection_string': connection}
            config.write(config_file)
            log.info('Create configuration file {}'.format(config_path))
    else:
        config.read(config_path)
        config.set('database', 'sqlalchemy_connection_string', connection)
        with open(config_path, 'w') as configfile:
            config.write(configfile)
Set the connection string for sqlalchemy and write it to the config file. .. code-block:: python import pyhgnc pyhgnc.set_connection('mysql+pymysql://{user}:{passwd}@{host}/{db}?charset={charset}') .. hint:: valid connection strings - mysql+pymysql://user:passwd@localhost/database?charset=utf8 - postgresql://scott:tiger@localhost/mydatabase - mssql+pyodbc://user:passwd@database - oracle://user:[email protected]:1521/database - Linux: sqlite:////absolute/path/to/database.db - Windows: sqlite:///C:\path\to\database.db :param str connection: sqlalchemy connection string
379,599
def star(self) -> snug.Query[bool]:
    # f-string path stripped in the source; GitHub's starring endpoint is
    # PUT /user/starred/{owner}/{repo}, so plausibly (attribute names assumed):
    req = snug.PUT(BASE + f'/user/starred/{self.owner}/{self.name}')
    return (yield req).status_code == 204
star this repo
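Usage sketch (hedged): snug queries are executed via snug.execute or an executor; the credentials and repo object below are placeholders.

import snug

execute = snug.executor(auth=('username', 'token'))  # placeholder credentials
starred = execute(repo.star())  # `repo` is a placeholder instance of this class
print(starred)  # True on a 204 response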