Columns: Unnamed: 0 (int64, 0 – 389k) · code (string, lengths 26 – 79.6k) · docstring (string, lengths 1 – 46.9k)
375,400
def make_list(cls, item_converter=None, listsep=','):
    if not item_converter:
        item_converter = parse_anything
    return cls.with_cardinality(Cardinality.many, item_converter,
                                pattern=cls.anything_pattern,
                                listsep=listsep)
Create a type converter for a list of items (many := 1..*). The parser accepts anything and the converter needs to fail on errors. :param item_converter: Type converter for an item. :param listsep: List separator to use (as string). :return: Type converter function object for the list.
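A minimal self-contained sketch of what such a list converter does; it stands in for the real parse-type machinery, and the names below are illustrative only:

def make_list_converter(item_converter=int, listsep=','):
    # Split on the separator and convert each item; a conversion error
    # propagates, which is how the surrounding parser rejects bad input.
    def convert(text):
        return [item_converter(part.strip()) for part in text.split(listsep)]
    return convert

parse_ints = make_list_converter(int)
assert parse_ints("1, 2, 3") == [1, 2, 3]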
375,401
def register_path(self, path, modified_time=None):
    if not foundations.common.path_exists(path):
        raise foundations.exceptions.PathExistsError(
            "{0} | '{1}' path doesn't exists!".format(self.__class__.__name__, path))
    # The extracted source fused two error messages; the already-registered
    # check below is reconstructed from the surviving message fragment.
    if path in self.__paths:
        raise foundations.exceptions.PathRegistrationError(
            "{0} | '{1}' path is already registered!".format(self.__class__.__name__, path))
    self.__paths[path] = (self.get_path_modified_time(path)
                          if modified_time is None else modified_time,
                          os.path.isfile(path))
    return True
Registers given path. :param path: Path name. :type path: unicode :param modified_time: Custom modified time. :type modified_time: int or float :return: Method success. :rtype: bool
375,402
def __File_Command_lineEdit_set_ui(self):
    # Reconstructed: the extraction dropped the lines that read the stored
    # value and the debug-log string; only a trailing fragment survived.
    file_command = self.__settings.get_key(self.__settings_section, "file_command").toString()
    LOGGER.debug("> Setting '{0}' with value '{1}'.".format("File_Command_lineEdit", file_command))
    self.__file_command = file_command
    self.File_Command_lineEdit.setText(file_command)
Fills **File_Command_lineEdit** Widget.
375,403
def _get_metadata(field, expr, metadata_expr, no_metadata_rule):
    if isinstance(metadata_expr, bz.Expr) or metadata_expr is None:
        return metadata_expr

    try:
        # The metadata table is named like '<expr name>_<field>'.
        return expr._child['_'.join(((expr._name or ''), field))]
    except (ValueError, AttributeError):
        if no_metadata_rule == 'raise':
            raise ValueError(
                "no %s table could be reflected for %s" % (field, expr)
            )
        elif no_metadata_rule == 'warn':
            warnings.warn(NoMetaDataWarning(expr, field), stacklevel=4)
    return None
Find the correct metadata expression for the expression. Parameters ---------- field : {'deltas', 'checkpoints'} The kind of metadata expr to lookup. expr : Expr The baseline expression. metadata_expr : Expr, 'auto', or None The metadata argument. If this is 'auto', then the metadata table will be searched for by walking up the expression tree. If this cannot be reflected, then an action will be taken based on the ``no_metadata_rule``. no_metadata_rule : {'warn', 'raise', 'ignore'} How to handle the case where the metadata_expr='auto' but no expr could be found. Returns ------- metadata : Expr or None The deltas or metadata table to use.
375,404
def drop(self, format_p, action):
    if not isinstance(format_p, basestring):
        raise TypeError("format_p can only be an instance of type basestring")
    if not isinstance(action, DnDAction):
        raise TypeError("action can only be an instance of type DnDAction")
    progress = self._call("drop", in_p=[format_p, action])
    progress = IProgress(progress)
    return progress
Informs the source that a drop event occurred for a pending drag and drop operation. in format_p of type str The mime type the data must be in. in action of type :class:`DnDAction` The action to use. return progress of type :class:`IProgress` Progress object to track the operation completion. raises :class:`VBoxErrorVmError` VMM device is not available.
375,405
def compare_modules(file_, imports):
    modules = parse_requirements(file_)
    imports = [imports[i]["name"] for i in range(len(imports))]
    modules = [modules[i]["name"] for i in range(len(modules))]
    modules_not_imported = set(modules) - set(imports)
    return modules_not_imported
Compare modules in a file to imported modules in a project. Args: file_ (str): File to parse for modules to be compared. imports (tuple): Modules being imported in the project. Returns: tuple: The modules not imported in the project, but do exist in the specified file.
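The core of this is a plain set difference. A hypothetical illustration of the input/output shapes (parse_requirements() itself is not shown; the package names are made up):

modules = [{"name": "requests"}, {"name": "six"}, {"name": "pytz"}]
imports = [{"name": "requests"}]

module_names = [m["name"] for m in modules]
import_names = [i["name"] for i in imports]
print(set(module_names) - set(import_names))  # {'six', 'pytz'} (set order varies)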
375,406
def _status_message_0x01_received(self, msg):
    if msg.cmd2 == 0x00 or msg.cmd2 == 0x02:
        self._update_subscribers(0x00)
    elif msg.cmd2 == 0x01 or msg.cmd2 == 0x03:
        self._update_subscribers(0xff)
    else:
        raise ValueError
Handle status received messages. The following status values can be received: 0x00 = Both Outlets Off 0x01 = Only Top Outlet On 0x02 = Only Bottom Outlet On 0x03 = Both Outlets On
375,407
def dump(self, f):
    self.validate()
    with _open_file_obj(f, "w") as f:
        parser = self._get_parser()
        self.serialize(parser)
        self.build_file(parser, f)
Dump data to a file. :param f: file-like object or path to file :type f: file or str
375,408
def determine_actions(self, request, view):
    from rest_framework.generics import GenericAPIView
    actions = {}
    # The HTTP-verb sets and attribute names below are assumed; the
    # extraction dropped every string literal from this function.
    excluded_methods = {'OPTIONS', 'HEAD', 'DELETE', 'TRACE'}
    for method in set(view.allowed_methods) - excluded_methods:
        view.request = clone_request(request, method)
        try:
            if isinstance(view, GenericAPIView):
                has_object = view.lookup_url_kwarg or view.lookup_field in view.kwargs
            elif method in {'PUT', 'PATCH'}:
                has_object = True
            else:
                continue
            if hasattr(view, 'check_permissions'):
                view.check_permissions(view.request)
            if has_object and hasattr(view, 'get_object'):
                view.get_object()
        except (exceptions.APIException, PermissionDenied, Http404):
            pass
        else:
            serializer = view.get_serializer()
            actions[method] = self.get_serializer_info(serializer)
        finally:
            view.request = request
    return actions
Allow all allowed methods
375,409
def draw(obj, plane='xy', inline=False, **kwargs):
    if plane.lower() == '3d':
        return _plot_neuron3d(obj, inline, **kwargs)
    return _plot_neuron(obj, plane, inline, **kwargs)
Draw the morphology in the given plane. plane (str): a string representing the 2D plane (example: 'xy') or '3d', '3D' for a 3D view inline (bool): must be set to True for interactive ipython notebook plotting
375,410
def rmdir(self, dir_name):
    self.check_write(dir_name)
    path = normpath_url(join_url(self.cur_dir, dir_name))
    shutil.rmtree(path)
Remove cur_dir/name.
375,411
def _create_json(self):
    data_json = {
        "rclass": "local",
        "key": self.name,
        "description": self.description,
        "packageType": self.packageType,
        "notes": "",
        "includesPattern": "**/*",
        "excludesPattern": "",
        "repoLayoutRef": self.repoLayoutRef,
        "dockerApiVersion": self.dockerApiVersion,
        "checksumPolicyType": "client-checksums",
        "handleReleases": True,
        "handleSnapshots": True,
        "maxUniqueSnapshots": 0,
        "snapshotVersionBehavior": "unique",
        "suppressPomConsistencyChecks": True,
        "blackedOut": False,
        "propertySets": [],
        "archiveBrowsingEnabled": self.archiveBrowsingEnabled,
        "yumRootDepth": 0,
    }
    return data_json
JSON Documentation: https://www.jfrog.com/confluence/display/RTF/Repository+Configuration+JSON
375,412
def update(cls, id, memory, cores, console, password, background, max_memory):
    if not background and not cls.intty():
        # Run in the background by default when not attached to a tty.
        background = True
    vm_params = {}
    # The parameter keys, API method name, and progress message below are
    # assumed; the extraction dropped the string literals.
    if memory:
        vm_params['memory'] = memory
    if cores:
        vm_params['cores'] = cores
    if console:
        vm_params['console'] = console
    if password:
        vm_params['password'] = password
    if max_memory:
        vm_params['vm_max_memory'] = max_memory
    result = cls.call('hosting.vm.update', cls.usable_id(id), vm_params)
    if background:
        return result
    cls.echo('Updating your Virtual Machine %s.' % id)
    cls.display_progress(result)
Update a virtual machine.
375,413
def probability_density(self, X):
    self.check_fit()
    return norm.pdf(X, loc=self.mean, scale=self.std)
Compute probability density. Arguments: X: `np.ndarray` of shape (n, 1). Returns: np.ndarray
375,414
def _put(self, *args, **kwargs):
    if 'data' in kwargs:
        kwargs['data'] = json.dumps(kwargs['data'])
    response = requests.put(*args, **kwargs)
    if not response.ok:
        raise NewRelicAPIServerException('{}: {}'.format(response.status_code, response.text))
    return response.json()
A wrapper for putting things. It will also json encode your 'data' parameter :returns: The response of your put :rtype: dict :raises: This will raise a :class:`NewRelicAPIServerException<newrelic_api.exceptions.NewRelicAPIServerException>` if there is an error from New Relic
375,415
def handle_error(result, exception_class=None):
    if result == 0:
        return
    # The TLSError messages below are assumed; the extraction dropped them.
    if result == Secur32Const.SEC_E_OUT_OF_SEQUENCE:
        raise TLSError('A packet was received out of order')
    if result == Secur32Const.SEC_E_MESSAGE_ALTERED:
        raise TLSError('A packet was received altered')
    if result == Secur32Const.SEC_E_CONTEXT_EXPIRED:
        raise TLSError('The TLS session expired')
    _, error_string = get_error()
    if not isinstance(error_string, str_cls):
        error_string = _try_decode(error_string)
    if exception_class is None:
        exception_class = OSError
    raise exception_class(('error 0x%0.8X: ' % result) + error_string)
Extracts the last Windows error message into a python unicode string :param result: A function result, 0 or None indicates failure :param exception_class: The exception class to use for the exception if an error occurred :return: A unicode string error message
375,416
def instances_set(self, root, reservation):
    instances = []
    for instance_data in root.find("instancesSet"):
        instances.append(self.instance(instance_data, reservation))
    return instances
Parse instance data out of an XML payload. @param root: The root node of the XML payload. @param reservation: The L{Reservation} associated with the instances from the response. @return: A C{list} of L{Instance}s.
375,417
def push(self, value):
    if self.closed and not self.allow_add_after_close:
        Log.error("Do not push to closed queue")
    with self.lock:
        self._wait_for_queue_space()
        if not self.closed:
            self.queue.appendleft(value)
    return self
SNEAK value TO FRONT OF THE QUEUE
375,418
def matches_at_fpr(fg_vals, bg_vals, fpr=0.01):
    fg_vals = np.array(fg_vals)
    s = scoreatpercentile(bg_vals, 100 - fpr * 100)
    return [sum(fg_vals >= s), sum(bg_vals >= s)]
Computes the number of matches at a specific FPR (default 1%). Parameters ---------- fg_vals : array_like The list of values for the positive set. bg_vals : array_like The list of values for the negative set. fpr : float, optional The FPR (between 0.0 and 1.0). Returns ------- matches : list The number of positive and negative matches at the specified FPR.
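A small worked example of the thresholding: the score at the (100 - FPR·100)th percentile of the background distribution becomes the cutoff, and both sets are counted against it.

import numpy as np
from scipy.stats import scoreatpercentile

fg_vals = np.array([0.9, 0.8, 0.7, 0.2])
bg_vals = np.array([0.1, 0.2, 0.3, 0.95])

fpr = 0.25
s = scoreatpercentile(bg_vals, 100 - fpr * 100)  # threshold: 75th percentile of bg
print([int(np.sum(fg_vals >= s)), int(np.sum(bg_vals >= s))])  # [3, 1]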
375,419
def p_kwl_kwl(self, p):
    _LOGGER.debug("kwl -> kwl ; kwl")
    if p[3] is not None:
        p[0] = p[3]
    elif p[1] is not None:
        p[0] = p[1]
    else:
        p[0] = TypedClass(None, TypedClass.UNKNOWN)
kwl : kwl SEPARATOR kwl
375,420
def device_key(self, device_key):
    if device_key is not None and len(device_key) > 512:
        raise ValueError("Invalid value for `device_key`, length must be less than or equal to `512`")
    self._device_key = device_key
Sets the device_key of this DeviceData. The fingerprint of the device certificate. :param device_key: The device_key of this DeviceData. :type: str
375,421
def parse(system):
    # Log/format strings and the dynamics-parser module name ('dome') below
    # are assumed; the extraction dropped every string literal.
    t, _ = elapsed()
    input_format = system.files.input_format
    add_format = system.files.add_format
    if not input_format:
        logger.error('No input format found. Specify or guess a format before parsing.')
        return False
    try:
        parser = importlib.import_module('.' + input_format, __name__)
        dmparser = importlib.import_module('.' + 'dome', __name__)
        if add_format:
            addparser = importlib.import_module('.' + add_format, __name__)
    except ImportError:
        logger.error('Parser for {:s} format not found.'.format(input_format))
        return False
    logger.info('Parsing input file <{:s}>'.format(system.files.fullname))
    if not parser.read(system.files.case, system):
        logger.error('Error parsing case file {:s} with {:s} format parser.'.format(
            system.files.fullname, input_format))
        return False
    if system.files.addfile:
        if not system.files.add_format:
            logger.error('Unknown addfile format.')
            return
        logger.info('Parsing additional file {:s}.'.format(system.files.addfile))
        if not addparser.readadd(system.files.addfile, system):
            logger.error('Error parsing addfile {:s} with {:s} format parser.'.format(
                system.files.addfile, input_format))
            return False
    if system.files.dynfile:
        logger.info('Parsing dynamics file {:s}.'.format(system.files.dynfile))
        if not dmparser.read(system.files.dynfile, system):
            logger.error('Error parsing dynamics file {:s}.'.format(system.files.dynfile))
            return False
    _, s = elapsed(t)
    logger.debug('Case file {:s} parsed in {:s}.'.format(system.files.fullname, s))
    return True
Parse input file with the given format in system.files.input_format
375,422
def normalize_locale(locale):
    import re
    # Assumed pattern: the stripped literal most likely matched the leading
    # language code (e.g. 'en' from 'en_US').
    match = re.match(r'^[a-z]+', locale.lower())
    if match:
        return match.group()
Normalize locale Extracts language code from passed in locale string to be used later for dictionaries loading. :param locale: string, locale (en, en_US) :return: string, language code
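With the assumed pattern above, the behavior the docstring describes looks like this (self-contained copy for illustration):

import re

def normalize_locale(locale):
    match = re.match(r'^[a-z]+', locale.lower())
    if match:
        return match.group()

assert normalize_locale('en_US') == 'en'
assert normalize_locale('EN') == 'en'
assert normalize_locale('_US') is None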
375,423
def find_library_darwin(cls):
    dll = Library.JLINK_SDK_NAME
    # Path components and prefixes are assumed from the docstring's table;
    # the extraction dropped the string literals.
    root = os.path.join('/', 'Applications', 'SEGGER')
    if not os.path.isdir(root):
        return

    for d in os.listdir(root):
        dir_path = os.path.join(root, d)

        # Navigate through each JLink directory.
        if os.path.isdir(dir_path) and d.startswith('JLink'):
            files = list(f for f in os.listdir(dir_path)
                         if os.path.isfile(os.path.join(dir_path, f)))

            # For versions >= 6.0.0 the library has no version suffix.
            if (dll + '.dylib') in files:
                yield os.path.join(dir_path, dll + '.dylib')

            # For versioned library file names.
            for f in files:
                if f.startswith(dll):
                    yield os.path.join(dir_path, f)
Loads the SEGGER DLL from the installed applications. This method accounts for the all the different ways in which the DLL may be installed depending on the version of the DLL. Always uses the first directory found. SEGGER's DLL is installed in one of three ways dependent on which version of the SEGGER tools is installed: ======== ============================================================ Versions Directory ======== ============================================================ < 5.0.0 ``/Applications/SEGGER/JLink\\ NUMBER`` < 6.0.0 ``/Applications/SEGGER/JLink/libjlinkarm.major.minor.dylib`` >= 6.0.0 ``/Applications/SEGGER/JLink/libjlinkarm`` ======== ============================================================ Args: cls (Library): the ``Library`` class Returns: The path to the J-Link library files in the order they are found.
375,424
def fix_imports(script):
    with open(script, 'r') as f_script:
        lines = f_script.read().splitlines()
    new_lines = []
    for l in lines:
        if l.startswith("import "):
            l = "from . " + l
        if "from PyQt5 import" in l:
            l = l.replace("from PyQt5 import", "from pyqode.qt import")
        new_lines.append(l)
    with open(script, 'w') as f_script:
        f_script.write("\n".join(new_lines))
Replace "from PyQt5 import" by "from pyqode.qt import". :param script: script path
375,425
def update_metadata(self, resource, keys_vals):
    self.metadata_service.set_auth(self._token_metadata)
    self.metadata_service.update(resource, keys_vals)
Updates key-value pairs with the given resource. Will attempt to update all key-value pairs even if some fail. Keys must already exist. Args: resource (intern.resource.boss.BossResource) keys_vals (dictionary): Collection of key-value pairs to update on the given resource. Raises: HTTPErrorList on failure.
375,426
def remove_from_bin(self, name): self.__remove_path(os.path.join(self.root_dir, "bin", name))
Remove an object from the bin folder.
375,427
def create_archive(path, remove_path=True):
    root_path = os.path.dirname(path)
    relative_path = os.path.basename(path)
    archive_path = path + ".tar.gz"
    cmd = [["tar", "-C", root_path, "-czf", archive_path, relative_path]]
    call(cmd, env=SAFE_ENV)
    if remove_path:
        fs.remove(path)
    return archive_path
Creates a tar.gz of the path using the path basename + "tar.gz" The resulting file is in the parent directory of the original path, and the original path is removed.
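The same effect can be had with the standard library instead of shelling out to tar; a sketch (the function name is made up, not the source's API):

import os
import shutil
import tarfile

def create_archive_stdlib(path, remove_path=True):
    # Archive <path> as <path>.tar.gz, storing entries relative to the parent.
    archive_path = path + ".tar.gz"
    with tarfile.open(archive_path, "w:gz") as tar:
        tar.add(path, arcname=os.path.basename(path))
    if remove_path:
        shutil.rmtree(path)
    return archive_path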
375,428
def update_positions(self, time, xs, ys, zs, vxs, vys, vzs,
                     ethetas, elongans, eincls,
                     ds=None, Fs=None, ignore_effects=False):
    self.xs = np.array(_value(xs))
    self.ys = np.array(_value(ys))
    self.zs = np.array(_value(zs))
    for starref, body in self.items():
        body.update_position(time, xs, ys, zs, vxs, vys, vzs,
                             ethetas, elongans, eincls,
                             ds=ds, Fs=Fs, ignore_effects=ignore_effects)
TODO: add documentation all arrays should be for the current time, but iterable over all bodies
375,429
def RollbackAll(close=None):
    if close:
        # The simplefilter argument is assumed; the extraction dropped it.
        warnings.simplefilter('default')
        warnings.warn("close parameter will not need at all.", DeprecationWarning)
    for k, v in engine_manager.items():
        session = v.session(create=False)
        if session:
            session.rollback()
Rollback all transactions, according to Local.conn
375,430
def _get_network_interface(name, resource_group):
    public_ips = []
    private_ips = []
    # The resource-provider/type names and dictionary keys below are assumed;
    # the extraction dropped every string literal from this function.
    netapi_versions = get_api_versions(kwargs={
        'resource_provider': 'Microsoft.Network',
        'resource_type': 'networkInterfaces'
    })
    netapi_version = netapi_versions[0]
    netconn = get_conn(client_type='network')
    netiface_query = netconn.network_interfaces.get(
        resource_group_name=resource_group,
        network_interface_name=name
    )
    netiface = netiface_query.as_dict()
    for index, ip_config in enumerate(netiface['ip_configurations']):
        if ip_config.get('private_ip_address') is not None:
            private_ips.append(ip_config['private_ip_address'])
        if 'id' in ip_config.get('public_ip_address', {}):
            public_ip_name = get_resource_by_id(
                ip_config['public_ip_address']['id'],
                netapi_version,
                'name'
            )
            public_ip = _get_public_ip(public_ip_name, resource_group)
            public_ips.append(public_ip['ip_address'])
            netiface['ip_configurations'][index]['public_ip_address'].update(public_ip)
    return netiface, public_ips, private_ips
Get a network interface.
375,431
def secretfile_args(parser):
    # Flag names, dest values, and help strings below are assumed; the
    # extraction dropped every string literal except the defaults.
    parser.add_argument('--secrets', dest='secrets',
                        help='Location of the secrets directory',
                        default=os.path.join(os.getcwd(), ".secrets"))
    parser.add_argument('--policies', dest='policies',
                        help='Location of the policy templates',
                        default=os.path.join(os.getcwd(), "vault", ""))
    parser.add_argument('--secretfile', dest='secretfile',
                        help='Location of the Secretfile',
                        default=os.path.join(os.getcwd(), "Secretfile"))
    parser.add_argument('--tags', dest='tags',
                        help='Tags to filter resources by',
                        default=[], type=str, action='append')
    parser.add_argument('--include', dest='include',
                        help='Resources to include',
                        default=[], type=str, action='append')
    parser.add_argument('--exclude', dest='exclude',
                        help='Resources to exclude',
                        default=[], type=str, action='append')
Add Secretfile management command line arguments to parser
375,432
def defaultSTDPKernel(preSynActivation,
                      postSynActivation,
                      dt,
                      inhibitoryPresyn=False,
                      inhibitoryPostsyn=False):
    stdpScaler = 1
    stdpTimeScaler = 1.
    if inhibitoryPresyn and not inhibitoryPostsyn:
        stdpScaler *= 1
    elif not inhibitoryPresyn and inhibitoryPostsyn:
        stdpScaler *= 1
    elif inhibitoryPresyn and inhibitoryPostsyn:
        stdpScaler *= -1
    if dt < 0 and not inhibitoryPresyn:
        stdpScaler *= 1
        stdpTimeScaler *= 3
    elif dt > 0 and not inhibitoryPresyn:
        stdpScaler *= 1.2
        stdpTimeScaler *= 4
    elif dt > 0 and inhibitoryPresyn:
        stdpScaler *= .5
        stdpTimeScaler *= 4
    elif dt < 0 and inhibitoryPresyn:
        stdpScaler *= 1
        stdpTimeScaler *= 2
    timeFactor = np.exp(-1 * np.abs(dt) / (STDP_TIME_CONSTANT * stdpTimeScaler))
    updates = np.outer(preSynActivation * timeFactor * np.sign(dt) * stdpScaler,
                       postSynActivation)
    return updates
This function implements a modified version of the STDP kernel from Widloski & Fiete, 2014. :param preSynActivation: Vector of pre-synaptic activations :param postSynActivation: Vector of post-synaptic activations :param dt: the difference in time between the two (in seconds), positive if after and negative if before :return: A matrix of synapse weight changes.
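The mechanics reduce to an exponential time factor and an outer product; a runnable sketch of the excitatory dt < 0 branch (the STDP_TIME_CONSTANT value here is an assumption, the real constant is defined elsewhere):

import numpy as np

STDP_TIME_CONSTANT = 0.032  # assumed value

pre = np.array([0.0, 1.0])   # pre-synaptic activations
post = np.array([1.0, 0.5])  # post-synaptic activations
dt = -0.01                   # 10 ms; the sign selects the kernel branch

# Excitatory/excitatory, dt < 0 branch: scaler 1, time scaler 3.
timeFactor = np.exp(-abs(dt) / (STDP_TIME_CONSTANT * 3))
updates = np.outer(pre * timeFactor * np.sign(dt) * 1, post)
print(updates.shape)  # (2, 2): one weight change per (pre, post) pair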
375,433
def assets2s3():
    import flask_s3
    header("Assets2S3...")
    print("")
    print("Building assets files...")
    print("")
    build_assets(application.app)
    print("")
    print("Uploading assets files to S3 ...")
    flask_s3.create_all(application.app)
    print("")
Upload assets files to S3
375,434
def _register_function(func, con):
    nargs = number_of_arguments(func)
    con.connection.connection.create_function(func.__name__, nargs, func)
Register a Python callable with a SQLite connection `con`. Parameters ---------- func : callable con : sqlalchemy.Connection
375,435
def __add_sentence_root_node(self, sent_number):
    # The ID template and layer suffix are assumed; the extraction dropped
    # the string literals.
    sent_id = 'sentence_{}'.format(sent_number)
    self.add_node(sent_id, layers={self.ns, self.ns + ':sentence'}, tokens=[])
    self.add_edge(self.root, sent_id,
                  layers={self.ns, self.ns + ':sentence'},
                  edge_type=EdgeTypes.dominance_relation)
    self.sentences.append(sent_id)
    return sent_id
adds the root node of a sentence to the graph and the list of sentences (``self.sentences``). the node has a ``tokens`` attribute, which contains a list of the tokens (token node IDs) of this sentence. Parameters ---------- sent_number : int the index of the sentence within the document Returns ------- sent_id : str the ID of the sentence
375,436
def cleanup_relations(self):
    collections = self.collections
    for relation in [x for col in collections.values()
                     for x in col.model.relations.values()]:
        db.session.query(relation)\
            .filter(~relation.listing.any())\
            .delete(synchronize_session=False)
    db.session.commit()
Cleanup listing relations
375,437
def FromString(cls, desc):
    if language.stream is None:
        language.get_language()
    # The parse-result names ('time', 'stream', 'value') and literal tokens
    # (':' and '=') are assumed; the extraction dropped the string literals.
    parse_exp = (Optional(time_interval('time') - Literal(':').suppress())
                 - language.stream('stream') - Literal('=').suppress()
                 - number('value'))
    try:
        data = parse_exp.parseString(desc)
        time = 0
        if 'time' in data:
            time = data['time'][0]
        return SimulationStimulus(time, data['stream'][0], data['value'])
    except (ParseException, ParseSyntaxException):
        raise ArgumentError("Could not parse stimulus descriptor", descriptor=desc)
Create a new stimulus from a description string. The string must have the format: [time: ][system ]input X = Y where X and Y are integers. The time, if given must be a time_interval, which is an integer followed by a time unit such as second(s), minute(s), etc. Args: desc (str): A string description of the stimulus. Returns: SimulationStimulus: The parsed stimulus object.
375,438
def RenderWidget(self):
    t = self.type
    if t == int:
        ret = QSpinBox()
        ret.setMaximum(999999999)
        ret.setValue(self.value)
    elif t == float:
        ret = QLineEdit()
        ret.setText(str(self.value))
    elif t == bool:
        ret = QCheckBox()
        ret.setChecked(self.value)
    else:
        ret = QLineEdit()
        ret.setText(str(self.value))
    if self.toolTip is not None:
        ret.setToolTip(self.toolTip)
    self.widget = ret
    return ret
Returns a QWidget subclass instance. Exact class depends on self.type
375,439
def list(self, mask=None):
    if mask is None:
        mask = "mask[id, name, createDate, rule, guestCount, backendRouter[id, hostname]]"
    groups = self.client.call('Account', 'getPlacementGroups', mask=mask, iter=True)
    return groups
List existing placement groups Calls SoftLayer_Account::getPlacementGroups
375,440
def get_email_domain(emailaddr):
    realname, address = email.utils.parseaddr(emailaddr)
    try:
        username, domain = address.split('@')
        if not username:
            return None
        return domain or None
    except ValueError:
        return None
Return the domain component of an email address. Returns None if the provided string cannot be parsed as an email address. >>> get_email_domain('[email protected]') 'example.com' >>> get_email_domain('[email protected]') 'example.com' >>> get_email_domain('Example Address <[email protected]>') 'example.com' >>> get_email_domain('foobar') >>> get_email_domain('foo@bar@baz') 'bar' >>> get_email_domain('foobar@') >>> get_email_domain('@foobar')
375,441
def draw(self):
    if not self.vao:
        self.vao = VAO(indices=self.array_indices)
        self._fill_vao()
    if self.visible:
        if self.dynamic:
            for vbo in self.vbos:
                vbo._buffer_subdata()
        if self.drawmode == gl.GL_POINTS:
            gl.glPointSize(self.point_size)
        for texture in self.textures:
            texture.bind()
        with self.vao as vao:
            self.uniforms.send()
            vao.draw(mode=self.drawmode)
        for texture in self.textures:
            texture.unbind()
Draw the Mesh if it's visible, from the perspective of the camera and lit by the light. The function sends the uniforms
375,442
def time_sp(self):
    self._time_sp, value = self.get_attr_int(self._time_sp, 'time_sp')
    return value
Writing specifies the amount of time the motor will run when using the `run-timed` command. Reading returns the current value. Units are in milliseconds.
375,443
def load_plugin(self, manifest, *args):
    if self.get_plugin_loaded(manifest["name"]):
        self._logger.debug("Plugin {} is already loaded.".format(manifest["name"]))
        return
    try:
        self._logger.debug("Attempting to load plugin {}.".format(manifest["name"]))
        for dependency in manifest.get("dependencies", []):
            if not self.get_plugin_loaded(dependency):
                self._logger.debug("Must load dependency {} first.".format(dependency))
                if self.get_manifest(dependency) is None:
                    self._logger.error("Dependency {} could not be found.".format(dependency))
                else:
                    self.load_plugin(self.get_manifest(dependency), *args)
        not_loaded = [i for i in manifest.get("dependencies", [])
                      if not self.get_plugin_loaded(i)]
        if len(not_loaded) != 0:
            self._logger.error("Plugin {} failed to load due to missing dependencies. Dependencies: {}".format(
                manifest["name"], ", ".join(not_loaded)
            ))
            return
        if PY3:
            spec = importlib.util.spec_from_file_location(
                manifest.get("module_name", manifest["name"].replace(" ", "_")),
                os.path.join(manifest["path"], manifest.get("main_path", "__init__.py"))
            )
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)
        else:
            module = imp.load_source(
                manifest.get("module_name", manifest["name"].replace(" ", "_")),
                os.path.join(manifest["path"], manifest.get("main_path", "__init__.py"))
            )
        module_class = manifest.get("main_class", "Plugin")
        plugin_class = getattr(module, module_class)
        if issubclass(plugin_class, self._plugin_class):
            plugin = plugin_class(manifest, *args)
        else:
            self._logger.error("Failed to load {} due to invalid baseclass.".format(manifest["name"]))
            return
        self._plugins[manifest["name"]] = plugin
        self._modules[manifest["name"]] = module
        self._logger.debug("Plugin {} loaded.".format(manifest["name"]))
    except:
        exc_path = os.path.join(manifest["path"], "error.log")
        with open(exc_path, "w") as f:
            f.write(traceback.format_exc(5))
        self._logger.error("Failed to load plugin {}. Error log written to {}.".format(manifest["name"], exc_path))
Loads a plugin from the given manifest :param manifest: The manifest to use to load the plugin :param args: Arguments to pass to the plugin
375,444
def add_index(collection, name, fields, transformer=None,
              unique=False, case_insensitive=False):
    assert len(name) > 0
    assert len(fields) > 0
    indexes = _db[collection].indexes
    index = indexes.setdefault(name, aadict())
    index.transformer = transformer
    index.value_map = {}
    index.unique = unique
    index.case_insensitive = case_insensitive
    index.fields = fields
    for obj in each_object(collection):
        _add_to_index(index, obj)
    # The log format string is assumed; the extraction dropped it.
    _logger.info('added %s%sindex on collection %s for fields: %s',
                 'unique ' if unique else '',
                 'case-insensitive ' if case_insensitive else '',
                 collection, ', '.join(fields))
Add a secondary index for a collection ``collection`` on one or more ``fields``. The values at each of the ``fields`` are loaded from existing objects and their object ids added to the index. You can later iterate the objects of an index via ``each_indexed_object``. If you update an object and call ``save_object``, the index will be updated with the latest values from the updated object. If you delete an object via ``delete_object``, the object will be removed from any indexes on the object's collection. If a function is provided for ``transformer``, the values extracted from each object in the collection will be passed to the ``transformer``. The ``transformer`` should return a list of values that will go into the index. If ``unique`` is true, then there may be at most one object in the collection for each unique combination of values of the ``fields`` provided. If ``case_insensitive`` is true, then the value stored in the index will be lower-cased and comparisons thereto will be lower-cased as well.
375,445
def pysal_G(self, **kwargs):
    if self.weights is None:
        self.raster_weights(**kwargs)
    rasterf = self.raster.flatten()
    rasterf = rasterf[rasterf.mask == False]
    self.G = pysal.G(rasterf, self.weights, **kwargs)
Compute Getis and Ord’s G for GeoRaster Usage: geo.pysal_G(permutations = 1000, rook=True) arguments passed to raster_weights() and pysal.G See help(gr.raster_weights), help(pysal.G) for options
375,446
def _clean_metadata(self):
    desc = self.metadata.get_long_description()
    if not isinstance(desc, six.text_type):
        desc = desc.decode()
    lines = io.StringIO(desc)

    def trim_eight_spaces(line):
        if line.startswith(' ' * 8):
            line = line[8:]
        return line

    lines = itertools.chain(
        itertools.islice(lines, 1),
        six.moves.map(trim_eight_spaces, lines),
    )
    self.metadata.long_description = ''.join(lines)
the long description doesn't load properly (gets unwanted indents), so fix it.
375,447
def write_stream(self, stream, validate=True):
    content = self.dump(validate=validate)
    try:
        if stream.seekable():
            stream.seek(0)
            stream.truncate(0)
        stream.write(content)
    except OSError as e:
        raise error.WriteError(e.errno)
Write :attr:`metainfo` to a file-like object Before any data is written, `stream` is truncated if possible. :param stream: Writable file-like object (e.g. :class:`io.BytesIO`) :param bool validate: Whether to run :meth:`validate` first :raises WriteError: if writing to `stream` fails :raises MetainfoError: if `validate` is `True` and :attr:`metainfo` contains invalid data
375,448
def text(self, value):
    if value in (None, '') or value.strip() == "":
        raise AttributeError("Empty text value is invalid.")
    self._text = value
Set Text content for Comment (validation of input)
375,449
def get(self, copy=False):
    array = getattr(self.owner, self.name)
    if copy:
        return array.copy()
    else:
        return array
Return the value of the attribute
375,450
def retry(retries=10, wait=5, catch=None):
    catch = catch or (Exception,)

    def real_retry(function):
        def wrapper(*args, **kwargs):
            for _ in range(retries):
                try:
                    ret = function(*args, **kwargs)
                    return ret
                except catch:
                    time.sleep(wait)
                except Exception as e:
                    raise e
            else:
                # All retries exhausted without returning.
                raise DSBException()
        return wrapper
    return real_retry
Decorator to retry on exceptions raised
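A self-contained usage sketch of the same pattern (DSBException is swapped for RuntimeError so the snippet runs standalone; parameters are shrunk for speed):

import time

def retry(retries=3, wait=0.01, catch=None):
    catch = catch or (Exception,)
    def real_retry(function):
        def wrapper(*args, **kwargs):
            for _ in range(retries):
                try:
                    return function(*args, **kwargs)
                except catch:
                    time.sleep(wait)
            raise RuntimeError("all retries failed")
        return wrapper
    return real_retry

attempts = []

@retry(retries=3, catch=(ValueError,))
def flaky():
    attempts.append(1)
    if len(attempts) < 3:
        raise ValueError("not yet")
    return "ok"

assert flaky() == "ok" and len(attempts) == 3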
375,451
def global_closeness_centrality(g, node=None, normalize=True):
    if not node:
        C = {}
        for node in g.nodes():
            C[node] = global_closeness_centrality(g, node, normalize=normalize)
        return C
    values = nx.shortest_path_length(g, node).values()
    c = sum([1. / pl for pl in values if pl != 0.]) / len(g)
    if normalize:
        ac = 0
        for sg in nx.connected_component_subgraphs(g):
            if len(sg.nodes()) > 1:
                aspl = nx.average_shortest_path_length(sg)
                ac += (1. / aspl) * (float(len(sg)) / float(len(g)) ** 2)
        c = c / ac
    return c
Calculates global closeness centrality for one or all nodes in the network. See :func:`.node_global_closeness_centrality` for more information. Parameters ---------- g : networkx.Graph normalize : boolean If True, normalizes centrality based on the average shortest path length. Default is True. Returns ------- C : dict Dictionary of results, with node identifiers as keys and gcc as values.
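The unnormalized value for one node is just the mean inverse path length; a small check on a path graph (this avoids the deprecated connected_component_subgraphs call used above):

import networkx as nx

g = nx.path_graph(4)  # 0 - 1 - 2 - 3
values = nx.shortest_path_length(g, 0).values()  # path lengths 0, 1, 2, 3
c = sum(1. / pl for pl in values if pl != 0.) / len(g)
print(round(c, 3))  # 0.458 = (1/1 + 1/2 + 1/3) / 4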
375,452
def purge(self):
    try:
        return self._api.purge()
    except AttributeError:
        while True:
            lst = self.list()
            if len(lst) == 0:
                break
            for task in lst:
                self.delete(task)
            self.wait()
        return self
Deletes all tasks in the queue.
375,453
def estimate_lmax(self, method='lanczos'):
    if method == self._lmax_method:
        return
    self._lmax_method = method
    if method == 'lanczos':
        try:
            lmax = sparse.linalg.eigsh(self.L.asfptype(), k=1, tol=5e-3,
                                       ncv=min(self.N, 10),
                                       return_eigenvectors=False)
            lmax = lmax[0]
            assert lmax <= self._get_upper_bound() + 1e-12
            lmax *= 1.01  # Increase by 1 percent to be robust to errors.
            self._lmax = lmax
        except sparse.linalg.ArpackNoConvergence:
            # Error message assumed; the extraction dropped it.
            raise ValueError('The Lanczos method did not converge. '
                             'Consider using the bounds method.')
    elif method == 'bounds':
        self._lmax = self._get_upper_bound()
    else:
        raise ValueError('Unknown method {}'.format(method))
r"""Estimate the Laplacian's largest eigenvalue (cached). The result is cached and accessible by the :attr:`lmax` property. Exact value given by the eigendecomposition of the Laplacian, see :func:`compute_fourier_basis`. That estimation is much faster than the eigendecomposition. Parameters ---------- method : {'lanczos', 'bounds'} Whether to estimate the largest eigenvalue with the implicitly restarted Lanczos method, or to return an upper bound on the spectrum of the Laplacian. Notes ----- Runs the implicitly restarted Lanczos method (as implemented in :func:`scipy.sparse.linalg.eigsh`) with a large tolerance, then increases the calculated largest eigenvalue by 1 percent. For much of the PyGSP machinery, we need to approximate filter kernels on an interval that contains the spectrum of L. The only cost of using a larger interval is that the polynomial approximation over the larger interval may be a slightly worse approximation on the actual spectrum. As this is a very mild effect, it is not necessary to obtain very tight bounds on the spectrum of L. A faster but less tight alternative is to use known algebraic bounds on the graph Laplacian. Examples -------- >>> G = graphs.Logo() >>> G.compute_fourier_basis() # True value. >>> print('{:.2f}'.format(G.lmax)) 13.78 >>> G.estimate_lmax(method='lanczos') # Estimate. >>> print('{:.2f}'.format(G.lmax)) 13.92 >>> G.estimate_lmax(method='bounds') # Upper bound. >>> print('{:.2f}'.format(G.lmax)) 18.58
375,454
def format_color(text, color, use_color_setting):
    if not use_color_setting:
        return text
    else:
        return '{}{}{}'.format(color, text, NORMAL)
Format text with color. Args: text - Text to be formatted with color if `use_color_setting` color - The color start string use_color_setting - Whether or not to color
375,455
def exam_reliability(x_axis, x_axis_new, reliable_distance, precision=0.0001):
    x_axis = x_axis[::-1]
    x_axis.append(-2 ** 32)  # sentinel smaller than any real point

    distance_to_closest_point = list()
    for t in x_axis_new:
        while 1:
            try:
                x = x_axis.pop()
                if x <= t:
                    left = x
                else:
                    right = x
                    x_axis.append(right)
                    x_axis.append(left)
                    left_dist, right_dist = (t - left), (right - t)
                    if left_dist <= right_dist:
                        distance_to_closest_point.append(left_dist)
                    else:
                        distance_to_closest_point.append(right_dist)
                    break
            except:
                distance_to_closest_point.append(t - left)
                break

    reliable_flag = list()
    for dist in distance_to_closest_point:
        if dist - precision - reliable_distance <= 0:
            reliable_flag.append(True)
        else:
            reliable_flag.append(False)
    return reliable_flag
When we do linear interpolation on x_axis and derive values for x_axis_new, we also evaluate how much we can trust those interpolated data points. This is how it works: for each new point xi in x_axis_new, find the closest point in x_axis and call the distance #dist. Compare this to #reliable_distance: if #dist < #reliable_distance, then we can trust it; otherwise, we can't. The precision parameter handles decimal-precision problems, because 1.0 may actually be 1.00000000001 or 0.999999999999 in a computer system. So we define that if ``dist`` + ``precision`` <= ``reliable_distance``, then we can trust it; else, we can't. Here is an O(n) algorithm implementation, a big improvement over the classic binary-search one, which is O(n^2).
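The reliability rule itself, written naively (an O(n·m) scan rather than the O(n) pop-based walk above) to make the check explicit:

x_axis = [0.0, 1.0, 2.0, 3.0]
x_axis_new = [0.4, 1.05, 2.5]
reliable_distance = 0.1
precision = 0.0001

flags = []
for t in x_axis_new:
    dist = min(abs(t - x) for x in x_axis)  # distance to the closest known point
    flags.append(dist - precision - reliable_distance <= 0)

print(flags)  # [False, True, False]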
375,456
def get_reward_and_done(board):
    # Sum every row, column, and both diagonals; a sum of +/-3 means a win.
    all_sums = [np.sum(board[i, :]) for i in range(3)]
    all_sums.extend([np.sum(board[:, i]) for i in range(3)])
    all_sums.append(np.sum([board[i, i] for i in range(3)]))
    all_sums.append(np.sum([board[i, 2 - i] for i in range(3)]))

    if -3 in all_sums:
        return -1, True
    if 3 in all_sums:
        return 1, True

    done = True
    if get_open_spaces(board):
        done = False
    return 0, done
Given a representation of the board, returns reward and done.
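A quick check of the line-sum trick on a concrete tic-tac-toe board (just the sum computation, without the surrounding function):

import numpy as np

board = np.array([[1, 1, 1],
                  [-1, -1, 0],
                  [0, 0, 0]])

all_sums = [np.sum(board[i, :]) for i in range(3)]
all_sums.extend([np.sum(board[:, i]) for i in range(3)])
all_sums.append(np.sum([board[i, i] for i in range(3)]))
all_sums.append(np.sum([board[i, 2 - i] for i in range(3)]))

print(3 in all_sums)  # True: the top row is a win for the +1 player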
375,457
def connection_made(self, transport: asyncio.transports.Transport):
    self._transport = transport
    self._remote_host = self._transport.get_extra_info('peername')
    self._extra = {"client": str(self._remote_host)}
    self.connections.add(self)
    self._stream_reader = asyncio.StreamReader(loop=self._loop)
    self._stream_writer = asyncio.StreamWriter(transport, self,
                                               self._stream_reader,
                                               self._loop)
    super().connection_made(transport)
    if self.timeout:
        self._timeout_handler = self._loop.call_soon(self.timeout_callback)
    self._handlertask = asyncio.ensure_future(self.query_handler())
    if self.debug:
        access_logger.info("connected", extra=self._extra)
Callback fired when the connection is established. It sets a few parameters and puts the listening task on the event loop; if a timeout is configured, timeout_callback is scheduled on the event loop as well. Parameters: transport (asyncio.Transports): - the transport object of the connection
375,458
def build_duration_pretty(self):
    from ambry.util import pretty_time
    from time import time
    if not self.state.building:
        return None
    built = self.state.built or time()
    try:
        return pretty_time(int(built) - int(self.state.building))
    except TypeError:
        return None
Return the difference between build and build_done states, in a human readable format
375,459
def payment_init(self, wallet):
    wallet = self._process_value(wallet, 'wallet')
    payload = {"wallet": wallet}
    resp = self.call('payment_init', payload)
    return resp['status'] == 'Ready'
Marks all accounts in wallet as available for being used as a payment session. :param wallet: Wallet to init payment in :type wallet: str :raises: :py:exc:`nano.rpc.RPCException` >>> rpc.payment_init( ... wallet="000D1BAEC8EC208142C99059B393051BAC8380F9B5A2E6B2489A277D81789F3F" ... ) True
375,460
def default_instance(cls):
    if cls._instance is None:
        with cls._instance_lock:
            if cls._instance is None:
                cls._instance = FlowsLogger()
    return cls._instance
For use like a singleton, return the existing instance of the object or a new instance
375,461
def call_hook(self, name, **kwargs): return [y for y in [x(**kwargs) for x, _ in self._hooks.get(name, [])] if y is not None]
Call all hooks registered with this name. Returns a list of the returns values of the hooks (in the order the hooks were added)
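A self-contained sketch of the hook registry this method belongs to (the Hooks class and add_hook method are made up for illustration; only call_hook mirrors the source):

class Hooks:
    def __init__(self):
        self._hooks = {}

    def add_hook(self, name, func, priority=0):
        self._hooks.setdefault(name, []).append((func, priority))

    def call_hook(self, name, **kwargs):
        return [y for y in [x(**kwargs) for x, _ in self._hooks.get(name, [])]
                if y is not None]

h = Hooks()
h.add_hook("greet", lambda who: "hello " + who)
h.add_hook("greet", lambda who: None)  # None results are filtered out
assert h.call_hook("greet", who="world") == ["hello world"]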
375,462
def set_spcPct(self, value):
    self._remove_spcPts()
    spcPct = self.get_or_add_spcPct()
    spcPct.val = value
Set spacing to *value* lines, e.g. 1.75 lines. A ./a:spcPts child is removed if present.
375,463
def iterrows(self, workbook=None):
    resolved_tables = []
    max_height = 0
    max_width = 0
    self.__formula_values = {}
    for name, (table, (row, col)) in list(self.__tables.items()):
        self.__tables[None] = (table, (row, col))
        data = table.get_data(workbook, row, col, self.__formula_values)
        del self.__tables[None]
        height, width = data.shape
        upper_left = (row, col)
        lower_right = (row + height - 1, col + width - 1)
        max_height = max(max_height, lower_right[0] + 1)
        max_width = max(max_width, lower_right[1] + 1)
        resolved_tables.append((name, data, upper_left, lower_right))

    for row, col in self.__values.keys():
        max_width = max(max_width, row + 1)
        max_height = max(max_height, col + 1)

    table = [[None] * max_width for i in range(max_height)]
    for name, data, upper_left, lower_right in resolved_tables:
        for i, r in enumerate(range(upper_left[0], lower_right[0] + 1)):
            for j, c in enumerate(range(upper_left[1], lower_right[1] + 1)):
                table[r][c] = data[i][j]

    for (r, c), value in self.__values.items():
        if isinstance(value, Value):
            value = value.value
        if isinstance(value, Expression):
            if value.has_value:
                self.__formula_values[(r, c)] = value.value
            value = value.get_formula(workbook, r, c)
        table[r][c] = value

    for row in table:
        yield row
Yield rows as lists of data. The data is exactly as it is in the source pandas DataFrames and any formulas are not resolved.
375,464
def _parse_lines(self, diff_lines):
    added_lines = []
    deleted_lines = []
    current_line_new = None
    current_line_old = None
    for line in diff_lines:
        # Start of a hunk definition: retrieve the starting line number.
        if line.startswith('@@'):
            line_num = self._parse_hunk_line(line)
            current_line_new, current_line_old = line_num, line_num
        # Added/modified line. This branch is reconstructed: the extraction
        # collapsed it into the surrounding branches, but added_lines is
        # returned and must be populated somewhere.
        elif line.startswith('+'):
            if current_line_new is not None:
                added_lines.append(current_line_new)
                current_line_new += 1
        # Deleted line.
        elif line.startswith('-'):
            if current_line_old is not None:
                deleted_lines.append(current_line_old)
                current_line_old += 1
        # Context line: advance both counters.
        else:
            if current_line_old is not None:
                current_line_old += 1
            if current_line_new is not None:
                current_line_new += 1
    return added_lines, deleted_lines
Given the diff lines output from `git diff` for a particular source file, return a tuple of `(ADDED_LINES, DELETED_LINES)` where `ADDED_LINES` and `DELETED_LINES` are lists of line numbers added/deleted respectively. Raises a `GitDiffError` if the diff lines are in an invalid format.
375,465
def cape_cin(pressure, temperature, dewpt, parcel_profile):
    lfc_pressure, _ = lfc(pressure, temperature, dewpt,
                          parcel_temperature_profile=parcel_profile)

    # If there is no LFC, no need to proceed.
    if np.isnan(lfc_pressure):
        return 0 * units('J/kg'), 0 * units('J/kg')
    else:
        lfc_pressure = lfc_pressure.magnitude

    el_pressure, _ = el(pressure, temperature, dewpt,
                        parcel_temperature_profile=parcel_profile)

    # No EL: use the top reading of the sounding.
    if np.isnan(el_pressure):
        el_pressure = pressure[-1].magnitude
    else:
        el_pressure = el_pressure.magnitude

    # Difference between the parcel path and measured temperature profiles.
    y = (parcel_profile - temperature).to(units.degK)

    x, y = _find_append_zero_crossings(np.copy(pressure), y)

    # CAPE: integrate between the LFC and EL.
    p_mask = _less_or_close(x, lfc_pressure) & _greater_or_close(x, el_pressure)
    x_clipped = x[p_mask]
    y_clipped = y[p_mask]
    cape = (mpconsts.Rd
            * (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(units('J/kg'))

    # CIN: integrate between the surface and LFC.
    p_mask = _greater_or_close(x, lfc_pressure)
    x_clipped = x[p_mask]
    y_clipped = y[p_mask]
    cin = (mpconsts.Rd
           * (np.trapz(y_clipped, np.log(x_clipped)) * units.degK)).to(units('J/kg'))

    return cape, cin
r"""Calculate CAPE and CIN. Calculate the convective available potential energy (CAPE) and convective inhibition (CIN) of a given upper air profile and parcel path. CIN is integrated between the surface and LFC, CAPE is integrated between the LFC and EL (or top of sounding). Intersection points of the measured temperature profile and parcel profile are linearly interpolated. Parameters ---------- pressure : `pint.Quantity` The atmospheric pressure level(s) of interest. The first entry should be the starting point pressure. temperature : `pint.Quantity` The atmospheric temperature corresponding to pressure. dewpt : `pint.Quantity` The atmospheric dew point corresponding to pressure. parcel_profile : `pint.Quantity` The temperature profile of the parcel Returns ------- `pint.Quantity` Convective available potential energy (CAPE). `pint.Quantity` Convective inhibition (CIN). Notes ----- Formula adopted from [Hobbs1977]_. .. math:: \text{CAPE} = -R_d \int_{LFC}^{EL} (T_{parcel} - T_{env}) d\text{ln}(p) .. math:: \text{CIN} = -R_d \int_{SFC}^{LFC} (T_{parcel} - T_{env}) d\text{ln}(p) * :math:`CAPE` Convective available potential energy * :math:`CIN` Convective inhibition * :math:`LFC` Pressure of the level of free convection * :math:`EL` Pressure of the equilibrium level * :math:`SFC` Level of the surface or beginning of parcel path * :math:`R_d` Gas constant * :math:`g` Gravitational acceleration * :math:`T_{parcel}` Parcel temperature * :math:`T_{env}` Environment temperature * :math:`p` Atmospheric pressure See Also -------- lfc, el
375,466
def find(cls, dtype):
    try:
        return cls._member_map_[dtype]
    except KeyError:
        try:
            dtype = numpy.dtype(dtype).type
        except TypeError:
            # Not a numpy dtype: match against the raw enum values.
            for ndstype in cls._member_map_.values():
                if ndstype.value is dtype:
                    return ndstype
        else:
            for ndstype in cls._member_map_.values():
                if ndstype.value and ndstype.numpy_dtype is dtype:
                    return ndstype
    # Error message assumed; the extraction dropped it.
    raise ValueError('%s is not a valid %s' % (dtype, cls.__name__))
Returns the NDS2 type corresponding to the given python type
375,467
def _make_value_pb(value):
    if value is None:
        return Value(null_value="NULL_VALUE")
    if isinstance(value, (list, tuple)):
        return Value(list_value=_make_list_value_pb(value))
    if isinstance(value, bool):
        return Value(bool_value=value)
    if isinstance(value, six.integer_types):
        return Value(string_value=str(value))
    if isinstance(value, float):
        if math.isnan(value):
            return Value(string_value="NaN")
        if math.isinf(value):
            if value > 0:
                return Value(string_value="Infinity")
            else:
                return Value(string_value="-Infinity")
        return Value(number_value=value)
    if isinstance(value, datetime_helpers.DatetimeWithNanoseconds):
        return Value(string_value=value.rfc3339())
    if isinstance(value, datetime.datetime):
        return Value(string_value=_datetime_to_rfc3339(value))
    if isinstance(value, datetime.date):
        return Value(string_value=value.isoformat())
    if isinstance(value, six.binary_type):
        value = _try_to_coerce_bytes(value)
        return Value(string_value=value)
    if isinstance(value, six.text_type):
        return Value(string_value=value)
    if isinstance(value, ListValue):
        return Value(list_value=value)
    raise ValueError("Unknown type: %s" % (value,))
Helper for :func:`_make_list_value_pbs`. :type value: scalar value :param value: value to convert :rtype: :class:`~google.protobuf.struct_pb2.Value` :returns: value protobufs :raises ValueError: if value is not of a known scalar type.
375,468
def same_guid(post, parameter=DEFAULT_SIMILARITY_TIMESPAN):
    from feedjack.models import Post
    if isinstance(parameter, types.StringTypes):
        parameter = int(parameter.strip())
    similar = Post.objects.filtered(for_display=False)\
        .exclude(id=post.id).filter(guid=post.guid)
    if parameter:
        similar = similar.filter(
            date_updated__gt=timezone.now() - timedelta(seconds=parameter))
    return not bool(similar.exists())
Skip posts with exactly same GUID. Parameter: comparison timespan, seconds (int, 0 = inf, default: {0}).
375,469
def _script_names(dist, script_name, is_gui):
    if dist_in_usersite(dist):
        bin_dir = bin_user
    else:
        bin_dir = bin_py
    exe_name = os.path.join(bin_dir, script_name)
    paths_to_remove = [exe_name]
    if WINDOWS:
        paths_to_remove.append(exe_name + '.exe')
        paths_to_remove.append(exe_name + '.exe.manifest')
        if is_gui:
            paths_to_remove.append(exe_name + '-script.pyw')
        else:
            paths_to_remove.append(exe_name + '-script.py')
    return paths_to_remove
Create the fully qualified name of the files created by {console,gui}_scripts for the given ``dist``. Returns the list of file names
375,470
def bar3_chart(self, title, labels, data1, file_name, data2, data3, legend=["", ""]):
    colors = ["orange", "grey"]

    data1 = self.__convert_none_to_zero(data1)
    data2 = self.__convert_none_to_zero(data2)
    data3 = self.__convert_none_to_zero(data3)

    fig, ax = plt.subplots(1)
    xpos = np.arange(len(data1))
    width = 0.28

    plt.title(title)
    y_pos = np.arange(len(data1))

    ppl.bar(xpos + width + width, data3, color="orange", width=0.28, annotate=True)
    ppl.bar(xpos + width, data1, color='grey', width=0.28, annotate=True)
    ppl.bar(xpos, data2, grid='y', width=0.28, annotate=True)
    plt.xticks(xpos + width, labels)
    plt.legend(legend, loc=2)

    os.makedirs(os.path.dirname(file_name), exist_ok=True)
    plt.savefig(file_name)
    plt.close()
Generate a bar plot with three columns in each x position and save it to file_name :param title: title to be used in the chart :param labels: list of labels for the x axis :param data1: values for the first columns :param file_name: name of the file in which to save the chart :param data2: values for the second columns :param data3: values for the third columns :param legend: legend to be shown in the chart :return:
375,471
def _UserUpdateIgnoredDirs(self, origIgnoredDirs=[]):
    ignoredDirs = list(origIgnoredDirs)

    inputDone = None
    while inputDone is None:
        # The single-letter menu keys ('r', 'f', 'x') are assumed; the
        # extraction dropped them from the prompt and the comparisons.
        prompt = "Enter new directory to ignore (e.g. DONE), " \
                 "'r' to reset directory list, " \
                 "'f' to finish or " \
                 "'x' to exit: "
        response = goodlogging.Log.Input("CLEAR", prompt)

        if response.lower() == 'x':
            sys.exit(0)
        elif response.lower() == 'f':
            inputDone = 1
        elif response.lower() == 'r':
            ignoredDirs = []
        else:
            if response is not None:
                ignoredDirs.append(response)

    ignoredDirs = set(ignoredDirs)
    origIgnoredDirs = set(origIgnoredDirs)

    if ignoredDirs != origIgnoredDirs:
        self._db.PurgeIgnoredDirs()
        for ignoredDir in ignoredDirs:
            self._db.AddIgnoredDir(ignoredDir)
    return list(ignoredDirs)
Add ignored directories to database table. Always called if the database table is empty. User can build a list of entries to add to the database table (one entry at a time). Once finished they select the finish option and all entries will be added to the table. They can reset the list at any time before finishing. Parameters ---------- origIgnoredDirs : list [optional : default = []] List of original ignored directories from database table. Returns ---------- string List of updated ignored directories from database table.
375,472
def liquid_precip_ratio(pr, prsn=None, tas=None, freq='QS-DEC'):
    if prsn is None:
        # The unit strings below are assumed; the extraction dropped the
        # literals. Freezing point is 0 degC converted to the units of tas.
        tu = units.parse_units(tas.attrs['units'].replace('-', '**-'))
        fu = 'degC'
        frz = 0
        if fu != tu:
            frz = units.convert(frz, fu, tu)
        prsn = pr.where(tas < frz, 0)

    tot = pr.resample(time=freq).sum(dim='time')
    rain = tot - prsn.resample(time=freq).sum(dim='time')
    ratio = rain / tot
    return ratio
r"""Ratio of rainfall to total precipitation The ratio of total liquid precipitation over the total precipitation. If solid precipitation is not provided, then precipitation is assumed solid if the temperature is below 0°C. Parameters ---------- pr : xarray.DataArray Mean daily precipitation flux [Kg m-2 s-1] or [mm]. prsn : xarray.DataArray Mean daily solid precipitation flux [Kg m-2 s-1] or [mm]. tas : xarray.DataArray Mean daily temperature [℃] or [K] freq : str Resampling frequency Returns ------- xarray.DataArray Ratio of rainfall to total precipitation Notes ----- Let :math:`PR_i` be the mean daily precipitation of day :math:`i`, then for a period :math:`j` starting at day :math:`a` and finishing on day :math:`b`: .. math:: PR_{ij} = \sum_{i=a}^{b} PR_i PRwet_{ij} See also -------- winter_rain_ratio
375,473
def get_first_language(self, site_id=None):
    if site_id is None:
        site_id = getattr(settings, 'SITE_ID', None)
    try:
        return self[site_id][0]['code']
    except (KeyError, IndexError):
        # No configuration, fall back to the default language.
        return self['default']['code']
Return the first language for the current site. This can be used for user interfaces, where the languages are displayed in tabs.
375,474
async def uint(self, elem, elem_type, params=None):
    if self.writing:
        return await x.dump_uint(self.iobj, elem, elem_type.WIDTH)
    else:
        return await x.load_uint(self.iobj, elem_type.WIDTH)
Integer types :param elem: :param elem_type: :param params: :return:
375,475
def migrateUp(self):
    siteStore = self.store.parent

    def _():
        siteStoreSubRef = siteStore.getItemByID(self.store.idInParent)
        self.cloneInto(siteStore, siteStoreSubRef)
        IScheduler(self.store).migrateUp()
    siteStore.transact(_)
Copy this LoginAccount and all associated LoginMethods from my store (which is assumed to be a SubStore, most likely a user store) into the site store which contains it.
375,476
def linguist_field_names(self): return list(self.model._linguist.fields) + list( utils.get_language_fields(self.model._linguist.fields) )
Returns linguist field names (example: "title" and "title_fr").
375,477
def check_suspension(user_twitter_id_list):
    twitter = login()

    suspended_user_twitter_id_list = list()
    non_suspended_user_twitter_id_list = list()
    unknown_status_user_twitter_id_list = list()

    append_suspended_twitter_user = suspended_user_twitter_id_list.append
    append_non_suspended_twitter_user = non_suspended_user_twitter_id_list.append
    extend_unknown_status_twitter_user = unknown_status_user_twitter_id_list.extend

    user_lookup_counter = 0
    user_lookup_time_window_start = time.perf_counter()

    for hundred_length_sub_list in chunks(list(user_twitter_id_list), 100):
        try:
            api_result, user_lookup_counter, user_lookup_time_window_start \
                = safe_twitter_request_handler(twitter_api_func=twitter.lookup_user,
                                               call_rate_limit=60,
                                               call_counter=user_lookup_counter,
                                               time_window_start=user_lookup_time_window_start,
                                               max_retries=10,
                                               wait_period=2,
                                               parameters=hundred_length_sub_list)
            hundred_length_sub_list = set(hundred_length_sub_list)
            for hydrated_user_object in api_result:
                hydrated_twitter_user_id = hydrated_user_object["id"]
                if hydrated_twitter_user_id in hundred_length_sub_list:
                    append_non_suspended_twitter_user(hydrated_twitter_user_id)
                else:
                    append_suspended_twitter_user(hydrated_twitter_user_id)
        except twython.TwythonError:
            extend_unknown_status_twitter_user(hundred_length_sub_list)
        except URLError:
            extend_unknown_status_twitter_user(hundred_length_sub_list)
        except BadStatusLine:
            extend_unknown_status_twitter_user(hundred_length_sub_list)

    return (suspended_user_twitter_id_list,
            non_suspended_user_twitter_id_list,
            unknown_status_user_twitter_id_list)
Looks up a list of user ids and checks whether they are currently suspended. Input: - user_twitter_id_list: A python list of Twitter user ids in integer format to be looked-up. Outputs: - suspended_user_twitter_id_list: A python list of suspended Twitter user ids in integer format. - non_suspended_user_twitter_id_list: A python list of non suspended Twitter user ids in integer format. - unknown_status_user_twitter_id_list: A python list of unknown status Twitter user ids in integer format.
375,478
def bootstrap_counts_singletraj(dtraj, lagtime, n):
    L = len(dtraj)
    if lagtime > L:
        # Error message assumed; the extraction dropped it.
        raise ValueError('lagtime ' + str(lagtime)
                         + ' is longer than the trajectory length ' + str(L))
    I = np.random.randint(0, L - lagtime - 1, size=n)
    J = I + lagtime
    return (dtraj[I], dtraj[J])
Samples n counts at the given lagtime from the given trajectory
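The index arithmetic is the whole trick; a runnable illustration on a toy discrete trajectory:

import numpy as np

dtraj = np.array([0, 0, 1, 1, 2, 2, 0, 1])
lagtime, n = 2, 5

L = len(dtraj)
I = np.random.randint(0, L - lagtime - 1, size=n)  # random start indices
J = I + lagtime                                    # matching lagged indices
print(dtraj[I], dtraj[J])  # n sampled (state_t, state_t+lag) pairs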
375,479
def tomography_basis(basis, prep_fun=None, meas_fun=None):
    ret = TomographyBasis(basis)
    ret.prep_fun = prep_fun
    ret.meas_fun = meas_fun
    return ret
Generate a TomographyBasis object. See TomographyBasis for further details. Args: prep_fun (callable) optional: the function which adds preparation gates to a circuit. meas_fun (callable) optional: the function which adds measurement gates to a circuit. Returns: TomographyBasis: A tomography basis.
375,480
def text(self, prompt, default=None):
    # The fallback literals are assumed; the extraction dropped them.
    prompt = prompt if prompt is not None else ''
    prompt += " [{0}]: ".format(default) if default is not None else ": "
    return self.input(curry(filter_text, default=default), prompt)
Prompts the user for some text, with optional default
375,481
def blank(columns=1, name=None):
    if name is None:
        # Default field name assumed; the extraction dropped the literal.
        name = 'Blank Field'
    # The regex literal is assumed: it matches exactly `columns` spaces.
    field = pp.Regex('[ ]{' + str(columns) + '}')
    field.leaveWhitespace()
    field.suppress()
    field.setName(name)
    return field
Creates the grammar for a blank field. These are for constant empty strings which should be ignored, as they are used just as fillers. :param columns: number of columns, which is the required number of whitespaces :param name: name for the field :return: grammar for the blank field
375,482
def login(self, user, passwd):
    resp = self.post('token', json={'username': user, 'password': passwd})
    self._token = resp.json()['response']['token']
Logs the user into SecurityCenter and stores the needed token and cookies.
375,483
def beta_to_uni(text, strict=False):
    # Check whether the requested conversion trie already exists; build and
    # cache it otherwise.
    param_key = (strict,)
    try:
        t = _BETA_CONVERSION_TRIES[param_key]
    except KeyError:
        t = _create_conversion_trie(*param_key)
        _BETA_CONVERSION_TRIES[param_key] = t

    transform = []
    idx = 0
    possible_word_boundary = False

    while idx < len(text):
        if possible_word_boundary and _penultimate_sigma_word_final(transform):
            transform[-2] = _FINAL_LC_SIGMA

        step = t.longest_prefix(text[idx:idx + _MAX_BETA_TOKEN_LEN])

        if step:
            possible_word_boundary = text[idx] in _BETA_PUNCTUATION
            key, value = step
            transform.append(value)
            idx += len(key)
        else:
            possible_word_boundary = True
            transform.append(text[idx])
            idx += 1

    # Check one last time for a final sigma.
    if possible_word_boundary and _penultimate_sigma_word_final(transform):
        transform[-2] = _FINAL_LC_SIGMA
    elif len(transform) > 0 and transform[-1] == _MEDIAL_LC_SIGMA:
        transform[-1] = _FINAL_LC_SIGMA

    converted = ''.join(transform)
    return converted
Converts the given text from betacode to unicode. Args: text: The beta code text to convert. All of this text must be betacode. strict: Flag to allow for flexible diacritic order on input. Returns: The converted text.
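Hypothetical usage, assuming this function ships in a `betacode` distribution with a `conv` module (the import path is an assumption; output shown for the standard betacode mapping):

import betacode.conv  # assumed distribution: `pip install betacode`

print(betacode.conv.beta_to_uni('lo/gos'))  # λόγος
print(betacode.conv.beta_to_uni('mh=nin'))  # μῆνιν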
375,484
def __get_issue_notes(self, issue_id):
    notes = []
    group_notes = self.client.notes(GitLabClient.ISSUES, issue_id)
    for raw_notes in group_notes:
        for note in json.loads(raw_notes):
            # Key names are assumed; the extraction dropped the literals.
            note_id = note['id']
            note['award_emoji_data'] = \
                self.__get_note_award_emoji(GitLabClient.ISSUES, issue_id, note_id)
            notes.append(note)
    return notes
Get issue notes
375,485
def safe_lshift(a, b):
    if b > MAX_SHIFT:
        raise RuntimeError("Invalid left shift, max left shift is {}".format(MAX_SHIFT))
    return a << b
safe version of lshift
375,486
def load_backend(backend_name):
    try:
        if len(backend_name.split(".")) > 1:
            mod = import_module(backend_name)
        else:
            mod = import_module("spamc.backend_%s" % backend_name)
        return mod
    except ImportError:
        error_msg = "%s isn't a spamc backend" % backend_name
        raise ImportError(error_msg)
load pool backend.
375,487
def _set_origin(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # The string arguments below are assumed from the docstring and from
        # typical pybind-generated code; the extraction dropped the literals.
        t = YANGDynClass(v, base=origin.origin, is_container='container',
                         presence=False, yang_name="origin", rest_name="origin",
                         parent=self, path_helper=self._path_helper,
                         extmethods=self._extmethods, register_paths=True,
                         extensions={u'tailf-common': {u'info': u'BGP origin code',
                                                       u'cli-incomplete-no': None}},
                         namespace='urn:brocade.com:mgmt:brocade-ip-policy',
                         defining_module='brocade-ip-policy',
                         yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': 'origin must be of a type compatible with container',
            'defined-type': 'container',
            'generated-type': 'YANGDynClass(base=origin.origin, ...)',
        })
    self.__origin = t
    if hasattr(self, '_set'):
        self._set()
Setter method for origin, mapped from YANG variable /routing_system/route_map/content/set/origin (container) If this variable is read-only (config: false) in the source YANG file, then _set_origin is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_origin() directly. YANG Description: BGP origin code
375,488
def create_file_api(conf):
    # Config key and environment variable names are assumed; the extraction
    # dropped every string literal from this function.
    api_key = conf.config.get('api-key', os.environ.get('SMARTLING_API_KEY'))
    project_id = conf.config.get('project-id', os.environ.get('SMARTLING_PROJECT_ID'))
    if not project_id or not api_key:
        raise SmarterlingError('An api-key and project-id are required')
    proxy_settings = None
    if conf.config.has_key('proxy-settings'):
        proxy_settings = ProxySettings(
            conf.config.get('proxy-settings').get('username', ''),
            conf.config.get('proxy-settings').get('password', ''),
            conf.config.get('proxy-settings').get('host', ''),
            int(conf.config.get('proxy-settings').get('port', 0)))
    return SmartlingFileApiFactory().getSmartlingTranslationApi(
        not conf.config.get('sandbox', False),
        api_key,
        project_id,
        proxySettings=proxy_settings)
Creates a SmartlingFileApi from the given config
375,489
def session_hook(exception):
    safeprint(
        "The resource you are trying to access requires you to "
        "re-authenticate with specific identities."
    )

    params = exception.raw_json["authorization_parameters"]
    message = params.get("session_message")
    if message:
        safeprint("message: {}".format(message))

    identities = params.get("session_required_identities")
    if identities:
        id_str = " ".join(identities)
        safeprint(
            "Please run\n\n"
            "    globus session update {}\n\n"
            "to re-authenticate with the required identities".format(id_str)
        )
    else:
        # Reconstructed fallback: the extracted source reused a stale format
        # call here ('"with specific identities".format(id_str)'), which
        # would raise NameError since id_str is unbound on this branch.
        safeprint("Please use 'globus session update' to re-authenticate "
                  "with specific identities")

    exit_with_mapped_status(exception.http_status)
Expects an exception with an authorization_parameters field in its raw_json
375,490
def wait(self, jobs=None, timeout=-1):
    tic = time.time()
    if jobs is None:
        theids = self.outstanding
    else:
        if isinstance(jobs, (int, basestring, AsyncResult)):
            jobs = [jobs]
        theids = set()
        for job in jobs:
            if isinstance(job, int):
                job = self.history[job]
            elif isinstance(job, AsyncResult):
                map(theids.add, job.msg_ids)
                continue
            theids.add(job)
    if not theids.intersection(self.outstanding):
        return True
    self.spin()
    while theids.intersection(self.outstanding):
        if timeout >= 0 and (time.time() - tic) > timeout:
            break
        time.sleep(1e-3)
        self.spin()
    return len(theids.intersection(self.outstanding)) == 0
waits on one or more `jobs`, for up to `timeout` seconds. Parameters ---------- jobs : int, str, or list of ints and/or strs, or one or more AsyncResult objects ints are indices to self.history strs are msg_ids default: wait on all outstanding messages timeout : float a time in seconds, after which to give up. default is -1, which means no timeout Returns ------- True : when all msg_ids are done False : timeout reached, some msg_ids still outstanding
375,491
def argument_run(self, sp_r):
    arg_run = []
    for line in sp_r:
        logging.debug("argument run: handling: " + str(line))
        if line[1] == "data":
            arg_run.append(
                (line[0], line[1], line[2], line[2].get_words(line[3])))
            continue
        if line[1] == "command":
            self.checkargs(line[0], line[2], line[3])
            arg_run.append(
                (line[0], line[1], line[2],
                 [a for a in self.convert_args(line[2], line[3])]))
    return arg_run
.. _argument_run:

Converts arguments according to ``to_int``.
375,492
def spawn(self, args, executable=None, stdin=None, stdout=None, stderr=None,
          shell=False, cwd=None, env=None, flags=0, extra_handles=None):
    # NOTE: stripped string literals below were reconstructed from the
    # docstring and common pyuv conventions; the RuntimeError message
    # and the hasattr(os, 'fork') Unix check are plausible fills.
    if self._process:
        raise RuntimeError('child process already spawned')
    self._child_exited.clear()
    self._closed.clear()
    self._exit_status = None
    self._term_signal = None
    hub = get_hub()
    if isinstance(args, str):
        args = [args]
        flags |= pyuv.UV_PROCESS_WINDOWS_VERBATIM_ARGUMENTS
    else:
        args = list(args)
    if shell:
        if hasattr(os, 'fork'):
            # Unix: default shell per the docstring
            if executable is None:
                executable = '/bin/sh'
            args = [executable, '-c'] + args
        else:
            # Windows: use %COMSPEC%, falling back to cmd.exe
            if executable is None:
                executable = os.environ.get('COMSPEC', 'cmd.exe')
            args = [executable, '/c'] + args
    if executable is None:
        executable = args[0]
    kwargs = {}
    if env is not None:
        kwargs['env'] = env
    if cwd is not None:
        kwargs['cwd'] = cwd
    kwargs['flags'] = flags
    handles = self._get_child_handles(hub.loop, stdin, stdout, stderr, extra_handles)
    kwargs['stdio'] = handles
    process = pyuv.Process.spawn(hub.loop, args, executable,
                                 exit_callback=self._on_child_exit, **kwargs)
    if handles[0].stream:
        self._stdin = self._connect_child_handle(handles[0])
    if handles[1].stream:
        self._stdout = self._connect_child_handle(handles[1])
    if handles[2].stream:
        self._stderr = self._connect_child_handle(handles[2])
    self._process = process
Spawn a new child process.

The executable to spawn and its arguments are determined by *args*,
*executable* and *shell*.

When *shell* is set to ``False`` (the default), *args* is normally a
sequence and it contains both the program to execute (at index 0), and
its arguments.

When *shell* is set to ``True``, then *args* is normally a string and
it indicates the command to execute through the shell.

The *executable* argument can be used to override the executable to
execute. If *shell* is ``False``, it overrides ``args[0]``. This is
sometimes used on Unix to implement "fat" executables that behave
differently based on argv[0]. If *shell* is ``True``, it overrides the
shell to use. The default shell is ``'/bin/sh'`` on Unix, and the value
of $COMSPEC (or ``'cmd.exe'`` if it is unset) on Windows.

The *stdin*, *stdout* and *stderr* arguments specify how to handle
standard input, output, and error, respectively. If set to None, then
the child will inherit our respective stdio handle. If set to the
special constant ``PIPE`` then a pipe is created. The pipe will be
connected to a :class:`gruvi.StreamProtocol` which you can use to read
or write from it. The stream protocol instance is available under
either :attr:`stdin`, :attr:`stdout` or :attr:`stderr`. All 3 stdio
arguments can also be a file descriptor, a file-like object, or a pyuv
``Stream`` instance.

The *extra_handles* specifies any extra handles to pass to the client.
It must be a sequence where each element is either a file descriptor, a
file-like object, or a ``pyuv.Stream`` instance. The position in the
sequence determines the file descriptor in the client. The first
position corresponds to FD 3, the second to 4, etc. This places these
file descriptors directly after the stdio handles.

The *cwd* argument specifies the directory to change to before
executing the child. If not provided, the current directory is used.

The *env* argument specifies the environment to use when executing the
child. If provided, it must be a dictionary. By default, the current
environment is used.

The *flags* argument can be used to specify optional libuv
``uv_process_flags``. The only relevant flags are
``pyuv.UV_PROCESS_DETACHED`` and ``pyuv.UV_PROCESS_WINDOWS_HIDE``. Both
are Windows specific and are silently ignored on Unix.
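A hedged usage sketch based on the docstring; the gruvi import names and the wait()/readline() calls are assumptions about the surrounding API:

from gruvi import PIPE            # assumed export
from gruvi.process import Process  # assumed module path

proc = Process()
proc.spawn(['echo', 'hello'], stdout=PIPE)
line = proc.stdout.readline()   # stdout is a StreamProtocol per the docstring
proc.wait()                     # assumed helper to await child exit
print(line)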
375,493
def replace_surrogate_encode(mystring, exc):
    decoded = []
    for ch in mystring:
        code = ord(ch)
        if not 0xD800 <= code <= 0xDCFF:
            # Not a surrogate: re-raise the original codec error.
            raise exc
        if 0xDC00 <= code <= 0xDC7F:
            # Low surrogate escaping an ASCII byte (0x00-0x7F).
            decoded.append(_unichr(code - 0xDC00))
        elif code <= 0xDCFF:
            # Low surrogate escaping a high byte (0x80-0xFF).
            decoded.append(_unichr(code - 0xDC00))
        else:
            raise NotASurrogateError
    return str().join(decoded)
Returns a (unicode) string, not the more logical bytes, because the codecs register_error functionality expects this.
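A self-contained illustration of the surrogate convention this handler decodes, using Python 3's built-in 'surrogateescape' (independent of the _unichr helper above):

# Undecodable bytes 0x80-0xFF become codepoints U+DC80-U+DCFF, which is
# exactly the range this handler maps back by subtracting 0xDC00.
raw = b'abc\xff'
text = raw.decode('ascii', errors='surrogateescape')
print(hex(ord(text[-1])))                                      # -> 0xdcff
print(text.encode('ascii', errors='surrogateescape') == raw)   # -> True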
375,494
def _interception(self, joinpoint):
    if self.pre_cond is not None:
        self.pre_cond(joinpoint)
    result = joinpoint.proceed()
    if self.post_cond is not None:
        joinpoint.exec_ctx[Condition.RESULT] = result
        self.post_cond(joinpoint)
    return result
Intercept a call to the joinpoint callee, applying pre/post conditions around it.
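A self-contained sketch of the pre/post-condition idea outside the interception framework (names below are illustrative):

def with_conditions(func, pre_cond=None, post_cond=None):
    """Wrap func so conditions run before and after each call."""
    def wrapper(*args, **kwargs):
        if pre_cond is not None:
            pre_cond(args, kwargs)   # may raise to veto the call
        result = func(*args, **kwargs)
        if post_cond is not None:
            post_cond(result)        # may raise on a bad result
        return result
    return wrapper

def non_negative(args, kwargs):
    if args[0] < 0:
        raise ValueError("precondition failed: x must be >= 0")

checked_sqrt = with_conditions(lambda x: x ** 0.5, pre_cond=non_negative)
print(checked_sqrt(9.0))   # -> 3.0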
375,495
import scipy as sp
import scipy.special   # importing scipy alone does not expose sp.special

def Betainc(a, b, x):
    # Regularized incomplete beta function; the trailing comma makes this
    # return a 1-tuple, matching the op wrapper convention used here.
    return sp.special.betainc(a, b, x),
Regularized, incomplete beta op (the code wraps betainc, not a gamma function).
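A quick sanity check with scipy; I_x(a, b) with a = b = 1 reduces to x, and the wrapper returns a 1-tuple per the op convention:

import scipy.special

print(scipy.special.betainc(1.0, 1.0, 0.37))   # -> 0.37
print(Betainc(2.0, 3.0, 0.5))                  # -> (0.6875,)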
375,496
def merge_insert(ins_chunks, doc):
    # Reconstructed from context (this helper originates in lxml.html.diff);
    # the extracted sample had lost the lines defining balanced and
    # unbalanced_end as well as the markup string literals.
    unbalanced_start, balanced, unbalanced_end = split_unbalanced(ins_chunks)
    doc.extend(unbalanced_start)
    if doc and not doc[-1].endswith(' '):
        # Fix up the case where the word before the insert didn't end
        # with a space.
        doc[-1] += ' '
    doc.append('<ins>')
    if balanced and balanced[-1].endswith(' '):
        # Move the trailing space outside of the </ins> tag.
        balanced[-1] = balanced[-1][:-1]
    doc.extend(balanced)
    doc.append('</ins> ')
    doc.extend(unbalanced_end)
doc is the already-handled document (as a list of text chunks); here we add <ins>ins_chunks</ins> to the end of that.
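A usage sketch assuming the reconstructed body above; chunk strings carry trailing spaces, following lxml.html.diff's chunk convention:

doc = ['Hello ']
merge_insert(['brave ', 'new '], doc)
print(''.join(doc))   # -> 'Hello <ins>brave new</ins> '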
375,497
def get_permission_required(cls):
    if cls.permission_required is None:
        raise ImproperlyConfigured(
            "{0} is missing the permission_required attribute. "
            "Define {0}.permission_required, or override "
            "{0}.get_permission_required().".format(cls.__name__)
        )
    if isinstance(cls.permission_required, six.string_types):
        if cls.permission_required != "":
            perms = (cls.permission_required,)
        else:
            perms = ()
    else:
        perms = cls.permission_required
    return perms
Get permission required property. Must return an iterable.
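A hypothetical sketch of the attribute this method normalizes; with the real mixin applied, the call would be ReportView.get_permission_required():

class ReportView(object):
    """Hypothetical view-like class carrying the attribute this method reads."""
    permission_required = "reports.view_report"

# a string is normalized to a 1-tuple
print(get_permission_required(ReportView))   # -> ('reports.view_report',)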
375,498
def collapse(self, indices, values):
    # NOTE: the dtype/casting arguments and the error-message strings were
    # stripped from this sample; the versions below are reconstructions.
    indices = np.atleast_1d(indices).astype('int64', casting='safe')
    values = np.atleast_1d(values)
    if len(indices) != len(values):
        raise ValueError('lengths of indices {} and values {} do not match '
                         '({} != {})'.format(indices, values,
                                             len(indices), len(values)))
    for axis, index in enumerate(indices):
        # The extracted check allowed index == self.ndim, which is out of
        # range; the valid range is 0 ... self.ndim - 1 as the message states.
        if not 0 <= index < self.ndim:
            raise IndexError('in axis {}: index {} out of the valid range '
                             '0 ... {}'.format(axis, index, self.ndim - 1))
    if np.any(values < self.min_pt[indices]):
        raise ValueError('values {} lie below the lower bounds {}'
                         ''.format(values, self.min_pt[indices]))
    if np.any(values > self.max_pt[indices]):
        raise ValueError('values {} lie above the upper bounds {}'
                         ''.format(values, self.max_pt[indices]))
    b_new = self.min_pt.copy()
    b_new[indices] = values
    e_new = self.max_pt.copy()
    e_new[indices] = values
    return IntervalProd(b_new, e_new)
Partly collapse the interval product to single values.

Note that no changes are made in-place.

Parameters
----------
indices : int or sequence of ints
    The indices of the dimensions along which to collapse.
values : `array-like` or float
    The values to which to collapse. Must have the same length as
    ``indices``. Values must lie within the interval boundaries.

Returns
-------
collapsed : `IntervalProd`
    The collapsed set.

Examples
--------
>>> min_pt, max_pt = [-1, 0, 2], [-0.5, 1, 3]
>>> rbox = IntervalProd(min_pt, max_pt)
>>> rbox.collapse(1, 0)
IntervalProd([-1., 0., 2.], [-0.5, 0. , 3. ])
>>> rbox.collapse([1, 2], [0, 2.5])
IntervalProd([-1. , 0. , 2.5], [-0.5, 0. , 2.5])
375,499
def prepare(self, strict=True):
    # NOTE: the attribute-name string literals ('schemes', 'definitions',
    # 'schema') were stripped from this sample and are restored here from
    # the surrounding context.
    self.__root = self.prepare_obj(self.raw, self.__url)
    self.validate(strict=strict)
    if hasattr(self.__root, 'schemes') and self.__root.schemes:
        if len(self.__root.schemes) > 0:
            self.__schemes = self.__root.schemes
        else:
            # fall back to the scheme of the URL the spec was loaded from;
            # the extracted call was garbled (urlparse(...).scheme is meant)
            self.__schemes = [six.moves.urllib.parse.urlparse(self.__url).scheme]
    s = Scanner(self)
    s.scan(root=self.__root, route=[Merge()])
    s.scan(root=self.__root, route=[PatchObject()])
    s.scan(root=self.__root, route=[Aggregate()])
    tr = TypeReduce(self.__sep)
    cy = CycleDetector()
    s.scan(root=self.__root, route=[tr, cy])
    self.__op = utils.ScopeDict(tr.op)
    if hasattr(self.__root, 'definitions') and self.__root.definitions is not None:
        self.__m = utils.ScopeDict(self.__root.definitions)
    else:
        self.__m = utils.ScopeDict({})
    self.__m.sep = self.__sep
    self.__op.sep = self.__sep
    if len(cy.cycles['schema']) > 0 and strict:
        raise errs.CycleDetectionError(
            'Cycles detected in Schema Object: {0}'.format(cy.cycles['schema']))
Preparation for the loaded JSON spec.

:param bool strict: when in strict mode, an exception is raised if the
    spec is not valid.
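A hedged usage sketch: in pyswagger, prepare() runs as part of loading a spec via App.create (the URL and operationId below are illustrative):

from pyswagger import App

app = App.create('http://petstore.swagger.io/v2/swagger.json')
print(app.schemes)            # schemes resolved during prepare()
print(app.op['getPetById'])   # the ScopeDict built from TypeReduce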