Column schema (dataset viewer summary):
Unnamed: 0: int64, values 0 to 389k
code: string, lengths 26 to 79.6k
docstring: string, lengths 1 to 46.9k
387,500
def calc_custom(custom, genome, scaffold, sequence, scaffold_coverage, total_bases):
    index = 0
    if scaffold in scaffold_coverage:
        if genome not in custom:
            custom[genome] = [[] for i in scaffold_coverage[scaffold]]
        for cov in scaffold_coverage[scaffold]:
            length = float(len(sequence[1]))
            bases = cov * length
            custom_value = (bases / total_bases[index]) / length
            custom[genome][index].append(custom_value)
            index += 1
    return custom
custom = {(reads mapped to scaffold)/(total reads for sample)}/(length of scaffold)
387,501
def get_reports_by_type(self, account_id, report_type):
    url = ACCOUNTS_API.format(account_id) + "/reports/{}".format(report_type)
    reports = []
    for datum in self._get_resource(url):
        datum["account_id"] = account_id
        reports.append(Report(data=datum))
    return reports
Shows all reports of the passed report_type that have been run for the canvas account id. https://canvas.instructure.com/doc/api/account_reports.html#method.account_reports.index
387,502
def start(self):
    self.startTime = time.time()
    # the format-string literal passed to text= was lost in extraction
    self.configure(text=.format(0))
    self.update()
Starts the timer from zero
387,503
def finalized_canonical_averages_dtype(spanning_cluster=True):
    # NOTE: the (field name, dtype) string literals were lost in extraction;
    # only the structure of the field list survives
    fields = list()
    fields.extend([
        (, ),
        (, ),
        (, ),
    ])
    if spanning_cluster:
        fields.extend([
            (, ),
            (, ),
            (, ),
        ])
    fields.extend([
        (, ),
        (, ),
        (, ),
        (, ),
        (, ),
        (, ),
    ])
    return _ndarray_dtype(fields)
The NumPy Structured Array type for finalized canonical averages over several runs Helper function Parameters ---------- spanning_cluster : bool, optional Whether to detect a spanning cluster or not. Defaults to ``True``. Returns ------- ret : list of pairs of str A list of tuples of field names and data types to be used as ``dtype`` argument in numpy ndarray constructors See Also -------- http://docs.scipy.org/doc/numpy/user/basics.rec.html canonical_averages_dtype
387,504
async def proxy_new(connection, flags, info, name, object_path, interface_name):
    future = Future()
    cancellable = None
    Gio.DBusProxy.new(
        connection,
        flags,
        info,
        name,
        object_path,
        interface_name,
        cancellable,
        gio_callback,
        future,
    )
    result = await future
    value = Gio.DBusProxy.new_finish(result)
    if value is None:
        raise RuntimeError("Failed to connect DBus object!")
    return value
Asynchronously create a DBus proxy object.
387,505
def sgd(grad, x, callback=None, num_iters=200, step_size=0.1, mass=0.9):
    velocity = np.zeros(len(x))
    for i in range(num_iters):
        g = grad(x, i)
        if callback:
            callback(x, i, g)
        velocity = mass * velocity - (1.0 - mass) * g
        x = x + step_size * velocity
    return x
Stochastic gradient descent with momentum. grad() must have signature grad(x, i), where i is the iteration number.
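A minimal usage sketch (the quadratic objective and its gradient are illustrative, not from the source): minimizing f(x) = sum(x**2), whose gradient is 2x at every iteration.

import numpy as np

def toy_grad(x, i):  # gradient of f(x) = sum(x**2); the iteration index i is unused
    return 2 * x

x_min = sgd(toy_grad, np.array([5.0, -3.0]), num_iters=500)
# x_min ends up near the zero vector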
387,506
def positions_func(inputs, pad=0):
    position_inputs = kb.cumsum(kb.ones_like(inputs, dtype="float32"), axis=1)
    position_inputs *= kb.cast(kb.not_equal(inputs, pad), "float32")
    return kb.log(1.0 + position_inputs)
A layer filling i-th column of a 2D tensor with 1+ln(1+i) when it contains a meaningful symbol and with 0 when it contains PAD
387,507
def itertypes(iterable):
    seen = set()
    for entry in iterable:
        if isinstance(entry, tuple):
            for type_ in entry:
                if type_ not in seen:
                    seen.add(type_)
                    yield type_
        else:
            if entry not in seen:
                seen.add(entry)
                yield entry
Iterates over an iterable containing either type objects or tuples of type objects and yields once for every type object found.
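A short illustration (hypothetical input) of the flattening and de-duplication behavior:

mixed = [int, (str, int), float, (float, bytes)]
assert list(itertypes(mixed)) == [int, str, float, bytes]  # each type yielded once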
387,508
def atlas_peer_update_health(peer_hostport, received_response, peer_table=None):
    with AtlasPeerTableLocked(peer_table) as ptbl:
        if peer_hostport not in ptbl.keys():
            return False
        now = time_now()
        new_times = []
        # the dict key used to index the contact-time list was lost in extraction
        for (t, r) in ptbl[peer_hostport][]:
            if t + atlas_peer_lifetime_interval() < now:
                continue
            new_times.append((t, r))
        new_times.append((now, received_response))
        ptbl[peer_hostport][] = new_times
        return True
Mark the given peer as alive at this time. Update times at which we contacted it, and update its health score. Use the global health table by default, or use the given health info if set.
387,509
def analyze(self):
    # NOTE: several string literals (dtype name, result key, machine-model keys)
    # were lost in extraction
    precision = if self.kernel.datatype == else
    self.calculate_cache_access()
    self.results[] = self.conv_perf(
        self.machine[] * self.cores * self.machine[][precision][])
Run analysis.
387,510
def close(self):
    if not self.connected:
        return None
    if self.config is not None:
        if self.config.changed() and not self.config.committed():
            try:
                self.config.discard()
            except pyPluribus.exceptions.ConfigurationDiscardError as discarderr:
                raise pyPluribus.exceptions.ConnectionError(
                    "Could not discard the configuration: {err}".format(err=discarderr))
    self._connection.close()
    self.config = None
    self._connection = None
    self.connected = False
Closes the SSH connection if the connection is UP.
387,511
def md5sum(self):
    # the command template was lost in extraction; given the format arguments it
    # was presumably something like "md5sum {dir}/{bin}"; the key selecting the
    # command output from the result dict was also lost
    cmd = .format(dir=self.DESTDIR, bin=self.image)
    run = self.device.api.exec_opcmd
    try:
        got = run(cmd)
        return got.get().strip()
    except:
        return None
Check to see if the file exists on the device :return:
387,512
def coordinator(self):
    if self.coordinator_id is None:
        return None
    elif self._client.is_disconnected(self.coordinator_id):
        self.coordinator_dead()
        return None
    else:
        return self.coordinator_id
Get the current coordinator Returns: the current coordinator id or None if it is unknown
387,513
def _setup_transport(self):
    if HAVE_PY26_SSL:
        if hasattr(self, 'sslopts'):  # attribute name restored from the usage below
            self.sslobj = ssl.wrap_socket(self.sock, **self.sslopts)
        else:
            self.sslobj = ssl.wrap_socket(self.sock)
        self.sslobj.do_handshake()
    else:
        self.sslobj = socket.ssl(self.sock)
Wrap the socket in an SSL object, either the new Python 2.6 version, or the older Python 2.5 and lower version.
387,514
def hash(self):
    hashed = super(RandomBits, self).hash()
    return khash(hashed, self._min_length, self._max_length,
                 self._num_mutations, self._step, self._seed)
:rtype: int :return: hash of the field
387,515
def get_jobs_events_from_sequence(user, sequence):
    args = schemas.args(flask.request.args.to_dict())
    if user.is_not_super_admin():
        raise dci_exc.Unauthorized()
    query = sql.select([models.JOBS_EVENTS]). \
        select_from(models.JOBS_EVENTS.join(
            models.JOBS, models.JOBS.c.id == models.JOBS_EVENTS.c.job_id)). \
        where(_TABLE.c.id >= sequence)
    # 'sort', 'limit' and 'offset' are restored from how args is used below;
    # the sort default and the JSON response keys were lost in extraction
    sort_list = v1_utils.sort_query(args['sort'], _JOBS_EVENTS_COLUMNS, default=)
    query = v1_utils.add_sort_to_query(query, sort_list)
    if args.get('limit', None):
        query = query.limit(args.get('limit'))
    if args.get('offset', None):
        query = query.offset(args.get('offset'))
    rows = flask.g.db_conn.execute(query).fetchall()
    query_nb_rows = sql.select([func.count(models.JOBS_EVENTS.c.id)])
    nb_rows = flask.g.db_conn.execute(query_nb_rows).scalar()
    return json.jsonify({: rows, : {: nb_rows}})
Get all the jobs events from a given sequence number.
387,516
def id_pools_ipv4_subnets(self):
    if not self.__id_pools_ipv4_subnets:
        self.__id_pools_ipv4_subnets = IdPoolsIpv4Subnets(self.__connection)
    return self.__id_pools_ipv4_subnets
Gets the IdPoolsIpv4Subnets API client. Returns: IdPoolsIpv4Subnets:
387,517
def create_user(user, name, create=None):
    if connexion.request.is_json:
        create = Create.from_dict(connexion.request.get_json())
    # the role name and the error-message literals were lost in extraction
    response = errorIfUnauthorized(role=)
    if response:
        return response
    else:
        response = ApitaxResponse()
    driver: Driver = LoadedDrivers.getDriver(name)
    user: User = mapUserToUser(create.script)
    if driver.createApitaxUser(user):
        return Response(status=200, body=response.getResponseBody())
    return ErrorResponse(status=500, message=)
Create a new script # noqa: E501 :param user: Get user with this name :type user: str :param name: Get status of a driver with this name :type name: str :param create: The data needed to create this user :type create: dict | bytes :rtype: Response
387,518
def _grab_concretization_results(cls, state):
    if cls._should_add_constraints(state):
        addr = state.inspect.address_concretization_expr
        result = state.inspect.address_concretization_result
        if result is None:
            l.warning("addr concretization result is None")
            return
        state.preconstrainer.address_concretization.append((addr, result))
Grabs the concretized result so we can add the constraint ourselves.
387,519
def f_i18n_citation_type(string, lang="eng"):
    s = " ".join(string.strip("%").split("|"))
    return s.capitalize()
Take a string of the form %citation_type|passage% and format it for humans. :param string: String of the form %citation_type|passage% :param lang: Language to translate to :return: Human-readable string .. note :: To Do : Use i18n tools and provide real i18n
387,520
async def get_friendly_name(self) -> Text:
    u = await self._get_user()
    # key and default literals restored from the docstring: Facebook first/last name
    f = u.get('first_name', '').strip()
    l = u.get('last_name', '').strip()
    return f or l
The friendly name is mapped to Facebook's first name. If the first name is missing, use the last name.
387,521
def _adjust_prt_flds(self, kws_xlsx, desc2nts, shade_hdrgos):
    if "prt_flds" in kws_xlsx:
        return kws_xlsx["prt_flds"]
    # the names of the info fields to skip were lost in extraction
    dont_print = set([, , ])
    prt_flds_adjusted = []
    nt_flds = self.sortobj.get_fields(desc2nts)
    for nt_fld in nt_flds:
        if nt_fld not in dont_print:
            if nt_fld == "format_txt":
                if shade_hdrgos is True:
                    prt_flds_adjusted.append(nt_fld)
            else:
                prt_flds_adjusted.append(nt_fld)
    kws_xlsx["prt_flds"] = prt_flds_adjusted
Print user-requested fields or provided fields minus info fields.
387,522
def midi(self):
    result = int(round(12 * log2(self.frequency / 440) + 69))
    if 0 <= result < 128:
        return result
    # the error-message template was lost in extraction
    raise ValueError( % self.frequency)
Return the (nearest) MIDI note to the tone's frequency. This will be an integer number in the range 0 to 127. If the frequency is outside the range represented by MIDI notes (which is approximately 8 Hz to 12.5 kHz), a :exc:`ValueError` exception will be raised.
387,523
def _send(self, javascript):
    # the message template was lost in extraction
    message = % (id(self), javascript)
    response = self.connection.send(message)
    return self._handle_response(response)
Establishes a socket connection to the zombie.js server and sends Javascript instructions. :param js: the Javascript string to execute
387,524
def refresh(self):
    # the request id and output-format literals were lost in extraction;
    # 'devices' and 'id' are implied by the loop below
    j = self.vera_request(id=, output_format=).json()
    devices = j.get('devices')
    for device_data in devices:
        if device_data.get('id') == self.device_id:
            self.update(device_data)
Refresh the dev_info data used by get_value. Only needed if you're not using subscriptions.
387,525
def similarity_by_path(sense1: "wn.Synset", sense2: "wn.Synset", option: str = "path") -> float:
    if option.lower() in ["path", "path_similarity"]:
        return max(wn.path_similarity(sense1, sense2, if_none_return=0),
                   wn.path_similarity(sense2, sense1, if_none_return=0))
    elif option.lower() in ["wup", "wupa", "wu-palmer"]:
        return max(wn.wup_similarity(sense1, sense2, if_none_return=0),
                   wn.wup_similarity(sense2, sense1, if_none_return=0))
    elif option.lower() in ["lch", "leacock-chordorow"]:  # 'lch' restored from the docstring
        if sense1.pos != sense2.pos:  # lch can't compare different parts of speech
            return 0
        return wn.lch_similarity(sense1, sense2, if_none_return=0)
Returns maximum path similarity between two senses. :param sense1: A synset. :param sense2: A synset. :param option: String, one of ('path', 'wup', 'lch'). :return: A float, similarity measurement.
387,526
def markdown_changelog(version: str, changelog: dict, header: bool = False) -> str:
    # the debug/header/section/item format-string literals were lost in extraction
    debug(.format(version, header))
    output = ''
    if header:
        output += .format(version)
    for section in CHANGELOG_SECTIONS:
        if not changelog[section]:
            continue
        output += .format(section.capitalize())
        for item in changelog[section]:
            output += .format(item[1], item[0])
    return output
Generates a markdown version of the changelog. Takes a parsed changelog dict from generate_changelog. :param version: A string with the version number. :param changelog: A dict from generate_changelog. :param header: A boolean that decides whether a header should be included or not. :return: The markdown formatted changelog.
387,527
def lastmod(self, tag):
    # the order_by/only field literals were lost in extraction; presumably the
    # modification-date field, given the return value below
    lastitems = EntryModel.objects.published().order_by('-modification_date') \
        .filter(tags=tag).only('modification_date')
    return lastitems[0].modification_date
Return the last modification of the entry.
387,528
def MeshArrows(*inputobj, **options):
    s = options.pop("s", None)
    scale = options.pop("scale", 1)
    c = options.pop("c", "gray")
    alpha = options.pop("alpha", 1)
    res = options.pop("res", 12)
    mesh, u = _inputsort(inputobj)
    startPoints = mesh.coordinates()
    u_values = np.array([u(p) for p in mesh.coordinates()])
    if not utils.isSequence(u_values[0]):
        printc("~times Error: cannot show Arrows for 1D scalar values!", c=1)
        exit()
    endPoints = mesh.coordinates() + u_values
    if u_values.shape[1] == 2:  # 2D displacements: pad to 3D
        u_values = np.insert(u_values, 2, 0, axis=1)
        startPoints = np.insert(startPoints, 2, 0, axis=1)
        endPoints = np.insert(endPoints, 2, 0, axis=1)
    actor = shapes.Arrows(startPoints, endPoints, s=s, scale=scale,
                          c=c, alpha=alpha, res=res)
    actor.mesh = mesh
    actor.u = u
    actor.u_values = u_values
    return actor
Build arrows representing displacements. :param float s: cross-section size of the arrow :param float rescale: apply a rescaling factor to the length
387,529
def start_capture(self, adapter_number, output_file):
    try:
        adapter = self._ethernet_adapters[adapter_number]
    except IndexError:
        # the error-message template was lost in extraction
        raise QemuError(.format(name=self._name, adapter_number=adapter_number))
    nio = adapter.get_nio(0)
    if not nio:
        raise QemuError("Adapter {} is not connected".format(adapter_number))
    if nio.capturing:
        raise QemuError("Packet capture is already activated on adapter {adapter_number}".format(
            adapter_number=adapter_number))
    nio.startPacketCapture(output_file)
    if self.ubridge:
        # the ubridge command template was lost in extraction
        yield from self._ubridge_send(.format(
            name="QEMU-{}-{}".format(self._id, adapter_number), output_file=output_file))
    log.info("QEMU VM [{id}]: starting packet capture on adapter {adapter_number}".format(
        name=self.name, id=self.id, adapter_number=adapter_number))
Starts a packet capture. :param adapter_number: adapter number :param output_file: PCAP destination file for the capture
387,530
def snapshotToMovie(snap, filename, *args, **kwargs):
    # NOTE: the kwargs keys below are restored from the variable names they fill;
    # the temp-dir default, file extensions, and the exact ffmpeg/convert
    # argument strings were lost in extraction
    if kwargs.has_key('tmpdir'):
        tmpdir = kwargs['tmpdir']
        kwargs.pop('tmpdir')
    else:
        tmpdir =
    if kwargs.has_key('framerate'):
        framerate = kwargs['framerate']
        kwargs.pop('framerate')
    else:
        framerate = 25
    if kwargs.has_key('bitrate'):
        bitrate = kwargs['bitrate']
        kwargs.pop('bitrate')
    else:
        bitrate = 1000
    if kwargs.has_key('thumbnail') and kwargs['thumbnail']:
        thumbnail = True
        kwargs.pop('thumbnail')
    elif kwargs.has_key('thumbnail'):
        kwargs.pop('thumbnail')
        thumbnail = False
    else:
        thumbnail = False
    if kwargs.has_key('thumbsize'):
        thumbsize = kwargs['thumbsize']
    else:
        thumbsize = 300
    tempdir = tempfile.mkdtemp(dir=tmpdir)
    tmpfiles = []
    nsnap = len(snap)
    file_length = int(m.ceil(m.log10(nsnap)))
    if not kwargs.has_key():
        pass
    if not kwargs.has_key():
        pass
    for ii in range(nsnap):
        tmpfiles.append(os.path.join(tempdir, str(ii).zfill(file_length)))
        bovy_plot.bovy_print()
        snap[ii].plot(*args, **kwargs)
        bovy_plot.bovy_end_print(tmpfiles[ii] + )
        try:
            subprocess.check_call([, tmpfiles[ii] + , tmpfiles[ii] + ])
        except subprocess.CalledProcessError:
            print(" failed")
            raise subprocess.CalledProcessError
    try:
        subprocess.check_call([, , str(framerate), , str(bitrate), ,
                               os.path.join(tempdir, + % file_length), , filename])
        if thumbnail:
            thumbnameTemp = re.split(r, filename)
            thumbnameTemp = thumbnameTemp[0:len(thumbnameTemp) - 1]
            thumbname =
            for t in thumbnameTemp:
                thumbname += t
            thumbname +=
            subprocess.check_call([, , , , , filename, , , , , , , , ,
                                   % (thumbsize, thumbsize), thumbname])
    except subprocess.CalledProcessError:
        print(" failed")
        _cleanupMovieTempdir(tempdir)
        raise subprocess.CalledProcessError
    finally:
        _cleanupMovieTempdir(tempdir)
NAME: snapshotToMovie PURPOSE: turn a list of snapshots into a movie INPUT: snap - the snapshots (list) filename - name of the file to save the movie to framerate= in fps bitrate= ? thumbnail=False : create thumbnail image (filename-extension+.jpg) thumbsize= size of thumbnail +Snapshot.plot args and kwargs OUTPUT: movie is saved to file DEPENDENCIES: this procedure uses ffmpeg and convert BUGS: matplotlib's 'Agg' backend has a memory leak that prevents it from creating hundreds of figures. It is recommended to call import matplotlib matplotlib.use('PDF') at the beginning of the movie-creating script, as the PDF backend does not have the same memory leak. HISTORY: 2011-02-06 - Written - Bovy (NYU)
387,531
def cp(source, bucket, checksum, key_prefix):
    from .models import Bucket
    from .helpers import populate_from_path
    for object_version in populate_from_path(
            Bucket.get(bucket), source, checksum=checksum, key_prefix=key_prefix):
        click.secho(str(object_version))
    db.session.commit()
Create new bucket from all files in directory.
387,532
def parse_release_id(release_id):
    if "@" in release_id:
        release, base_product = release_id.split("@")
    else:
        release = release_id
        base_product = None
    result = _parse_release_id_part(release)
    if base_product is not None:
        result.update(_parse_release_id_part(base_product, prefix="bp_"))
    return result
Parse release_id to parts: {short, version, type} or {short, version, type, bp_short, bp_version, bp_type} :param release_id: Release ID string :type release_id: str :rtype: dict
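A sketch of the expected shape (the exact keys come from _parse_release_id_part, which is not shown; values here are illustrative):

# parse_release_id("product-1.0@base-2") splits on "@" and merges
# _parse_release_id_part("product-1.0") with
# _parse_release_id_part("base-2", prefix="bp_"),
# yielding a dict with keys like short/version/type plus bp_short/bp_version/bp_type.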
387,533
def member(Imported, **Config):
    __ec_member__ = Imported.__ec_member__
    __ec_member__.Config.update(**Config)
    state.ActiveModuleMemberQ.insert(0, __ec_member__)
r"""Helps with adding imported members to Scripts. Note: Config depends upon the Imported. It could be that of a **task** or a **group**.
387,534
def dumps(data, escape=False, **kwargs):
    # the kwargs key defaulted to True was lost in extraction
    if not in kwargs:
        kwargs[] = True
    converted = json.dumps(data, default=_converter, **kwargs)
    if escape:
        return cgi.escape(converted)
    return converted
A wrapper around `json.dumps` that can handle objects that json module is not aware. This function is aware of a list of custom serializers that can be registered by the API user, making it possible to convert any kind of object to types that the json library can handle.
387,535
def keys(self, name_start, name_end, limit=10):
    # literals restored from the method's purpose: validate the limit and issue
    # the SSDB 'keys' command
    limit = get_positive_integer('limit', limit)
    return self.execute_command('keys', name_start, name_end, limit)
Return a list of the top ``limit`` keys between ``name_start`` and ``name_end``. Similar to **Redis.KEYS**. .. note:: The range is (``name_start``, ``name_end``]. ``name_start`` isn't in the range, but ``name_end`` is. :param string name_start: The lower bound (not included) of keys to be returned; empty string ``''`` means -inf :param string name_end: The upper bound (included) of keys to be returned; empty string ``''`` means +inf :param int limit: number of elements that will be returned :return: a list of keys :rtype: list >>> ssdb.keys('set_x1', 'set_x3', 10) ['set_x2', 'set_x3'] >>> ssdb.keys('set_x ', 'set_xx', 3) ['set_x1', 'set_x2', 'set_x3'] >>> ssdb.keys('set_x ', '', 3) ['set_x1', 'set_x2', 'set_x3', 'set_x4'] >>> ssdb.keys('set_zzzzz ', '', ) []
387,536
def decrypt(self, key, data, mode, padding):
    if hasattr(key, "public_bytes"):
        raise NotImplementedError()
    try:
        return key.decrypt(data, padding.build())
    except Exception:
        error_message = "Decryption failed"
        _LOGGER.exception(error_message)
        raise DecryptionError(error_message)
Decrypt data using the supplied values. :param bytes key: Loaded decryption key :param bytes data: IV prepended to encrypted data :param JavaMode mode: Decryption mode to use (not used by :class:`JavaAsymmetricEncryptionAlgorithm`) :param JavaPadding padding: Padding mode to use :returns: Decrypted data :rtype: bytes
387,537
def _choose_random_direction(current_state_parts, batch_rank, seed=None):
    # the SeedStream salt literal was lost in extraction
    seed_gen = distributions.SeedStream(seed, salt=)
    rnd_direction_parts = [
        tf.random.normal(tf.shape(input=current_state_part),
                         dtype=tf.float32, seed=seed_gen())
        for current_state_part in current_state_parts
    ]
    sum_squares = sum(
        tf.reduce_sum(input_tensor=rnd_direction**2.,
                      axis=tf.range(batch_rank, tf.rank(rnd_direction)),
                      keepdims=True)
        for rnd_direction in rnd_direction_parts)
    rnd_direction_parts = [rnd_direction / tf.sqrt(sum_squares)
                           for rnd_direction in rnd_direction_parts]
    return rnd_direction_parts
Chooses a random direction in the event space.
387,538
def data(self):
    return self.model.state.entity_data(
        self.entity_type, self.entity_id, self._history_index)
The data dictionary for this entity.
387,539
def add_metric(self, labels, value, created=None, timestamp=None):
    # the sample-name suffixes were lost in extraction; '_total' and '_created'
    # match the Prometheus counter exposition convention
    self.samples.append(Sample(self.name + '_total',
                               dict(zip(self._labelnames, labels)), value, timestamp))
    if created is not None:
        self.samples.append(Sample(self.name + '_created',
                                   dict(zip(self._labelnames, labels)), created, timestamp))
Add a metric to the metric family. Args: labels: A list of label values value: The value of the metric created: Optional unix timestamp the child was created at.
387,540
def silence(cls, *modules, **kwargs):
    level = kwargs.pop("level", logging.WARNING)
    for mod in modules:
        name = mod.__name__ if hasattr(mod, "__name__") else mod
        logging.getLogger(name).setLevel(level)
Args: *modules: Modules, or names of modules to silence (by setting their log level to WARNING or above) **kwargs: Pass as kwargs due to python 2.7, would be level=logging.WARNING otherwise
387,541
def get_state_in_ec_string(self, ec_index, add_colour=True):
    with self._mutex:
        if ec_index >= len(self.owned_ecs):
            ec_index -= len(self.owned_ecs)
            if ec_index >= len(self.participating_ecs):
                raise exceptions.BadECIndexError(ec_index)
            state = self.participating_ec_states[ec_index]
        else:
            state = self.owned_ec_states[ec_index]
    # NOTE: the state labels and colour-attribute literals were lost in extraction
    if state == self.INACTIVE:
        result = , [, ]
    elif state == self.ACTIVE:
        result = , [, ]
    elif state == self.ERROR:
        result = , [, , ]
    elif state == self.UNKNOWN:
        result = , [, ]
    elif state == self.CREATED:
        result = , []
    if add_colour:
        return utils.build_attr_string(result[1], supported=add_colour) + \
            result[0] + utils.build_attr_string(, supported=add_colour)
    else:
        return result[0]
Get the state of the component in an execution context as a string. @param ec_index The index of the execution context to check the state in. This index is into the total array of contexts, that is both owned and participating contexts. If the value of ec_index is greater than the length of @ref owned_ecs, that length is subtracted from ec_index and the result used as an index into @ref participating_ecs.
387,542
def purge_docs(cls, app, env, docname):
    state = getattr(env, cls.directive_name, None)
    if state and docname in state.doc_names:
        state.doc_names.remove(docname)
Handler for Sphinx's env-purge-doc event. This event is emitted when all traces of a source file should be cleaned from the environment (that is, if the source file is removed, or before it is freshly read). This is for extensions that keep their own caches in attributes of the environment. For example, there is a cache of all modules on the environment. When a source file has been changed, the cache's entries for the file are cleared, since the module declarations could have been removed from the file.
387,543
def remove_tags(self, server, tags):
    uuid = str(server)
    tags = [str(tag) for tag in tags]
    # the URL template and the join separator were lost in extraction
    url = .format(uuid, .join(tags))
    return self.post_request(url)
Remove tags from a server. - server: Server object or UUID string - tags: list of Tag objects or strings
387,544
def add_template_filter(self, func: Callable, name: Optional[str] = None) -> None:
    self.jinja_env.filters[name or func.__name__] = func
Add a template filter. This is designed to be used on the application directly. An example usage, .. code-block:: python def to_upper(value): return value.upper() app.add_template_filter(to_upper) Arguments: func: The function that is the filter. name: The filter name (defaults to function name).
387,545
def update(self, date, data=None, inow=None):
    self.root.stale = False
    newpt = False
    if self.now == 0:
        newpt = True
    elif date != self.now:
        self._net_flows = 0
        self._last_price = self._price
        self._last_value = self._value
        self._last_fee = 0.0
        newpt = True
    self.now = date
    if inow is None:
        if self.now == 0:
            inow = 0
        else:
            inow = self.data.index.get_loc(date)
    val = self._capital
    if self.children is not None:
        for c in self._childrenv:
            if c._issec and not c._needupdate:
                continue
            c.update(date, data, inow)
            val += c.value
    if self.root == self:
        if (val < 0) and not self.bankrupt:
            self.bankrupt = True
            self.flatten()
    self._value = val
    # NOTE: the return computation was garbled in extraction; the surviving
    # fragment "(self._last_value, self._net_flows, self._value)" is consistent
    # with a guarded period-return calculation like the following
    bottom = self._last_value + self._net_flows
    if bottom != 0:
        ret = self._value / bottom - 1
    else:
        ret = 0
    self._price = self._last_price * (1 + ret)
    self._prices.values[inow] = self._price
    if self.children is not None:
        for c in self._childrenv:
            if c._issec and not c._needupdate:
                continue
            if val != 0:
                c._weight = c.value / val
            else:
                c._weight = 0.0
    if self._has_strat_children:
        for c in self._strat_children:
            self._universe.loc[date, c] = self.children[c].price
    self._cash.values[inow] = self._capital
    self._fees.values[inow] = self._last_fee
    if newpt and self._paper_trade:
        self._paper.update(date)
        self._paper.run()
        self._paper.update(date)
        self._price = self._paper.price
        self._prices.values[inow] = self._price
Update strategy. Updates prices, values, weight, etc.
387,546
def _load_plt(self, filename):
    g = gOpenMol.Plt()
    g.read(filename)
    grid, edges = g.histogramdd()
    self.__init__(grid=grid, edges=edges, metadata=self.metadata)
Initialize Grid from gOpenMol plt file.
387,547
def push(self, remote, branch=None):
    pb = ProgressBar()
    pb.setup(self.name, ProgressBar.Action.PUSH)
    if branch:
        result = remote.push(branch, progress=pb)
    else:
        result = remote.push(progress=pb)
    print()
    return result, pb.other_lines
Push a repository :param remote: git-remote instance :param branch: name of the branch to push :return: PushInfo, git push output lines
387,548
def scroll_one_line_up(event):
    w = find_window_for_buffer_name(event.cli, event.cli.current_buffer_name)
    b = event.cli.current_buffer
    if w:
        if w.render_info:
            info = w.render_info
            if w.vertical_scroll > 0:
                first_line_height = info.get_height_for_line(info.first_visible_line())
                cursor_up = info.cursor_position.y - (
                    info.window_height - 1 - first_line_height -
                    info.configured_scroll_offsets.bottom)
                for _ in range(max(0, cursor_up)):
                    b.cursor_position += b.document.get_cursor_up_position()
                w.vertical_scroll -= 1
scroll_offset -= 1
387,549
def _iter_interleaved_items(self, elements):
    subtotals = self._subtotals
    for subtotal in subtotals.iter_for_anchor("top"):
        yield subtotal
    for element in elements:
        yield element
        for subtotal in subtotals.iter_for_anchor(element.element_id):
            yield subtotal
    for subtotal in subtotals.iter_for_anchor("bottom"):
        yield subtotal
Generate element or subtotal items in interleaved order. This ordering corresponds to how value "rows" (or columns) are to appear after subtotals have been inserted at their anchor locations. Where more than one subtotal is anchored to the same location, they appear in their document order in the cube response. Only elements in the passed *elements* collection appear, which allows control over whether missing elements are included by choosing `.all_elements` or `.valid_elements`.
387,550
def parse_component_reference(self, node):
    # attribute keys restored from the variables they populate; the
    # error-message literals were lost in extraction
    if 'name' in node.lattrib:
        name = node.lattrib['name']
    else:
        self.raise_error( + )
    if 'type' in node.lattrib:
        type_ = node.lattrib['type']
    else:
        self.raise_error( + )
    if 'local' in node.lattrib:
        local = node.lattrib['local']
    else:
        local = None
    self.current_component_type.add_component_reference(
        ComponentReference(name, type_, local))
Parses <ComponentReference> @param node: Node containing the <ComponentTypeRef> element @type node: xml.etree.Element
387,551
def qos_map_cos_mutation_cos5(self, **kwargs):
    config = ET.Element("config")
    qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
    map = ET.SubElement(qos, "map")
    cos_mutation = ET.SubElement(map, "cos-mutation")
    name_key = ET.SubElement(cos_mutation, "name")
    name_key.text = kwargs.pop('name')  # kwargs keys restored from the element names
    cos5 = ET.SubElement(cos_mutation, "cos5")
    cos5.text = kwargs.pop('cos5')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
387,552
def findunique(lst, key):
    return sorted(set([item[key.lower()] for item in lst]))
Find all unique key values for items in lst. Parameters ---------- lst: list A list of composite dictionaries e.g. ``layers``, ``classes`` key: string The key name to search each dictionary in the list Returns ------- list A sorted Python list of unique keys in the list Example ------- To find all ``GROUP`` values for ``CLASS`` in a ``LAYER``:: s = ''' LAYER CLASS GROUP "group1" NAME "Class1" COLOR 0 0 0 END CLASS GROUP "group2" NAME "Class2" COLOR 0 0 0 END CLASS GROUP "group1" NAME "Class3" COLOR 0 0 0 END END ''' d = mappyfile.loads(s) groups = mappyfile.findunique(d["classes"], "group") assert groups == ["group1", "group2"]
387,553
def force_clean(self, remove_rw=False, allow_lazy=False, retries=5, sleep_interval=0.5):
    while True:
        try:
            self.clean(remove_rw=remove_rw, allow_lazy=allow_lazy)
        except ImageMounterError:
            if retries == 0:
                raise
            retries -= 1
            time.sleep(sleep_interval)
        else:
            return
Attempts to call the clean method, but will retry automatically if an error is raised. When the attempts run out, it will raise the last error. Note that the method will only catch :class:`ImageMounterError` exceptions. :param bool remove_rw: indicates whether a read-write cache should be removed :param bool allow_lazy: indicates whether lazy unmounting is allowed :param retries: Maximum amount of retries while unmounting :param sleep_interval: The sleep interval between attempts. :raises SubsystemError: when one of the underlying commands fails. Some are swallowed. :raises CleanupError: when actual cleanup fails. Some are swallowed.
387,554
def generate_iv_for_export(self, client_random, server_random, con_end, read_or_write, req_len):
    s = con_end + read_or_write
    s = (s == "clientwrite" or s == "serverread")
    if self.tls_version < 0x0300:
        return None
    elif self.tls_version == 0x0300:
        if s:
            tbh = client_random + server_random
        else:
            tbh = server_random + client_random
        iv = _tls_hash_algs["MD5"]().digest(tbh)[:req_len]
    else:
        iv_block = self.prf("", b"IV block",
                            client_random + server_random, 2 * req_len)
        if s:
            iv = iv_block[:req_len]
        else:
            iv = iv_block[req_len:]
    return iv
Generate IV for EXPORT ciphersuite, i.e. weakens it. An export IV generation example is given in section 6.3.1 of RFC 2246. See also page 86 of EKR's book.
387,555
def run(self):
    config = self.state.document.settings.env.config
    processes = get_processes(config.autoprocess_process_dir,
                              config.autoprocess_source_base_url)
    # NOTE: node attribute names and separator-text literals were lost in
    # extraction; 'type' and 'name' are implied by the grouping/sorting below
    processes.sort(key=itemgetter('type'))
    processes_by_types = {k: list(g) for k, g in groupby(processes, itemgetter('type'))}
    listnode = nodes.bullet_list()
    for typ in sorted(processes_by_types.keys()):
        par = nodes.paragraph()
        par += nodes.literal(typ, typ)
        par += nodes.Text()
        processes = sorted(processes_by_types[typ], key=itemgetter('name'))
        last_process = processes[-1]
        for process in processes:
            node = nodes.reference(, process['name'], internal=True)
            node[] = config.autoprocess_definitions_uri + + process[]
            node[] = process['name']
            par += node
            if process != last_process:
                par += nodes.Text()
        listnode += nodes.list_item(, par)
    return [listnode]
Create a type list.
387,556
def _debug_dump_dom(el):
    import xml.dom.minidom
    s = [el.nodeName]
    att_container = el.attributes
    # the format-string and join-separator literals were lost in extraction
    for i in range(att_container.length):
        attr = att_container.item(i)
        s.append(.format(a=attr.name, v=attr.value))
    for c in el.childNodes:
        if c.nodeType == xml.dom.minidom.Node.TEXT_NODE:
            s.append(.format(a=c.nodeName, d=c.data))
        else:
            s.append(.format(a=c.nodeName))
    return .join(s)
Debugging helper. Prints out `el` contents.
387,557
def analyze(self, text, tokenizer=str.split):
    if not self.tagger:
        self.tagger = Tagger(self.model, preprocessor=self.p, tokenizer=tokenizer)
    return self.tagger.analyze(text)
Analyze text and return pretty format. Args: text: string, the input text. tokenizer: Tokenize input sentence. Default tokenizer is `str.split`. Returns: res: dict.
387,558
def projective_measurement_constraints(*parties):
    substitutions = {}
    # Handle the case when the input is a tuple or list wrapping all parties
    if isinstance(parties[0][0][0], list):
        parties = parties[0]
    # Idempotency and orthogonality within each measurement
    for party in parties:
        for measurement in party:
            for projector1 in measurement:
                for projector2 in measurement:
                    if projector1 == projector2:
                        substitutions[projector1**2] = projector1
                    else:
                        substitutions[projector1*projector2] = 0
                        substitutions[projector2*projector1] = 0
    # Projectors of different parties commute
    for n1 in range(len(parties)):
        for n2 in range(n1+1, len(parties)):
            for measurement1 in parties[n1]:
                for measurement2 in parties[n2]:
                    for projector1 in measurement1:
                        for projector2 in measurement2:
                            substitutions[projector2*projector1] = \
                                projector1*projector2
    return substitutions
Return a set of constraints that define projective measurements. :param parties: Measurements of different parties. :type A: list or tuple of list of list of :class:`sympy.physics.quantum.operator.HermitianOperator`. :returns: substitutions containing idempotency, orthogonality and commutation relations.
387,559
def correction(self, word):
    return max(self.candidates(word), key=self.word_probability)
The most probable correct spelling for the word Args: word (str): The word to correct Returns: str: The most likely candidate
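A usage sketch, assuming the surrounding class is a Norvig-style spell checker exposing candidates() and word_probability() (the instance and inputs are hypothetical):

# speller = SpellChecker()           # hypothetical constructor
# speller.correction("speling")      # -> "spelling", the highest-probability candidate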
387,560
def Read(self, timeout=None):
    if not self.Shown:
        self.Shown = True
        self.TrayIcon.show()
    if timeout is None:
        self.App.exec_()
    elif timeout == 0:
        self.App.processEvents()
    else:
        self.timer = start_systray_read_timer(self, timeout)
        self.App.exec_()
        if self.timer:
            stop_timer(self.timer)
    item = self.MenuItemChosen
    self.MenuItemChosen = TIMEOUT_KEY
    return item
Reads the context menu :param timeout: Optional. Any value other than None indicates a non-blocking read :return:
387,561
def system_info(query):
    proc = subprocess.Popen(["uname -o"], stdout=subprocess.PIPE, shell=True)
    (out, err) = proc.communicate()
    print "operating system : " + str(out),
    proc = subprocess.Popen(["uname"], stdout=subprocess.PIPE, shell=True)
    (out, err) = proc.communicate()
    print "kernel : " + str(out),
    proc = subprocess.Popen(["uname -r"], stdout=subprocess.PIPE, shell=True)
    (out, err) = proc.communicate()
    print "kernel release : " + str(out),
    proc = subprocess.Popen(["uname -m"], stdout=subprocess.PIPE, shell=True)
    (out, err) = proc.communicate()
    print "architecture : " + str(out),
    proc = subprocess.Popen(["uname -n"], stdout=subprocess.PIPE, shell=True)
    (out, err) = proc.communicate()
    print "network node name : " + str(out),
system_info(query) -- print system specific information like OS, kernel, architecture etc.
387,562
def put(self, key, value, cache=None, options={}):
    if cache is None:
        cache = self.name
    if cache is None:
        raise ValueError("Cache name must be set")
    if not isinstance(value, str_type) and not isinstance(value, int_types):
        value = json.dumps(value)
    options["value"] = value
    body = json.dumps(options)
    cache = quote_plus(cache)
    key = quote_plus(key)
    result = self.client.put("caches/%s/items/%s" % (cache, key), body,
                             {"Content-Type": "application/json"})
    return Item(cache=cache, key=key, value=value)
Query the server to set the key specified to the value specified in the specified cache. Keyword arguments: key -- the name of the key to be set. Required. value -- the value to set key to. Must be a string or JSON serialisable. Required. cache -- the cache to store the item in. Defaults to None, which uses self.name. If no name is set, raises a ValueError. options -- a dict of arguments to send with the request. See http://dev.iron.io/cache/reference/api/#put_item for more information on defaults and possible values.
387,563
def analyze_theory(V, x0list=[], plot=False):
    # NOTE: the result-dict keys and several print-format literals were lost in
    # extraction
    T = 4.
    ndT = 8.
    neval = 3e5
    nitn = 6
    alpha = 0.1
    integrand = PathIntegrand(V=V, T=T, ndT=ndT)
    integ = vegas.Integrator(integrand.region, alpha=alpha)
    integ(integrand, neval=neval, nitn=nitn / 2, alpha=2 * alpha)  # warm-up run
    integrand = PathIntegrand(V=V, x0list=x0list, T=T, ndT=ndT)
    results = integ(integrand, neval=neval, nitn=nitn, alpha=alpha)
    print(results.summary())
    E0 = -np.log(results[]) / T
    print( % (E0, results.Q))
    if len(x0list) <= 0:
        return E0
    psi2 = results[] / results[]
    print( % (, , ))
    print(27 * )
    for i, (x0i, psi2i) in enumerate(zip(x0list, psi2)):
        exact = np.exp(-x0i ** 2) / np.sqrt(np.pi)
        print("%5.1f %-12s %-10.5f" % (x0i, psi2i, exact))
    if plot:
        plot_results(E0, x0list, psi2, T)
    return E0
Extract ground-state energy E0 and psi**2 for potential V.
387,564
def makeLinearxFunc(self, mLvl, pLvl, MedShk, xLvl):
    pCount = mLvl.shape[1]
    MedCount = mLvl.shape[0]
    xFunc_by_pLvl_and_MedShk = []
    for i in range(pCount):
        temp_list = []
        pLvl_i = pLvl[0, i, 0]
        mLvlMin_i = self.BoroCnstNat(pLvl_i)
        for j in range(MedCount):
            m_temp = mLvl[j, i, :] - mLvlMin_i
            x_temp = xLvl[j, i, :]
            temp_list.append(LinearInterp(m_temp, x_temp))
        xFunc_by_pLvl_and_MedShk.append(deepcopy(temp_list))
    pLvl_temp = pLvl[0, :, 0]
    MedShk_temp = MedShk[:, 0, 0]
    xFuncUncBase = BilinearInterpOnInterp1D(xFunc_by_pLvl_and_MedShk, pLvl_temp, MedShk_temp)
    xFuncUnc = VariableLowerBoundFunc3D(xFuncUncBase, self.BoroCnstNat)
    return xFuncUnc
Constructs the (unconstrained) expenditure function for this period using bilinear interpolation (over permanent income and the medical shock) among an array of linear interpolations over market resources. Parameters ---------- mLvl : np.array Corresponding market resource points for interpolation. pLvl : np.array Corresponding permanent income level points for interpolation. MedShk : np.array Corresponding medical need shocks for interpolation. xLvl : np.array Expenditure points for interpolation, corresponding to those in mLvl, pLvl, and MedShk. Returns ------- xFuncUnc : BilinearInterpOnInterp1D Unconstrained total expenditure function for this period.
387,565
def split_by_percent(self, spin_systems_list):
    chunk_sizes = [int((i * len(spin_systems_list)) / 100) for i in self.plsplit]
    if sum(chunk_sizes) < len(spin_systems_list):
        difference = len(spin_systems_list) - sum(chunk_sizes)
        chunk_sizes[chunk_sizes.index(min(chunk_sizes))] += difference
    assert sum(chunk_sizes) == len(spin_systems_list), \
        "sum of chunk sizes must be equal to spin systems list length."
    intervals = self.calculate_intervals(chunk_sizes)
    chunks_of_spin_systems_by_percentage = [itertools.islice(spin_systems_list, *interval)
                                            for interval in intervals]
    return chunks_of_spin_systems_by_percentage
Split list of spin systems by specified percentages. :param list spin_systems_list: List of spin systems. :return: List of spin systems divided into sub-lists corresponding to specified split percentages. :rtype: :py:class:`list`
387,566
def forward_committor(T, A, B):
    X = set(range(T.shape[0]))
    A = set(A)
    B = set(B)
    AB = A.intersection(B)
    notAB = X.difference(A).difference(B)
    if len(AB) > 0:
        raise ValueError("Sets A and B have to be disjoint")
    L = T - eye(T.shape[0], T.shape[0])
    W = 1.0 * L
    # impose the boundary conditions u=0 on A and u=1 on B
    W = W.todok()
    W[list(A), :] = 0.0
    W.tocsr()
    W = W + coo_matrix((np.ones(len(A)), (list(A), list(A))), shape=W.shape).tocsr()
    W = W.todok()
    W[list(B), :] = 0.0
    W.tocsr()
    W = W + coo_matrix((np.ones(len(B)), (list(B), list(B))), shape=W.shape).tocsr()
    r = np.zeros(T.shape[0])
    r[list(B)] = 1.0
    u = spsolve(W, r)
    return u
r"""Forward committor between given sets. The forward committor u(x) between sets A and B is the probability for the chain starting in x to reach B before reaching A. Parameters ---------- T : (M, M) scipy.sparse matrix Transition matrix A : array_like List of integer state labels for set A B : array_like List of integer state labels for set B Returns ------- u : (M, ) ndarray Vector of forward committor probabilities Notes ----- The forward committor is a solution to the following boundary-value problem .. math:: \sum_j L_{ij} u_{j}=0 for i in X\(A u B) (I) u_{i}=0 for i \in A (II) u_{i}=1 for i \in B (III) with generator matrix L=(P-I).
387,567
def load_from_file(self, yamlfile, _override=True, _allow_undeclared=False):
    # the log/error message literals were lost in extraction; the 'yaml' module
    # key is implied by the safe_load/YAMLError usage
    self._logger.info(, yamlfile)
    try:
        parsed_yaml = self._modules['yaml'].safe_load(yamlfile.read())
    except self._modules['yaml'].YAMLError:
        self._logger.exception()
        raise self.ConfigurationInvalidError( % yamlfile)
    if not isinstance(parsed_yaml, dict):
        self.load_from_dict(parsed_yaml, _override=_override,
                            _allow_undeclared=_allow_undeclared)
Loads the configuration from a file. Parsed contents must be a single dict mapping config key to value. Args: yamlfile: The opened file object to load configuration from. See load_from_dict() for other args' descriptions. Raises: ConfigurationInvalidError: If configuration file can't be read, or can't be parsed as either YAML (or JSON, which is a subset of YAML).
387,568
def predict_proba(self, X):
    if self.n_features_ != X.shape[1]:
        raise ValueError("Number of features of the model must "
                         "match the input. Model n_features is {0} and "
                         "input n_features is {1}."
                         "".format(self.n_features_, X.shape[1]))
    n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators, self.n_jobs)
    all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
        delayed(_parallel_predict_proba)(
            self.estimators_[starts[i]:starts[i + 1]],
            self.estimators_features_[starts[i]:starts[i + 1]],
            X,
            self.n_classes_,
            self.combination,
            self.estimators_weight_[starts[i]:starts[i + 1]])
        for i in range(n_jobs))
    # NOTE: the combination-method name literals were lost in extraction
    if self.combination in [, ]:
        proba = sum(all_proba) / self.n_estimators
    elif self.combination in [, ]:
        proba = sum(all_proba)
    elif self.combination in [, , , ]:
        X_stacking = _create_stacking_set(self.estimators_, self.estimators_features_,
                                          self.estimators_weight_, X, self.combination)
        proba = self.f_staking.predict_proba(X_stacking)
    return proba
Predict class probabilities for X. The predicted class probabilities of an input sample is computed as the mean predicted class probabilities of the base estimators in the ensemble. If base estimators do not implement a ``predict_proba`` method, then it resorts to voting and the predicted class probabilities of an input sample represents the proportion of estimators predicting each class. Parameters ---------- X : {array-like, sparse matrix} of shape = [n_samples, n_features] The training input samples. Sparse matrices are accepted only if they are supported by the base estimator. Returns ------- p : array of shape = [n_samples, n_classes] The class probabilities of the input samples. The order of the classes corresponds to that in the attribute `classes_`.
387,569
def generateMethods(self):
    for i in range(1, 5):
        self.make_grid_slot(i, i)
    for cl in self.mvision_classes:
        self.make_mvision_slot(cl)
Generate some member functions
387,570
def _generate_signature(self, nonce, method, path, data):
    data_json = ""
    endpoint = path
    if method == "get":
        if data:
            query_string = self._get_params_for_sig(data)
            endpoint = "{}?{}".format(path, query_string)
    elif data:
        data_json = compact_json_dict(data)
    sig_str = ("{}{}{}{}".format(nonce, method.upper(), endpoint, data_json)).encode()
    m = hmac.new(self.API_SECRET.encode(), sig_str, hashlib.sha256)
    return base64.b64encode(m.digest())
Generate the call signature :param path: :param data: :param nonce: :return: signature string
387,571
def map_collection(func, collection):
    datatype = type(collection)
    if isinstance(collection, Mapping):
        return datatype((key, func(val)) for key, val in collection.items())
    if is_string(collection):
        return collection
    elif isinstance(collection, Iterable):
        return datatype(map(func, collection))
    else:
        return collection
Apply func to each element of a collection, or value of a dictionary. If the value is not a collection, return it unmodified
387,572
def getfigsize(self, opt):
    # the grid-key literals were lost in extraction; 'x' and 'z' are implied by
    # the xmin/xmax and zmin/zmax bounds they fill
    if opt.xmin is None:
        opt.xmin = self.plotman.grid.grid['x'].min()
    if opt.xmax is None:
        opt.xmax = self.plotman.grid.grid['x'].max()
    if opt.zmin is None:
        opt.zmin = self.plotman.grid.grid['z'].min()
    if opt.zmax is None:
        opt.zmax = self.plotman.grid.grid['z'].max()
    if np.abs(opt.zmax - opt.zmin) < np.abs(opt.xmax - opt.xmin):
        self.sizex = 2 / 2.54
        self.sizez = self.sizex * (np.abs(opt.zmax - opt.zmin) /
                                   np.abs(opt.xmax - opt.xmin))
    else:
        self.sizez = 2 / 2.54
        self.sizex = 0.5 * self.sizez * (np.abs(opt.xmax - opt.xmin) /
                                         np.abs(opt.zmax - opt.zmin))
    print()
    self.sizex += 4 * .5
    self.sizex *= 4
    self.sizez *= self.rows
    self.sizez += 5
calculate appropriate sizes for the subfigures
387,573
def __purge():
    global __receivers
    newreceivers = collections.defaultdict(list)
    for signal, receivers in six.iteritems(__receivers):
        alive = [x for x in receivers if not __is_dead(x)]
        newreceivers[signal] = alive
    __receivers = newreceivers
Remove all dead signal receivers from the global receivers collection. Note: It is assumed that the caller holds the __lock.
387,574
def _fillVolumesAndPaths(self, paths):
    # NOTE: the keyInfo key names and several short literals were lost in
    # extraction; the "%s" in the warning is restored from its extra argument
    self.diffs = collections.defaultdict(lambda: [])
    self.extraKeys = {}
    for key in self.bucket.list():
        if key.name.startswith(theTrashPrefix):
            continue
        keyInfo = self._parseKeyName(key.name)
        if keyInfo is None:
            if key.name[-1:] != :
                logger.warning("Ignoring %s in S3", key.name)
            continue
        if keyInfo[] == :
            stream = io.BytesIO()
            key.get_contents_to_file(stream)
            Store.Volume.readInfo(stream)
            continue
        if keyInfo[] == :
            keyInfo[] = None
        path = self._relativePath("/" + keyInfo[])
        if path is None:
            continue
        diff = Store.Diff(self, keyInfo[], keyInfo[], key.size)
        logger.debug("Adding %s in %s", diff, path)
        self.diffs[diff.fromVol].append(diff)
        paths[diff.toVol].append(path)
        self.extraKeys[diff] = path
Fill in paths. :arg paths: = { Store.Volume: ["linux path",]}
387,575
def fcoe_get_interface_output_fcoe_intf_total_interfaces(self, **kwargs):
    config = ET.Element("config")
    fcoe_get_interface = ET.Element("fcoe_get_interface")
    config = fcoe_get_interface
    output = ET.SubElement(fcoe_get_interface, "output")
    fcoe_intf_total_interfaces = ET.SubElement(output, "fcoe-intf-total-interfaces")
    # kwargs keys restored from the element name and the generated-code pattern
    fcoe_intf_total_interfaces.text = kwargs.pop('fcoe_intf_total_interfaces')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
387,576
def grouper(n, iterable, padvalue=None):
    "grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')"
    return zip_longest(*[iter(iterable)] * n, fillvalue=padvalue)
grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')
387,577
def dependence_plot(ind, shap_values, features, feature_names=None, display_features=None,
                    interaction_index="auto",
                    color=,  # the default color string (and the cmap argument used
                              # below) were truncated in extraction
                    dot_size=16, x_jitter=0, alpha=1, title=None,
                    xmin=None, xmax=None, show=True):
    # NOTE: several string literals were lost in extraction; the assert messages
    # and labels[] keys below are partially reconstructed from the upstream SHAP
    # source, and unrecoverable ones are left as gaps
    if str(type(features)).endswith(">"):  # pandas DataFrame check; literal truncated
        if feature_names is None:
            feature_names = features.columns
        features = features.values
    if str(type(display_features)).endswith(">"):
        if feature_names is None:
            feature_names = display_features.columns
        display_features = display_features.values
    elif display_features is None:
        display_features = features
    if feature_names is None:
        feature_names = [labels[] % str(i) for i in range(shap_values.shape[1])]
    if len(shap_values.shape) == 1:
        shap_values = np.reshape(shap_values, len(shap_values), 1)
    if len(features.shape) == 1:
        features = np.reshape(features, len(features), 1)
    ind = convert_name(ind, shap_values, feature_names)

    # plotting SHAP interaction values: project onto the two chosen features
    if len(shap_values.shape) == 3 and len(ind) == 2:
        ind1 = convert_name(ind[0], shap_values, feature_names)
        ind2 = convert_name(ind[1], shap_values, feature_names)
        if ind1 == ind2:
            proj_shap_values = shap_values[:, ind2, :]
        else:
            proj_shap_values = shap_values[:, ind2, :] * 2  # off-diagonal values are split in half
        dependence_plot(
            ind1, proj_shap_values, features, feature_names=feature_names,
            interaction_index=ind2, display_features=display_features, show=False,
            xmin=xmin, xmax=xmax)
        if ind1 == ind2:
            pl.ylabel(labels[] % feature_names[ind1])
        else:
            pl.ylabel(labels[] % (feature_names[ind1], feature_names[ind2]))
        if show:
            pl.show()
        return

    assert shap_values.shape[0] == features.shape[0], \
        "'shap_values' and 'features' values must have the same number of rows!"
    assert shap_values.shape[1] == features.shape[1], \
        "'shap_values' must have the same number of columns as 'features'!"

    # randomize the ordering so plotting overlaps are not systematic
    oinds = np.arange(shap_values.shape[0])
    np.random.shuffle(oinds)
    xv = features[oinds, ind].astype(np.float64)
    xd = display_features[oinds, ind]
    s = shap_values[oinds, ind]
    if type(xd[0]) == str:
        name_map = {}
        for i in range(len(xv)):
            name_map[xd[i]] = xv[i]
        xnames = list(name_map.keys())
    if type(feature_names) == str:
        feature_names = [feature_names]
    name = feature_names[ind]
    if interaction_index == "auto":
        interaction_index = approximate_interactions(ind, shap_values, features)[0]
    interaction_index = convert_name(interaction_index, shap_values, feature_names)
    categorical_interaction = False
    color_norm = None
    if interaction_index is not None:
        cv = features[:, interaction_index]
        cd = display_features[:, interaction_index]
        clow = np.nanpercentile(cv.astype(np.float), 5)
        chigh = np.nanpercentile(cv.astype(np.float), 95)
        if type(cd[0]) == str:
            cname_map = {}
            for i in range(len(cv)):
                cname_map[cd[i]] = cv[i]
            cnames = list(cname_map.keys())
            categorical_interaction = True
        elif clow % 1 == 0 and chigh % 1 == 0 and chigh - clow < 10:
            categorical_interaction = True
        if categorical_interaction and clow != chigh:
            clow = np.nanmin(cv.astype(np.float))
            chigh = np.nanmax(cv.astype(np.float))
            bounds = np.linspace(clow, chigh, int(chigh - clow + 2))
            color_norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N - 1)

    # optionally jitter discrete x values for readability
    if x_jitter > 0:
        if x_jitter > 1:
            x_jitter = 1
        xvals = xv.copy()
        if isinstance(xvals[0], float):
            xvals = xvals.astype(np.float)
            xvals = xvals[~np.isnan(xvals)]
        xvals = np.unique(xvals)
        if len(xvals) >= 2:
            smallest_diff = np.min(np.diff(np.sort(xvals)))
            jitter_amount = x_jitter * smallest_diff
            xv += (np.random.ranf(size=len(xv)) * jitter_amount) - (jitter_amount / 2)

    xv_nan = np.isnan(xv)
    xv_notnan = np.invert(xv_nan)
    if interaction_index is not None:
        cvals = features[oinds, interaction_index].astype(np.float64)
        cvals_imp = cvals.copy()
        cvals_imp[np.isnan(cvals)] = (clow + chigh) / 2.0
        cvals[cvals_imp > chigh] = chigh
        cvals[cvals_imp < clow] = clow
        p = pl.scatter(
            xv[xv_notnan], s[xv_notnan], s=dot_size, linewidth=0,
            c=cvals[xv_notnan], cmap=cmap, alpha=alpha, vmin=clow, vmax=chigh,
            norm=color_norm, rasterized=len(xv) > 500)
        p.set_array(cvals[xv_notnan])
    else:
        pl.scatter(xv, s, s=dot_size, linewidth=0, color=color, alpha=alpha,
                   rasterized=len(xv) > 500)

    if interaction_index != ind and interaction_index is not None:
        # draw the color bar
        if type(cd[0]) == str:
            tick_positions = [cname_map[n] for n in cnames]
            if len(tick_positions) == 2:
                tick_positions[0] -= 0.25
                tick_positions[1] += 0.25
            cb = pl.colorbar(ticks=tick_positions)
            cb.set_ticklabels(cnames)
        else:
            cb = pl.colorbar()
        cb.set_label(feature_names[interaction_index], size=13)
        cb.ax.tick_params(labelsize=11)
        if categorical_interaction:
            cb.ax.tick_params(length=0)
        cb.set_alpha(1)
        cb.outline.set_visible(False)
        bbox = cb.ax.get_window_extent().transformed(pl.gcf().dpi_scale_trans.inverted())
        cb.ax.set_aspect((bbox.height - 0.7) * 20)

    if xmin is not None or xmax is not None:
        if type(xmin) == str and xmin.startswith("percentile"):
            xmin = np.nanpercentile(xv, float(xmin[11:-1]))
        if type(xmax) == str and xmax.startswith("percentile"):
            xmax = np.nanpercentile(xv, float(xmax[11:-1]))
        if xmin is None or xmin == np.nanmin(xv):
            xmin = np.nanmin(xv) - (xmax - np.nanmin(xv)) / 20
        if xmax is None or xmax == np.nanmax(xv):
            xmax = np.nanmax(xv) + (np.nanmax(xv) - xmin) / 20
        pl.xlim(xmin, xmax)

    # plot NaN feature values as short ticks at the left edge
    xlim = pl.xlim()
    if interaction_index is not None:
        p = pl.scatter(
            xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1, linewidth=2,
            c=cvals_imp[xv_nan], cmap=cmap, alpha=alpha, vmin=clow, vmax=chigh)
        p.set_array(cvals[xv_nan])
    else:
        pl.scatter(xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,
                   linewidth=2, color=color, alpha=alpha)
    pl.xlim(*xlim)

    if interaction_index != ind:
        pl.gcf().set_size_inches(7.5, 5)
    else:
        pl.gcf().set_size_inches(6, 5)
    pl.xlabel(name, color=axis_color, fontsize=13)
    pl.ylabel(labels[] % name, color=axis_color, fontsize=13)
    if title is not None:
        pl.title(title, color=axis_color, fontsize=13)
    pl.gca().xaxis.set_ticks_position("bottom")
    pl.gca().yaxis.set_ticks_position("left")
    pl.gca().spines["right"].set_visible(False)
    pl.gca().spines["top"].set_visible(False)
    pl.gca().tick_params(color=axis_color, labelcolor=axis_color, labelsize=11)
    for spine in pl.gca().spines.values():
        spine.set_edgecolor(axis_color)
    if type(xd[0]) == str:
        pl.xticks([name_map[n] for n in xnames], xnames, rotation=, fontsize=11)
    if show:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            pl.show()
Create a SHAP dependence plot, colored by an interaction feature. Plots the value of the feature on the x-axis and the SHAP value of the same feature on the y-axis. This shows how the model depends on the given feature, and is like a richer extension of the classical partial dependence plots. Vertical dispersion of the data points represents interaction effects. Grey ticks along the y-axis are data points where the feature's value was NaN. Parameters ---------- ind : int or string If this is an int it is the index of the feature to plot. If this is a string it is either the name of the feature to plot, or it can have the form "rank(int)" to specify the feature with that rank (ordered by mean absolute SHAP value over all the samples). shap_values : numpy.array Matrix of SHAP values (# samples x # features). features : numpy.array or pandas.DataFrame Matrix of feature values (# samples x # features). feature_names : list Names of the features (length # features). display_features : numpy.array or pandas.DataFrame Matrix of feature values for visual display (such as strings instead of coded values). interaction_index : "auto", None, int, or string The index of the feature used to color the plot. The name of a feature can also be passed as a string. If "auto" then shap.common.approximate_interactions is used to pick what seems to be the strongest interaction (note that to find the true strongest interaction you need to compute the SHAP interaction values). x_jitter : float (0 - 1) Adds random jitter to feature values. May increase plot readability when the feature is discrete. alpha : float The transparency of the data points (between 0 and 1). This can be useful to show the density of the data points when using a large dataset. xmin : float or string Represents the lower bound of the plot's x-axis. It can be a string of the format "percentile(float)" to use that percentile of the feature's values as the bound. xmax : float or string Represents the upper bound of the plot's x-axis. It can be a string of the format "percentile(float)" to use that percentile of the feature's values as the bound.
387,578
def _ed25519_key_from_file(fn, path):
    try:
        return fn(read_from_file(path, exception=ScriptWorkerEd25519Error))
    except ScriptWorkerException as exc:
        raise ScriptWorkerEd25519Error(
            "Failed calling {} for {}: {}!".format(fn, path, str(exc)))
Create an ed25519 key from the contents of ``path``. ``path`` is a filepath containing a base64-encoded ed25519 key seed. Args: fn (callable): the function to call with the contents from ``path`` path (str): the file path to the base64-encoded key seed. Returns: obj: the appropriate key type from ``path`` Raises: ScriptWorkerEd25519Error
387,579
def histogram(a, bins=10, range=None, **kwargs):
    if isinstance(bins, Symbol):
        return _internal._histogram(data=a, bins=bins, **kwargs)
    elif isinstance(bins, integer_types):
        if range is None:
            raise ValueError("null range is not supported in symbol mode")
        return _internal._histogram(data=a, bin_cnt=bins, range=range, **kwargs)
    raise ValueError("bins argument should be either an integer or an NDArray")
Compute the histogram of the input data. Parameters ---------- a : NDArray Input data. The histogram is computed over the flattened array. bins : int or sequence of scalars If bins is an int, it defines the number of equal-width bins in the given range (10, by default). If bins is a sequence, it defines the bin edges, including the rightmost edge, allowing for non-uniform bin widths. range : (float, float), required if bins is an integer The lower and upper range of the bins. If not provided, range is simply (a.min(), a.max()). Values outside the range are ignored. The first element of the range must be less than or equal to the second. range affects the automatic bin computation as well, the range will be equally divided by the number of bins. Returns ------- out : Symbol The created Symbol
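A hedged symbol-mode usage sketch (assumes MXNet is importable; an int bins requires an explicit range, per the code above):

import mxnet as mx

data = mx.sym.Variable("data")
hist = histogram(data, bins=10, range=(0.0, 1.0))  # 10 equal-width bins over [0, 1]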
387,580
def init_default(required, default, optional_default):
    if not required and default == NOTHING:
        default = optional_default
    return default
Returns optional default if field is not required and default was not provided. :param bool required: whether the field is required in a given model. :param default: default provided by creator of field. :param optional_default: default for the data type if none provided. :return: default or optional default based on inputs
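Quick examples of the fallback logic (NOTHING is the module's sentinel for "no default provided"):

# init_default(required=False, default=NOTHING, optional_default=0)  -> 0
# init_default(required=True,  default=NOTHING, optional_default=0)  -> NOTHING
# init_default(required=False, default=5,       optional_default=0)  -> 5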
387,581
def size(self, store_hashes=True):
    if self.modified:
        self.__cache_content(store_hashes)
    return len(self.cached_content)
Retrieves the size in bytes of this ZIP content. :return: Size of the zip content in bytes
387,582
def create_fake_mirror(src, dst):
    src = os.path.abspath(src)
    if not (os.path.exists(src) and (not os.path.exists(dst))):
        raise Exception("source not exist or distination already exist")
    folder_to_create = list()
    file_to_create = list()
    for current_folder, _, file_list in os.walk(src):
        new_folder = os.path.join(dst, os.path.relpath(current_folder, src))
        folder_to_create.append(new_folder)
        for basename in file_list:
            file_to_create.append(os.path.join(new_folder, basename))
    for abspath in folder_to_create:
        os.mkdir(abspath)
    for abspath in file_to_create:
        with open(abspath, "w") as _:
            pass
Copy all dirs and files from ``src`` to ``dst``, but only create an empty file with the same file name. Of course, the tree structure doesn't change. A recipe gadget to create a test data set. Make sure to use absolute paths. **Chinese docs (translated):** Copy the entire file-tree structure under the src directory to the dst directory, but don't actually copy the contents, only the file names. That is, every file is empty, but the directory structure is identical.
387,583
from bokeh.plotting import figure


def skyimage_figure(cluster):
    # NOTE: the title and anchor string literals were lost in extraction;
    # the values below are plausible reconstructions, not verified originals.
    pf_image = figure(x_range=(0, 1), y_range=(0, 1),
                      title='Image of {0}'.format(cluster.name))
    pf_image.image_url(url=[cluster.image_path],
                       x=0, y=0, w=1, h=1, anchor='bottom_left')
    pf_image.toolbar_location = None
    pf_image.axis.visible = False
    return pf_image
Given a cluster create a Bokeh plot figure using the cluster's image.
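A hedged usage sketch; `cluster` is any object exposing the ``name`` and ``image_path`` attributes the function expects:

from bokeh.io import show

show(skyimage_figure(cluster))  # cluster is assumed to exist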
387,584
def argmax(self, axis=None, skipna=True): nv.validate_minmax_axis(axis) return nanops.nanargmax(self._values, skipna=skipna)
Return an ndarray of the maximum argument indexer. Parameters ---------- axis : {None} Dummy argument for consistency with Series skipna : bool, default True See Also -------- numpy.ndarray.argmax
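A quick illustration with pandas; the positional result shown assumes a pandas version where ``argmax`` returns an integer position (as the ``nanargmax`` call above suggests):

import pandas as pd

s = pd.Series([3.0, float('nan'), 7.5, 1.2])
print(s.argmax())  # -> 2; NaN is skipped because skipna defaults to True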
387,585
def _get_instructions_bytes(code, varnames=None, names=None, constants=None,
                            cells=None, linestarts=None, line_offset=0):
    labels = dis.findlabels(code)
    extended_arg = 0
    starts_line = None
    free = None
    n = len(code)
    i = 0
    while i < n:
        op = code[i]
        offset = i
        if linestarts is not None:
            starts_line = linestarts.get(i, None)
            if starts_line is not None:
                starts_line += line_offset
        is_jump_target = i in labels
        i = i + 1
        arg = None
        argval = None
        argrepr = ''  # restored: the empty-string literal was stripped in extraction
        if op >= dis.HAVE_ARGUMENT:
            arg = code[i] + code[i + 1] * 256 + extended_arg
            extended_arg = 0
            i = i + 2
            if op == dis.EXTENDED_ARG:
                extended_arg = arg * 65536
            argval = arg
            if op in dis.hasconst:
                argval, argrepr = dis._get_const_info(arg, constants)
            elif op in dis.hasname:
                argval, argrepr = dis._get_name_info(arg, names)
            elif op in dis.hasjrel:
                argval = i + arg
                argrepr = "to " + repr(argval)
            elif op in dis.haslocal:
                argval, argrepr = dis._get_name_info(arg, varnames)
            elif op in dis.hascompare:
                argval = dis.cmp_op[arg]
                argrepr = argval
            elif op in dis.hasfree:
                argval, argrepr = dis._get_name_info(arg, cells)
            elif op in dis.hasnargs:
                argrepr = "%d positional, %d keyword pair" % (code[i - 2], code[i - 1])
        yield dis.Instruction(dis.opname[op], op, arg, argval, argrepr,
                              offset, starts_line, is_jump_target)
Iterate over the instructions in a bytecode string. Generates a sequence of Instruction namedtuples giving the details of each opcode. Additional information about the code's runtime environment (e.g. variable names, constants) can be specified using optional arguments.
387,586
def create(klass, account, name):
    audience = klass(account)
    # The method-name literal was stripped in extraction; '__create_audience__'
    # is a plausible reconstruction of the internal create call.
    getattr(audience, '__create_audience__')(name)
    try:
        return audience.reload()
    except BadRequest as e:
        audience.delete()
        raise e
Creates a new tailored audience.
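A hedged sketch based only on the signature above; ``account`` is assumed to be an authenticated ads account object:

# creates the audience remotely, then reloads it; on a BadRequest the
# partially created audience is deleted and the error is re-raised
audience = TailoredAudience.create(account, 'my-audience')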
387,587
def make_at_least_n_items_valid(flag_list, n): flag_list = np.array(flag_list) num_valid = flag_list.sum() num_extra = min(len(flag_list) - num_valid, n - num_valid) for index in range(len(flag_list)): if num_extra <= 0: break if not flag_list[index]: flag_list[index] = True num_extra -= 1 return flag_list
tries to make at least min(len(flag_list), n) items True in flag_list Args: flag_list (list): list of booleans n (int): number of items to ensure are True CommandLine: python -m utool.util_dev --test-make_at_least_n_items_valid Example: >>> # ENABLE_DOCTEST >>> from utool.util_dev import * # NOQA >>> # build test data >>> flag_list = [False, True, False, False, False, False, False, True] >>> n = 5 >>> # execute function >>> flag_list = make_at_least_n_items_valid(flag_list, n) >>> # verify results >>> result = str(flag_list) >>> print(result) [ True True True True False False False True]
387,588
def run(self): while self._base.is_running: if self._worker: self._worker() time.sleep(self._sleep_duration)
Runs its worker method. This method will be terminated once its parent's is_running property turns False.
387,589
def cmdloop(self, intro: Optional[str] = None) -> None: callopts, callargs = parser.parse_known_args() if callopts.test: self._transcript_files = callargs if callargs: self.cmdqueue.extend(callargs) self.terminal_lock.acquire() for func in self._preloop_hooks: func() self.preloop() if self._transcript_files is not None: self.run_transcript_tests([os.path.expanduser(tf) for tf in self._transcript_files]) else: if intro is not None: self.intro = intro if self.intro is not None: self.poutput(str(self.intro) + "\n") self._cmdloop() for func in self._postloop_hooks: func() self.postloop() self.terminal_lock.release() signal.signal(signal.SIGINT, original_sigint_handler) if self.exit_code is not None: sys.exit(self.exit_code)
This is an outer wrapper around _cmdloop() which deals with extra features provided by cmd2. _cmdloop() provides the main loop equivalent to cmd.cmdloop(). This is a wrapper around that which deals with the following extra features provided by cmd2: - commands at invocation - transcript testing - intro banner :param intro: if provided this overrides self.intro and serves as the intro banner printed once at start
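A typical cmd2 entry point, sketched; `MyApp` is a hypothetical cmd2.Cmd subclass:

if __name__ == '__main__':
    app = MyApp()            # a cmd2.Cmd subclass (hypothetical)
    app.cmdloop('Welcome!')  # the argument overrides self.intro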
387,590
def _execute_get_url(self, request_url, append_sid=True):
    self._debuglog("Requesting URL: " + request_url)
    if append_sid:
        self._debuglog("Appending access_token (SID: " + self.access_token + ") to url")
        request_url = "%s&_sid=%s" % (
            request_url, self.access_token)
    try:
        resp = self._session.get(request_url)
        self._debuglog("Request executed: " + str(resp.status_code))
        if resp.status_code == 200:
            json_data = json.loads(resp.text)
            if json_data["success"]:
                self._debuglog("Successfully returning data")
                self._debuglog(str(json_data))
                return json_data
            else:
                if json_data["error"]["code"] in {105, 106, 107, 119}:
                    self._debuglog("Session error: " + str(json_data["error"]["code"]))
                    self._session_error = True
                else:
                    self._debuglog("Failed: " + resp.text)
        else:
            return None
    except Exception:  # narrowed from a bare except; keeps the original fail-safe behavior
        return None
Function to execute and handle a GET request
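A hedged caller sketch; ``login()`` is a hypothetical re-authentication method, but the ``_session_error`` flag is set by the code above:

data = api._execute_get_url(url)
if data is None and api._session_error:
    api.login()                      # hypothetical re-authentication
    data = api._execute_get_url(url)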
387,591
def _cleanup_ca_temp_file(self):
    # The os.name literal was stripped in extraction; 'nt' is the plausible
    # value, since only Windows needs the named temp file unlinked by hand.
    if os.name == 'nt':
        if isinstance(self.ca_verify_filename, (binary_type, text_type)):
            os.unlink(self.ca_verify_filename)
    else:
        self._ca_verify_file_handle.close()
Function to clean up ca temp file for requests. **Returns:** Removes TEMP ca file, no return
387,592
def define_task(name,
                tick_script,
                task_type='stream',
                database=None,
                retention_policy='default',
                dbrps=None):
    # NOTE: the string literals below were stripped in extraction and have been
    # reconstructed; the CLI flag names and the version cutoff are plausible
    # values, not verified against the original module.
    if not database and not dbrps:
        log.error("Providing database name or dbrps is mandatory.")
        return False

    if version() < '0.13':
        cmd = 'kapacitor define -name {0}'.format(name)
    else:
        cmd = 'kapacitor define {0}'.format(name)

    if tick_script.startswith('salt://'):
        tick_script = __salt__['cp.cache_file'](tick_script, __env__)

    cmd += ' -tick {0}'.format(tick_script)

    if task_type:
        cmd += ' -type {0}'.format(task_type)

    if not dbrps:
        dbrps = []

    if database and retention_policy:
        dbrp = '"{0}"."{1}"'.format(database, retention_policy)
        dbrps.append(dbrp)

    if dbrps:
        for dbrp in dbrps:
            cmd += ' -dbrp {0}'.format(dbrp)

    return _run_cmd(cmd)
Define a task. Serves as both create/update. name Name of the task. tick_script Path to the TICK script for the task. Can be a salt:// source. task_type Task type. Defaults to 'stream' dbrps A list of databases and retention policies in "dbname"."rpname" format to fetch data from. For backward compatibility, the value of 'database' and 'retention_policy' will be merged as part of dbrps. .. versionadded:: 2019.2.0 database Which database to fetch data from. retention_policy Which retention policy to fetch data from. Defaults to 'default'. CLI Example: .. code-block:: bash salt '*' kapacitor.define_task cpu salt://kapacitor/cpu.tick database=telegraf
387,593
def createDirStruct(paths, verbose=True):
    for k, path in paths.items():
        p = None
        try:
            pathlist = path if isinstance(path, list) else [path]
            for p in pathlist:
                os.makedirs(p)
                if verbose:
                    log.info('Creating directory: ' + p)  # message literal restored
        except OSError as e:  # Python 3-compatible syntax (was: except OSError, e)
            if e.errno == errno.EEXIST and os.path.isdir(p):
                pass
            else:
                raise
    return True
Loops through the given path mapping (e.g., ait.config._datapaths from AIT_CONFIG) and creates each directory. Replaces year and doy with the respective year and day-of-year. If neither are given as arguments, the current UTC day and year are used. Args: paths: [optional] list of directory paths you would like to create. doy and year will be replaced by the datetime day and year, respectively. datetime: UTC Datetime string in ISO 8601 Format YYYY-MM-DDTHH:mm:ssZ
387,594
def order_upgrades(self, upgrades, history=None): history = history or {} graph_incoming, graph_outgoing = self._create_graph(upgrades, history) for node_id in six.iterkeys(history): start_nodes = [node_id, ] while start_nodes: node = start_nodes.pop() try: for d in graph_outgoing[node]: graph_incoming[d] = [x for x in graph_incoming[d] if x != node] except KeyError: warnings.warn("Ghost upgrade %s detected" % node) if node in graph_incoming: depends_on = graph_incoming[node] for d in depends_on: graph_outgoing[d] = [x for x in graph_outgoing[d] if x != node] start_nodes.append(d) del graph_incoming[node] for node_id, depends_on in six.iteritems(graph_incoming): for d in depends_on: if d not in graph_incoming: raise RuntimeError("Upgrade %s depends on an unknown" " upgrade %s" % (node_id, d)) start_nodes = [x for x in six.iterkeys(graph_incoming) if len(graph_incoming[x]) == 0] topo_order = [] while start_nodes: node_n = start_nodes.pop() topo_order.append(node_n) for node_m in graph_outgoing[node_n]: graph_incoming[node_m] = [x for x in graph_incoming[node_m] if x != node_n] if not graph_incoming[node_m]: start_nodes.append(node_m) for node, edges in six.iteritems(graph_incoming): if edges: raise RuntimeError("The upgrades have at least one cyclic " "dependency involving %s." % node) return map(lambda x: upgrades[x], topo_order)
Order upgrades according to their dependencies. (topological sort using Kahn's algorithm - http://en.wikipedia.org/wiki/Topological_sorting). :param upgrades: Dict of upgrades :param history: Dict of applied upgrades
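A minimal sketch with hypothetical upgrade recipes; the exact recipe shape consumed by ``_create_graph`` is an assumption here:

upgrades = {
    'a': {'id': 'a', 'depends_on': []},
    'b': {'id': 'b', 'depends_on': ['a']},
    'c': {'id': 'c', 'depends_on': ['a', 'b']},
}
ordered = engine.order_upgrades(upgrades)  # yields the recipes in order a, b, c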
387,595
def native_decode_source(text):
    if ((only_python3 and isinstance(text, bytes)) or
            (only_python2 and isinstance(text, str))):
        text = decode_source_to_unicode(text)
    if only_python2:
        # the codec arguments were stripped in extraction;
        # 'utf-8'/'replace' are assumed values
        return text.encode('utf-8', 'replace')
    return text  # restored: Python 3 path must return the unicode text
Use codec specified in file to decode to unicode Then, encode unicode to native str: Python 2: bytes Python 3: unicode
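A sketch of the round-trip; the coding cookie in the source bytes drives the decode step:

src = b"# -*- coding: utf-8 -*-\nname = 'caf\xc3\xa9'\n"
text = native_decode_source(src)  # unicode on Python 3, native str on Python 2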
387,596
def ProcessLine(filename, file_extension, clean_lines, line, include_state, function_state, nesting_state, error, extra_check_functions=[]): raw_lines = clean_lines.raw_lines ParseNolintSuppressions(filename, raw_lines[line], line, error) nesting_state.Update(filename, clean_lines, line, error) CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line, error) if nesting_state.InAsmBlock(): return CheckForFunctionLengths(filename, clean_lines, line, function_state, error) CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error) CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error) CheckLanguage(filename, clean_lines, line, file_extension, include_state, nesting_state, error) CheckForNonConstReference(filename, clean_lines, line, nesting_state, error) CheckForNonStandardConstructs(filename, clean_lines, line, nesting_state, error) CheckVlogArguments(filename, clean_lines, line, error) CheckPosixThreading(filename, clean_lines, line, error) CheckInvalidIncrement(filename, clean_lines, line, error) CheckMakePairUsesDeduction(filename, clean_lines, line, error) CheckDefaultLambdaCaptures(filename, clean_lines, line, error) CheckRedundantVirtual(filename, clean_lines, line, error) CheckRedundantOverrideOrFinal(filename, clean_lines, line, error) for check_fn in extra_check_functions: check_fn(filename, clean_lines, line, error)
Processes a single line in the file. Args: filename: Filename of the file that is being processed. file_extension: The extension (dot not included) of the file. clean_lines: An array of strings, each representing a line of the file, with comments stripped. line: Number of line being processed. include_state: An _IncludeState instance in which the headers are inserted. function_state: A _FunctionState instance which counts function lines, etc. nesting_state: A NestingState instance which maintains information about the current stack of nested blocks being parsed. error: A callable to which errors are reported, which takes 4 arguments: filename, line number, error level, and message extra_check_functions: An array of additional check functions that will be run on each source line. Each function takes 4 arguments: filename, clean_lines, line, error
387,597
def write_java_message(key, val, text_file):
    # The '\n' literals below were stripped in extraction and restored.
    text_file.write(key)
    text_file.write('\n')
    if (len(val[0]) > 0) and (len(val) >= 3):
        for index in range(len(val[0])):
            text_file.write("Java Message Type: ")
            text_file.write(val[1][index])
            text_file.write('\n')
            text_file.write("Java Message: ")
            for jmess in val[2][index]:
                text_file.write(jmess)
            text_file.write('\n')
        text_file.write('\n')
Loop through all java messages that are not associated with a unit test and write them into a log file. Parameters ---------- key : str 9.general_bad_java_messages val : list of list of str Contains the bad java messages and the message types. Returns ------- None
387,598
import random
import string


def random_str(size=10):
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(size))
Create a random string of the selected size. :param size: int, length of the string :return: the string
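Usage, with optional seeding for reproducibility:

import random

random.seed(42)        # optional: deterministic output for tests
print(random_str())    # 10 lowercase letters
print(random_str(4))   # 4 lowercase letters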
387,599
def serialize(self, method="urlencoded", lev=0, **kwargs): return getattr(self, "to_%s" % method)(lev=lev, **kwargs)
Convert this instance to another representation. Which representation is produced is given by the choice of serialization method. :param method: A serialization method. Presently 'urlencoded', 'json', 'jwt' and 'dict' are supported. :param lev: :param kwargs: Extra keyword arguments :return: The content of this message serialized using the chosen method
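A hedged sketch assuming a message class that provides the ``to_urlencoded``/``to_json`` methods the dispatch relies on; `Message` and its fields are hypothetical:

msg = Message(grant_type='authorization_code', code='abc')
print(msg.serialize())               # urlencoded, the default
print(msg.serialize(method='json'))  # JSON via to_json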