Dataset columns: Unnamed: 0 (int64, values 0 to 389k), code (string, lengths 26 to 79.6k), docstring (string, lengths 1 to 46.9k).
383,600
def assign_rates(self, mu=1.0, pi=None, W=None):
    n = len(self.alphabet)
    self.mu = mu
    if pi is not None and len(pi) == n:
        Pi = np.array(pi)
    else:
        if pi is not None and len(pi) != n:
            self.logger("length of equilibrium frequency vector does not match alphabet length", 4, warn=True)
            self.logger("Ignoring input equilibrium frequencies", 4, warn=True)
        Pi = np.ones(shape=(n,))
    self.Pi = Pi / np.sum(Pi)
    if W is None or W.shape != (n, n):
        if (W is not None) and W.shape != (n, n):
            self.logger("Substitution matrix size does not match alphabet size", 4, warn=True)
            self.logger("Ignoring input substitution matrix", 4, warn=True)
        W = np.ones((n, n))
        np.fill_diagonal(W, 0.0)
        np.fill_diagonal(W, -W.sum(axis=0))
    else:
        W = np.array(W)
    self.W = 0.5 * (W + W.T)
    self._check_fix_Q(fixed_mu=True)
    self._eig()
Overwrite the GTR model given the provided data Parameters ---------- mu : float Substitution rate W : nxn matrix Substitution matrix pi : n vector Equilibrium frequencies
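For illustration, a standalone sketch of how such inputs are typically combined in a GTR-style model. The Q construction below is the generic textbook form (rate i -> j proportional to w_ij * pi_j), not necessarily the exact normalization applied by _check_fix_Q; the alphabet size and frequencies are made up.

import numpy as np

n = 4                                     # e.g. a nucleotide alphabet
pi = np.array([0.3, 0.2, 0.2, 0.3])
W = np.ones((n, n))                       # symmetric relative rates
np.fill_diagonal(W, 0.0)

Pi = pi / pi.sum()                        # normalized equilibrium frequencies
Q = W * Pi[None, :]                       # q_ij proportional to w_ij * pi_j
np.fill_diagonal(Q, 0.0)
np.fill_diagonal(Q, -Q.sum(axis=1))       # rows sum to zero
print(np.allclose(Q.sum(axis=1), 0.0))    # True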
383,601
def deregister_entity_from_group(self, entity, group):
    if entity in self._entities:
        if entity in self._groups[group]:
            self._groups[group].remove(entity)
    else:
        raise UnmanagedEntityError(entity)
Removes entity from group
383,602
def get_unique_schema_id(schema):
    assert isinstance(schema, GraphQLSchema), (
        "Must receive a GraphQLSchema as schema. Received {}"
    ).format(repr(schema))
    if schema not in _cached_schemas:
        _cached_schemas[schema] = sha1(str(schema).encode("utf-8")).hexdigest()
    return _cached_schemas[schema]
Get a unique id given a GraphQLSchema
383,603
def remove_child_objective_banks(self, objective_bank_id):
    if self._catalog_session is not None:
        return self._catalog_session.remove_child_catalogs(catalog_id=objective_bank_id)
    return self._hierarchy_session.remove_children(id_=objective_bank_id)
Removes all children from an objective bank. arg: objective_bank_id (osid.id.Id): the ``Id`` of an objective bank raise: NotFound - ``objective_bank_id`` not in hierarchy raise: NullArgument - ``objective_bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
383,604
def updateColumnName(self, networkId, tableType, body, verbose=None):
    # PUT /networks/{networkId}/tables/{tableType}/columns
    response = api(url=self.___url + 'networks/' + str(networkId) + '/tables/' + str(tableType) + '/columns',
                   method="PUT", body=body, verbose=verbose)
    return response
Renames an existing column in the table specified by the `tableType` and `networkId` parameters. :param networkId: SUID of the network containing the table :param tableType: Table Type :param body: Old and new column name :param verbose: print more :returns: default: successful operation
383,605
def BE8(value, min_value=None, max_value=None, fuzzable=True, name=None, full_range=False): return UInt8(value, min_value=min_value, max_value=max_value, encoder=ENC_INT_BE, fuzzable=fuzzable, name=name, full_range=full_range)
8-bit field, Big endian encoded
383,606
def from_dict(cls, d):
    s = Structure.from_dict(d)
    return cls(s, history=d["history"], other_parameters=d.get("other_parameters", None))
Creates a TransformedStructure from a dict.
383,607
def str_id(self):
    """str: This key's string id."""
    id_or_name = self.id_or_name
    if id_or_name is not None and isinstance(id_or_name, str):
        return id_or_name
    return None
str: This key's string id.
383,608
def determine_end_point(http_request, url):
    if url.endswith('aggregates') or url.endswith('aggregates/'):
        return 'aggregates'
    else:
        return 'detail' if is_detail_url(http_request, url) else 'list'
returns detail, list or aggregates
383,609
def On_close_criteria_box(self, dia): criteria_list = list(self.acceptance_criteria.keys()) criteria_list.sort() avg_by = dia.set_average_by_sample_or_site.GetValue() if avg_by == : for crit in [, , , , ]: self.acceptance_criteria[crit][] = -999 if avg_by == : for crit in [, , , , ]: self.acceptance_criteria[crit][] = -999 for i in range(len(criteria_list)): crit = criteria_list[i] value, accept = dia.get_value_for_crit(crit, self.acceptance_criteria) if accept: self.acceptance_criteria.update(accept) if dia.set_stdev_opt.GetValue() == True: self.acceptance_criteria[][] = elif dia.set_bs.GetValue() == True: self.acceptance_criteria[][] = elif dia.set_bs_par.GetValue() == True: self.acceptance_criteria[][] = dlg1 = wx.MessageDialog( self, caption="Warning:", message="changes are saved to the criteria file\n ", style=wx.OK) result = self.show_dlg(dlg1) if result == wx.ID_OK: try: self.clear_boxes() except IndexError: pass try: self.write_acceptance_criteria_to_boxes() except IOError: pass if self.data_model == 3: crit_file = else: crit_file = try: pmag.write_criteria_to_file(os.path.join( self.WD, crit_file), self.acceptance_criteria, data_model=self.data_model, prior_crits=self.crit_data) except AttributeError as ex: print(ex) print("no criteria given to save") dlg1.Destroy() dia.Destroy() self.fig4.texts[0].remove() txt = "{} data".format(avg_by).capitalize() self.fig4.text(0.02, 0.96, txt, { : self.font_type, : 10, : , : , : }) self.recalculate_satistics() try: self.update_GUI_with_new_interpretation() except KeyError: pass
after criteria dialog window is closed. Take the acceptance criteria values and update self.acceptance_criteria
383,610
def open_stream(stream):
    global stream_fd
    try:
        stream_fd = stream.open()
    except StreamError as err:
        raise StreamError("Could not open stream: {0}".format(err))
    try:
        log.debug("Pre-buffering 8192 bytes")
        prebuffer = stream_fd.read(8192)
    except IOError as err:
        stream_fd.close()
        raise StreamError("Failed to read data from stream: {0}".format(err))
    if not prebuffer:
        stream_fd.close()
        raise StreamError("No data returned from stream")
    return stream_fd, prebuffer
Opens a stream and reads 8192 bytes from it. This is useful to check if a stream actually has data before opening the output.
383,611
def result_to_components(self, result, model, island_data, isflags): global_data = self.global_data isle_num = island_data.isle_num idata = island_data.i xmin, xmax, ymin, ymax = island_data.offsets box = slice(int(xmin), int(xmax)), slice(int(ymin), int(ymax)) rms = global_data.rmsimg[box] bkg = global_data.bkgimg[box] residual = np.median(result.residual), np.std(result.residual) is_flag = isflags sources = [] j = 0 for j in range(model[].value): src_flags = is_flag source = OutputSource() source.island = isle_num source.source = j self.log.debug(" component {0}".format(j)) prefix = "c{0}_".format(j) xo = model[prefix + ].value yo = model[prefix + ].value sx = model[prefix + ].value sy = model[prefix + ].value theta = model[prefix + ].value amp = model[prefix + ].value src_flags |= model[prefix + ].value source.residual_mean = residual[0] source.residual_std = residual[1] source.flags = src_flags x_pix = xo + xmin + 1 y_pix = yo + ymin + 1 model[prefix + ].set(value=x_pix, max=np.inf) model[prefix + ].set(value=y_pix, max=np.inf) y = max(min(int(round(y_pix - ymin)), bkg.shape[1] - 1), 0) x = max(min(int(round(x_pix - xmin)), bkg.shape[0] - 1), 0) source.background = bkg[x, y] source.local_rms = rms[x, y] source.peak_flux = amp source.ra, source.dec, source.a, source.b, source.pa = global_data.wcshelper.pix2sky_ellipse((x_pix, y_pix), sx * CC2FHWM, sy * CC2FHWM, theta) source.a *= 3600 source.b *= 3600 fix_shape(source) source.pa = pa_limit(source.pa) if not all(np.isfinite((source.ra, source.dec, source.a, source.b, source.pa))): src_flags |= flags.WCSERR if source.ra < 0: source.ra += 360 source.dec = radec[1] source.ra_str = dec2hms(source.ra) source.dec_str = dec2dms(source.dec) source.background = bkg[positions[0][0], positions[1][0]] source.local_rms = rms[positions[0][0], positions[1][0]] source.x_width, source.y_width = idata.shape source.pixels = int(sum(np.isfinite(kappa_sigma).ravel() * 1.0)) source.extent = [xmin, xmax, ymin, ymax] bl = global_data.wcshelper.pix2sky([xmax, ymin]) tl = global_data.wcshelper.pix2sky([xmax, ymax]) tr = global_data.wcshelper.pix2sky([xmin, ymax]) height = gcd(tl[0], tl[1], bl[0], bl[1]) width = gcd(tl[0], tl[1], tr[0], tr[1]) area = height * width source.area = area * source.pixels / source.x_width / source.y_width msq = MarchingSquares(idata) source.contour = [(a[0] + xmin, a[1] + ymin) for a in msq.perimeter] source.max_angular_size = 0 for i, pos1 in enumerate(source.contour): radec1 = global_data.wcshelper.pix2sky(pos1) for j, pos2 in enumerate(source.contour[i:]): radec2 = global_data.wcshelper.pix2sky(pos2) dist = gcd(radec1[0], radec1[1], radec2[0], radec2[1]) if dist > source.max_angular_size: source.max_angular_size = dist source.pa = bear(radec1[0], radec1[1], radec2[0], radec2[1]) source.max_angular_size_anchors = [pos1[0], pos1[1], pos2[0], pos2[1]] self.log.debug("- peak position {0}, {1} [{2},{3}]".format(source.ra_str, source.dec_str, positions[0][0], positions[1][0])) beam_area = global_data.psfhelper.get_beamarea_deg2(source.ra, source.dec) isize = source.pixels self.log.debug("- pixels used {0}".format(isize)) source.int_flux = np.nansum(kappa_sigma) self.log.debug("- sum of pixles {0}".format(source.int_flux)) source.int_flux *= beam_area self.log.debug("- integrated flux {0}".format(source.int_flux)) eta = erf(np.sqrt(-1 * np.log(abs(source.local_rms * outerclip / source.peak_flux)))) ** 2 self.log.debug("- eta {0}".format(eta)) source.eta = eta source.beam_area = beam_area source.err_int_flux = np.nan sources.append(source) 
return sources
Convert fitting results into a set of components Parameters ---------- result : lmfit.MinimizerResult The fitting results. model : lmfit.Parameters The model that was fit. island_data : :class:`AegeanTools.models.IslandFittingData` Data about the island that was fit. isflags : int Flags that should be added to this island (in addition to those within the model) Returns ------- sources : list A list of components, and islands if requested.
383,612
def is_local(self):
    local_repo = package_repository_manager.get_repository(
        self.config.local_packages_path)
    return (self.resource._repository.uid == local_repo.uid)
Returns True if the package is in the local package repository
383,613
def suspended_updates():
    if getattr(local_storage, "bulk_queue", None) is None:
        local_storage.bulk_queue = defaultdict(list)
    try:
        yield
    finally:
        for index, items in local_storage.bulk_queue.items():
            index.bulk(chain(*items))
        local_storage.bulk_queue = None
This allows you to postpone updates to all the search indexes inside of a with: with suspended_updates(): model1.save() model2.save() model3.save() model4.delete()
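The same batching pattern can be sketched standalone: a context manager that collects work while the block runs and flushes it once on exit. The names below (suspended, flush, the "search-index" key) are illustrative, not the library's own.

from collections import defaultdict
from contextlib import contextmanager
from itertools import chain

@contextmanager
def suspended(flush):
    # Collect lists of items per key, then flush each key once at the end.
    queue = defaultdict(list)
    try:
        yield queue
    finally:
        for key, batches in queue.items():
            flush(key, list(chain(*batches)))

# Usage: queue work instead of acting on it immediately.
with suspended(lambda key, items: print(key, items)) as q:
    q["search-index"].append(["doc1", "doc2"])
    q["search-index"].append(["doc3"])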
383,614
def recv_raw(self, x=MTU):
    pkt, sa_ll = self.ins.recvfrom(x)
    if self.outs and sa_ll[2] == socket.PACKET_OUTGOING:
        return None, None, None
    ts = get_last_packet_timestamp(self.ins)
    return self.LL, pkt, ts
Receives a packet, then returns a tuple containing (cls, pkt_data, time)
383,615
def GET(self, token=None, **kwargs): s event stream .. http:get:: /ws/(token) :query format_events: The event stream will undergo server-side formatting if the ``format_events`` URL parameter is included in the request. This can be useful to avoid formatting on the client-side: .. code-block:: bash curl -NsS <...snip...> localhost:8000/ws?format_events :reqheader X-Auth-Token: an authentication token from :py:class:`~Login`. :status 101: switching to the websockets protocol :status 401: |401| :status 406: |406| **Example request:** :: curl -NsSk \\ -H \\ -H \\ -H \\ -H \\ -H \\ -H \\ -H "$(echo -n $RANDOM | base64)" \\ localhost:8000/ws .. code-block:: text GET /ws HTTP/1.1 Connection: Upgrade Upgrade: websocket Host: localhost:8000 Origin: https://localhost:8000 Sec-WebSocket-Version: 13 Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA== X-Auth-Token: ffedf49d **Example response**: .. code-block:: text HTTP/1.1 101 Switching Protocols Upgrade: websocket Connection: Upgrade Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE= Sec-WebSocket-Version: 13 An authentication token **may optionally** be passed as part of the URL for browsers that cannot be configured to send the authentication header or cookie: .. code-block:: bash curl -NsS <...snip...> localhost:8000/ws/ffedf49d The event stream can be easily consumed via JavaScript: .. code-block:: javascript // Note, you must be authenticated! var source = new Websocket(); source.onerror = function(e) { console.debug(, e); }; source.onmessage = function(e) { console.debug(e.data); }; source.send() source.close(); Or via Python, using the Python module `websocket-client <https://pypi.python.org/pypi/websocket-client/>`_ for example. .. code-block:: python from websocket import create_connection ws = create_connection() ws.send() while listening_to_events: print ws.recv() ws.close() Above examples show how to establish a websocket connection to Salt and activating real time updates from Salt if token: orig_session, _ = cherrypy.session.cache.get(token, ({}, None)) salt_token = orig_session.get() else: salt_token = cherrypy.session.get() if not salt_token or not self.auth.get_tok(salt_token): raise cherrypy.HTTPError(401) cherrypy.session.release_lock() handler = cherrypy.request.ws_handler def event_stream(handler, pipe): pipe.recv() event = salt.utils.event.get_event( , sock_dir=self.opts[], transport=self.opts[], opts=self.opts, listen=True) stream = event.iter_events(full=True, auto_reconnect=True) SaltInfo = event_processor.SaltInfo(handler) def signal_handler(signal, frame): os._exit(0) signal.signal(signal.SIGTERM, signal_handler) while True: data = next(stream) if data: try: if in kwargs: SaltInfo.process(data, salt_token, self.opts) else: handler.send( str().format(salt.utils.json.dumps(data)), False ) except UnicodeDecodeError: logger.error( "Error: Salt event has non UTF-8 data:\n%s", data) parent_pipe, child_pipe = Pipe() handler.pipe = parent_pipe handler.opts = self.opts proc = Process(target=event_stream, args=(handler, child_pipe)) proc.start()
Return a websocket connection of Salt's event stream .. http:get:: /ws/(token) :query format_events: The event stream will undergo server-side formatting if the ``format_events`` URL parameter is included in the request. This can be useful to avoid formatting on the client-side: .. code-block:: bash curl -NsS <...snip...> localhost:8000/ws?format_events :reqheader X-Auth-Token: an authentication token from :py:class:`~Login`. :status 101: switching to the websockets protocol :status 401: |401| :status 406: |406| **Example request:** :: curl -NsSk \\ -H 'X-Auth-Token: ffedf49d' \\ -H 'Host: localhost:8000' \\ -H 'Connection: Upgrade' \\ -H 'Upgrade: websocket' \\ -H 'Origin: https://localhost:8000' \\ -H 'Sec-WebSocket-Version: 13' \\ -H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\ localhost:8000/ws .. code-block:: text GET /ws HTTP/1.1 Connection: Upgrade Upgrade: websocket Host: localhost:8000 Origin: https://localhost:8000 Sec-WebSocket-Version: 13 Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA== X-Auth-Token: ffedf49d **Example response**: .. code-block:: text HTTP/1.1 101 Switching Protocols Upgrade: websocket Connection: Upgrade Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE= Sec-WebSocket-Version: 13 An authentication token **may optionally** be passed as part of the URL for browsers that cannot be configured to send the authentication header or cookie: .. code-block:: bash curl -NsS <...snip...> localhost:8000/ws/ffedf49d The event stream can be easily consumed via JavaScript: .. code-block:: javascript // Note, you must be authenticated! var source = new Websocket('ws://localhost:8000/ws/d0ce6c1a'); source.onerror = function(e) { console.debug('error!', e); }; source.onmessage = function(e) { console.debug(e.data); }; source.send('websocket client ready') source.close(); Or via Python, using the Python module `websocket-client <https://pypi.python.org/pypi/websocket-client/>`_ for example. .. code-block:: python # Note, you must be authenticated! from websocket import create_connection ws = create_connection('ws://localhost:8000/ws/d0ce6c1a') ws.send('websocket client ready') # Look at https://pypi.python.org/pypi/websocket-client/ for more # examples. while listening_to_events: print ws.recv() ws.close() Above examples show how to establish a websocket connection to Salt and activating real time updates from Salt's event stream by signaling ``websocket client ready``.
383,616
def uniq2orderipix(uniq):
    order = ((np.log2(uniq // 4)) // 2)
    order = order.astype(int)
    ipix = uniq - 4 * (4 ** order)
    return order, ipix
convert a HEALPix pixel coded as a NUNIQ number to a (norder, ipix) tuple
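A quick worked check of the NUNIQ relation uniq = 4 * 4**order + ipix, using an assumed sample value (order 3, ipix 100, so uniq = 256 + 100 = 356):

import numpy as np

uniq = np.array([356])
order = (np.log2(uniq // 4) // 2).astype(int)
ipix = uniq - 4 * (4 ** order)
print(order, ipix)   # [3] [100]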
383,617
def reindex(clear: bool, progressive: bool, batch_size: int):
    reindexer = Reindexer(clear, progressive, batch_size)
    reindexer.reindex_all()
Reindex all content; optionally clear index before. All is done in asingle transaction by default. :param clear: clear index content. :param progressive: don't run in a single transaction. :param batch_size: number of documents to process before writing to the index. Unused in single transaction mode. If `None` then all documents of same content type are written at once.
383,618
def create(name, url, backend, frequency=None, owner=None, org=None): log.info(, name) source = actions.create_source(name, url, backend, frequency=frequency, owner=owner, organization=org) log.info(.format(source))
Create a new harvest source
383,619
def create_instance(self, body, project_id=None):
    response = self.get_conn().instances().insert(
        project=project_id,
        body=body
    ).execute(num_retries=self.num_retries)
    operation_name = response["name"]
    self._wait_for_operation_to_complete(project_id=project_id,
                                         operation_name=operation_name)
Creates a new Cloud SQL instance. :param body: Body required by the Cloud SQL insert API, as described in https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances/insert#request-body. :type body: dict :param project_id: Project ID of the project that contains the instance. If set to None or missing, the default project_id from the GCP connection is used. :type project_id: str :return: None
383,620
def deltran(tree, feature):
    ps_feature = get_personalized_feature_name(feature, PARS_STATES)
    for node in tree.traverse():
        if not node.is_root():
            node_states = getattr(node, ps_feature)
            parent_states = getattr(node.up, ps_feature)
            state_intersection = node_states & parent_states
            if state_intersection:
                node.add_feature(ps_feature, state_intersection)
DELTRAN (delayed transformation) (Swofford & Maddison, 1987) aims at reducing the number of ambiguities in the parsimonious result. DELTRAN makes the changes as close as possible to the leaves, hence prioritizing parallel mutations. DELTRAN is performed after DOWNPASS. if N is not a root: P <- parent(N) if intersection(S(N), S(P)) is not empty: S(N) <- intersection(S(N), S(P)) if N is not a tip: L, R <- left and right children of N DELTRAN(L) DELTRAN(R) :param tree: ete3.Tree, the tree of interest :param feature: str, character for which the parsimonious states are reconstructed :return: void, modifies get_personalized_feature_name(feature, PARS_STATES) feature of the tree nodes
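A self-contained sketch of the same intersection rule on a plain dict-based tree, independent of ete3. The node layout and the state sets are made up for illustration; parents are visited before their children, as DELTRAN requires.

def deltran_states(node, parent_states=None):
    # If the node's states overlap the parent's, keep only the overlap.
    if parent_states is not None:
        common = node["states"] & parent_states
        if common:
            node["states"] = common
    for child in node["children"]:
        deltran_states(child, node["states"])

tree = {"states": {"A", "C"}, "children": [
    {"states": {"A", "T"}, "children": []},
    {"states": {"C"}, "children": []},
]}
deltran_states(tree)
print([n["states"] for n in tree["children"]])   # [{'A'}, {'C'}]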
383,621
def linkify_h_by_hg(self, hostgroups):
    for host in self:
        new_hostgroups = []
        if hasattr(host, 'hostgroups') and host.hostgroups != []:
            hgs = [n.strip() for n in host.hostgroups if n.strip()]
            for hg_name in hgs:
                hostgroup = hostgroups.find_by_name(hg_name)
                if hostgroup is not None:
                    new_hostgroups.append(hostgroup.uuid)
                else:
                    err = ("the hostgroup '%s' of the host '%s' is "
                           "unknown" % (hg_name, host.host_name))
                    host.add_error(err)
        host.hostgroups = new_hostgroups
Link hosts with hostgroups :param hostgroups: hostgroups object to link with :type hostgroups: alignak.objects.hostgroup.Hostgroups :return: None
383,622
def transform_file_output(result): from collections import OrderedDict new_result = [] iterable = result if isinstance(result, list) else result.get(, result) for item in iterable: new_entry = OrderedDict() entity_type = item[] is_dir = entity_type == new_entry[] = item[] + if is_dir else item[] new_entry[] = if is_dir else item[][] new_entry[] = item[] new_entry[] = item[][] or new_result.append(new_entry) return sorted(new_result, key=lambda k: k[])
Transform to convert SDK file/dir list output to something that more clearly distinguishes between files and directories.
383,623
def get_display(display):
    modname = _display_mods.get(platform, _default_display_mod)
    mod = _relative_import(modname)
    return mod.get_display(display)
dname, protocol, host, dno, screen = get_display(display) Parse DISPLAY into its components. If DISPLAY is None, use the default display. The return values are: DNAME -- the full display name (string) PROTOCOL -- the protocol to use (None if automatic) HOST -- the host name (string, possibly empty) DNO -- display number (integer) SCREEN -- default screen number (integer)
383,624
def get_pstats(pstatfile, n):
    with tempfile.TemporaryFile(mode='w+') as stream:
        ps = pstats.Stats(pstatfile, stream=stream)
        ps.sort_stats('cumtime')
        ps.print_stats(n)
        stream.seek(0)
        lines = list(stream)
        # Skip the preamble; the stats table starts at the "ncalls" header line.
        for i, line in enumerate(lines):
            if line.startswith('   ncalls'):
                break
        data = []
        for line in lines[i + 2:]:
            columns = line.split()
            if len(columns) == 6:
                data.append(PStatData(*columns))
        rows = [(rec.ncalls, rec.cumtime, rec.path) for rec in data]
        return views.rst_table(rows, header='ncalls cumtime path'.split())
Return profiling information as an RST table. :param pstatfile: path to a .pstat file :param n: the maximum number of stats to retrieve
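For context, a .pstat file like the one this function consumes can be produced and inspected with the standard library alone:

import cProfile
import pstats

# Profile a small expression and dump the stats to disk.
cProfile.run("sum(i * i for i in range(10**5))", "demo.pstat")

# Read it back the same way get_pstats does, sorted by cumulative time.
stats = pstats.Stats("demo.pstat")
stats.sort_stats("cumtime").print_stats(5)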
383,625
def authenticate_with_certificate(reactor, base_url, client_cert, client_key, ca_cert):
    return authenticate_with_certificate_chain(
        reactor, base_url, [client_cert], client_key, ca_cert,
    )
See ``authenticate_with_certificate_chain``. :param pem.Certificate client_cert: The client certificate to use.
383,626
def sio(mag_file, dir_path=".", input_dir_path="", meas_file="measurements.txt", spec_file="specimens.txt", samp_file="samples.txt", site_file="sites.txt", loc_file="locations.txt", samp_infile="", institution="", syn=False, syntype="", instrument="", labfield=0, phi=0, theta=0, peakfield=0, specnum=0, samp_con=, location="unknown", lat="", lon="", noave=False, codelist="", cooling_rates="", coil=, timezone="UTC", user=""): methcode = "LP-NO" pTRM, MD = 0, 0 dec = [315, 225, 180, 135, 45, 90, 270, 270, 270, 90, 180, 180, 0, 0, 0] inc = [0, 0, 0, 0, 0, -45, -45, 0, 45, 45, 45, -45, -90, -45, 45] tdec = [0, 90, 0, 180, 270, 0, 0, 90, 0] tinc = [0, 0, 90, 0, 0, -90, 0, 0, 90] missing = 1 demag = "N" citations = fmt = Samps = [] trm = 0 irm = 0 input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path) meas_file = pmag.resolve_file_name(meas_file, output_dir_path) spec_file = pmag.resolve_file_name(spec_file, output_dir_path) samp_file = pmag.resolve_file_name(samp_file, output_dir_path) site_file = pmag.resolve_file_name(site_file, output_dir_path) loc_file = pmag.resolve_file_name(loc_file, output_dir_path) mag_file = pmag.resolve_file_name(mag_file, input_dir_path) labfield = float(labfield) * 1e-6 phi = float(phi) theta = float(theta) peakfield = float(peakfield) * 1e-3 specnum = int(specnum) samp_con = str(samp_con) if samp_infile: Samps, file_type = pmag.magic_read(samp_infile) if coil: coil = str(coil) methcode = "LP-IRM" irmunits = "V" if coil not in ["1", "2", "3"]: print(__doc__) print() return False, .format(coil) if mag_file: lines = pmag.open_file(mag_file) if not lines: print("you must provide a valid mag_file") return False, "you must provide a valid mag_file" if not mag_file: print(__doc__) print("mag_file field is required option") return False, "mag_file field is required option" if specnum != 0: specnum = -specnum if "4" == samp_con[0]: if "-" not in samp_con: print( "naming convention option [4] must be in form 4-Z where Z is an integer") print() return False, "naming convention option [4] must be in form 4-Z where Z is an integer" else: Z = samp_con.split("-")[1] samp_con = "4" if "7" == samp_con[0]: if "-" not in samp_con: print("option [7] must be in form 7-Z where Z is an integer") return False, "option [7] must be in form 7-Z where Z is an integer" else: Z = samp_con.split("-")[1] samp_con = "7" else: Z = 0 if codelist: codes = codelist.split() if "AF" in codes: demag = if not in sys.argv: methcode = "LT-AF-Z" if in sys.argv: methcode = "LT-AF-I" if "T" in codes: demag = "T" if not in sys.argv: methcode = "LT-T-Z" if in sys.argv: methcode = "LT-T-I" if "I" in codes: methcode = "LP-IRM" irmunits = "mT" if "I3d" in codes: methcode = "LT-T-Z:LP-IRM-3D" if "S" in codes: demag = "S" methcode = "LP-PI-TRM:LP-PI-ALT-AFARM" trm_labfield = labfield ans = input("DC lab field for ARM step: [50uT] ") if ans == "": arm_labfield = 50e-6 else: arm_labfield = float(ans)*1e-6 ans = input("temperature for total trm step: [600 C] ") if ans == "": trm_peakT = 600+273 else: trm_peakT = float(ans)+273 if "G" in codes: methcode = "LT-AF-G" if "D" in codes: methcode = "LT-AF-D" if "TRM" in codes: demag = "T" trm = 1 if "CR" in codes: demag = "T" cooling_rate_experiment = 1 cooling_rates_list = cooling_rates.split() if demag == "T" and "ANI" in codes: methcode = "LP-AN-TRM" if demag == "T" and "CR" in codes: methcode = "LP-CR-TRM" if demag == "AF" and "ANI" in codes: methcode = "LP-AN-ARM" if labfield == 0: labfield = 50e-6 if peakfield == 0: peakfield = .180 MeasRecs, 
SpecRecs, SampRecs, SiteRecs, LocRecs = [], [], [], [], [] version_num = pmag.get_version() for line in lines: instcode = "" if len(line) > 2: MeasRec, SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {}, {} MeasRec[] = version_num MeasRec["description"] = "" MeasRec["treat_temp"] = % (273) MeasRec["meas_temp"] = % (273) MeasRec["treat_ac_field"] = MeasRec["treat_dc_field"] = MeasRec["treat_dc_field_phi"] = MeasRec["treat_dc_field_theta"] = meas_type = "LT-NO" rec = line.split() try: float(rec[0]) print("No specimen name for line lines.index(line)) continue except ValueError: pass if rec[1] == ".00": rec[1] = "0.00" treat = rec[1].split() if methcode == "LP-IRM": if irmunits == : labfield = float(treat[0])*1e-3 else: labfield = pmag.getfield(irmunits, coil, treat[0]) if rec[1][0] != "-": phi, theta = 0., 90. else: phi, theta = 0., -90. meas_type = "LT-IRM" MeasRec["treat_dc_field"] = % (labfield) MeasRec["treat_dc_field_phi"] = % (phi) MeasRec["treat_dc_field_theta"] = % (theta) if len(rec) > 6: code1 = rec[6].split() if len(code1) == 2: missing = 0 code2 = code1[0].split() code3 = rec[7].split() yy = int(code2[2]) if yy < 90: yyyy = str(2000+yy) else: yyyy = str(1900+yy) mm = int(code2[0]) if mm < 10: mm = "0"+str(mm) else: mm = str(mm) dd = int(code2[1]) if dd < 10: dd = "0"+str(dd) else: dd = str(dd) time = code1[1].split() hh = int(time[0]) if code3[0] == "PM": hh = hh+12 if hh < 10: hh = "0"+str(hh) else: hh = str(hh) min = int(time[1]) if min < 10: min = "0"+str(min) else: min = str(min) dt = yyyy+":"+mm+":"+dd+":"+hh+":"+min+":00" local = pytz.timezone(timezone) naive = datetime.datetime.strptime(dt, "%Y:%m:%d:%H:%M:%S") local_dt = local.localize(naive, is_dst=None) utc_dt = local_dt.astimezone(pytz.utc) MeasRec["timestamp"] = utc_dt.strftime( "%Y-%m-%dT%H:%M:%S")+"Z" if instrument == "": if code3[1][0] == : instcode = if code3[1][0] == : instcode = else: instcode = MeasRec["meas_n_orient"] = code3[1][2] elif len(code1) > 2: if "LP-AN-ARM" not in methcode: labfield = 0 fmt = date = code1[0].split() yy = int(date[2]) if yy < 90: yyyy = str(2000+yy) else: yyyy = str(1900+yy) mm = int(date[0]) if mm < 10: mm = "0"+str(mm) else: mm = str(mm) dd = int(date[1]) if dd < 10: dd = "0"+str(dd) else: dd = str(dd) time = code1[1].split() hh = int(time[0]) if hh < 10: hh = "0"+str(hh) else: hh = str(hh) min = int(time[1]) if min < 10: min = "0"+str(min) else: min = str(min) dt = yyyy+":"+mm+":"+dd+":"+hh+":"+min+":00" local = pytz.timezone(timezone) naive = datetime.datetime.strptime(dt, "%Y:%m:%d:%H:%M:%S") local_dt = local.localize(naive, is_dst=None) utc_dt = local_dt.astimezone(pytz.utc) MeasRec["timestamp"] = utc_dt.strftime( "%Y-%m-%dT%H:%M:%S")+"Z" if instrument == "": if code1[6][0] == : instcode = if code1[6][0] == : instcode = else: instcode = if len(code1) > 1: MeasRec["meas_n_orient"] = code1[6][2] else: MeasRec["meas_n_orient"] = code1[7] if user == "": user = code1[5] if code1[2][-1].upper() == : demag = "T" if code1[4] == and float(code1[3]) != 0. and "LP-AN-ARM" not in methcode: labfield = float(code1[3])*1e-6 if code1[2] == and methcode != "LP-IRM": demag = "AF" if code1[4] == and float(code1[3]) != 0.: labfield = float(code1[3])*1e-6 if code1[4] == and labfield != 0. and meas_type != "LT-IRM": phi, theta = 0., -90. if demag == "T": meas_type = "LT-T-I" if demag == "AF": meas_type = "LT-AF-I" MeasRec["treat_dc_field"] = % (labfield) MeasRec["treat_dc_field_phi"] = % (phi) MeasRec["treat_dc_field_theta"] = % (theta) if code1[4] == or labfield == 0. 
and meas_type != "LT-IRM": if demag == : meas_type = "LT-T-Z" if demag == "AF": meas_type = "LT-AF-Z" MeasRec["treat_dc_field"] = if not syn: specimen = rec[0] MeasRec["specimen"] = specimen if specnum != 0: sample = rec[0][:specnum] else: sample = rec[0] if samp_infile and Samps: samp = pmag.get_dictitem(Samps, , sample, ) if len(samp) > 0: location = samp[0]["location"] site = samp[0]["site"] else: location = site = else: site = pmag.parse_site(sample, samp_con, Z) if location != and location not in [x[] if in list(x.keys()) else for x in LocRecs]: LocRec[] = location LocRec[] = lat LocRec[] = lat LocRec[] = lon LocRec[] = lon LocRecs.append(LocRec) if site != and site not in [x[] if in list(x.keys()) else for x in SiteRecs]: SiteRec[] = location SiteRec[] = site SiteRec[] = lat SiteRec[] = lon SiteRecs.append(SiteRec) if sample != and sample not in [x[] if in list(x.keys()) else for x in SampRecs]: SampRec[] = site SampRec[] = sample SampRecs.append(SampRec) if specimen != and specimen not in [x[] if in list(x.keys()) else for x in SpecRecs]: SpecRec["specimen"] = specimen SpecRec[] = sample SpecRecs.append(SpecRec) else: specimen = rec[0] MeasRec["specimen"] = specimen if specnum != 0: sample = rec[0][:specnum] else: sample = rec[0] site = pmag.parse_site(sample, samp_con, Z) if location != and location not in [x[] if in list(x.keys()) else for x in LocRecs]: LocRec[] = location LocRec[] = lat LocRec[] = lat LocRec[] = lon LocRec[] = lon LocRecs.append(LocRec) if site != and site not in [x[] if in list(x.keys()) else for x in SiteRecs]: SiteRec[] = location SiteRec[] = site SiteRec[] = lat SiteRec[] = lon SiteRecs.append(SiteRec) if sample != and sample not in [x[] if in list(x.keys()) else for x in SampRecs]: SampRec[] = site SampRec[] = sample SampRecs.append(SampRec) if specimen != and specimen not in [x[] if in list(x.keys()) else for x in SpecRecs]: SpecRec["specimen"] = specimen SpecRec[] = sample SpecRecs.append(SpecRec) SampRec["institution"] = institution SampRec["material_type"] = syntype if float(rec[1]) == 0: pass elif demag == "AF": if methcode != "LP-AN-ARM": MeasRec["treat_ac_field"] = % ( float(rec[1])*1e-3) if meas_type == "LT-AF-Z": MeasRec["treat_dc_field"] = else: if treat[1][0] == : meas_type = "LT-AF-Z:LP-AN-ARM:" MeasRec["treat_ac_field"] = % ( peakfield) MeasRec["treat_dc_field"] = % (0) if labfield != 0 and methcode != "LP-AN-ARM": print( "Warning - inconsistency in mag file with lab field - overriding file with 0") else: meas_type = "LT-AF-I:LP-AN-ARM" ipos = int(treat[0])-1 MeasRec["treat_dc_field_phi"] = % (dec[ipos]) MeasRec["treat_dc_field_theta"] = % (inc[ipos]) MeasRec["treat_dc_field"] = % (labfield) MeasRec["treat_ac_field"] = % ( peakfield) elif demag == "T" and methcode == "LP-AN-TRM": MeasRec["treat_temp"] = % ( float(treat[0])+273.) 
if treat[1][0] == : meas_type = "LT-T-Z:LP-AN-TRM" MeasRec["treat_dc_field"] = % (0) MeasRec["treat_dc_field_phi"] = MeasRec["treat_dc_field_theta"] = else: MeasRec["treat_dc_field"] = % (labfield) if treat[1][0] == : meas_type = "LT-PTRM-I:LP-AN-TRM" else: meas_type = "LT-T-I:LP-AN-TRM" ipos_code = int(treat[1][0])-1 DEC = float(rec[4]) INC = float(rec[5]) if INC < 45 and INC > -45: if DEC > 315 or DEC < 45: ipos_guess = 0 if DEC > 45 and DEC < 135: ipos_guess = 1 if DEC > 135 and DEC < 225: ipos_guess = 3 if DEC > 225 and DEC < 315: ipos_guess = 4 else: if INC > 45: ipos_guess = 2 if INC < -45: ipos_guess = 5 ipos = ipos_guess MeasRec["treat_dc_field_phi"] = % (tdec[ipos]) MeasRec["treat_dc_field_theta"] = % (tinc[ipos]) if ipos_guess != ipos_code and treat[1][0] != : print("-E- ERROR: check specimen %s step %s, ATRM measurements, coding does not match the direction of the lab field!" % (rec[0], ".".join(list(treat)))) elif demag == "S": if treat[1][1] == : if int(treat[0]) != 0: MeasRec["treat_ac_field"] = % ( float(treat[0])*1e-3) MeasRec["treat_dc_field"] = meas_type = "LT-AF-Z" else: meas_type = "LT-NO" MeasRec["treat_ac_field"] = MeasRec["treat_dc_field"] = elif treat[1][1] == : if int(treat[0]) == 0: MeasRec["treat_ac_field"] = % ( peakfield) MeasRec["treat_dc_field"] = % (arm_labfield) MeasRec["treat_dc_field_phi"] = % (phi) MeasRec["treat_dc_field_theta"] = % (theta) meas_type = "LT-AF-I" else: MeasRec["treat_ac_field"] = % ( float(treat[0])*1e-3) MeasRec["treat_dc_field"] = meas_type = "LT-AF-Z" elif treat[1][1] == : if int(treat[0]) == 0: MeasRec["treat_ac_field"] = MeasRec["treat_dc_field"] = % (trm_labfield) MeasRec["treat_dc_field_phi"] = % (phi) MeasRec["treat_dc_field_theta"] = % (theta) MeasRec["treat_temp"] = % (trm_peakT) meas_type = "LT-T-I" else: MeasRec["treat_ac_field"] = % ( float(treat[0])*1e-3) MeasRec["treat_dc_field"] = meas_type = "LT-AF-Z" elif treat[1][1] == : if int(treat[0]) == 0: MeasRec["treat_ac_field"] = % ( peakfield) MeasRec["treat_dc_field"] = % (arm_labfield) MeasRec["treat_dc_field_phi"] = % (phi) MeasRec["treat_dc_field_theta"] = % (theta) meas_type = "LT-AF-I" else: MeasRec["treat_ac_field"] = % ( float(treat[0])*1e-3) MeasRec["treat_dc_field"] = meas_type = "LT-AF-Z" elif demag == "T" and methcode == "LP-CR-TRM": MeasRec["treat_temp"] = % ( float(treat[0])+273.) if treat[1][0] == : meas_type = "LT-T-Z:LP-CR-TRM" MeasRec["treat_dc_field"] = % (0) MeasRec["treat_dc_field_phi"] = MeasRec["treat_dc_field_theta"] = else: MeasRec["treat_dc_field"] = % (labfield) if treat[1][0] == : meas_type = "LT-PTRM-I:LP-CR-TRM" else: meas_type = "LT-T-I:LP-CR-TRM" MeasRec["treat_dc_field_phi"] = % ( phi) MeasRec["treat_dc_field_theta"] = % ( theta) indx = int(treat[1][0])-1 if indx == 6: cooling_time = cooling_rates_list[-1] else: cooling_time = cooling_rates_list[indx] MeasRec["description"] = "cooling_rate" + \ ":"+cooling_time+":"+"K/min" noave = 1 elif demag != : if len(treat) == 1: treat.append() MeasRec["treat_temp"] = % ( float(treat[0])+273.) 
if trm == 0: if treat[1][0] == : meas_type = "LT-T-Z" else: MeasRec["treat_dc_field"] = % (labfield) MeasRec["treat_dc_field_phi"] = % ( phi) MeasRec["treat_dc_field_theta"] = % ( theta) if treat[1][0] == : meas_type = "LT-T-I" if treat[1][0] == : meas_type = "LT-PTRM-I" pTRM = 1 if treat[1][0] == : MeasRec["treat_dc_field"] = meas_type = "LT-PTRM-MD" else: labfield = float(treat[1])*1e-6 MeasRec["treat_dc_field"] = % (labfield) MeasRec["treat_dc_field_phi"] = % ( phi) MeasRec["treat_dc_field_theta"] = % ( theta) meas_type = "LT-T-I:LP-TRM" MeasRec["dir_csd"] = rec[2] MeasRec["magn_moment"] = % ( float(rec[3])*1e-3) MeasRec["dir_dec"] = rec[4] MeasRec["dir_inc"] = rec[5] MeasRec["instrument_codes"] = instcode MeasRec["analysts"] = user MeasRec["citations"] = citations if "LP-IRM-3D" in methcode: meas_type = methcode MeasRec["method_codes"] = meas_type MeasRec["quality"] = if in rec[0]: MeasRec["standard"] = else: MeasRec["standard"] = MeasRec["treat_step_num"] = 0 MeasRecs.append(MeasRec) con = cb.Contribution(output_dir_path, read_tables=[]) con.add_magic_table_from_data(dtype=, data=SpecRecs) con.add_magic_table_from_data(dtype=, data=SampRecs) con.add_magic_table_from_data(dtype=, data=SiteRecs) con.add_magic_table_from_data(dtype=, data=LocRecs) MeasOuts = pmag.measurements_methods3(MeasRecs, noave) con.add_magic_table_from_data(dtype=, data=MeasOuts) con.tables[].write_magic_file(custom_name=spec_file,dir_path=dir_path) con.tables[].write_magic_file(custom_name=samp_file,dir_path=dir_path) con.tables[].write_magic_file(custom_name=site_file,dir_path=dir_path) con.tables[].write_magic_file(custom_name=loc_file,dir_path=dir_path) meas_file = con.tables[].write_magic_file( custom_name=meas_file,dir_path=dir_path) return True, meas_file
converts Scripps Institution of Oceanography measurement files to MagIC data base model 3.0 Parameters _________ magfile : input measurement file dir_path : output directory path, default "." input_dir_path : input file directory IF different from dir_path, default "" meas_file : output file measurement file name, default "measurements.txt" spec_file : output file specimen file name, default "specimens.txt" samp_file : output file sample file name, default "samples.tt" site_file : output file site file name, default "sites.txt" loc_file : output file location file name, default "locations.txt" samp_infile : output file to append to, default "" syn : if True, this is a synthetic specimen, default False syntype : sample material type, default "" instrument : instrument on which the measurements were made (e.g., "SIO-2G"), default "" labfield : lab field in microtesla for TRM, default 0 phi, theta : direction of lab field [-1,-1 for anisotropy experiments], default 0, 0 peakfield : peak af field in mT for ARM, default 0 specnum : number of terminal characters distinguishing specimen from sample, default 0 samp_con : sample/site naming convention, default '1' "1" XXXXY: where XXXX is an arbitr[ary length site designation and Y is the single character sample designation. e.g., TG001a is the first sample from site TG001. [default] "2" XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length) "3" XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length) "4-Z" XXXX[YYY]: YYY is sample designation with Z characters from site XXX "5" site name same as sample "6" site is entered under a separate column NOT CURRENTLY SUPPORTED "7-Z" [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY NB: all others you will have to customize your self or e-mail [email protected] for help. "8" synthetic - has no site name "9" ODP naming convention location : location name for study, default "unknown" lat : latitude of sites, default "" lon : longitude of sites, default "" noave : boolean, if False, average replicates, default False codelist : colon delimited string of lab protocols (e.g., codelist="AF"), default "" AF: af demag T: thermal including thellier but not trm acquisition S: Shaw method I: IRM (acquisition) N: NRM only TRM: trm acquisition ANI: anisotropy experiment D: double AF demag G: triple AF demag (GRM protocol) CR: cooling rate experiment. The treatment coding of the measurement file should be: XXX.00,XXX.10, XXX.20 ...XX.70 etc. (XXX.00 is optional) where XXX in the temperature and .10,.20... are running numbers of the cooling rates steps. XXX.00 is optional zerofield baseline. XXX.70 is alteration check. syntax in sio_magic is: -LP CR xxx,yyy,zzz,..... xxx -A where xxx, yyy, zzz...xxx are cooling time in [K/minutes], seperated by comma, ordered at the same order as XXX.10,XXX.20 ...XX.70 if you use a zerofield step then no need to specify the cooling rate for the zerofield It is important to add to the command line the -A option so the measurements will not be averaged. 
But users need to make sure that there are no duplicate measurements in the file. cooling_rates : cooling rate in K/sec for cooling rate dependence studies (K/minutes), in a comma separated list with one value per cooling rate step (e.g., "43.6,1.3,43.6"; fast: 43.6 K/min, slow: 1.3 K/min) coil : 1, 2, or 3 -- units of IRM field in volts using ASC coil #1, 2 or 3 timezone : timezone of date/time string in comment string, default "UTC" user : analyst, default "" Effects _______ creates MagIC formatted tables
383,627
def get_timerange_formatted(self, now):
    later = now + datetime.timedelta(days=self.days)
    return now.isoformat(), later.isoformat()
Return two ISO8601 formatted date strings, one for timeMin, the other for timeMax (to be consumed by get_events)
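A standalone sketch of the same timeMin/timeMax construction; the 7-day window here is an arbitrary example value:

import datetime

now = datetime.datetime.now(datetime.timezone.utc)
later = now + datetime.timedelta(days=7)
time_min, time_max = now.isoformat(), later.isoformat()
print(time_min, time_max)   # two ISO8601 strings, 7 days apart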
383,628
def list_traces( self, project_id, view=None, page_size=None, start_time=None, end_time=None, filter_=None, order_by=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): if "list_traces" not in self._inner_api_calls: self._inner_api_calls[ "list_traces" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_traces, default_retry=self._method_configs["ListTraces"].retry, default_timeout=self._method_configs["ListTraces"].timeout, client_info=self._client_info, ) request = trace_pb2.ListTracesRequest( project_id=project_id, view=view, page_size=page_size, start_time=start_time, end_time=end_time, filter=filter_, order_by=order_by, ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("project_id", project_id)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls["list_traces"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field="traces", request_token_field="page_token", response_token_field="next_page_token", ) return iterator
Returns of a list of traces that match the specified filter conditions. Example: >>> from google.cloud import trace_v1 >>> >>> client = trace_v1.TraceServiceClient() >>> >>> # TODO: Initialize `project_id`: >>> project_id = '' >>> >>> # Iterate over all results >>> for element in client.list_traces(project_id): ... # process element ... pass >>> >>> >>> # Alternatively: >>> >>> # Iterate over results one page at a time >>> for page in client.list_traces(project_id).pages: ... for element in page: ... # process element ... pass Args: project_id (str): ID of the Cloud project where the trace data is stored. view (~google.cloud.trace_v1.types.ViewType): Type of data returned for traces in the list. Optional. Default is ``MINIMAL``. page_size (int): The maximum number of resources contained in the underlying API response. If page streaming is performed per- resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number of resources in a page. start_time (Union[dict, ~google.cloud.trace_v1.types.Timestamp]): Start of the time interval (inclusive) during which the trace data was collected from the application. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.trace_v1.types.Timestamp` end_time (Union[dict, ~google.cloud.trace_v1.types.Timestamp]): End of the time interval (inclusive) during which the trace data was collected from the application. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.trace_v1.types.Timestamp` filter_ (str): An optional filter against labels for the request. By default, searches use prefix matching. To specify exact match, prepend a plus symbol (``+``) to the search term. Multiple terms are ANDed. Syntax: - ``root:NAME_PREFIX`` or ``NAME_PREFIX``: Return traces where any root span starts with ``NAME_PREFIX``. - ``+root:NAME`` or ``+NAME``: Return traces where any root span's name is exactly ``NAME``. - ``span:NAME_PREFIX``: Return traces where any span starts with ``NAME_PREFIX``. - ``+span:NAME``: Return traces where any span's name is exactly ``NAME``. - ``latency:DURATION``: Return traces whose overall latency is greater or equal to than ``DURATION``. Accepted units are nanoseconds (``ns``), milliseconds (``ms``), and seconds (``s``). Default is ``ms``. For example, ``latency:24ms`` returns traces whose overall latency is greater than or equal to 24 milliseconds. - ``label:LABEL_KEY``: Return all traces containing the specified label key (exact match, case-sensitive) regardless of the key:value pair's value (including empty values). - ``LABEL_KEY:VALUE_PREFIX``: Return all traces containing the specified label key (exact match, case-sensitive) whose value starts with ``VALUE_PREFIX``. Both a key and a value must be specified. - ``+LABEL_KEY:VALUE``: Return all traces containing a key:value pair exactly matching the specified text. Both a key and a value must be specified. - ``method:VALUE``: Equivalent to ``/http/method:VALUE``. - ``url:VALUE``: Equivalent to ``/http/url:VALUE``. order_by (str): Field used to sort the returned traces. Optional. Can be one of the following: - ``trace_id`` - ``name`` (``name`` field of root span in the trace) - ``duration`` (difference between ``end_time`` and ``start_time`` fields of the root span) - ``start`` (``start_time`` field of the root span) Descending order can be specified by appending ``desc`` to the sort field (for example, ``name desc``). 
Only one sort field is permitted. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.gax.PageIterator` instance. By default, this is an iterable of :class:`~google.cloud.trace_v1.types.Trace` instances. This object can also be configured to iterate over the pages of the response through the `options` parameter. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
383,629
def get_values(self, obj):
    if is_exception(obj.node):
        label = r"\fb\f09%s\fn" % obj.title
    else:
        label = r"\fb%s\fn" % obj.title
    if obj.shape == "interface":
        shape = "ellipse"
    else:
        shape = "box"
    if not self.config.only_classnames:
        attrs = obj.attrs
        methods = [func.name for func in obj.methods]
        maxlen = max(len(name) for name in [obj.title] + methods + attrs)
        line = "_" * (maxlen + 2)
        label = r"%s\n\f%s" % (label, line)
        for attr in attrs:
            label = r"%s\n\f08%s" % (label, attr)
        if attrs:
            label = r"%s\n\f%s" % (label, line)
        for func in methods:
            label = r"%s\n\f10%s()" % (label, func)
    return dict(label=label, shape=shape)
get label and shape for classes. The label contains all attributes and methods
383,630
def flag_forgotten_entries(session, today=None): today = date.today() if today is None else today forgotten = ( session .query(Entry) .filter(Entry.time_out.is_(None)) .filter(Entry.forgot_sign_out.is_(False)) .filter(Entry.date < today) ) for entry in forgotten: e = sign_out(entry, forgot=True) logger.debug(.format(e)) session.add(e) session.commit()
Flag any entries from previous days where users forgot to sign out. :param session: SQLAlchemy session through which to access the database. :param today: (optional) The current date as a `datetime.date` object. Used for testing.
383,631
def step_forward_with_function(self, uv0fun, uv1fun, dt):
    dx, dy = self._rk4_integrate(self.x, self.y, uv0fun, uv1fun, dt)
    self.x = self._wrap_x(self.x + dx)
    self.y = self._wrap_y(self.y + dy)
Advance particles using a function to determine u and v. Parameters ---------- uv0fun : function Called like ``uv0fun(x,y)``. Should return the velocity field u, v at time t. uv1fun(x,y) : function Called like ``uv1fun(x,y)``. Should return the velocity field u, v at time t + dt. dt : number Timestep.
383,632
def scale_rows(A, v, copy=True): v = np.ravel(v) M, N = A.shape if not isspmatrix(A): raise ValueError() if M != len(v): raise ValueError() if copy: A = A.copy() A.data = np.asarray(A.data, dtype=upcast(A.dtype, v.dtype)) else: v = np.asarray(v, dtype=A.dtype) if isspmatrix_csr(A): csr_scale_rows(M, N, A.indptr, A.indices, A.data, v) elif isspmatrix_bsr(A): R, C = A.blocksize bsr_scale_rows(int(M/R), int(N/C), R, C, A.indptr, A.indices, np.ravel(A.data), v) elif isspmatrix_csc(A): pyamg.amg_core.csc_scale_rows(M, N, A.indptr, A.indices, A.data, v) else: fmt = A.format A = scale_rows(csr_matrix(A), v).asformat(fmt) return A
Scale the sparse rows of a matrix. Parameters ---------- A : sparse matrix Sparse matrix with M rows v : array_like Array of M scales copy : {True,False} - If copy=True, then the matrix is copied to a new and different return matrix (e.g. B=scale_rows(A,v)) - If copy=False, then the matrix is overwritten deeply (e.g. scale_rows(A,v,copy=False) overwrites A) Returns ------- A : sparse matrix Scaled sparse matrix in original format See Also -------- scipy.sparse._sparsetools.csr_scale_rows, scale_columns Notes ----- - if A is a csc_matrix, the transpose A.T is passed to scale_columns - if A is not csr, csc, or bsr, it is converted to csr and sent to scale_rows Examples -------- >>> import numpy as np >>> from scipy.sparse import spdiags >>> from pyamg.util.utils import scale_rows >>> n=5 >>> e = np.ones((n,1)).ravel() >>> data = [ -1*e, 2*e, -1*e ] >>> A = spdiags(data,[-1,0,1],n,n-1).tocsr() >>> B = scale_rows(A,5*np.ones((A.shape[0],1)))
383,633
def stack_memory(data, n_steps=2, delay=1, **kwargs):
    if n_steps < 1:
        raise ParameterError('n_steps must be a positive integer')
    if delay == 0:
        raise ParameterError('delay must be a non-zero integer')
    data = np.atleast_2d(data)
    t = data.shape[1]
    # Default to zero-padding via np.pad.
    kwargs.setdefault('mode', 'constant')
    if kwargs['mode'] == 'constant':
        kwargs.setdefault('constant_values', [0])
    if delay > 0:
        padding = (int((n_steps - 1) * delay), 0)
    else:
        padding = (0, int((n_steps - 1) * -delay))
    data = np.pad(data, [(0, 0), padding], **kwargs)
    history = data
    for i in range(1, n_steps):
        history = np.vstack([np.roll(data, -i * delay, axis=1), history])
    if delay > 0:
        history = history[:, :t]
    else:
        history = history[:, -t:]
    return np.ascontiguousarray(history.T).T
Short-term history embedding: vertically concatenate a data vector or matrix with delayed copies of itself. Each column `data[:, i]` is mapped to:: data[:, i] -> [data[:, i], data[:, i - delay], ... data[:, i - (n_steps-1)*delay]] For columns `i < (n_steps - 1) * delay` , the data will be padded. By default, the data is padded with zeros, but this behavior can be overridden by supplying additional keyword arguments which are passed to `np.pad()`. Parameters ---------- data : np.ndarray [shape=(t,) or (d, t)] Input data matrix. If `data` is a vector (`data.ndim == 1`), it will be interpreted as a row matrix and reshaped to `(1, t)`. n_steps : int > 0 [scalar] embedding dimension, the number of steps back in time to stack delay : int != 0 [scalar] the number of columns to step. Positive values embed from the past (previous columns). Negative values embed from the future (subsequent columns). kwargs : additional keyword arguments Additional arguments to pass to `np.pad`. Returns ------- data_history : np.ndarray [shape=(m * d, t)] data augmented with lagged copies of itself, where `m == n_steps - 1`. Notes ----- This function caches at level 40. Examples -------- Keep two steps (current and previous) >>> data = np.arange(-3, 3) >>> librosa.feature.stack_memory(data) array([[-3, -2, -1, 0, 1, 2], [ 0, -3, -2, -1, 0, 1]]) Or three steps >>> librosa.feature.stack_memory(data, n_steps=3) array([[-3, -2, -1, 0, 1, 2], [ 0, -3, -2, -1, 0, 1], [ 0, 0, -3, -2, -1, 0]]) Use reflection padding instead of zero-padding >>> librosa.feature.stack_memory(data, n_steps=3, mode='reflect') array([[-3, -2, -1, 0, 1, 2], [-2, -3, -2, -1, 0, 1], [-1, -2, -3, -2, -1, 0]]) Or pad with edge-values, and delay by 2 >>> librosa.feature.stack_memory(data, n_steps=3, delay=2, mode='edge') array([[-3, -2, -1, 0, 1, 2], [-3, -3, -3, -2, -1, 0], [-3, -3, -3, -3, -3, -2]]) Stack time-lagged beat-synchronous chroma edge padding >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> chroma = librosa.feature.chroma_stft(y=y, sr=sr) >>> tempo, beats = librosa.beat.beat_track(y=y, sr=sr, hop_length=512) >>> beats = librosa.util.fix_frames(beats, x_min=0, x_max=chroma.shape[1]) >>> chroma_sync = librosa.util.sync(chroma, beats) >>> chroma_lag = librosa.feature.stack_memory(chroma_sync, n_steps=3, ... mode='edge') Plot the result >>> import matplotlib.pyplot as plt >>> beat_times = librosa.frames_to_time(beats, sr=sr, hop_length=512) >>> librosa.display.specshow(chroma_lag, y_axis='chroma', x_axis='time', ... x_coords=beat_times) >>> plt.yticks([0, 12, 24], ['Lag=0', 'Lag=1', 'Lag=2']) >>> plt.title('Time-lagged chroma') >>> plt.colorbar() >>> plt.tight_layout()
383,634
def get_modscag_fn_list(dem_dt, tile_list=(, , , , ), pad_days=7): import re import requests from bs4 import BeautifulSoup auth = iolib.get_auth() pad_days = timedelta(days=pad_days) dt_list = timelib.dt_range(dem_dt-pad_days, dem_dt+pad_days+timedelta(1), timedelta(1)) outdir = os.path.join(datadir, ) if not os.path.exists(outdir): os.makedirs(outdir) out_vrt_fn_list = [] for dt in dt_list: out_vrt_fn = os.path.join(outdir, dt.strftime()) if os.path.exists(out_vrt_fn): vrt_ds = gdal.Open(out_vrt_fn) if np.all([np.any([tile in sub_fn for sub_fn in vrt_ds.GetFileList()]) for tile in tile_list]): out_vrt_fn_list.append(out_vrt_fn) continue modscag_fn_list = [] for tile in tile_list: modscag_url_str = modscag_url_base = dt.strftime(modscag_url_str) print("Trying: %s" % modscag_url_base) r = requests.get(modscag_url_base, auth=auth) modscag_url_fn = [] if r.ok: parsed_html = BeautifulSoup(r.content, "html.parser") modscag_url_fn = parsed_html.findAll(text=re.compile( % tile)) if not modscag_url_fn: cmd.extend(modscag_fn_list) print(cmd) subprocess.call(cmd, shell=False) out_vrt_fn_list.append(out_vrt_fn) return out_vrt_fn_list
Function to fetch and process MODSCAG fractional snow cover products for input datetime Products are tiled in MODIS sinusoidal projection example url: https://snow-data.jpl.nasa.gov/modscag-historic/2015/001/MOD09GA.A2015001.h07v03.005.2015006001833.snow_fraction.tif
383,635
def get_user_profile_photos(self, user_id, offset=None, limit=None): assert_type_or_raise(user_id, int, parameter_name="user_id") assert_type_or_raise(offset, None, int, parameter_name="offset") assert_type_or_raise(limit, None, int, parameter_name="limit") result = self.do("getUserProfilePhotos", user_id=user_id, offset=offset, limit=limit) if self.return_python_objects: logger.debug("Trying to parse {data}".format(data=repr(result))) from pytgbot.api_types.receivable.media import UserProfilePhotos try: return UserProfilePhotos.from_array(result) except TgApiParseException: logger.debug("Failed parsing as api_type UserProfilePhotos", exc_info=True) raise TgApiParseException("Could not parse result.") return result
Use this method to get a list of profile pictures for a user. Returns a UserProfilePhotos object. https://core.telegram.org/bots/api#getuserprofilephotos Parameters: :param user_id: Unique identifier of the target user :type user_id: int Optional keyword parameters: :param offset: Sequential number of the first photo to be returned. By default, all photos are returned. :type offset: int :param limit: Limits the number of photos to be retrieved. Values between 1—100 are accepted. Defaults to 100. :type limit: int Returns: :return: Returns a UserProfilePhotos object :rtype: pytgbot.api_types.receivable.media.UserProfilePhotos
383,636
def synced(func):
    def wrapper(self, *args, **kwargs):
        task = DataManagerTask(func, *args, **kwargs)
        self.submit_task(task)
        return task.get_results()
    return wrapper
Decorator for functions that should be called synchronously from another thread :param func: function to call
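A self-contained sketch of the same idea: run the wrapped call on a worker thread and block until its result is ready. The queue-based Worker and Manager classes below are illustrative, not the library's DataManagerTask machinery.

import queue
import threading

class Worker:
    def __init__(self):
        self._tasks = queue.Queue()
        threading.Thread(target=self._run, daemon=True).start()

    def _run(self):
        while True:
            func, args, result = self._tasks.get()
            result.put(func(*args))

    def submit(self, func, *args):
        result = queue.Queue(maxsize=1)
        self._tasks.put((func, args, result))
        return result.get()          # block until the worker has produced the result

def synced(func):
    def wrapper(self, *args):
        return self.worker.submit(func, self, *args)
    return wrapper

class Manager:
    worker = Worker()

    @synced
    def double(self, x):
        return 2 * x

print(Manager().double(21))   # 42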
383,637
def validate_bool(b):
    if isinstance(b, six.string_types):
        b = b.lower()
    if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True):
        return True
    elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False):
        return False
    else:
        raise ValueError('Could not convert "%s" to boolean' % b)
Convert b to a boolean or raise
383,638
def trees_by_subpath(self, sub_path):
    matches = (
        self.path_db[tree_path].keys()
        for tree_path in self.path_db.iterkeys()
        if tree_path.startswith(sub_path)
    )
    return set(sum(matches, []))
Search trees by `sub_path` using ``Tree.path.startswith(sub_path)`` comparison. Args: sub_path (str): Part of the :attr:`.Tree.path` property of :class:`.Tree`. Returns: set: Set of matching :class:`Tree` instances.
383,639
def preprocess_source(base_dir=os.curdir): source_path = os.path.join(base_dir, SOURCE_DIR) destination_path = os.path.join(base_dir, PREPROCESSED_DIR) shutil.rmtree(os.path.join(base_dir, ), ignore_errors=True) shutil.rmtree(os.path.join(base_dir, ), ignore_errors=True) directories = [] directory_patterns = [, ] for root, dirs, files in os.walk(destination_path): for adir in dirs: for pattern in directory_patterns: if fnmatch.fnmatch(adir, pattern): directories.append(os.path.join(root, adir)) break for adir in directories: shutil.rmtree(adir, ignore_errors=True) if sys.version_info[0] >= 3: return source_path try: from lib3to2.main import main as lib3to2_main except ImportError: try: from pip import main as pipmain except: from pip._internal import main as pipmain pipmain([, ]) from lib3to2.main import main as lib3to2_main if not os.path.exists(destination_path): __copy_tree(source_path, destination_path) lib3to2_main("lib3to2.fixes", ["-w", "-n", "--no-diffs"] + [destination_path]) else: files = [] dirs = [] cmp_result = filecmp.dircmp(source_path, destination_path) dirs.append(cmp_result) while len(dirs) > 0: cmp_result = dirs[-1] del dirs[-1] dirs.extend(list(cmp_result.subdirs.values())) for file_name in cmp_result.right_only: file_path = os.path.join(cmp_result.right, file_name) if os.path.isdir(file_path): shutil.rmtree(file_path, ignore_errors=True) continue try: os.remove(file_path) except: pass for file_name in (cmp_result.left_only + cmp_result.diff_files): left_file_path = os.path.join(cmp_result.left, file_name) right_file_path = os.path.join(cmp_result.right, file_name) if os.path.isdir(left_file_path): __copy_tree(left_file_path, right_file_path) files.append(right_file_path) continue if not fnmatch.fnmatch(file_name, "*.py"): continue try: os.remove(right_file_path) except: pass shutil.copy2(left_file_path, right_file_path) files.append(right_file_path) if len(files) > 0: lib3to2_main("lib3to2.fixes", ["-w", "-n", "--no-diffs"] + files) return destination_path
A special method to convert all source files to be compatible with the current python version during installation time. The source directory layout must look like this:

base_dir --+
           |
           +-- src (All sources are placed into this directory)
           |
           +-- preprocessed (Preprocessed sources are placed into this
           |                 directory)
           |
           +-- setup.py
           ...

@return Preprocessed source directory
383,640
def sync_imports( self, quiet = False ): self.open() return self._client[:].sync_imports(quiet = quiet)
Return a context manager to control imports onto all the engines in the underlying cluster. This method is used within a ``with`` statement. Any imports should be done with no experiments running, otherwise the method will block until the cluster is quiet. Generally imports will be one of the first things done when connecting to a cluster. (But be careful not to accidentally try to re-import if re-connecting to a running cluster.) :param quiet: if True, suppresses messages (defaults to False) :returns: a context manager
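A minimal usage sketch, assuming `lab` is an instance of the class above connected to an ipyparallel cluster; the imported module names are illustrative only.

with lab.sync_imports():
    import numpy      # imported on every engine as well as locally
    import networkx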
383,641
def gradient(self): L = self.L n = self.L.shape[0] grad = {"Lu": zeros((n, n, n * self._L.shape[1]))} for ii in range(self._L.shape[0] * self._L.shape[1]): row = ii // self._L.shape[1] col = ii % self._L.shape[1] grad["Lu"][row, :, ii] = L[:, col] grad["Lu"][:, row, ii] += L[:, col] return grad
Derivative of the covariance matrix over the lower triangular, flat part of L. It is equal to ∂K/∂Lᵢⱼ = ALᵀ + LAᵀ, where Aᵢⱼ is an n×m matrix of zeros except at [Aᵢⱼ]ᵢⱼ=1. Returns ------- Lu : ndarray Derivative of K over the lower-triangular, flat part of L.
383,642
def clamp(inclusive_lower_bound: int, inclusive_upper_bound: int, value: int) -> int:
    if value <= inclusive_lower_bound:
        return inclusive_lower_bound
    elif value >= inclusive_upper_bound:
        return inclusive_upper_bound
    else:
        return value
Bound the given ``value`` between ``inclusive_lower_bound`` and ``inclusive_upper_bound``.
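A few worked cases of the bounding behaviour described above:

assert clamp(0, 10, -5) == 0    # below the range -> lower bound
assert clamp(0, 10, 15) == 10   # above the range -> upper bound
assert clamp(0, 10, 7) == 7     # inside the range -> unchanged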
383,643
def init(filename=ConfigPath): section, parts = "DEFAULT", filename.rsplit(":", 1) if len(parts) > 1 and os.path.isfile(parts[0]): filename, section = parts if not os.path.isfile(filename): return vardict, parser = globals(), configparser.RawConfigParser() parser.optionxform = str try: def parse_value(raw): try: return json.loads(raw) except ValueError: return raw txt = open(filename).read() if not re.search("\\[\\w+\\]", txt): txt = "[DEFAULT]\n" + txt parser.readfp(StringIO.StringIO(txt), filename) for k, v in parser.items(section): vardict[k] = parse_value(v) except Exception: logging.warn("Error reading config from %s.", filename, exc_info=True)
Loads INI configuration into this module's attributes.
383,644
def createFromSource(cls, vs, name=None):
    # For github components the component name is ignored; it does not have
    # to match the github module name.
    return GithubComponent(vs.location, vs.spec, vs.semantic_spec, name)
returns a github component for any github url (including git+ssh://, git+http://, etc.), or None if this is not a Github URL. For all of these we use the github api to grab a tarball, because that's faster. Normally version will be empty, unless the original url was of the form: 'owner/repo @version' or 'url://...#version', which can be used to grab a particular tagged version. (Note that for github components we ignore the component name - it doesn't have to match the github module name)
383,645
def _concat_reps(self, kpop, max_var_multiple, quiet, **kwargs):
    outf = os.path.join(self.workdir, "{}-K-{}.indfile".format(self.name, kpop))
    excluded = 0
    reps = []
    # the stripped file mode is assumed to be 'w' since the indfile is written here
    with open(outf, 'w') as outfile:
        repfiles = glob.glob(
            os.path.join(self.workdir, self.name + "-K-{}-rep-*_f".format(kpop)))
        for rep in repfiles:
            result = Rep(rep, kpop=kpop)
            reps.append(result)
        newreps = []
        if len(reps) > 1:
            min_var_across_reps = np.min([i.var_lnlik for i in reps])
        else:
            min_var_across_reps = reps[0].var_lnlik
        for rep in reps:
            if not max_var_multiple:
                newreps.append(rep)
                outfile.write(rep.stable)
            else:
                if (rep.var_lnlik / min_var_across_reps) <= max_var_multiple:
                    newreps.append(rep)
                    outfile.write(rep.stable)
                else:
                    excluded += 1
    return newreps, excluded
Combine structure replicates into a single indfile; returns (newreps, excluded). Excludes reps with too high of variance (set with max_var_multiple) to exclude runs that did not converge.
383,646
def correct_structure(self, atol=1e-8): return np.allclose(self.structure.lattice.matrix, self.prim.lattice.matrix, atol=atol)
Determine if the structure matches the standard primitive structure. The standard primitive will be different between seekpath and pymatgen high-symmetry paths, but this is handled by the specific subclasses. Args: atol (:obj:`float`, optional): Absolute tolerance used to compare the input structure with the primitive standard structure. Returns: bool: ``True`` if the structure is the same as the standard primitive, otherwise ``False``.
383,647
def _y_axis(self, draw_axes=True):
    # The stripped string literals (node keys, CSS class names and format
    # strings) are reconstructed from the pygal radar chart code and should
    # be treated as assumptions.
    if not self._y_labels or not self.show_y_labels:
        return
    axis = self.svg.node(self.nodes['plot'], class_="axis y web")
    for label, r in reversed(self._y_labels):
        major = r in self._y_labels_major
        if not (self.show_minor_y_labels or major):
            continue
        guides = self.svg.node(
            axis, class_='%sguides' % ('logarithmic ' if self.logarithmic else ''))
        if self.show_y_guides:
            self.svg.line(
                guides,
                [self.view((r, theta)) for theta in self._x_pos],
                close=True,
                class_='%sguide line' % ('major ' if major else ''))
        x, y = self.view((r, self._x_pos[0]))
        x -= 5
        text = self.svg.node(
            guides, 'text', x=x, y=y, class_='major' if major else '')
        text.text = label
        if self.y_label_rotation:
            text.attrib['transform'] = "rotate(%d %f %f)" % (
                self.y_label_rotation, x, y)
        self.svg.node(guides, 'title').text = self._y_format(r)
Override y axis to make it polar
383,648
def retrieve(cls, *args, **kwargs):
    return super(BankAccount, cls).retrieve(*args, **kwargs)
Return parent method.
383,649
def transition_loop(n_states, prob):
    if not isinstance(n_states, int) or n_states <= 1:
        raise ParameterError('n_states={} must be a positive integer > 1'.format(n_states))

    # the output matrix is filled row by row below, so it must be allocated here
    transition = np.empty((n_states, n_states), dtype=np.float)

    # if prob is a float, make it a vector of length n_states
    prob = np.asarray(prob, dtype=np.float)
    if prob.ndim == 0:
        prob = np.tile(prob, n_states)

    if prob.shape != (n_states,):
        raise ParameterError('prob={} must have length equal to n_states={}'.format(prob, n_states))

    if np.any(prob < 0) or np.any(prob > 1):
        raise ParameterError('prob={} values must be in the range [0, 1]'.format(prob))

    for i, prob_i in enumerate(prob):
        transition[i] = (1. - prob_i) / (n_states - 1)
        transition[i, i] = prob_i

    return transition
Construct a self-loop transition matrix over `n_states`. The transition matrix will have the following properties: - `transition[i, i] = p` for all i - `transition[i, j] = (1 - p) / (n_states - 1)` for all `j != i` This type of transition matrix is appropriate when states tend to be locally stable, and there is no additional structure between different states. This is primarily useful for de-noising frame-wise predictions. Parameters ---------- n_states : int > 1 The number of states prob : float in [0, 1] or iterable, length=n_states If a scalar, this is the probability of a self-transition. If a vector of length `n_states`, `p[i]` is the probability of state `i`'s self-transition. Returns ------- transition : np.ndarray [shape=(n_states, n_states)] The transition matrix Examples -------- >>> librosa.sequence.transition_loop(3, 0.5) array([[0.5 , 0.25, 0.25], [0.25, 0.5 , 0.25], [0.25, 0.25, 0.5 ]]) >>> librosa.sequence.transition_loop(3, [0.8, 0.5, 0.25]) array([[0.8 , 0.1 , 0.1 ], [0.25 , 0.5 , 0.25 ], [0.375, 0.375, 0.25 ]])
383,650
def split_string(self, string): self.actions = [] start = 0 last_char = if len(string) > 0 and string[-1] == else None string = string[:-1] if last_char is not None else string for match in ANSI_OR_SPECIAL_PATTERN.finditer(string): raw = string[start:match.start()] substring = SPECIAL_PATTERN.sub(self._replace_special, raw) if substring or self.actions: yield substring self.actions = [] start = match.end() groups = filter(lambda x: x is not None, match.groups()) g0 = groups[0] if g0 == : self.actions.append(BeepAction()) yield None self.actions = [] elif g0 == : self.actions.append(CarriageReturnAction()) yield None self.actions = [] elif g0 == : self.actions.append(BackSpaceAction()) yield None self.actions = [] elif g0 == or g0 == : self.actions.append(NewLineAction()) yield g0 self.actions = [] else: params = [ param for param in groups[1].split() if param ] if g0.startswith(): try: params = map(int, params) except ValueError: pass else: self.set_csi_code(groups[2], params) elif g0.startswith(): self.set_osc_code(params) raw = string[start:] substring = SPECIAL_PATTERN.sub(self._replace_special, raw) if substring or self.actions: yield substring if last_char is not None: self.actions.append(NewLineAction()) yield last_char
Yields substrings for which the same escape code applies.
383,651
def generate_api_key(self):
    # The endpoint path segments were stripped in the source; '_api/v2/api_keys'
    # is the documented Cloudant API-key endpoint and is assumed here.
    endpoint = '/'.join((self.server_url, '_api', 'v2', 'api_keys'))
    resp = self.r_session.post(endpoint)
    resp.raise_for_status()
    return response_to_json_dict(resp)
Creates and returns a new API Key/pass pair. :returns: API key/pass pair in JSON format
383,652
def resolveFilenameConflicts(self): taken_names = set() resolved = False for item, dp in self.getItemDPList(): if dp.policy not in ["remove", "ignore", "banish"]: name0 = str(item.text(self.ColRename)) name = _makeUniqueFilename(taken_names, name0) if name != name0: item.setText(self.ColRename, name) resolved = True self.emit(SIGNAL("updated")) return resolved
Goes through list of DPs to make sure that their destination names do not clash. Adjust names as needed. Returns True if some conflicts were resolved.
383,653
def list_active_vms(cwd=None):
    # The shell command and the status keyword were stripped in the source;
    # 'vagrant status' and 'running' are assumed from the Salt vagrant module.
    vms = []
    cmd = 'vagrant status'
    reply = __salt__['cmd.shell'](cmd, cwd=cwd)
    log.info('--->\n%s', reply)
    for line in reply.split('\n'):
        tokens = line.strip().split()
        if len(tokens) > 1:
            if tokens[1] == 'running':
                vms.append(tokens[0])
    return vms
Return a list of machine names for active virtual machines on the host, which are defined in the Vagrantfile at the indicated path. CLI Example: .. code-block:: bash salt '*' vagrant.list_active_vms cwd=/projects/project_1
383,654
def rolling_restart(self, slave_batch_size=None,
                    slave_fail_count_threshold=None,
                    sleep_seconds=None,
                    stale_configs_only=None,
                    unupgraded_only=None,
                    restart_role_types=None,
                    restart_role_names=None):
    # The API argument keys were stripped in the source; the camelCase names
    # below follow the Cloudera Manager API convention and are assumed.
    args = dict()
    if slave_batch_size:
        args['slaveBatchSize'] = slave_batch_size
    if slave_fail_count_threshold:
        args['slaveFailCountThreshold'] = slave_fail_count_threshold
    if sleep_seconds:
        args['sleepSeconds'] = sleep_seconds
    if stale_configs_only:
        args['staleConfigsOnly'] = stale_configs_only
    if unupgraded_only:
        args['unUpgradedOnly'] = unupgraded_only
    if restart_role_types:
        args['restartRoleTypes'] = restart_role_types
    if restart_role_names:
        args['restartRoleNames'] = restart_role_names
    return self._cmd('rollingRestart', data=args)
Rolling restart the roles of a service. The sequence is: 1. Restart all the non-slave roles 2. If slaves are present restart them in batches of size specified 3. Perform any post-command needed after rolling restart @param slave_batch_size: Number of slave roles to restart at a time Must be greater than 0. Default is 1. For HDFS, this number should be less than the replication factor (default 3) to ensure data availability during rolling restart. @param slave_fail_count_threshold: The threshold for number of slave batches that are allowed to fail to restart before the entire command is considered failed. Must be >= 0. Default is 0. @param sleep_seconds: Number of seconds to sleep between restarts of slave role batches. Must be >=0. Default is 0. @param stale_configs_only: Restart roles with stale configs only. Default is false. @param unupgraded_only: Restart roles that haven't been upgraded yet. Default is false. @param restart_role_types: Role types to restart. If not specified, all startable roles are restarted. @param restart_role_names: List of specific roles to restart. If none are specified, then all roles of specified role types are restarted. @return: Reference to the submitted command. @since: API v3
383,655
def _calculate_gas(owners: List[str], safe_setup_data: bytes, payment_token: str) -> int:
    base_gas = 205000
    if payment_token != NULL_ADDRESS:
        payment_token_gas = 55000
    else:
        payment_token_gas = 0
    data_gas = 68 * len(safe_setup_data)
    gas_per_owner = 20000
    return base_gas + data_gas + payment_token_gas + len(owners) * gas_per_owner
Calculate gas manually, based on tests of previously deployed safes :param owners: Safe owners :param safe_setup_data: Data for proxy setup :param payment_token: If payment token, we will need more gas to transfer and maybe storage if first time :return: total gas needed for deployment
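A worked example of the arithmetic above, assuming two owners, 200 bytes of setup data and no payment token (the owner addresses are placeholders):

# base_gas + data_gas  + payment_token_gas + owners * gas_per_owner
# 205000   + 68 * 200  + 0                 + 2 * 20000             = 258600
gas = _calculate_gas(['0xOwner1', '0xOwner2'], b'\x00' * 200, NULL_ADDRESS)
assert gas == 258600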
383,656
def configure_logger(logger, filename, folder, log_level): fmt = logging.Formatter() if folder is not None: log_file = os.path.join(folder, filename) hdl = logging.FileHandler(log_file) hdl.setFormatter(fmt) hdl.setLevel(log_level) logger.addHandler(hdl) shdl = logging.StreamHandler() shdl.setLevel(log_level) shdl.setFormatter(fmt) logger.addHandler(shdl) logger.setLevel(log_level)
Configure logging behavior for the simulations.
383,657
def sh3(cmd): p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True, env=sub_environment()) out, err = p.communicate() retcode = p.returncode if retcode: raise CalledProcessError(retcode, cmd) else: return out.rstrip(), err.rstrip()
Execute command in a subshell, return stdout, stderr If anything appears in stderr, print it out to sys.stderr
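A small usage sketch; the echoed command is illustrative only, and the returned values are bytes with trailing whitespace stripped.

out, err = sh3('echo hello')
print(out)   # b'hello'
print(err)   # b'' when the command wrote nothing to stderr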
383,658
def write_hash_file_for_path(path, recompute=False):
    hash_fpath_list = []
    for root, dname_list, fname_list in os.walk(path):
        for fname in sorted(fname_list):
            fpath = os.path.join(root, fname)
            hash_fpath = write_hash_file(fpath, recompute=recompute)
            if hash_fpath is not None:
                hash_fpath_list.append(hash_fpath)
    return hash_fpath_list
r""" Creates a hash file for each file in a path CommandLine: python -m utool.util_hash --test-write_hash_file_for_path Example: >>> # DISABLE_DOCTEST >>> import os >>> import utool as ut >>> from utool.util_hash import * # NOQA >>> fpath = ut.grab_test_imgpath('patsy.jpg') >>> path, _ = os.path.split(fpath) >>> hash_fpath_list = write_hash_file_for_path(path) >>> for hash_fpath in hash_fpath_list: >>> assert os.path.exists(hash_fpath) >>> ut.delete(hash_fpath)
383,659
def _emit_message(cls, message):
    sys.stdout.write(message)
    sys.stdout.flush()
Print a message to STDOUT.
383,660
def get_path(filename):
    path = abspath(filename) if os.path.isdir(filename) else dirname(abspath(filename))
    return path
Get absolute path for filename. :param filename: file :return: path
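For illustration (the paths are hypothetical): a file argument yields its parent directory, while a directory argument yields itself.

get_path('/tmp/project/setup.py')   # -> '/tmp/project'
get_path('/tmp/project')            # -> '/tmp/project'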
383,661
def getISAAssay(assayNum, studyNum, pathToISATABFile): from isatools import isatab import copy try: isa = isatab.load(pathToISATABFile, skip_load_tables=True) std = isa.studies[studyNum - 1] return copy.deepcopy(std.assays[assayNum - 1]) except FileNotFoundError as err: raise err
This function returns an Assay object given the assay and study numbers in an ISA file Typically, you should use the exploreISA function to check the contents of the ISA file and retrieve the assay and study numbers you are interested in! :param assayNum: The Assay number (notice it's not zero-based index). :type assayNum: int :param studyNum: The Study number (notice it's not zero-based index). :type studyNum: int :param pathToISATABFile: The path to the ISATAB file :type pathToISATABFile: str :raise FileNotFoundError: If pathToISATABFile does not contain file 'i_Investigation.txt'.
383,662
def oidcCredentials(self, *args, **kwargs): return self._makeApiCall(self.funcinfo["oidcCredentials"], *args, **kwargs)
Get Taskcluster credentials given a suitable `access_token` Given an OIDC `access_token` from a trusted OpenID provider, return a set of Taskcluster credentials for use on behalf of the identified user. This method is typically not called with a Taskcluster client library and does not accept Hawk credentials. The `access_token` should be given in an `Authorization` header: ``` Authorization: Bearer abc.xyz ``` The `access_token` is first verified against the named :provider, then passed to the provider's APIBuilder to retrieve a user profile. That profile is then used to generate Taskcluster credentials appropriate to the user. Note that the resulting credentials may or may not include a `certificate` property. Callers should be prepared for either alternative. The given credentials will expire in a relatively short time. Callers should monitor this expiration and refresh the credentials if necessary, by calling this endpoint again, if they have expired. This method gives output: ``v1/oidc-credentials-response.json#`` This method is ``experimental``
383,663
def get_id(self):
    # The literal prefix and separator were stripped in the source; the
    # 'app-name/alias' form follows the DNAnexus app naming convention.
    if self._dxid is not None:
        return self._dxid
    else:
        return 'app-' + self._name + '/' + self._alias
:returns: Object ID of associated app :rtype: string Returns the object ID of the app that the handler is currently associated with.
383,664
def _render_having(having_conditions):
    if not having_conditions:
        return ""

    rendered_conditions = []
    for condition in having_conditions:
        # The dictionary keys were stripped in the source; 'field', 'type'
        # and 'comparators' are assumed from the docstring and the variables
        # they are assigned to.
        field = condition.get('field')
        field_type = condition.get('type')
        comparators = condition.get('comparators')
        if None in (field, field_type, comparators) or not comparators:
            logger.warn('Invalid condition passed in: %s' % condition)
            continue

        rendered_conditions.append(
            _render_condition(field, field_type, comparators))

    if not rendered_conditions:
        return ""

    return "HAVING %s" % (" AND ".join(rendered_conditions))
Render the having part of a query. Parameters ---------- having_conditions : list A ``list`` of ``dict``s to filter the rows Returns ------- str A string that represents the "having" part of a query. See Also -------- render_query : Further clarification of `conditions` formatting.
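A sketch of the expected input shape, assuming `_render_condition` renders a single comparison; the field name and comparator structure are illustrative, matching the keys used in the reconstructed code above.

having_conditions = [
    {
        'field': 'total_clicks',
        'type': 'INTEGER',
        'comparators': [{'condition': '>=', 'negate': False, 'value': 10}],
    },
]
# _render_having(having_conditions) would then produce something like:
# 'HAVING (total_clicks >= 10)'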
383,665
def problem(self): if self.api and self.problem_id: return self.api._get_problem(self.problem_id)
| Comment: For tickets of type "incident", the ID of the problem the incident is linked to
383,666
def token_scan(cls, result_key, token): def _scan(self): return token in self cls.scan(result_key, _scan)
Define a property that is set to true if the given token is found in the log file. Uses the __contains__ method of the log file.
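A usage sketch, assuming a log-file class that supports `in` and exposes the `scan` hook; the class name and token are hypothetical.

class BuildLog(LogFile):
    pass

# Defines a boolean property `has_warning` that is True when the token occurs.
BuildLog.token_scan('has_warning', 'WARNING:')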
383,667
def n_executions(self):
    # The Redis key segments and the error message were stripped in the
    # source; 'task' / 'executions' are assumed from the tasktiger key layout.
    pipeline = self.tiger.connection.pipeline()
    pipeline.exists(self.tiger._key('task', self.id))
    pipeline.llen(self.tiger._key('task', self.id, 'executions'))
    exists, n_executions = pipeline.execute()
    if not exists:
        raise TaskNotFound('Task {} not found.'.format(self.id))
    return n_executions
Queries and returns the number of past task executions.
383,668
def resubmit(self, job_ids = None, also_success = False, running_jobs = False, new_command=None, verbosity=0, keep_logs=False, **kwargs): self.lock() jobs = self.get_jobs(job_ids) if new_command is not None: if len(jobs) == 1: jobs[0].set_command_line(new_command) else: logger.warn("Ignoring new command since no single job id was specified") accepted_old_status = (, , ) if also_success else (, ,) for job in jobs: if running_jobs or job.status in accepted_old_status: grid_status = qstat(job.id, context=self.context) if len(grid_status) != 0: logger.warn("Deleting job since it was still running in the grid." % job.unique) qdel(job.id, context=self.context) arguments = job.get_arguments() arguments.update(**kwargs) if ( not in arguments or arguments[] == ): for arg in (, , ): if arg in arguments: del arguments[arg] job.set_arguments(kwargs=arguments) if not keep_logs: self.delete_logs(job) job.submit() if job.queue_name == and not in arguments: logger.warn("Re-submitting job locally (since no queue name is specified)." % job) else: deps = [dep.unique for dep in job.get_jobs_we_wait_for()] logger.debug("Re-submitting job with dependencies to the grid." % (job, deps)) self._submit_to_grid(job, job.name, job.get_array(), deps, job.log_dir, verbosity, **arguments) self.session.commit() self.unlock()
Re-submit jobs automatically
383,669
def make_display_lines(self): self.screen_height, self.screen_width = self.linesnum() display_lines = [] display_lines.append(self._title + ) top = self.topline bottom = self.topline + self.screen_height - 3 for index, i in enumerate(self._lines[top:bottom]): if index == self.markline: prefix = self._prefix_selected i = color_func(self.c[][])(i) else: prefix = self._prefix_deselected if index + self.topline == self.displayline: suffix = self._suffix_selected else: suffix = self._suffix_deselected line = % (prefix, i, suffix) line = color_func(self.c[][])(line) display_lines.append(line + ) return_num = self.screen_height - 3 - len(self._lines) for _ in range(return_num): display_lines.append() self.display_lines = display_lines
Generate the lines to display. Note: when multiple threads write to the terminal at the same time there is a bug that shifts the starting position, so a \r must be prepended to every line.
383,670
def _decrypt(private_key, ciphertext, padding): if not isinstance(private_key, PrivateKey): raise TypeError(pretty_message( , type_name(private_key) )) if not isinstance(ciphertext, byte_cls): raise TypeError(pretty_message( , type_name(ciphertext) )) if not padding: raise ValueError() cf_data = None sec_transform = None try: cf_data = CFHelpers.cf_data_from_bytes(ciphertext) error_pointer = new(CoreFoundation, ) sec_transform = Security.SecDecryptTransformCreate( private_key.sec_key_ref, error_pointer ) handle_cf_error(error_pointer) Security.SecTransformSetAttribute( sec_transform, Security.kSecPaddingKey, padding, error_pointer ) handle_cf_error(error_pointer) Security.SecTransformSetAttribute( sec_transform, Security.kSecTransformInputAttributeName, cf_data, error_pointer ) handle_cf_error(error_pointer) plaintext = Security.SecTransformExecute(sec_transform, error_pointer) handle_cf_error(error_pointer) return CFHelpers.cf_data_to_bytes(plaintext) finally: if cf_data: CoreFoundation.CFRelease(cf_data) if sec_transform: CoreFoundation.CFRelease(sec_transform)
Decrypts RSA ciphertext using a private key :param private_key: A PrivateKey object :param ciphertext: The ciphertext - a byte string :param padding: The padding mode to use, specified as a kSecPadding*Key value :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the plaintext
383,671
def get_all_player_ids(ids="shots"): url = "http://stats.nba.com/stats/commonallplayers?IsOnlyCurrentSeason=0&LeagueID=00&Season=2015-16" response = requests.get(url, headers=HEADERS) response.raise_for_status() headers = response.json()[][0][] players = response.json()[][0][] df = pd.DataFrame(players, columns=headers) if in pd.__version__: df = df.apply(pd.to_numeric, args=(,)) else: df = df.convert_objects(convert_numeric=True) if ids == "shots": df = df.query("(FROM_YEAR >= 2001) or (TO_YEAR >= 2001)") df = df.reset_index(drop=True) df = df.iloc[:, 0:2] return df if ids == "all_players": df = df.iloc[:, 0:2] return df if ids == "all_data": return df else: er = "Invalid value. It must be , , or ." raise ValueError(er)
Returns a pandas DataFrame containing the player IDs used in the stats.nba.com API. Parameters ---------- ids : { "shots" | "all_players" | "all_data" }, optional Passing in "shots" returns a DataFrame that contains the player IDs of all players have shot chart data. It is the default parameter value. Passing in "all_players" returns a DataFrame that contains all the player IDs used in the stats.nba.com API. Passing in "all_data" returns a DataFrame that contains all the data accessed from the JSON at the following url: http://stats.nba.com/stats/commonallplayers?IsOnlyCurrentSeason=0&LeagueID=00&Season=2015-16 The column information for this DataFrame is as follows: PERSON_ID: The player ID for that player DISPLAY_LAST_COMMA_FIRST: The player's name. ROSTERSTATUS: 0 means player is not on a roster, 1 means he's on a roster FROM_YEAR: The first year the player played. TO_YEAR: The last year the player played. PLAYERCODE: A code representing the player. Unsure of its use. Returns ------- df : pandas DataFrame The pandas DataFrame object that contains the player IDs for the stats.nba.com API.
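Typical calls, following the parameter description above (requires network access to stats.nba.com):

shot_ids = get_all_player_ids()               # players with shot chart data
everyone = get_all_player_ids("all_players")  # every player ID in the API
full_df  = get_all_player_ids("all_data")     # all columns described above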
383,672
def get_gtf_db(gtf, in_memory=False): db_file = gtf + ".db" if file_exists(db_file): return gffutils.FeatureDB(db_file) if not os.access(os.path.dirname(db_file), os.W_OK | os.X_OK): in_memory = True db_file = ":memory:" if in_memory else db_file if in_memory or not file_exists(db_file): infer_extent = guess_infer_extent(gtf) disable_extent = not infer_extent db = gffutils.create_db(gtf, dbfn=db_file, disable_infer_genes=disable_extent, disable_infer_transcripts=disable_extent) if in_memory: return db else: return gffutils.FeatureDB(db_file)
create a gffutils DB, in memory if we don't have write permissions
383,673
def X_less(self): self.parent.value(, self.parent.value() / 2) self.parent.overview.update_position()
Zoom out on the x-axis.
383,674
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=256): cmap = get_cmap(cmap) name = "%s-trunc-%.2g-%.2g" % (cmap.name, minval, maxval) return colors.LinearSegmentedColormap.from_list( name, cmap(np.linspace(minval, maxval, n)))
Truncates a colormap, such that the new colormap consists of ``cmap[minval:maxval]``. If maxval is larger than minval, the truncated colormap will be reversed. Args: cmap (colormap): Colormap to be truncated minval (float): Lower bound. Should be a float betwee 0 and 1. maxval (float): Upper bound. Should be a float between 0 and 1 n (int): Number of colormap steps. Default is ``256``. Returns: colormap: A matplotlib colormap http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib
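A short usage sketch with matplotlib, keeping only the upper half of 'viridis' (the colormap choice and data are illustrative):

import numpy as np
import matplotlib.pyplot as plt

upper_viridis = truncate_colormap('viridis', minval=0.5, maxval=1.0)
plt.imshow(np.random.rand(10, 10), cmap=upper_viridis)
plt.colorbar()
plt.show()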
383,675
def GetPlaylists(self, start, max_count, order, reversed):
    # The D-Bus signature strings were stripped in the source; 'u' (uint32),
    # 's' (string) and 'b' (boolean) are assumed from the MPRIS2 specification.
    cv = convert2dbus
    return self.iface.GetPlaylists(cv(start, 'u'), cv(max_count, 'u'),
                                   cv(order, 's'), cv(reversed, 'b'))
Gets a set of playlists. :param int start: The index of the first playlist to be fetched (according to the ordering). :param int max_count: The maximum number of playlists to fetch. :param str order: The ordering that should be used. :param bool reversed: Whether the order should be reversed.
383,676
def add(self, subj: Node, pred: URIRef, obj: Node) -> "FHIRResource": self._g.add((subj, pred, obj)) return self
Shortcut to rdflib add function :param subj: :param pred: :param obj: :return: self for chaining
383,677
def _generate_ndarray_function_code(handle, name, func_name, signature_only=False): real_name = ctypes.c_char_p() desc = ctypes.c_char_p() num_args = mx_uint() arg_names = ctypes.POINTER(ctypes.c_char_p)() arg_types = ctypes.POINTER(ctypes.c_char_p)() arg_descs = ctypes.POINTER(ctypes.c_char_p)() key_var_num_args = ctypes.c_char_p() ret_type = ctypes.c_char_p() check_call(_LIB.MXSymbolGetAtomicSymbolInfo( handle, ctypes.byref(real_name), ctypes.byref(desc), ctypes.byref(num_args), ctypes.byref(arg_names), ctypes.byref(arg_types), ctypes.byref(arg_descs), ctypes.byref(key_var_num_args), ctypes.byref(ret_type))) narg = int(num_args.value) arg_names = [py_str(arg_names[i]) for i in range(narg)] arg_types = [py_str(arg_types[i]) for i in range(narg)] key_var_num_args = py_str(key_var_num_args.value) ret_type = py_str(ret_type.value) if ret_type.value is not None else doc_str = _build_doc(name, py_str(desc.value), arg_names, arg_types, [py_str(arg_descs[i]) for i in range(narg)], key_var_num_args, ret_type) dtype_name = None arr_name = None ndsignature = [] signature = [] ndarg_names = [] kwarg_names = [] for i in range(narg): name, atype = arg_names[i], arg_types[i] if name == : dtype_name = name signature.append(%name) elif atype.startswith() or atype.startswith(): assert not arr_name, \ "Op can only have one argument with variable " \ "size and it must be the last argument." if atype.endswith(): ndsignature.append(%name) arr_name = name else: ndsignature.append(%name) ndarg_names.append(name) else: signature.append(%name) kwarg_names.append(name) signature.append() signature.append() signature.append() signature = ndsignature + signature code = [] if arr_name: code.append(%(func_name, arr_name)) if not signature_only: code.append(.format(arr_name)) if dtype_name is not None: code.append(%( dtype_name, dtype_name, dtype_name)) code.append() else: code.append(%(func_name, .join(signature))) if not signature_only: code.append() for name in ndarg_names: code.append(.format(name=name)) for name in kwarg_names: code.append(%(name, name, name)) if dtype_name is not None: code.append(%(dtype_name, dtype_name, dtype_name)) if not signature_only: code.append(%( handle.value)) else: code.append() doc_str_lines = _os.linesep+.join([+s if s.strip() else s for s in .format(doc_str=doc_str) .splitlines(True)]) code.insert(1, doc_str_lines) return .join(code), doc_str
Generate function for ndarray op by handle and function name.
383,678
def sh2(cmd): p = Popen(cmd, stdout=PIPE, shell=True, env=sub_environment()) out = p.communicate()[0] retcode = p.returncode if retcode: raise CalledProcessError(retcode, cmd) else: return out.rstrip()
Execute command in a subshell, return stdout. Stderr is unbuffered from the subshell.
383,679
def deflections_from_grid(self, grid, tabulate_bins=1000): @jit_integrand def surface_density_integrand(x, kappa_radius, scale_radius, inner_slope): return (3 - inner_slope) * (x + kappa_radius / scale_radius) ** (inner_slope - 4) * (1 - np.sqrt(1 - x * x)) def calculate_deflection_component(npow, index): deflection_grid = 2.0 * self.kappa_s * self.axis_ratio * grid[:, index] deflection_grid *= quad_grid(self.deflection_func, 0.0, 1.0, grid, args=(npow, self.axis_ratio, minimum_log_eta, maximum_log_eta, tabulate_bins, surface_density_integral), epsrel=EllipticalGeneralizedNFW.epsrel)[0] return deflection_grid eta_min, eta_max, minimum_log_eta, maximum_log_eta, bin_size = self.tabulate_integral(grid, tabulate_bins) surface_density_integral = np.zeros((tabulate_bins,)) for i in range(tabulate_bins): eta = 10. ** (minimum_log_eta + (i - 1) * bin_size) integral = quad(surface_density_integrand, a=0.0, b=1.0, args=(eta, self.scale_radius, self.inner_slope), epsrel=EllipticalGeneralizedNFW.epsrel)[0] surface_density_integral[i] = ((eta / self.scale_radius) ** (1 - self.inner_slope)) * \ (((1 + eta / self.scale_radius) ** (self.inner_slope - 3)) + integral) deflection_y = calculate_deflection_component(1.0, 0) deflection_x = calculate_deflection_component(0.0, 1) return self.rotate_grid_from_profile(np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T))
Calculate the deflection angles at a given set of arc-second gridded coordinates. Parameters ---------- grid : grids.RegularGrid The grid of (y,x) arc-second coordinates the deflection angles are computed on. tabulate_bins : int The number of bins to tabulate the inner integral of this profile.
383,680
def flag(val):
    if val == 1:
        return True
    elif val == 0:
        return False
    val = str(val)
    if len(val) > 5:
        return False
    # The original set of accepted strings was stripped; this is a plausible
    # reconstruction of common on/off spellings of five characters or fewer.
    return val.upper() in ('0', '1', 'ON', 'OFF', 'TRUE', 'FALSE', 'YES', 'NO')
Does the value look like an on/off flag?
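Illustrative behaviour, based on the reconstructed set of accepted spellings above (so the exact accepted strings are an assumption):

flag(1)         # True  (numeric 0/1 short-circuit)
flag('off')     # True  -- it *looks like* a flag, regardless of on vs. off
flag('purple')  # False -- not a recognised flag spelling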
383,681
def on_bindok(self, unused_frame):
    # The original log messages were stripped; generic ones are used here.
    self._logger.info('Queue bound; starting producer loop')
    while not self._stopping:
        self.producer(self)
    self._logger.info("producer done")
This method is invoked by pika when it receives the Queue.BindOk response from RabbitMQ.
383,682
def get_traindata(self) -> np.ndarray: traindata = None for key, value in self.data.items(): if key not in [, , ]: if traindata is None: traindata = value[np.where(value[:, 4] != 0)] else: traindata = np.concatenate((traindata, value[np.where(value[:, 4] != 0)])) return traindata
Pulls all available data and concatenates for model training :return: 2d array of points
383,683
def get_class_name(self): if self.class_idx_value is None: self.class_idx_value = self.CM.get_type(self.class_idx) return self.class_idx_value
Return the class name of the field :rtype: string
383,684
def make_full_url(request, url):
    urlparts = request.urlparts
    return '{scheme}://{site}/{url}'.format(
        scheme=urlparts.scheme,
        site=get_site_name(request),
        url=url.lstrip('/'),
    )
Get a relative URL and returns the absolute version. Eg: “/foo/bar?q=is-open” ==> “http://example.com/foo/bar?q=is-open”
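A sketch of the intended behaviour, assuming a request whose urlparts reports an 'http' scheme and a get_site_name() of 'example.com':

make_full_url(request, '/foo/bar?q=is-open')
# -> 'http://example.com/foo/bar?q=is-open'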
383,685
def _get_descriptors(self):
    rlist = []
    wlist = []
    xlist = []

    for socket, flags in self.sockets.items():
        if isinstance(socket, zmq.Socket):
            rlist.append(socket.getsockopt(zmq.FD))
            continue
        elif isinstance(socket, int):
            fd = socket
        elif hasattr(socket, 'fileno'):
            try:
                fd = int(socket.fileno())
            except:
                # error message assumed; the original was stripped
                raise ValueError('fileno() must return a valid integer fd')
        else:
            # error message assumed; the original was stripped
            raise TypeError('Socket must be a 0MQ socket, an integer fd, '
                            'or have a fileno() method: %r' % socket)

        if flags & zmq.POLLIN:
            rlist.append(fd)
        if flags & zmq.POLLOUT:
            wlist.append(fd)
        if flags & zmq.POLLERR:
            xlist.append(fd)

    return (rlist, wlist, xlist)
Returns a three-element tuple with socket descriptors ready for gevent.select.select
383,686
def zip(self, *others): args = [_unwrap(item) for item in (self,) + others] ct = self.count() if not all(len(arg) == ct for arg in args): raise ValueError("Arguments are not all the same length") return Collection(map(Wrapper.wrap, zip(*args)))
Zip the items of this collection with one or more other sequences, and wrap the result. Unlike Python's zip, all sequences must be the same length. Parameters: others: One or more iterables or Collections Returns: A new collection. Examples: >>> c1 = Collection([Scalar(1), Scalar(2)]) >>> c2 = Collection([Scalar(3), Scalar(4)]) >>> c1.zip(c2).val() [(1, 3), (2, 4)]
383,687
def has_permission(self, request, view): if view.suffix == : return True filter_and_actions = self._get_filter_and_actions( request.query_params.get(), view.action, .format( view.queryset.model._meta.app_label, view.queryset.model._meta.model_name ) ) if not filter_and_actions: return False if request.method == : for key, value in request.data.iteritems(): if (key in filter_and_actions[] and not unicode(filter_and_actions[][key]) == unicode(value)): return False return True
Check list and create permissions based on sign and filters.
383,688
def loop(self, timeout=1.0, max_packets=1): if timeout < 0.0: raise ValueError() self._current_out_packet_mutex.acquire() self._out_packet_mutex.acquire() if self._current_out_packet is None and len(self._out_packet) > 0: self._current_out_packet = self._out_packet.pop(0) if self._current_out_packet: wlist = [self.socket()] else: wlist = [] self._out_packet_mutex.release() self._current_out_packet_mutex.release() rlist = [self.socket(), self._sockpairR] try: socklist = select.select(rlist, wlist, [], timeout) except TypeError as e: socklist[1].insert(0, self.socket()) try: self._sockpairR.recv(1) except socket.error as err: if err.errno != EAGAIN: raise if self.socket() in socklist[1]: rc = self.loop_write(max_packets) if rc or (self._ssl is None and self._sock is None): return rc return self.loop_misc()
Process network events. This function must be called regularly to ensure communication with the broker is carried out. It calls select() on the network socket to wait for network events. If incoming data is present it will then be processed. Outgoing commands, from e.g. publish(), are normally sent immediately that their function is called, but this is not always possible. loop() will also attempt to send any remaining outgoing messages, which also includes commands that are part of the flow for messages with QoS>0. timeout: The time in seconds to wait for incoming/outgoing network traffic before timing out and returning. max_packets: Not currently used. Returns MQTT_ERR_SUCCESS on success. Returns >0 on error. A ValueError will be raised if timeout < 0
383,689
def main(): cli = docker.from_env() try: opts, args = getopt.gnu_getopt(sys.argv[1:], "pcv", ["pretty", "compose"]) except getopt.GetoptError as _: print("Usage: docker-parse [--pretty|-p|--compose|-c] [containers]") sys.exit(2) if len(args) == 0: containers = cli.containers.list(all=True) else: containers = map(lambda nm: cli.containers.get(nm), args) as_compose = False pretty = False for opt, _ in opts: if opt == : print(__version__) sys.exit() elif opt == or opt == : pretty = True break elif opt == or opt == : as_compose = True break for container in containers: info = container.attrs image_info = cli.images.get(info[][]).attrs if as_compose: output_compose(info, image_info) else: output_command(info, image_info, pretty)
main entry
383,690
def _get_args_contents(self):
    # The joiner and format string were stripped in the source; the
    # space-separated 'key=value' form is assumed from the docstring.
    return ' '.join(
        '%s=%s' % (key, shlex_quote(str(self.args[key])))
        for key in self.args
    ) + ' '
Mimic the argument formatting behaviour of ActionBase._execute_module().
383,691
def area(self, chord_length=1e-4): def area_without_arcs(path): area_enclosed = 0 for seg in path: x = real(seg.poly()) dy = imag(seg.poly()).deriv() integrand = x*dy integral = integrand.integ() area_enclosed += integral(1) - integral(0) return area_enclosed def seg2lines(seg): num_lines = int(ceil(seg.length() / chord_length)) pts = [seg.point(t) for t in np.linspace(0, 1, num_lines+1)] return [Line(pts[i], pts[i+1]) for i in range(num_lines)] assert self.isclosed() bezier_path_approximation = [] for seg in self: if isinstance(seg, Arc): bezier_path_approximation += seg2lines(seg) else: bezier_path_approximation.append(seg) return area_without_arcs(Path(*bezier_path_approximation))
Find area enclosed by path. Approximates any Arc segments in the Path with lines approximately `chord_length` long, and returns the area enclosed by the approximated Path. Default chord length is 0.01. If Arc segments are included in path, to ensure accurate results, make sure this `chord_length` is set to a reasonable value (e.g. by checking curvature). Notes ----- * Negative area results from clockwise (as opposed to counter-clockwise) parameterization of the input Path. To Contributors --------------- This is one of many parts of `svgpathtools` that could be improved by a noble soul implementing a piecewise-linear approximation scheme for paths (one with controls to guarantee a desired accuracy).
383,692
def modify(self, service_name, json, **kwargs):
    return self._send(requests.put, service_name, json, **kwargs)
Modify an AppNexus object
383,693
def deserialize_profile(profile, key_prefix=, pop=False): result = {} if pop: getter = profile.pop else: getter = profile.get def prefixed(name): return % (key_prefix, name) for key in profile.keys(): val = getter(key) if key == prefixed(): result[] = val else: raise MeteorError(400, % key) return result
De-serialize user profile fields into concrete model fields.
383,694
async def presentProof(self, proofRequest: ProofRequest) -> FullProof: claims, requestedProof = await self._findClaims(proofRequest) proof = await self._prepareProof(claims, proofRequest.nonce, requestedProof) return proof
Presents a proof to the verifier. :param proofRequest: description of a proof to be presented (revealed attributes, predicates, timestamps for non-revocation) :return: a proof (both primary and non-revocation) and revealed attributes (initial non-encoded values)
383,695
def process_stats(self, stats, prefix, metric_categories, nested_tags, tags, recursion_level=0): for child in stats: if child.tag in metrics.METRIC_VALUE_FIELDS: self.submit_metrics(child, prefix, tags) elif child.tag in metrics.CATEGORY_FIELDS: recursion_tags = tags + ["{}:{}".format(nested_tags.get(prefix)[recursion_level], child.get())] self.process_stats(child, prefix, metric_categories, nested_tags, recursion_tags, recursion_level + 1)
The XML will have Stat Nodes and Nodes that contain the metrics themselves. This code recursively goes through each Stat Node to properly set up tags, where each Stat will have a different tag key depending on the context.
383,696
def depth(self, value): for command in self.subcommands.values(): command.depth = value + 1 del command.argparser._defaults[self.arg_label_fmt % self._depth] command.argparser._defaults[self.arg_label_fmt % value] = command self._depth = value
Update ourself and any of our subcommands.
383,697
def _make_pheno_assoc(
        self, graph, gene_id, disorder_num, disorder_label, phene_key):
    # The stripped literals (the 'OMIM' curie prefix and the relation labels)
    # are reconstructed from the docstring; treat them as assumptions.
    disorder_id = ':'.join(('OMIM', disorder_num))
    rel_label = 'has phenotype'
    rel_id = self.globaltt[rel_label]
    if disorder_label.startswith('['):
        # a "nondisease": abnormal lab values rather than a disorder
        rel_id = self.globaltt['is marker for']
    elif disorder_label.startswith('{'):
        # susceptibility to a multifactorial disorder or to infection
        rel_id = self.globaltt['contributes to']
    elif disorder_label.startswith('?'):
        # provisional gene-phenotype relationship
        rel_id = self.globaltt['contributes to']

    assoc = G2PAssoc(graph, self.name, gene_id, disorder_id, rel_id)
    if phene_key is not None:
        evidence = self.resolve(phene_key, False)
        if evidence != phene_key:
            assoc.add_evidence(evidence)
    assoc.add_association_to_graph()
From the docs: Brackets, "[ ]", indicate "nondiseases," mainly genetic variations that lead to apparently abnormal laboratory test values (e.g., dysalbuminemic euthyroidal hyperthyroxinemia). Braces, "{ }", indicate mutations that contribute to susceptibility to multifactorial disorders (e.g., diabetes, asthma) or to susceptibility to infection (e.g., malaria). A question mark, "?", before the phenotype name indicates that the relationship between the phenotype and gene is provisional. More details about this relationship are provided in the comment field of the map and in the gene and phenotype OMIM entries. Phene key: The number in parentheses after the name of each disorder indicates the following: (1) the disorder was positioned by mapping of the wildtype gene; (2) the disease phenotype itself was mapped; (3) the molecular basis of the disorder is known; (4) the disorder is a chromosome deletion or duplication syndrome. reference: https://omim.org/help/faq#1_6 :param graph: graph object of type dipper.graph.Graph :param gene_id: str, gene id as curie :param gene_symbol: str, symbol :param disorder_num: str, disorder id :param disorder_label: str, disorder label :param phene_key: int or str, 1-4, see docstring :return:
383,698
def _recon_lcs(x, y): i, j = len(x), len(y) table = _lcs(x, y) def _recon(i, j): if i == 0 or j == 0: return [] elif x[i - 1] == y[j - 1]: return _recon(i - 1, j - 1) + [(x[i - 1], i)] elif table[i - 1, j] > table[i, j - 1]: return _recon(i - 1, j) else: return _recon(i, j - 1) recon_tuple = tuple(map(lambda x: x[0], _recon(i, j))) return recon_tuple
Returns the Longest Subsequence between x and y. Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence Args: x: sequence of words y: sequence of words Returns: sequence: LCS of x and y
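A small worked example of the reconstruction, assuming `_lcs` builds the usual dynamic-programming length table:

x = ('the', 'cat', 'sat', 'on', 'the', 'mat')
y = ('the', 'dog', 'sat', 'on', 'a', 'mat')
_recon_lcs(x, y)   # -> ('the', 'sat', 'on', 'mat')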
383,699
def get_instance(cls, dependencies=None):
    # The assertion messages were stripped in the source; generic ones are used.
    assert cls is not ContractBase, 'ContractBase is abstract; use a concrete subclass.'
    assert cls.CONTRACT_NAME, 'CONTRACT_NAME must be set on the subclass.'
    return cls(cls.CONTRACT_NAME, dependencies)
Return an instance for a contract name. :param dependencies: :return: Contract base instance