Columns:
  Unnamed: 0: int64, values 0 to 389k
  code: string, lengths 26 to 79.6k characters
  docstring: string, lengths 1 to 46.9k characters
377,200
def _requires_submission(self): if self.dbcon_part is None: return False tables = get_table_list(self.dbcon_part) nrows = 0 for table in tables: if table == : continue nrows += get_number_of_rows(self.dbcon_part, table) if nrows: logger.debug( % nrows) else: logger.debug() t0 = datetime.datetime.now() s = self[] last_submission = s.get_last(1) if last_submission: logger.debug( % last_submission[0][]) t_ref = datetime.datetime.strptime(last_submission[0][], Table.time_fmt) else: t_ref = datetime.datetime.fromtimestamp(os.path.getmtime(self.filepath)) submission_interval_passed = (t0 - t_ref).total_seconds() > self.submit_interval_s submission_required = bool(submission_interval_passed and nrows) if submission_required: logger.debug() else: logger.debug() return submission_required
Returns True if the time since the last submission is greater than the submission interval. If no submissions have ever been made, check whether the time since the database file was last modified is greater than the submission interval.
377,201
def name(self):
    return next((self.names.get(x) for x in self._locales if x in self.names), None)
Dict with locale codes as keys and localized name as value
377,202
def recv(self, stream, crc_mode=1, retry=16, timeout=60, delay=1, quiet=0): /etc/issuewb error_count = 0 char = 0 cancel = 0 while True: if error_count >= retry: self.abort(timeout=timeout) return None elif crc_mode and error_count < (retry / 2): if not self.putc(CRC): time.sleep(delay) error_count += 1 else: crc_mode = 0 if not self.putc(NAK): time.sleep(delay) error_count += 1 char = self.getc(1, timeout) if not char: error_count += 1 continue elif char == SOH: break elif char == STX: break elif char == CAN: if cancel: return None else: cancel = 1 else: error_count += 1 error_count = 0 income_size = 0 packet_size = 128 sequence = 1 cancel = 0 while True: while True: if char == SOH: packet_size = 128 break elif char == STX: packet_size = 1024 break elif char == EOT: self.putc(ACK) return income_size elif char == CAN: if cancel: return None else: cancel = 1 else: if not quiet: print >> sys.stderr, \ , ord(char) error_count += 1 if error_count >= retry: self.abort() return None error_count = 0 cancel = 0 seq1 = ord(self.getc(1)) seq2 = 0xff - ord(self.getc(1)) if seq1 == sequence and seq2 == sequence: data = self.getc(packet_size + 1 + crc_mode, timeout) if crc_mode: csum = (ord(data[-2]) << 8) + ord(data[-1]) data = data[:-2] log.debug( % \ (csum, self.calc_crc(data))) valid = csum == self.calc_crc(data) else: csum = data[-1] data = data[:-1] log.debug( % \ (ord(csum), self.calc_checksum(data))) valid = ord(csum) == self.calc_checksum(data) if valid: income_size += len(data) stream.write(data) self.putc(ACK) sequence = (sequence + 1) % 0x100 char = self.getc(1, timeout) continue else: self.getc(packet_size + 1 + crc_mode) self.debug( % \ (sequence, seq1, seq2)) self.putc(NAK)
Receive a stream via the XMODEM protocol. >>> stream = file('/etc/issue', 'wb') >>> print modem.recv(stream) 2342 Returns the number of bytes received on success or ``None`` in case of failure.
377,203
def from_enum(gtype, enum_value):
    pointer = vips_lib.vips_enum_nick(gtype, enum_value)
    if pointer == ffi.NULL:
        raise Error()
    return _to_string(pointer)
Turn an int back into an enum string.
377,204
def colorscale(mag, cmin, cmax):
    try:
        x = float(mag - cmin) / (cmax - cmin)
    except ZeroDivisionError:
        x = 0.5
    blue = min((max((4 * (0.75 - x), 0.)), 1.))
    red = min((max((4 * (x - 0.25), 0.)), 1.))
    green = min((max((4 * abs(x - 0.5) - 1., 0.)), 1.))
    return red, green, blue
Return a tuple of floats between 0 and 1 for R, G, and B. From Python Cookbook (9.11?)
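A short usage sketch (assuming the colorscale function above is available unchanged), mapping a few magnitudes in [0, 10] onto RGB floats:

    for mag in (0.0, 2.5, 5.0, 7.5, 10.0):
        r, g, b = colorscale(mag, cmin=0.0, cmax=10.0)
        print("%4.1f -> r=%.2f g=%.2f b=%.2f" % (mag, r, g, b))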
377,205
def update_variables(func):
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        result = func(self, *args, **kwargs)
        if isinstance(result, tuple):
            return self.process_register(result[0], result[1])
        else:
            return self.process_register(result)
    return wrapper
Use this decorator on Step.action implementation. Your action method should always return variables, or both variables and output. This decorator will update variables with output.
377,206
def info(dev): ** if in dev: qtype = else: qtype = cmd = .format(qtype, dev) udev_result = __salt__[](cmd, output_loglevel=) if udev_result[] != 0: raise CommandExecutionError(udev_result[]) return _parse_udevadm_info(udev_result[])[0]
Extract all info delivered by udevadm CLI Example: .. code-block:: bash salt '*' udev.info /dev/sda salt '*' udev.info /sys/class/net/eth0
377,207
def neighbor_add(self, address, remote_as, remote_port=DEFAULT_BGP_PORT, enable_ipv4=DEFAULT_CAP_MBGP_IPV4, enable_ipv6=DEFAULT_CAP_MBGP_IPV6, enable_vpnv4=DEFAULT_CAP_MBGP_VPNV4, enable_vpnv6=DEFAULT_CAP_MBGP_VPNV6, enable_evpn=DEFAULT_CAP_MBGP_EVPN, enable_ipv4fs=DEFAULT_CAP_MBGP_IPV4FS, enable_ipv6fs=DEFAULT_CAP_MBGP_IPV6FS, enable_vpnv4fs=DEFAULT_CAP_MBGP_VPNV4FS, enable_vpnv6fs=DEFAULT_CAP_MBGP_VPNV6FS, enable_l2vpnfs=DEFAULT_CAP_MBGP_L2VPNFS, enable_enhanced_refresh=DEFAULT_CAP_ENHANCED_REFRESH, enable_four_octet_as_number=DEFAULT_CAP_FOUR_OCTET_AS_NUMBER, next_hop=None, password=None, multi_exit_disc=None, site_of_origins=None, is_route_server_client=DEFAULT_IS_ROUTE_SERVER_CLIENT, is_route_reflector_client=DEFAULT_IS_ROUTE_REFLECTOR_CLIENT, is_next_hop_self=DEFAULT_IS_NEXT_HOP_SELF, local_address=None, local_port=None, local_as=None, connect_mode=DEFAULT_CONNECT_MODE): bgp_neighbor = { neighbors.IP_ADDRESS: address, neighbors.REMOTE_AS: remote_as, REMOTE_PORT: remote_port, PEER_NEXT_HOP: next_hop, PASSWORD: password, IS_ROUTE_SERVER_CLIENT: is_route_server_client, IS_ROUTE_REFLECTOR_CLIENT: is_route_reflector_client, IS_NEXT_HOP_SELF: is_next_hop_self, CONNECT_MODE: connect_mode, CAP_ENHANCED_REFRESH: enable_enhanced_refresh, CAP_FOUR_OCTET_AS_NUMBER: enable_four_octet_as_number, CAP_MBGP_IPV4: enable_ipv4, CAP_MBGP_IPV6: enable_ipv6, CAP_MBGP_VPNV4: enable_vpnv4, CAP_MBGP_VPNV6: enable_vpnv6, CAP_MBGP_EVPN: enable_evpn, CAP_MBGP_IPV4FS: enable_ipv4fs, CAP_MBGP_IPV6FS: enable_ipv6fs, CAP_MBGP_VPNV4FS: enable_vpnv4fs, CAP_MBGP_VPNV6FS: enable_vpnv6fs, CAP_MBGP_L2VPNFS: enable_l2vpnfs, } if multi_exit_disc: bgp_neighbor[MULTI_EXIT_DISC] = multi_exit_disc if site_of_origins: bgp_neighbor[SITE_OF_ORIGINS] = site_of_origins if local_address: bgp_neighbor[LOCAL_ADDRESS] = local_address if local_port: bgp_neighbor[LOCAL_PORT] = local_port if local_as: bgp_neighbor[LOCAL_AS] = local_as call(, **bgp_neighbor)
This method registers a new neighbor. The BGP speaker tries to establish a bgp session with the peer (accepts a connection from the peer and also tries to connect to it). ``address`` specifies the IP address of the peer. It must be the string representation of an IP address. Only IPv4 is supported now. ``remote_as`` specifies the AS number of the peer. It must be an integer between 1 and 65535. ``remote_port`` specifies the TCP port number of the peer. ``enable_ipv4`` enables IPv4 address family for this neighbor. ``enable_ipv6`` enables IPv6 address family for this neighbor. ``enable_vpnv4`` enables VPNv4 address family for this neighbor. ``enable_vpnv6`` enables VPNv6 address family for this neighbor. ``enable_evpn`` enables Ethernet VPN address family for this neighbor. ``enable_ipv4fs`` enables IPv4 Flow Specification address family for this neighbor. ``enable_ipv6fs`` enables IPv6 Flow Specification address family for this neighbor. ``enable_vpnv4fs`` enables VPNv4 Flow Specification address family for this neighbor. ``enable_vpnv6fs`` enables VPNv6 Flow Specification address family for this neighbor. ``enable_l2vpnfs`` enables L2VPN Flow Specification address family for this neighbor. ``enable_enhanced_refresh`` enables Enhanced Route Refresh for this neighbor. ``enable_four_octet_as_number`` enables Four-Octet AS Number capability for this neighbor. ``next_hop`` specifies the next hop IP address. If not specified, host's ip address to access to a peer is used. ``password`` is used for the MD5 authentication if it's specified. By default, the MD5 authentication is disabled. ``multi_exit_disc`` specifies multi exit discriminator (MED) value as an int type value. If omitted, MED is not sent to the neighbor. ``site_of_origins`` specifies site_of_origin values. This parameter must be a list of string. ``is_route_server_client`` specifies whether this neighbor is a router server's client or not. ``is_route_reflector_client`` specifies whether this neighbor is a router reflector's client or not. ``is_next_hop_self`` specifies whether the BGP speaker announces its own ip address to iBGP neighbor or not as path's next_hop address. ``local_address`` specifies Loopback interface address for iBGP peering. ``local_port`` specifies source TCP port for iBGP peering. ``local_as`` specifies local AS number per-peer. If omitted, the AS number of BGPSpeaker instance is used. ``connect_mode`` specifies how to connect to this neighbor. This parameter must be one of the following. - CONNECT_MODE_ACTIVE = 'active' - CONNECT_MODE_PASSIVE = 'passive' - CONNECT_MODE_BOTH (default) = 'both'
377,208
def set_attributes(self, **kwargs):
    kwargs = dict(kwargs)
    for name, value in kwargs.items():
        self.__getattr__(name)
        try:
            self.setp(name, **value)
        except TypeError:
            try:
                self.setp(name, *value)
            except (TypeError, KeyError):
                self.__setattr__(name, value)
Set a group of attributes (parameters and members). Calls `setp` directly, so kwargs can include more than just the parameter value (e.g., bounds, free, etc.).
377,209
def get_groups_of_account_apikey(self, account_id, api_key, **kwargs): kwargs[] = True if kwargs.get(): return self.get_groups_of_account_apikey_with_http_info(account_id, api_key, **kwargs) else: (data) = self.get_groups_of_account_apikey_with_http_info(account_id, api_key, **kwargs) return data
Get groups of the API key. # noqa: E501 An endpoint for retrieving groups of the API key. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/accounts/{accountID}/api-keys/{apiKey}/groups -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_groups_of_account_apikey(account_id, api_key, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str account_id: Account ID. (required) :param str api_key: The ID of the API key whose details are retrieved. (required) :param int limit: The number of results to return (2-1000), default is 50. :param str after: The entity ID to fetch after the given one. :param str order: The order of the records based on creation time, ASC or DESC; by default ASC :param str include: Comma separated additional data to return. Currently supported: total_count :return: GroupSummaryList If the method is called asynchronously, returns the request thread.
377,210
def __parse_aliases_line(self, raw_alias, raw_username):
    alias = self.__encode(raw_alias)
    username = self.__encode(raw_username)
    return alias, username
Parse aliases lines
377,211
def calc_piece_size(size, min_piece_size=20, max_piece_size=29, max_piece_count=1000): logger.debug( % size) for i in range(min_piece_size, max_piece_size): if size / (2**i) < max_piece_count: break return 2**i
Calculates a good piece size for a given total size
377,212
def _lscmp(a, b):
    return not sum(0 if x == y else 1 for x, y in zip(a, b)) and len(a) == len(b)
Compares two strings in a cryptographically safe way: runtime is not affected by the length of the common prefix.
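A small, hypothetical usage sketch (the helper and names below are illustrative, not part of the library):

    def tokens_match(supplied, expected):
        # Avoids the early exit of == so a mismatch near the start of the
        # string takes roughly as long to detect as one near the end.
        return _lscmp(supplied, expected)

    print(tokens_match("sessionid=abc123", "sessionid=abc123"))  # True
    print(tokens_match("sessionid=abc124", "sessionid=abc123"))  # False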
377,213
def _sam_to_grouped_umi_cl(data, umi_consensus, tx_out_file): tmp_file = "%s-sorttmp" % utils.splitext_plus(tx_out_file)[0] jvm_opts = _get_fgbio_jvm_opts(data, os.path.dirname(tmp_file), 1) cores, mem = _get_cores_memory(data) bamsormadup = config_utils.get_program("bamsormadup", data) cmd = ("{bamsormadup} tmpfile={tmp_file}-markdup inputformat=sam threads={cores} outputformat=bam " "level=0 SO=coordinate | ") if os.path.exists(umi_consensus) and os.path.isfile(umi_consensus): cmd += "fgbio {jvm_opts} AnnotateBamWithUmis -i /dev/stdin -f {umi_consensus} -o {tx_out_file}" else: cmd += ("%s %s bamtag - | samtools view -b > {tx_out_file}" % (utils.get_program_python("umis"), config_utils.get_program("umis", data["config"]))) return cmd.format(**locals())
Mark duplicates on aligner output and convert to grouped UMIs by position. Works with either a separate umi_file or UMI embedded in the read names.
377,214
def do_reparse(self, arg):
    full = arg == "full"
    from os import path
    fullpath = path.abspath(self.tests[self.active].stagedir)
    self.tests[self.active] = Analysis(fullpath, full)
Reparses the currently active unit test to get the latest test results loaded to the console.
377,215
def quote(key, value): if key in quoted_options and isinstance(value, string_types): return "" % value if key in quoted_bool_options and isinstance(value, bool): return {True:,False:}[value] return value
Certain options support string values. We want clients to be able to pass Python strings in but we need them to be quoted in the output. Unfortunately some of those options also allow numbers so we type check the value before wrapping it in quotes.
377,216
def com_google_fonts_check_smart_dropout(ttFont): INSTRUCTIONS = b"\xb8\x01\xff\x85\xb0\x04\x8d" if ("prep" in ttFont and INSTRUCTIONS in ttFont["prep"].program.getBytecode()): yield PASS, (" table contains instructions" " enabling smart dropout control.") else: yield FAIL, (" table does not contain TrueType " " instructions enabling smart dropout control." " To fix, export the font with autohinting enabled," " or run ttfautohint on the font, or run the " " `gftools fix-nonhinting` script.")
Font enables smart dropout control in "prep" table instructions?
  B8 01 FF    PUSHW 0x01FF
  85          SCANCTRL (unconditionally turn on dropout control mode)
  B0 04       PUSHB 0x04
  8D          SCANTYPE (enable smart dropout control)
Smart dropout control means activating rules 1, 2 and 5:
  Rule 1: If a pixel's center falls within the glyph outline, that pixel is turned on.
  Rule 2: If a contour falls exactly on a pixel's center, that pixel is turned on.
  Rule 5: If a scan line between two adjacent pixel centers (either vertical or horizontal) is intersected by both an on-Transition contour and an off-Transition contour and neither of the pixels was already turned on by rules 1 and 2, turn on the pixel which is closer to the midpoint between the on-Transition contour and off-Transition contour. This is "Smart" dropout control.
377,217
def login(self, username, password, mode="demo"): url = "https://trading212.com/it/login" try: logger.debug(f"visiting %s" % url) self.browser.visit(url) logger.debug(f"connected to %s" % url) except selenium.common.exceptions.WebDriverException: logger.critical("connection timed out") raise try: self.search_name("login[username]").fill(username) self.search_name("login[password]").fill(password) self.css1(path[]).click() timeout = time.time() + 30 while not self.elCss(path[]): if time.time() > timeout: logger.critical("login failed") raise CredentialsException(username) time.sleep(1) logger.info(f"logged in as {username}") logger.debug("weekend trading alert-box closed") except Exception as e: logger.critical("login failed") raise exceptions.BaseExc(e) return True
login function
377,218
def maxdiff_dtu_configurations(list_of_objects):
    result = DtuConfiguration()
    if len(list_of_objects) == 0:
        return result
    list_of_members = result.__dict__.keys()
    for member in list_of_members:
        tmp_array = np.array(
            [tmp_dtu.__dict__[member] for tmp_dtu in list_of_objects]
        )
        minval = tmp_array.min()
        maxval = tmp_array.max()
        result.__dict__[member] = maxval - minval
    return result
Return DtuConfiguration instance with maximum differences. Parameters ---------- list_of_objects : python list List of DtuConfiguration instances to be compared. Returns ------- result : DtuConfiguration instance Object with the maximum difference found for each member.
377,219
def _generate_sdss_object_name( self): self.log.info() converter = unit_conversion( log=self.log ) for row in self.results: raSex = converter.ra_decimal_to_sexegesimal( ra=row["ra"], delimiter=":" ) decSex = converter.dec_decimal_to_sexegesimal( dec=row["dec"], delimiter=":" ) raSex = raSex.replace(":", "")[:9] decSex = decSex.replace(":", "")[:9] sdssName = "SDSS J%(raSex)s%(decSex)s" % locals() row["sdss_name"] = sdssName wordType = ["unknown", "cosmic_ray", "defect", "galaxy", "ghost", "knownobj", "star", "trail", "sky", "notatype", ] numberType = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] row["type"] = wordType[row["type"]] self.log.info() return None
*generate sdss object names for the results* **Key Arguments:** # - **Return:** - None .. todo::
377,220
def remove(self, observableElement):
    if observableElement in self._observables:
        self._observables.remove(observableElement)
remove an observable element :param str observableElement: the name of the observable element
377,221
def location_from_dictionary(d): country = None if in d and in d[]: country = d[][] if in d: data = d[] else: data = d if in data: name = data[] else: name = None if in data: ID = int(data[]) else: ID = None if in data: lon = data[].get(, 0.0) lat = data[].get(, 0.0) elif in data[]: if in data[][]: lon = data[][].get(, 0.0) elif in data[][]: lon = data[][].get(, 0.0) else: lon = 0.0 lat = data[][].get(, 0.0) else: raise KeyError("Impossible to read geographical coordinates from JSON") if in data: country = data[] return Location(name, lon, lat, ID, country)
Builds a *Location* object out of a data dictionary. Only certain properties of the dictionary are used: if these properties are not found or cannot be read, an error is issued. :param d: a data dictionary :type d: dict :returns: a *Location* instance :raises: *KeyError* if it is impossible to find or read the data needed to build the instance
377,222
def from_csv(cls, filename: str): with open(filename, "r", encoding="utf-8") as f: reader = csv.reader(f, delimiter=unicode2str(","), quotechar=unicode2str("\""), quoting=csv.QUOTE_MINIMAL) entries = list() header_read = False elements = None for row in reader: if not header_read: elements = row[1:(len(row) - 1)] header_read = True else: name = row[0] energy = float(row[-1]) comp = dict() for ind in range(1, len(row) - 1): if float(row[ind]) > 0: comp[Element(elements[ind - 1])] = float(row[ind]) entries.append(PDEntry(Composition(comp), energy, name)) return cls(entries)
Imports PDEntries from a csv. Args: filename: Filename to import from. Returns: List of Elements, List of PDEntries
377,223
def connect(self, ip_address, tsap_snap7, tsap_logo, tcpport=102):
    logger.info("connecting to %s:%s tsap_snap7 %s tsap_logo %s"
                % (ip_address, tcpport, tsap_snap7, tsap_logo))
    self.set_param(snap7.snap7types.RemotePort, tcpport)
    self.set_connection_params(ip_address, tsap_snap7, tsap_logo)
    result = self.library.Cli_Connect(self.pointer)
    check_error(result, context="client")
    return result
Connect to a Siemens LOGO server. Howto setup Logo communication configuration see: http://snap7.sourceforge.net/logo.html :param ip_address: IP ip_address of server :param tsap_snap7: TSAP SNAP7 Client (e.g. 10.00 = 0x1000) :param tsap_logo: TSAP Logo Server (e.g. 20.00 = 0x2000)
377,224
def http_responder_factory(proto):
    return GrowlerHTTPResponder(
        proto,
        request_factory=proto.http_application._request_class,
        response_factory=proto.http_application._response_class,
    )
The default factory function which creates a GrowlerHTTPResponder with this object as the parent protocol, and the application's req/res factory functions. To change the default responder, overload this method with the same to return your own responder. Params ------ proto : GrowlerHTTPProtocol Explicitly passed protocol object (actually it's what would be 'self'!) Note ---- This method is decorated with @staticmethod, as the connection_made method of GrowlerProtocol explicitly passes `self` as a parameters, instead of treating as a bound method.
377,225
def segments_distance(segment1, segment2):
    assert isinstance(segment1, LineSegment), \
        "segment1 is not a LineSegment, but a %s" % type(segment1)
    assert isinstance(segment2, LineSegment), \
        "segment2 is not a LineSegment, but a %s" % type(segment2)
    if len(get_segments_intersections(segment1, segment2)) >= 1:
        return 0
    distances = []
    distances.append(point_segment_distance(segment1.p1, segment2))
    distances.append(point_segment_distance(segment1.p2, segment2))
    distances.append(point_segment_distance(segment2.p1, segment1))
    distances.append(point_segment_distance(segment2.p2, segment1))
    return min(distances)
Calculate the distance between two line segments in the plane. >>> a = LineSegment(Point(1,0), Point(2,0)) >>> b = LineSegment(Point(0,1), Point(0,2)) >>> "%0.2f" % segments_distance(a, b) '1.41' >>> c = LineSegment(Point(0,0), Point(5,5)) >>> d = LineSegment(Point(2,2), Point(4,4)) >>> e = LineSegment(Point(2,2), Point(7,7)) >>> "%0.2f" % segments_distance(c, d) '0.00' >>> "%0.2f" % segments_distance(c, e) '0.00'
377,226
def ip_to_array(ipaddress):
    res = []
    for i in ipaddress.split("."):
        res.append(int(i))
    assert len(res) == 4
    return res
Convert a string representing an IPv4 address to 4 bytes.
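A one-line usage example of the helper above:

    print(ip_to_array("192.168.0.1"))  # [192, 168, 0, 1]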
377,227
def remove_internal_names(self):
    self.visit(lambda n: setattr(n, 'name', None), lambda n: not n.is_leaf)
Set the name of all non-leaf nodes in the subtree to None.
377,228
def _convert_to(maybe_device, convert_to): if not convert_to or \ (convert_to == and maybe_device.startswith()) or \ maybe_device.startswith(.format(convert_to.upper())): return maybe_device if maybe_device.startswith(): blkid = __salt__[](maybe_device) else: blkid = __salt__[](token=maybe_device) result = None if len(blkid) == 1: if convert_to == : result = list(blkid.keys())[0] else: key = convert_to.upper() result = .format(key, list(blkid.values())[0][key]) return result
Convert a device name, UUID or LABEL to a device name, UUID or LABEL. Return the fs_spec required for fstab.
377,229
def pull_log_dump(self, project_name, logstore_name, from_time, to_time, file_path, batch_size=None, compress=None, encodings=None, shard_list=None, no_escape=None): file_path = file_path.replace("{}", "{0}") if "{0}" not in file_path: file_path += "{0}" return pull_log_dump(self, project_name, logstore_name, from_time, to_time, file_path, batch_size=batch_size, compress=compress, encodings=encodings, shard_list=shard_list, no_escape=no_escape)
dump all logs seperatedly line into file_path, file_path, the time parameters are log received time on server side. :type project_name: string :param project_name: the Project name :type logstore_name: string :param logstore_name: the logstore name :type from_time: string/int :param from_time: curosr value, could be begin, timestamp or readable time in readable time like "%Y-%m-%d %H:%M:%S<time_zone>" e.g. "2018-01-02 12:12:10+8:00", also support human readable string, e.g. "1 hour ago", "now", "yesterday 0:0:0", refer to https://aliyun-log-cli.readthedocs.io/en/latest/tutorials/tutorial_human_readable_datetime.html :type to_time: string/int :param to_time: curosr value, could be begin, timestamp or readable time in readable time like "%Y-%m-%d %H:%M:%S<time_zone>" e.g. "2018-01-02 12:12:10+8:00", also support human readable string, e.g. "1 hour ago", "now", "yesterday 0:0:0", refer to https://aliyun-log-cli.readthedocs.io/en/latest/tutorials/tutorial_human_readable_datetime.html :type file_path: string :param file_path: file path with {} for shard id. e.g. "/data/dump_{}.data", {} will be replaced with each partition. :type batch_size: int :param batch_size: batch size to fetch the data in each iteration. by default it's 500 :type compress: bool :param compress: if use compression, by default it's True :type encodings: string list :param encodings: encoding like ["utf8", "latin1"] etc to dumps the logs in json format to file. default is ["utf8",] :type shard_list: string :param shard_list: shard number list. could be comma seperated list or range: 1,20,31-40 :type no_escape: bool :param no_escape: if not_escape the non-ANSI, default is to escape, set it to True if don't want it. :return: LogResponse {"total_count": 30, "files": {'file_path_1': 10, "file_path_2": 20} }) :raise: LogException
377,230
def get_sensor_code_by_number(si, mtype, sensor_number, quiet=False): try: if in si[mtype][sensor_number]: orientation = si[mtype][sensor_number][] else: orientation = "" return "%s%s-%s-%s-%s" % (mtype, orientation, si[mtype][sensor_number][], si[mtype][sensor_number][], si[mtype][sensor_number][]) except KeyError: if quiet: return None raise
Given a sensor number, get the full sensor code (e.g. ACCX-UB1-L2C-M) :param si: dict, sensor index json dictionary :param mtype: str, sensor type :param sensor_number: int, number of sensor :param quiet: bool, if true then return None if not found :return: str or None, sensor_code: a sensor code (e.g. ACCX-UB1-L2C-M)
377,231
def item_enclosure_length(self, item): try: return str(item.image.size) except (AttributeError, ValueError, os.error): pass return
Try to obtain the size of the enclosure if it's present on the FS, otherwise returns a hardcoded value. Note: this method is only called if item_enclosure_url has returned something.
377,232
def find_users(session, *usernames): user_string = .join(usernames) return _make_request(session, FIND_USERS_URL, user_string)
Find multiple users by name.
377,233
def sos_get_command_output(command, timeout=300, stderr=False, chroot=None, chdir=None, env=None, binary=False, sizelimit=None, poller=None): def _child_prep_fn(): if (chroot): os.chroot(chroot) if (chdir): os.chdir(chdir) cmd_env = os.environ.copy() cmd_env[] = if env: for key, value in env.items(): if value: cmd_env[key] = value else: cmd_env.pop(key, None) if timeout and is_executable("timeout"): command = "timeout %ds %s" % (timeout, command) if not six.PY3: command = command.encode(, ) args = shlex.split(command) expanded_args = [] for arg in args: expanded_arg = glob.glob(arg) if expanded_arg: expanded_args.extend(expanded_arg) else: expanded_args.append(arg) try: p = Popen(expanded_args, shell=False, stdout=PIPE, stderr=STDOUT if stderr else PIPE, bufsize=-1, env=cmd_env, close_fds=True, preexec_fn=_child_prep_fn) reader = AsyncReader(p.stdout, sizelimit, binary) if poller: while reader.running: if poller(): p.terminate() raise SoSTimeoutError stdout = reader.get_contents() while p.poll() is None: pass except OSError as e: if e.errno == errno.ENOENT: return {: 127, : ""} else: raise e if p.returncode == 126 or p.returncode == 127: stdout = six.binary_type(b"") return { : p.returncode, : stdout }
Execute a command and return a dictionary of status and output, optionally changing root or current working directory before executing command.
377,234
def create_volume(self, volume, size, **kwargs):
    data = {"size": size}
    data.update(kwargs)
    return self._request("POST", "volume/{0}".format(volume), data)
Create a volume and return a dictionary describing it. :param volume: Name of the volume to be created. :type volume: str :param size: Size in bytes, or string representing the size of the volume to be created. :type size: int or str :param \*\*kwargs: See the REST API Guide on your array for the documentation on the request: **POST volume/:volume** :type \*\*kwargs: optional :returns: A dictionary describing the created volume. :rtype: ResponseDict .. note:: The maximum volume size supported is 4 petabytes (4 * 2^50). .. note:: If size is an int, it must be a multiple of 512. .. note:: If size is a string, it must consist of an integer followed by a valid suffix. Accepted Suffixes ====== ======== ====== Suffix Size Bytes ====== ======== ====== S Sector (2^9) K Kilobyte (2^10) M Megabyte (2^20) G Gigabyte (2^30) T Terabyte (2^40) P Petabyte (2^50) ====== ======== ======
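To make the suffix table concrete, here is a small, hypothetical helper (not part of the REST client) that converts a suffixed size string into bytes following the table above:

    # Hypothetical helper illustrating the suffix table; not part of the API.
    _SUFFIX_SHIFT = {"S": 9, "K": 10, "M": 20, "G": 30, "T": 40, "P": 50}

    def size_to_bytes(size):
        if isinstance(size, int):
            assert size % 512 == 0, "integer sizes must be a multiple of 512"
            return size
        number, suffix = int(size[:-1]), size[-1].upper()
        return number << _SUFFIX_SHIFT[suffix]

    print(size_to_bytes("5G"))   # 5368709120
    print(size_to_bytes(1024))   # 1024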
377,235
def _send_resource(self, environ, start_response, is_head_method): path = environ["PATH_INFO"] res = self._davProvider.get_resource_inst(path, environ) if util.get_content_length(environ) != 0: self._fail( HTTP_MEDIATYPE_NOT_SUPPORTED, "The server does not handle any body content.", ) elif environ.setdefault("HTTP_DEPTH", "0") != "0": self._fail(HTTP_BAD_REQUEST, "Only Depth: 0 supported.") elif res is None: self._fail(HTTP_NOT_FOUND) elif res.is_collection: self._fail( HTTP_FORBIDDEN, "Directory browsing is not enabled." "(to enable it put WsgiDavDirBrowser into middleware_stack" "option and set dir_browser -> enabled = True option.)", ) self._evaluate_if_headers(res, environ) filesize = res.get_content_length() if filesize is None: filesize = -1 last_modified = res.get_last_modified() if last_modified is None: last_modified = -1 entitytag = res.get_etag() if entitytag is None: entitytag = "[]" doignoreranges = ( not res.support_content_length() or not res.support_ranges() or filesize == 0 ) if ( "HTTP_RANGE" in environ and "HTTP_IF_RANGE" in environ and not doignoreranges ): ifrange = environ["HTTP_IF_RANGE"] secstime = util.parse_time_string(ifrange) if secstime: if last_modified != secstime: doignoreranges = True else: ifrange = ifrange.strip() if entitytag is None or ifrange != entitytag: doignoreranges = True ispartialranges = False if "HTTP_RANGE" in environ and not doignoreranges: ispartialranges = True list_ranges, _totallength = util.obtain_content_ranges( environ["HTTP_RANGE"], filesize ) if len(list_ranges) == 0: self._fail(HTTP_RANGE_NOT_SATISFIABLE) (range_start, range_end, range_length) = list_ranges[0] else: (range_start, range_end, range_length) = (0, filesize - 1, filesize) mimetype = res.get_content_type() response_headers = [] if res.support_content_length(): response_headers.append(("Content-Length", str(range_length))) if res.support_modified(): response_headers.append( ("Last-Modified", util.get_rfc1123_time(last_modified)) ) response_headers.append(("Content-Type", mimetype)) response_headers.append(("Date", util.get_rfc1123_time())) if res.support_etag(): response_headers.append(("ETag", .format(entitytag))) if "response_headers" in environ["wsgidav.config"]: customHeaders = environ["wsgidav.config"]["response_headers"] for header, value in customHeaders: response_headers.append((header, value)) res.finalize_headers(environ, response_headers) if ispartialranges: response_headers.append( ( "Content-Range", "bytes {}-{}/{}".format(range_start, range_end, filesize), ) ) start_response("206 Partial Content", response_headers) else: start_response("200 OK", response_headers) if is_head_method: yield b"" return fileobj = res.get_content() if not doignoreranges: fileobj.seek(range_start) contentlengthremaining = range_length while 1: if contentlengthremaining < 0 or contentlengthremaining > self.block_size: readbuffer = fileobj.read(self.block_size) else: readbuffer = fileobj.read(contentlengthremaining) assert compat.is_bytes(readbuffer) yield readbuffer contentlengthremaining -= len(readbuffer) if len(readbuffer) == 0 or contentlengthremaining == 0: break fileobj.close() return
If-Range If the entity is unchanged, send me the part(s) that I am missing; otherwise, send me the entire new entity If-Range: "737060cd8c284d8af7ad3082f209582d" @see: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.27
377,236
def can_rename(self):
    return len(self._paths) == 1 and (
        self._paths[0].is_file() or
        blobxfer.models.upload.LocalSourcePath.is_stdin(
            str(self._paths[0]))
    )
Check if source can be renamed :param LocalSourcePath self: this :rtype: bool :return: if rename possible
377,237
def set_frame_parameters(self, profile_index: int, frame_parameters) -> None:
    self.frame_parameters_changed_event.fire(profile_index, frame_parameters)
Set the frame parameters with the settings index and fire the frame parameters changed event. If the settings index matches the current settings index, call set current frame parameters. If the settings index matches the record settings index, call set record frame parameters.
377,238
def _cleanup_workflow(config, task_id, args, **kwargs): from lightflow.models import Workflow if isinstance(args[0], Workflow): if config.celery[] == 0: AsyncResult(task_id).forget()
Cleanup the results of a workflow when it finished. Connects to the postrun signal of Celery. If the signal was sent by a workflow, remove the result from the result backend. Args: task_id (str): The id of the task. args (tuple): The arguments the task was started with. **kwargs: Keyword arguments from the hook.
377,239
def to_(self, data_pts):
    data_pts = np.asarray(data_pts, dtype=np.float)
    has_z = (data_pts.shape[-1] > 2)
    if self.use_center:
        data_pts = data_pts - self.viewer.data_off
    ref_pt = [self.viewer._org_x, self.viewer._org_y]
    if has_z:
        ref_pt.append(self.viewer._org_z)
    off_pts = np.subtract(data_pts, ref_pt)
    return off_pts
Reverse of :meth:`from_`.
377,240
def info(self, exp_path=False, project_path=False, global_path=False, config_path=False, complete=False, no_fix=False, on_projects=False, on_globals=False, projectname=None, return_dict=False, insert_id=True, only_keys=False, archives=False, **kwargs): self.app_main(**kwargs) def get_archives(project): ret = OrderedDict() for exp, a in self.config.experiments.items(): if self.is_archived(exp) and a.project == project: ret.setdefault(str(a), []).append(exp) return ret paths = OrderedDict([ (, config_path), (, global_path)]) if any(paths.values()): for key, val in paths.items(): if val: return (self.print_ or six.print_)(getattr( self.config, key)) return if archives: base = OrderedDict() current = projectname or self.projectname if complete: for project in self.config.projects.keys(): d = get_archives(project) if d: base[project] = d else: base[current] = get_archives(current) elif exp_path: current = self.experiment base = self.config.experiments.exp_files elif project_path: current = self.projectname base = OrderedDict( (key, osp.join(val, , )) for key, val in self.config.projects.project_paths.items()) elif on_globals: complete = True no_fix = True base = self.config.global_config elif on_projects: base = OrderedDict(self.config.projects) current = projectname or self.projectname else: current = self.experiment if projectname is None: if insert_id: base = copy.deepcopy(self.config.experiments) if not complete: base[current][] = current if six.PY3: base[current].move_to_end(, last=False) else: base = self.config.experiments if not only_keys: if complete: base.load() else: base[current] base = base.as_ordereddict() else: base = OrderedDict( (exp, self.config.experiments[exp]) for exp in self.config.experiments.project_map[projectname] ) complete = True if no_fix and not (archives or on_globals): for key, val in base.items(): if isinstance(val, dict): base[key] = self.rel_paths(copy.deepcopy(val)) if not complete: base = base[current] if only_keys: base = list(base.keys()) if not return_dict: if isinstance(base, six.string_types): ret = base else: ret = ordered_yaml_dump(base, default_flow_style=False) return (self.print_ or six.print_)(ret.rstrip()) else: return base
Print information on the experiments Parameters ---------- exp_path: bool If True/set, print the filename of the experiment configuration project_path: bool If True/set, print the filename on the project configuration global_path: bool If True/set, print the filename on the global configuration config_path: bool If True/set, print the path to the configuration directory complete: bool If True/set, the information on all experiments are printed no_fix: bool If set, paths are given relative to the root directory of the project on_projects: bool If set, show information on the projects rather than the experiment on_globals: bool If set, show the global configuration settings projectname: str The name of the project that shall be used. If provided and `on_projects` is not True, the information on all experiments for this project will be shown return_dict: bool If True, the dictionary is returned instead of printed insert_id: bool If True and neither `on_projects`, nor `on_globals`, nor `projectname` is given, the experiment id is inserted in the dictionary only_keys: bool If True, only the keys of the given dictionary are printed archives: bool If True, print the archives and the corresponding experiments for the specified project
377,241
def squared_toroidal_dist(p1, p2, world_size=(60, 60)):
    halfx = world_size[0] / 2.0
    if world_size[0] == world_size[1]:
        halfy = halfx
    else:
        halfy = world_size[1] / 2.0
    deltax = p1[0] - p2[0]
    if deltax < -halfx:
        deltax += world_size[0]
    elif deltax > halfx:
        deltax -= world_size[0]
    deltay = p1[1] - p2[1]
    if deltay < -halfy:
        deltay += world_size[1]
    elif deltay > halfy:
        deltay -= world_size[1]
    return deltax * deltax + deltay * deltay
Separated out because sqrt has a lot of overhead
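A quick check of the wraparound on the default 60x60 world (assuming the function above is used as-is): points near opposite edges are close once the torus is taken into account.

    print(squared_toroidal_dist((1, 30), (59, 30)))  # 4, since dx wraps to 2, not 58
    print(squared_toroidal_dist((0, 0), (30, 30)))   # 1800, the maximal separation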
377,242
def _make_sprite_image(images, save_path): if isinstance(images, np.ndarray): images = nd.array(images, dtype=images.dtype, ctx=current_context()) elif not isinstance(images, (NDArray, np.ndarray)): raise TypeError( .format(str(type(images)))) assert isinstance(images, NDArray) shape = images.shape nrow = int(np.ceil(np.sqrt(shape[0]))) _save_image( images, os.path.join(save_path, ), nrow=nrow, padding=0, square_image=True)
Given an NDArray as a batch images, make a sprite image out of it following the rule defined in https://www.tensorflow.org/programmers_guide/embedding and save it in sprite.png under the path provided by the user.
377,243
def phase_fraction(im, normed=True):
    if im.dtype == bool:
        im = im.astype(int)
    elif im.dtype != int:
        raise Exception()
    labels = sp.arange(0, sp.amax(im) + 1)
    results = sp.zeros_like(labels)
    for i in labels:
        results[i] = sp.sum(im == i)
    if normed:
        results = results / im.size
    return results
r""" Calculates the number (or fraction) of each phase in an image Parameters ---------- im : ND-array An ND-array containing integer values normed : Boolean If ``True`` (default) the returned values are normalized by the total number of voxels in image, otherwise the voxel count of each phase is returned. Returns ------- result : 1D-array A array of length max(im) with each element containing the number of voxels found with the corresponding label. See Also -------- porosity
377,244
def calc_route_info(self, real_time=True, stop_at_bounds=False, time_delta=0): route = self.get_route(1, time_delta) results = route[] route_time, route_distance = self._add_up_route(results, real_time=real_time, stop_at_bounds=stop_at_bounds) self.log.info(, route_time, route_distance) return route_time, route_distance
Calculate best route info.
377,245
def listdir(self, path=): self._connect() if self.sftp: contents = self._sftp_listdir(path) else: contents = self._ftp_listdir(path) self._close() return contents
Gets a list of the contents of path via (s)FTP
377,246
def exclude(self, *args, **kwargs):
    if 'path' in kwargs:
        kwargs = self.get_filter_args_with_path(False, **kwargs)
    return super(FileNodeManager, self).exclude(*args, **kwargs)
Works just like the default Manager's :func:`exclude` method, but you can pass an additional keyword argument named ``path`` specifying the full **path of the folder whose immediate child objects** you want to exclude, e.g. ``"path/to/folder"``.
377,247
def get_kafka_brokers(): if not os.environ.get(): raise RuntimeError() return [.format(parsedUrl.hostname, parsedUrl.port) for parsedUrl in [urlparse(url) for url in os.environ.get().split()]]
Parses the KAFKA_URL and returns a list of hostname:port pairs in the format that kafka-python expects.
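A hedged sketch of the parsing the docstring describes (the URL below is made up; the real value comes from the environment):

    import os
    from urllib.parse import urlparse

    # Hypothetical value; in practice the variable is provided by the Kafka add-on.
    os.environ["KAFKA_URL"] = "kafka+ssl://host1:9096,kafka+ssl://host2:9096"

    brokers = ["{0}:{1}".format(u.hostname, u.port)
               for u in (urlparse(url) for url in os.environ["KAFKA_URL"].split(","))]
    print(brokers)  # ['host1:9096', 'host2:9096']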
377,248
def create_from_fits(cls, fitsfile, norm_type=, hdu_scan="SCANDATA", hdu_energies="EBOUNDS", irow=None): if irow is not None: tab_s = Table.read(fitsfile, hdu=hdu_scan)[irow] else: tab_s = Table.read(fitsfile, hdu=hdu_scan) tab_e = Table.read(fitsfile, hdu=hdu_energies) tab_s = convert_sed_cols(tab_s) tab_e = convert_sed_cols(tab_e) return cls.create_from_tables(norm_type, tab_s, tab_e)
Create a CastroData object from a tscube FITS file. Parameters ---------- fitsfile : str Name of the fits file norm_type : str Type of normalization to use. Valid options are: * norm : Normalization w.r.t. to test source * flux : Flux of the test source ( ph cm^-2 s^-1 ) * eflux: Energy Flux of the test source ( MeV cm^-2 s^-1 ) * npred: Number of predicted photons (Not implemented) * dnde : Differential flux of the test source ( ph cm^-2 s^-1 MeV^-1 ) hdu_scan : str Name of the FITS HDU with the scan data hdu_energies : str Name of the FITS HDU with the energy binning and normalization data irow : int or None If none, then this assumes that there is a single row in the scan data table Otherwise, this specifies which row of the table to use Returns ------- castro : `~fermipy.castro.CastroData`
377,249
def __upload(self, resource, bytes): headers = { : http_time(self.options.get(, self._DEFAULT_EXPIRE)), : str(self._file_size), : self.content_type } return Request(self._client, , resource, domain=self._DEFAULT_DOMAIN, headers=headers, body=bytes).perform()
Performs a single chunk upload.
377,250
def start (self): self.sub = rospy.Subscriber(self.topic, ImageROS, self.__callback)
Starts (Subscribes) the client.
377,251
def restore_repository_from_recycle_bin(self, repository_details, project, repository_id): route_values = {} if project is not None: route_values[] = self._serialize.url(, project, ) if repository_id is not None: route_values[] = self._serialize.url(, repository_id, ) content = self._serialize.body(repository_details, ) response = self._send(http_method=, location_id=, version=, route_values=route_values, content=content) return self._deserialize(, response)
RestoreRepositoryFromRecycleBin. [Preview API] Recover a soft-deleted Git repository. Recently deleted repositories go into a soft-delete state for a period of time before they are hard deleted and become unrecoverable. :param :class:`<GitRecycleBinRepositoryDetails> <azure.devops.v5_1.git.models.GitRecycleBinRepositoryDetails>` repository_details: :param str project: Project ID or project name :param str repository_id: The ID of the repository. :rtype: :class:`<GitRepository> <azure.devops.v5_1.git.models.GitRepository>`
377,252
def get(cls, resource_id=None, parent_id=None, grandparent_id=None):
    if not resource_id:
        return cls._get_all(parent_id, grandparent_id)
    else:
        return cls._get(resource_id, parent_id, grandparent_id)
Retrieves the required resources. :param resource_id: The identifier for the specific resource within the resource type. :param parent_id: The identifier for the specific ancestor resource within the resource type. :param grandparent_id: The identifier that is associated with network objects that are ancestors of the parent of the necessary resource.
377,253
def sendstop(self): if not self.is_started: raise EasyProcessError(self, ) log.debug(, self.pid, self.cmd) if self.popen: if self.is_alive(): log.debug() try: try: self.popen.terminate() except AttributeError: os.kill(self.popen.pid, signal.SIGKILL) except OSError as oserror: log.debug(, oserror) else: log.debug() else: log.debug() return self
Kill process (:meth:`subprocess.Popen.terminate`). Do not wait for command to complete. :rtype: self
377,254
def unmarshal(self, value, bind_client=None): if not isinstance(value, self.type): o = self.type() if bind_client is not None and hasattr(o.__class__, ): o.bind_client = bind_client if isinstance(value, dict): for (k, v) in value.items(): if not hasattr(o.__class__, k): self.log.warning("Unable to set attribute {0} on entity {1!r}".format(k, o)) else: setattr(o, k, v) value = o else: raise Exception("Unable to unmarshall object {0!r}".format(value)) return value
Cast the specified value to the entity type.
377,255
def create(self, ospf_process_id, vrf=None): value = int(ospf_process_id) if not 0 < value < 65536: raise ValueError() command = .format(ospf_process_id) if vrf: command += % vrf return self.configure(command)
Creates a OSPF process in the specified VRF or the default VRF. Args: ospf_process_id (str): The OSPF process Id value vrf (str): The VRF to apply this OSPF process to Returns: bool: True if the command completed successfully Exception: ValueError: If the ospf_process_id passed in less than 0 or greater than 65536
377,256
def propose_value(self, value, assume_leader=False): if value is None: raise ValueError("Not allowed to propose value None") paxos = self.paxos_instance paxos.leader = assume_leader msg = paxos.propose_value(value) if msg is None: msg = paxos.prepare() self.setattrs_from_paxos(paxos) self.announce(msg) return msg
Proposes a value to the network.
377,257
def move(self, remote_path_from, remote_path_to, overwrite=False): urn_from = Urn(remote_path_from) if not self.check(urn_from.path()): raise RemoteResourceNotFound(urn_from.path()) urn_to = Urn(remote_path_to) if not self.check(urn_to.parent()): raise RemoteParentNotFound(urn_to.path()) header_destination = f header_overwrite = f self.execute_request(action=, path=urn_from.quote(), headers_ext=[header_destination, header_overwrite])
Moves resource from one place to another on WebDAV server. More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_MOVE :param remote_path_from: the path to resource which will be moved, :param remote_path_to: the path where resource will be moved. :param overwrite: (optional) the flag, overwrite file if it exists. Defaults is False
377,258
def _get_config(self, path=None): if not path and not self.option("config"): raise Exception("The --config|-c option is missing.") if not path: path = self.option("config") filename, ext = os.path.splitext(path) if ext in [".yml", ".yaml"]: with open(path) as fd: config = yaml.load(fd) elif ext in [".py"]: config = {} with open(path) as fh: exec(fh.read(), {}, config) else: raise RuntimeError("Config file [%s] is not supported." % path) return config
Get the config. :rtype: dict
377,259
def data(self, name, chunk, body): self.callRemote(Data, name=name, chunk=chunk, body=body)
Issue a DATA command. Sends a chunk of data to a peer. Returns None.
377,260
def evaluate_report(report):
    if report["valid"]:
        return
    for warn in report["warnings"]:
        LOGGER.warning(warn)
    for err in report["tables"][0]["errors"]:
        LOGGER.error(err["message"])
    raise ValueError("Invalid data file. Please see errors above.")
Iterate over validation errors.
377,261
def add_generator_action(self, action):
    if not isinstance(action, GeneratorAction):
        raise RuntimeError()
    self.__generator_actions.append(action)
Attach/add one :class:`GeneratorAction`. Warning: The order in which you add :class:`GeneratorAction` objects **is** important in case of conflicting :class:`GeneratorAction` objects: **only** the **first compatible** :class:`GeneratorAction` object will be used to generate the (source code) files.
377,262
def add_command_line_options(cls, parser):
    if "add_argument" in dir(parser):
        return cls.add_command_line_options_argparse(parser)
    else:
        return cls.add_command_line_options_optparse(parser)
function to inject command line parameters
377,263
def add_color(self, name, model, description):
    if self.color is False:
        self.packages.append(Package("color"))
        self.color = True
    self.preamble.append(Command("definecolor",
                                 arguments=[name, model, description]))
r"""Add a color that can be used throughout the document. Args ---- name: str Name to set for the color model: str The color model to use when defining the color description: str The values to use to define the color
377,264
def dump_table_as_insert_sql(engine: Engine, table_name: str, fileobj: TextIO, wheredict: Dict[str, Any] = None, include_ddl: bool = False, multirow: bool = False) -> None: log.info("dump_data_as_insert_sql: table_name={}", table_name) writelines_nl(fileobj, [ SEP1, sql_comment("Data for table: {}".format(table_name)), SEP2, sql_comment("Filters: {}".format(wheredict)), ]) dialect = engine.dialect if not dialect.supports_multivalues_insert: multirow = False if multirow: log.warning("dump_data_as_insert_sql: multirow parameter substitution " "not working yet") multirow = False meta = MetaData(bind=engine) log.debug("... retrieving schema") table = Table(table_name, meta, autoload=True) if include_ddl: log.debug("... producing DDL") dump_ddl(table.metadata, dialect_name=engine.dialect.name, fileobj=fileobj) log.debug("... fetching records") query = select(table.columns) if wheredict: for k, v in wheredict.items(): col = table.columns.get(k) query = query.where(col == v) cursor = engine.execute(query) if multirow: row_dict_list = [] for r in cursor: row_dict_list.append(dict(r)) if row_dict_list: statement = table.insert().values(row_dict_list) insert_str = get_literal_query(statement, bind=engine) writeline_nl(fileobj, insert_str) else: writeline_nl(fileobj, sql_comment("No data!")) else: found_one = False for r in cursor: found_one = True row_dict = dict(r) statement = table.insert(values=row_dict) insert_str = get_literal_query(statement, bind=engine) writeline_nl(fileobj, insert_str) if not found_one: writeline_nl(fileobj, sql_comment("No data!")) writeline_nl(fileobj, SEP2) log.debug("... done")
Reads a table from the database, and writes SQL to replicate the table's data to the output ``fileobj``. Args: engine: SQLAlchemy :class:`Engine` table_name: name of the table fileobj: file-like object to write to wheredict: optional dictionary of ``{column_name: value}`` to use as ``WHERE`` filters include_ddl: if ``True``, include the DDL to create the table as well multirow: write multi-row ``INSERT`` statements
377,265
def imread(files, **kwargs): kwargs_file = parse_kwargs(kwargs, , , , , , , , , ) kwargs_seq = parse_kwargs(kwargs, ) if kwargs.get(, None) is not None: if kwargs.get(, None) is not None: raise TypeError( "the and arguments cannot be used together") log.warning("imread: the argument is deprecated") kwargs[] = kwargs.pop() if isinstance(files, basestring) and any(i in files for i in ): files = glob.glob(files) if not files: raise ValueError() if not hasattr(files, ) and len(files) == 1: files = files[0] if isinstance(files, basestring) or hasattr(files, ): with TiffFile(files, **kwargs_file) as tif: return tif.asarray(**kwargs) else: with TiffSequence(files, **kwargs_seq) as imseq: return imseq.asarray(**kwargs)
Return image data from TIFF file(s) as numpy array. Refer to the TiffFile and TiffSequence classes and their asarray functions for documentation. Parameters ---------- files : str, binary stream, or sequence File name, seekable binary stream, glob pattern, or sequence of file names. kwargs : dict Parameters 'name', 'offset', 'size', 'multifile', and 'is_ome' are passed to the TiffFile constructor. The 'pattern' parameter is passed to the TiffSequence constructor. Other parameters are passed to the asarray functions. The first image series in the file is returned if no arguments are provided.
377,266
def save(self, filepath=None, filename=None, mode="md"): if mode not in ["html", "md", "markdown"]: raise ValueError("`mode` must be , or ," " got {0}".format(mode)) self._make_soup() file = get_path(filepath, filename, mode, self.column.name, self.title + + self.author.name) with open(file, ) as f: if mode == "html": f.write(self.soup[].encode()) else: import html2text h2t = html2text.HTML2Text() h2t.body_width = 0 f.write(h2t.handle(self.soup[]).encode())
保存答案为 Html 文档或 markdown 文档. :param str filepath: 要保存的文件所在的目录, 不填为当前目录下以专栏标题命名的目录, 设为"."则为当前目录。 :param str filename: 要保存的文件名, 不填则默认为 所在文章标题 - 作者名.html/md。 如果文件已存在,自动在后面加上数字区分。 **自定义文件名时请不要输入后缀 .html 或 .md。** :param str mode: 保存类型,可选 `html` 、 `markdown` 、 `md` 。 :return: 无 :rtype: None
377,267
def send_reminder(self, user, sender=None, **kwargs):
    if user.is_active:
        return False
    token = RegistrationTokenGenerator().make_token(user)
    kwargs.update({"token": token})
    self.email_message(
        user, self.reminder_subject, self.reminder_body, sender, **kwargs
    ).send()
Sends a reminder email to the specified user
377,268
def nmb_weights_hidden(self) -> int:
    nmb = 0
    for idx_layer in range(self.nmb_layers - 1):
        nmb += self.nmb_neurons[idx_layer] * self.nmb_neurons[idx_layer + 1]
    return nmb
Number of hidden weights. >>> from hydpy import ANN >>> ann = ANN(None) >>> ann(nmb_inputs=2, nmb_neurons=(4, 3, 2), nmb_outputs=3) >>> ann.nmb_weights_hidden 18
377,269
def phot(fits_filename, x_in, y_in, aperture=15, sky=20, swidth=10, apcor=0.3, maxcount=30000.0, exptime=1.0, zmag=None, extno=0, centroid=True): if not hasattr(x_in, ): x_in = [x_in, ] if not hasattr(y_in, ): y_in = [y_in, ] if (not os.path.exists(fits_filename) and not fits_filename.endswith(".fits")): fits_filename += ".fits" try: input_hdulist = fits.open(fits_filename) except Exception as err: logger.debug(str(err)) raise TaskError("Failed to open input image: %s" % err.message) filter_name = input_hdulist[extno].header.get(, ) zeropoints = {"I": 25.77, "R": 26.07, "V": 26.07, "B": 25.92, "DEFAULT": 26.0, "g.MP9401": 32.0, : 31.9, : 33.520} if zmag is None: logger.warning("No zmag supplied to daophot, looking for header or default values.") zmag = input_hdulist[extno].header.get(, zeropoints[filter_name]) logger.warning("Setting zmag to: {}".format(zmag)) for zpu_file in ["{}.zeropoint.used".format(os.path.splitext(fits_filename)[0]), "zeropoint.used"]: if os.access(zpu_file, os.R_OK): with open(zpu_file) as zpu_fh: zmag = float(zpu_fh.read()) logger.warning("Using file {} to set zmag to: {}".format(zpu_file, zmag)) break photzp = input_hdulist[extno].header.get(, zeropoints.get(filter_name, zeropoints["DEFAULT"])) if zmag != photzp: logger.warning(("zmag sent to daophot: ({}) " "doesnnononononodaophotMAGPIERSIERCIER'][0] != 0: os.remove(coofile.name) os.remove(magfile.name) logger.debug("Computed aperture photometry on {} objects in {}".format(len(pdump_out), fits_filename)) del input_hdulist return pdump_out
Compute the centroids and magnitudes of a bunch sources on fits image. :rtype : astropy.table.Table :param fits_filename: Name of fits image to measure source photometry on. :type fits_filename: str :param x_in: x location of source to measure :type x_in: float, numpy.array :param y_in: y location of source to measure :type y_in: float, numpy.array :param aperture: radius of circular aperture to use. :type aperture: float :param sky: radius of inner sky annulus :type sky: float :param swidth: width of the sky annulus :type swidth: float :param apcor: Aperture correction to take aperture flux to full flux. :type apcor: float :param maxcount: maximum linearity in the image. :type maxcount: float :param exptime: exposure time, relative to zmag supplied :type exptime: float :param zmag: zeropoint magnitude :param extno: extension of fits_filename the x/y location refers to.
377,270
def state(self, state): assert state != self.buildstate.state.current = state self.buildstate.state[state] = time() self.buildstate.state.lasttime = time() self.buildstate.state.error = False self.buildstate.state.exception = None self.buildstate.state.exception_type = None self.buildstate.commit() if state in (self.STATES.NEW, self.STATES.CLEANED, self.STATES.BUILT, self.STATES.FINALIZED, self.STATES.SOURCE): state = state if state != self.STATES.CLEANED else self.STATES.NEW self.dstate = state
Set the current build state and record the time to maintain history. Note! This is different from the dataset state. Setting the build set is commiteed to the progress table/database immediately. The dstate is also set, but is not committed until the bundle is committed. So, the dstate changes more slowly.
377,271
def fingerprint( self, phrase, phonetic_algorithm=double_metaphone, joiner=, *args, **kwargs ): phonetic = for word in phrase.split(): word = phonetic_algorithm(word, *args, **kwargs) if not isinstance(word, text_type) and hasattr(word, ): word = word[0] phonetic += word + joiner phonetic = phonetic[: -len(joiner)] return super(self.__class__, self).fingerprint(phonetic)
Return the phonetic fingerprint of a phrase. Parameters ---------- phrase : str The string from which to calculate the phonetic fingerprint phonetic_algorithm : function A phonetic algorithm that takes a string and returns a string (presumably a phonetic representation of the original string). By default, this function uses :py:func:`.double_metaphone`. joiner : str The string that will be placed between each word *args Variable length argument list **kwargs Arbitrary keyword arguments Returns ------- str The phonetic fingerprint of the phrase Examples -------- >>> pf = Phonetic() >>> pf.fingerprint('The quick brown fox jumped over the lazy dog.') '0 afr fks jmpt kk ls prn tk' >>> from abydos.phonetic import soundex >>> pf.fingerprint('The quick brown fox jumped over the lazy dog.', ... phonetic_algorithm=soundex) 'b650 d200 f200 j513 l200 o160 q200 t000'
377,272
def get_settings_from_client(client): settings = { : , : , : , : , } try: settings[] = client.auth.username settings[] = client.auth.api_key except AttributeError: pass transport = _resolve_transport(client.transport) try: settings[] = transport.timeout settings[] = transport.endpoint_url except AttributeError: pass return settings
Pull out settings from a SoftLayer.BaseClient instance. :param client: SoftLayer.BaseClient instance
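A minimal usage sketch, assuming a SoftLayer client built from the environment (credentials shown are placeholders):

import SoftLayer

client = SoftLayer.create_client_from_env(username='demo', api_key='not-a-real-key')
settings = get_settings_from_client(client)
print(settings['endpoint_url'])   # keys: username, api_key, timeout, endpoint_url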
377,273
def extended_key_usage(self): try: ext = self.x509.extensions.get_extension_for_oid(ExtensionOID.EXTENDED_KEY_USAGE) except x509.ExtensionNotFound: return None return ExtendedKeyUsage(ext)
The :py:class:`~django_ca.extensions.ExtendedKeyUsage` extension, or ``None`` if it doesn't exist.
377,274
def add_status_code(code): def class_decorator(cls): cls.status_code = code _mprpc_exceptions[code] = cls return cls return class_decorator
Decorator that registers a standard mprpc exception class in `_mprpc_exceptions`.

Parameters:
    code (int): - the standard status code

Return:
    (Callable): - the decorating function
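A short sketch of registering a custom exception class with the decorator (the class name is illustrative):

@add_status_code(504)
class GatewayTimeoutError(Exception):
    """Raised when the remote call does not answer in time."""

assert _mprpc_exceptions[504] is GatewayTimeoutError
assert GatewayTimeoutError.status_code == 504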
377,275
def set_system_time(self, time_source, ntp_server, date_format, time_format, time_zone, is_dst, dst, year, mon, day, hour, minute, sec, callback=None): if ntp_server not in [, , , , ]: raise ValueError() params = {: time_source, : ntp_server, : date_format, : time_format, : time_zone, : is_dst, : dst, : year, : mon, : day, : hour, : minute, : sec } return self.execute_command(, params, callback=callback)
Set system time
377,276
def get_command(self, ctx: click.Context, name: str) -> click.Command: info = ctx.ensure_object(ScriptInfo) command = None try: command = info.load_app().cli.get_command(ctx, name) except NoAppException: pass if command is None: command = super().get_command(ctx, name) return command
Return the relevant command given the context and name.

.. warning::
    This differs substantially from Flask in that it allows for the
    inbuilt commands to be overridden.
377,277
def pressure_tendency(code: str, unit: str = 'mb') -> str:
    width, precision = int(code[2:4]), code[4]
    # The original return string was garbled in extraction; the wording, the 'mb'
    # default unit and the PRESSURE_TENDENCIES lookup name are assumptions
    # reconstructed from the docstring example
    # ("50123 -> 12.3 mb: Increasing, then decreasing").
    return f'{width}.{precision} {unit}: {PRESSURE_TENDENCIES[code[1]]}'
Translates a 5-digit pressure outlook code

Ex: 50123 -> 12.3 mb: Increasing, then decreasing
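Following the docstring's example, the five digits decode as a tendency character (code[1]), a width (code[2:4]) and a precision digit (code[4]), assuming the return format reconstructed above:

# '50123': width '12', precision '3', tendency key '0'
pressure_tendency('50123')
# -> '12.3 mb: Increasing, then decreasing'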
377,278
def _mod_run_check(cmd_kwargs, onlyif, unless): if onlyif: if __salt__[](onlyif, **cmd_kwargs) != 0: return {: , : True, : True} if unless: if __salt__[](unless, **cmd_kwargs) == 0: return {: , : True, : True} return True
Execute the onlyif and unless logic. Return a result dict if: * onlyif failed (onlyif != 0) * unless succeeded (unless == 0) else return True
377,279
def run_to_selected_state(self, path, state_machine_id=None): if self.state_machine_manager.get_active_state_machine() is not None: self.state_machine_manager.get_active_state_machine().root_state.recursively_resume_states() if not self.finished_or_stopped(): logger.debug("Resume execution engine and run to selected state!") self.run_to_states = [] self.run_to_states.append(path) self.set_execution_mode(StateMachineExecutionStatus.RUN_TO_SELECTED_STATE) else: logger.debug("Start execution engine and run to selected state!") if state_machine_id is not None: self.state_machine_manager.active_state_machine_id = state_machine_id self.set_execution_mode(StateMachineExecutionStatus.RUN_TO_SELECTED_STATE) self.run_to_states = [] self.run_to_states.append(path) self._run_active_state_machine()
Execute the state machine until a specific state. This state won't be executed. This is an asynchronous task
377,280
def op_canonicalize(op_name, parsed_op): global CANONICALIZE_METHODS if op_name not in CANONICALIZE_METHODS: return parsed_op else: return CANONICALIZE_METHODS[op_name](parsed_op)
Get the canonical representation of a parsed operation's data. Meant for backwards-compatibility
377,281
def process_constraints(self, inequalities=None, equalities=None, momentinequalities=None, momentequalities=None, block_index=0, removeequalities=False): self.status = "unsolved" if block_index == 0: if self._original_F is not None: self.F = self._original_F self.obj_facvar = self._original_obj_facvar self.constant_term = self._original_constant_term self.n_vars = len(self.obj_facvar) self._new_basis = None block_index = self.constraint_starting_block self.__wipe_F_from_constraints() self.constraints = flatten([inequalities]) self._constraint_to_block_index = {} for constraint in self.constraints: self._constraint_to_block_index[constraint] = (block_index, ) block_index += 1 if momentinequalities is not None: for mineq in momentinequalities: self.constraints.append(mineq) self._constraint_to_block_index[mineq] = (block_index, ) block_index += 1 if not (removeequalities or equalities is None): for k, equality in enumerate(equalities): if equality.is_Relational: equality = convert_relational(equality) self.constraints.append(equality) self.constraints.append(-equality) ln = len(self.localizing_monomial_sets[block_index- self.constraint_starting_block]) self._constraint_to_block_index[equality] = (block_index, block_index+ln*(ln+1)//2) block_index += ln*(ln+1) if momentequalities is not None and not removeequalities: for meq in momentequalities: self.constraints += [meq, flip_sign(meq)] self._constraint_to_block_index[meq] = (block_index, block_index+1) block_index += 2 block_index = self.constraint_starting_block self.__process_inequalities(block_index) if removeequalities: self.__remove_equalities(equalities, momentequalities)
Process the constraints and generate localizing matrices. Useful only if the moment matrix already exists. Call it if you want to replace your constraints. The number of the respective types of constraints and the maximum degree of each constraint must remain the same.

:param inequalities: Optional parameter to list inequality constraints.
:type inequalities: list of :class:`sympy.core.exp.Expr`.
:param equalities: Optional parameter to list equality constraints.
:type equalities: list of :class:`sympy.core.exp.Expr`.
:param momentinequalities: Optional parameter of inequalities defined on moments.
:type momentinequalities: list of :class:`sympy.core.exp.Expr`.
:param momentequalities: Optional parameter of equalities defined on moments.
:type momentequalities: list of :class:`sympy.core.exp.Expr`.
:param removeequalities: Optional parameter to attempt removing the equalities by solving the linear equations.
:type removeequalities: bool.
377,282
def dispatch(self, event: Event) -> Iterator[Any]: LOG.debug(, event.get("type")) if event["type"] in self._routes: for detail_key, detail_values in self._routes.get( event["type"], {} ).items(): event_value = event.get(detail_key, "*") yield from detail_values.get(event_value, []) else: return
Yields handlers matching the routing of the incoming :class:`slack.events.Event`. Args: event: :class:`slack.events.Event` Yields: handler
377,283
def remove_cons_vars_from_problem(model, what): context = get_context(model) model.solver.remove(what) if context: context(partial(model.solver.add, what))
Remove variables and constraints from a Model's solver object.

Useful to temporarily remove variables and constraints from a Model's solver object.

Parameters
----------
model : a cobra model
   The model from which to remove the variables and constraints.
what : list or tuple of optlang variables or constraints.
   The variables or constraints to remove from the model. Must be of
   class `model.problem.Variable` or
   `model.problem.Constraint`.
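A hedged usage sketch using cobrapy's bundled textbook model; the dummy variable is purely illustrative:

import cobra.test

model = cobra.test.create_test_model('textbook')
var = model.problem.Variable('dummy_flux', lb=0, ub=10)
model.add_cons_vars([var])
with model:                                   # the model context registers rollbacks
    remove_cons_vars_from_problem(model, [var])
    assert 'dummy_flux' not in [v.name for v in model.variables]
# exiting the context re-adds the variable via the registered callback
assert 'dummy_flux' in [v.name for v in model.variables]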
377,284
def get_url(params): baseurl = .format( **app.config) with app.test_request_context(): return urllib.parse.urljoin( baseurl, url_for(, **params), )
Return external URL for warming up a given chart/table cache.
377,285
def p_instanceDeclaration(p): alias = None quals = OrderedDict() ns = p.parser.handle.default_namespace if isinstance(p[1], six.string_types): cname = p[3] if p[4] == : props = p[5] else: props = p[6] alias = p[4] else: cname = p[4] if p[5] == : props = p[6] else: props = p[7] alias = p[5] try: cc = p.parser.handle.GetClass(cname, LocalOnly=False, IncludeQualifiers=True) p.parser.classnames[ns].append(cc.classname.lower()) except CIMError as ce: ce.file_line = (p.parser.file, p.lexer.lineno) if ce.status_code == CIM_ERR_NOT_FOUND: file_ = p.parser.mofcomp.find_mof(cname) if p.parser.verbose: p.parser.log( _format("Class {0!A} does not exist", cname)) if file_: p.parser.mofcomp.compile_file(file_, ns) cc = p.parser.handle.GetClass(cname, LocalOnly=False, IncludeQualifiers=True) else: if p.parser.verbose: p.parser.log("Cankey' in cprop.qualifiers: keybindings[pname] = pprop.value except ValueError as ve: ce = CIMError( CIM_ERR_INVALID_PARAMETER, _format("Invalid value for property {0!A}: {1}", pname, ve)) ce.file_line = (p.parser.file, p.lexer.lineno) raise ce if alias: if keybindings: inst.path.keybindings = keybindings p.parser.aliases[alias] = inst.path p[0] = inst
instanceDeclaration : INSTANCE OF className '{' valueInitializerList '}' ';' | INSTANCE OF className alias '{' valueInitializerList '}' ';' | qualifierList INSTANCE OF className '{' valueInitializerList '}' ';' | qualifierList INSTANCE OF className alias '{' valueInitializerList '}' ';'
377,286
def _parse_octet(self, octet_str):
    if not octet_str:
        raise ValueError("Empty octet not permitted")
    if not self._DECIMAL_DIGITS.issuperset(octet_str):
        msg = "Only decimal digits permitted in %r"
        raise ValueError(msg % octet_str)
    if len(octet_str) > 3:
        msg = "At most 3 characters permitted in %r"
        raise ValueError(msg % octet_str)
    octet_int = int(octet_str, 10)
    # Guard restored: the source dropped the condition before this raise. As in the
    # stdlib ipaddress module, values with a leading zero that would read differently
    # as octal are rejected as ambiguous (reconstruction, not verbatim).
    if octet_int > 7 and octet_str[0] == '0':
        msg = "Ambiguous (octal/decimal) value in %r not permitted"
        raise ValueError(msg % octet_str)
    if octet_int > 255:
        raise ValueError("Octet %d (> 255) not permitted" % octet_int)
    return octet_int
Convert a decimal octet into an integer. Args: octet_str: A string, the number to parse. Returns: The octet as an integer. Raises: ValueError: if the octet isn't strictly a decimal from [0..255].
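Worked examples, assuming an instance ``addr`` of the owning class and the restored octal-ambiguity guard:

addr._parse_octet('255')    # -> 255
addr._parse_octet('016')    # raises ValueError: ambiguous octal/decimal value
addr._parse_octet('256')    # raises ValueError: octet > 255
addr._parse_octet('1a')     # raises ValueError: only decimal digits permitted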
377,287
def _build_integer_type(var, property_path=None): if not property_path: property_path = [] schema = {"type": "integer"} if is_builtin_type(var): return schema if is_config_var(var): schema.update( _build_attribute_modifiers(var, {"min": "minimum", "max": "maximum"}) ) return schema
Builds schema definitions for integer type values.

:param var: The integer type value
:param List[str] property_path: The property path of the current type,
    defaults to None, optional
:return: The built schema definition
:rtype: Dict[str, Any]
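Illustrative results; the ``config_var`` declaration is a hypothetical stand-in for whatever produces a config var carrying ``min``/``max`` modifiers:

port = config_var(type=int, min=1, max=65535)   # hypothetical helper
_build_integer_type(port)
# -> {"type": "integer", "minimum": 1, "maximum": 65535}
_build_integer_type(int)
# -> {"type": "integer"}   (builtin types carry no modifiers)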
377,288
def preprocess_worksheet(self, table, worksheet): table_conversion = [] flags = {} units = {} for rind, row in enumerate(table): conversion_row = [] table_conversion.append(conversion_row) if self.skippable_rows and worksheet in self.skippable_rows and rind in self.skippable_rows[worksheet]: self.flag_change(flags, , (rind, None), worksheet, self.FLAGS[]) continue for cind, cell in enumerate(row): position = (rind, cind) if self.skippable_columns and worksheet in self.skippable_columns and cind in self.skippable_columns[worksheet]: conversion = None self.flag_change(flags, , position, worksheet, self.FLAGS[]) else: conversion = auto_convert_cell(self, cell, position, worksheet, flags, units, parens_as_neg=self.parens_as_neg) conversion_row.append(conversion) return table_conversion, flags, units
Performs a preprocess pass of the table to attempt naive conversions of data and to record the initial types of each cell.
377,289
def do_usufy(self, query, **kwargs): results = [] test = self.check_usufy(query, **kwargs) if test: r = { "type": "i3visio.profile", "value": self.platformName + " - " + query, "attributes": [] } aux = {} aux["type"] = "i3visio.uri" aux["value"] = self.createURL(word=query, mode="usufy") aux["attributes"] = [] r["attributes"].append(aux) aux = {} aux["type"] = "i3visio.alias" aux["value"] = query aux["attributes"] = [] r["attributes"].append(aux) aux = {} aux["type"] = "i3visio.platform" aux["value"] = self.platformName aux["attributes"] = [] r["attributes"].append(aux) r["attributes"] += self.process_usufy(test) results.append(r) return results
Verify a usufy query on this platform.

This method might be redefined in any class inheriting from Platform.

Args:
-----
    query: The element to be searched.

Return:
-------
    A list of elements to be appended.
377,290
def get_data_pct(self, xpct, ypct): xy_mn, xy_mx = self.get_limits() width = abs(xy_mx[0] - xy_mn[0]) height = abs(xy_mx[1] - xy_mn[1]) x, y = int(float(xpct) * width), int(float(ypct) * height) return (x, y)
Calculate new data size for the given axis ratios. See :meth:`get_limits`. Parameters ---------- xpct, ypct : float Ratio for X and Y, respectively, where 1 is 100%. Returns ------- x, y : int Scaled dimensions.
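A quick worked example: with limits from (0, 0) to (2048, 1024), ratios of 0.5 land on the midpoint of each axis (``viewer`` is a hypothetical instance of the owning class):

# get_limits() -> ((0, 0), (2048, 1024)); width = 2048, height = 1024
x, y = viewer.get_data_pct(0.5, 0.5)
# x == 1024, y == 512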
377,291
def retry(num_attempts=3, exception_class=Exception, log=None, sleeptime=1):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for i in range(num_attempts):
                try:
                    return func(*args, **kwargs)
                except exception_class as e:
                    if i == num_attempts - 1:
                        raise
                    else:
                        if log:
                            # Message reconstructed from the doctest output in the docstring.
                            log.warn('Failed with error %r, trying again', e)
                        sleep(sleeptime)
        return wrapper
    return decorator
>>> def fail(): ... runs[0] += 1 ... raise ValueError() >>> runs = [0]; retry(sleeptime=0)(fail)() Traceback (most recent call last): ... ValueError >>> runs [3] >>> runs = [0]; retry(2, sleeptime=0)(fail)() Traceback (most recent call last): ... ValueError >>> runs [2] >>> runs = [0]; retry(exception_class=IndexError, sleeptime=0)(fail)() Traceback (most recent call last): ... ValueError >>> runs [1] >>> logger = DoctestLogger() >>> runs = [0]; retry(log=logger, sleeptime=0)(fail)() Traceback (most recent call last): ... ValueError >>> runs [3] >>> logger.print_logs() Failed with error ValueError(), trying again Failed with error ValueError(), trying again
377,292
def make_file_object_logger(fh): def logger_func(stmt, args, fh=fh): now = datetime.datetime.now() six.print_("Executing (%s):" % now.isoformat(), file=fh) six.print_(textwrap.dedent(stmt), file=fh) six.print_("Arguments:", file=fh) pprint.pprint(args, fh) return logger_func
Make a logger that logs to the given file object.
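A small usage sketch writing statement logs to stderr:

import sys

log_query = make_file_object_logger(sys.stderr)
log_query('SELECT * FROM users WHERE id = %s', (42,))
# prints the timestamp, the dedented statement and the pretty-printed arguments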
377,293
def nd_load_and_stats(filenames, base_path=BASEPATH): nds = [] for filename in filenames: try: nd_load = results.load_nd_from_pickle(filename= os.path.join(base_path, , filename)) nds.append(nd_load) except: print("File {mvgd} not found. It was maybe excluded by Ding0 or " "just forgotten to generate by you...".format(mvgd=filename)) nd = nds[0] for n in nds[1:]: nd.add_mv_grid_district(n._mv_grid_districts[0]) stats = results.calculate_mvgd_stats(nd) return stats
Load multiple files from disk and generate stats Passes the list of files assuming the ding0 data structure as default in :code:`~/.ding0`. Data will be concatenated and key indicators for each grid district are returned in table and graphic format. Parameters ---------- filenames : list of str Provide list of files you want to analyze base_path : str Root directory of Ding0 data structure, i.e. '~/.ding0' (which is default). Returns ------- stats : pandas.DataFrame Statistics of each MV grid districts
377,294
def _within_box(points, boxes): x_within = (points[..., 0] >= boxes[:, 0, None]) & ( points[..., 0] <= boxes[:, 2, None] ) y_within = (points[..., 1] >= boxes[:, 1, None]) & ( points[..., 1] <= boxes[:, 3, None] ) return x_within & y_within
Validate which keypoints are contained inside a given box. points: NxKx2 boxes: Nx4 output: NxK
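A minimal sketch of the broadcasting; plain numpy arrays are used here for illustration, though the original may operate on tensors of the same shapes:

import numpy as np

points = np.array([[[1.0, 1.0], [5.0, 5.0]],
                   [[2.0, 2.0], [9.0, 9.0]]])   # N x K x 2
boxes = np.array([[0.0, 0.0, 4.0, 4.0],
                  [0.0, 0.0, 3.0, 3.0]])        # N x 4 as (x1, y1, x2, y2)
print(_within_box(points, boxes))
# [[ True False]
#  [ True False]]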
377,295
def get_nameserver_detail_output_show_nameserver_nameserver_ag_base_device(self, **kwargs):
    config = ET.Element("config")
    get_nameserver_detail = ET.Element("get_nameserver_detail")
    config = get_nameserver_detail
    output = ET.SubElement(get_nameserver_detail, "output")
    show_nameserver = ET.SubElement(output, "show-nameserver")
    nameserver_portid_key = ET.SubElement(show_nameserver, "nameserver-portid")
    # The kwargs key names were elided in the source; the names below follow the
    # usual auto-generated pattern (element name with dashes replaced by
    # underscores) and are assumptions.
    nameserver_portid_key.text = kwargs.pop('nameserver_portid')
    nameserver_ag_base_device = ET.SubElement(show_nameserver, "nameserver-ag-base-device")
    nameserver_ag_base_device.text = kwargs.pop('nameserver_ag_base_device')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
377,296
def function_exclusion_filter_builder(func: Strings) -> NodePredicate: if isinstance(func, str): def function_exclusion_filter(_: BELGraph, node: BaseEntity) -> bool: return node[FUNCTION] != func return function_exclusion_filter elif isinstance(func, Iterable): functions = set(func) def functions_exclusion_filter(_: BELGraph, node: BaseEntity) -> bool: return node[FUNCTION] not in functions return functions_exclusion_filter raise ValueError(.format(func))
Build a filter that fails on nodes of the given function(s). :param func: A BEL Function or list/set/tuple of BEL functions
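A hedged usage sketch; ``graph`` is an existing BELGraph and the function constants come from ``pybel.constants``:

from pybel.constants import RNA, MIRNA

exclude_rna = function_exclusion_filter_builder([RNA, MIRNA])
kept_nodes = [node for node in graph if exclude_rna(graph, node)]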
377,297
def explore(args): logger.info("reading sequeces") data = load_data(args.json) logger.info("get sequences from json") c1, c2 = args.names.split(",") seqs, names = get_sequences_from_cluster(c1, c2, data[0]) loci = get_precursors_from_cluster(c1, c2, data[0]) logger.info("map all sequences to all loci") print("%s" % (loci)) map_to_precursors(seqs, names, loci, os.path.join(args.out, "map.tsv"), args) logger.info("plot sequences on loci") logger.info("Done")
Create mapping of sequences of two clusters
377,298
def analyze(self, text):
    try:
        self.process  # touch the lazy property so the MeCab process is running
        # Restored literals below ('\n', 'EOS', '\t', ',') follow the standard
        # MeCab output format and are assumptions where the source elided them.
        text = render_safe(text).replace('\n', ' ').lower()
        results = []
        for chunk in string_pieces(text):
            self.send_input((chunk + '\n').encode())
            while True:
                out_line = self.receive_output_line().decode()
                if out_line == 'EOS\n':
                    break
                word, info = out_line.strip().split('\t')
                record_parts = [word] + info.split(',')
                record = MeCabRecord(*record_parts)
                results.append(record)
        return results
    except ProcessError:
        self.restart_process()
        return self.analyze(text)
Runs a line of text through MeCab, and returns the results as a list of lists ("records") that contain the MeCab analysis of each word.
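For reference, a sketch of how one output line splits into record parts under the assumed tab/comma format (the MeCabRecord field names are not shown in the source):

out_line = '猫\t名詞,一般,*,*,*,*,猫,ネコ,ネコ\n'
word, info = out_line.strip().split('\t')
record_parts = [word] + info.split(',')
# record_parts == ['猫', '名詞', '一般', '*', '*', '*', '*', '猫', 'ネコ', 'ネコ']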
377,299
async def expand_all_quays(self) -> None: if not self.stops: return headers = {: self._client_name} request = { : GRAPHQL_STOP_TO_QUAY_TEMPLATE, : { : self.stops, : self.omit_non_boarding } } with async_timeout.timeout(10): resp = await self.web_session.post(RESOURCE, json=request, headers=headers) if resp.status != 200: _LOGGER.error( "Error connecting to Entur, response http status code: %s", resp.status) return None result = await resp.json() if in result: return for stop_place in result[][]: if len(stop_place[]) > 1: for quay in stop_place[]: if quay[]: self.quays.append(quay[])
Find all quays from stop places.