Dataset schema (from the source viewer): column "Unnamed: 0" is an int64 row index (values 0 to 389k); column "code" is a string with lengths 26 to 79.6k; column "docstring" is a string with lengths 1 to 46.9k.
23,600
def serve(config):
    """Serve the app with Gevent"""
    from gevent.pywsgi import WSGIServer
    app = make_app(config=config)
    host = app.config.get("HOST", "127.0.0.1")  # default literal stripped in extraction; localhost assumed
    port = app.config.get("PORT", 5000)
    http_server = WSGIServer((host, port), app)
    http_server.serve_forever()
Serve the app with Gevent
23,601
def conn_is_open(conn):
    if conn is None:
        return False
    try:
        get_table_names(conn)
        return True
    except sqlite3.ProgrammingError:
        return False
Tests whether the sqlite3 connection is open; returns True or False.
23,602
def getTopRight(self):
    return (float(self.get_x()) + float(self.get_width()),
            float(self.get_y()) + float(self.get_height()))
Retrieves a tuple with the (x, y) coordinates of the upper-right point of the rect. Requires the coordinates, width, and height to be numbers.
23,603
def send_headers(self):
    hkeys = [key.lower() for key, value in self.outheaders]
    status = int(self.status[:3])
    if status == 413:
        # Request Entity Too Large: close the connection.
        self.close_connection = True
    elif "content-length" not in hkeys:
        if status < 200 or status in (204, 205, 304):
            # These statuses carry no body.
            pass
        else:
            # Literals below were stripped in extraction; "HTTP/1.1", "HEAD"
            # and "remaining" are reconstructed from CherryPy's wsgiserver
            # and should be treated as assumptions.
            if (self.response_protocol == "HTTP/1.1" and self.method != "HEAD"):
                self.chunked_write = True
                self.outheaders.append(("Transfer-Encoding", "chunked"))
            else:
                self.close_connection = True
    if "connection" not in hkeys:
        if self.response_protocol == "HTTP/1.1":
            # Persistent by default: only announce "close".
            if self.close_connection:
                self.outheaders.append(("Connection", "close"))
        else:
            # Non-persistent by default: only announce "Keep-Alive".
            if not self.close_connection:
                self.outheaders.append(("Connection", "Keep-Alive"))
    if (not self.close_connection) and (not self.chunked_read):
        # Drain any unread request body.
        remaining = getattr(self.rfile, "remaining", 0)
        if remaining > 0:
            self.rfile.read(remaining)
    if "date" not in hkeys:
        self.outheaders.append(("Date", rfc822.formatdate()))
    if "server" not in hkeys:
        self.outheaders.append(("Server", self.server.server_name))
    buf = [self.server.protocol + " " + self.status + CRLF]
    for k, v in self.outheaders:
        buf.append(k + ": " + v + CRLF)
    buf.append(CRLF)
    self.conn.wfile.sendall("".join(buf))
Assert, process, and send the HTTP response message-headers. You must set self.status and self.outheaders before calling this.
23,604
def _init_params(self, amplitude, length_scale, validate_args):
    dtype = util.maybe_get_common_dtype([amplitude, length_scale])
    if amplitude is not None:
        amplitude = tf.convert_to_tensor(
            value=amplitude, name="amplitude", dtype=dtype)  # name literal stripped; assumed
        self._amplitude = _validate_arg_if_not_none(
            amplitude, tf.compat.v1.assert_positive, validate_args)
    if length_scale is not None:
        length_scale = tf.convert_to_tensor(
            value=length_scale, name="length_scale", dtype=dtype)  # name literal stripped; assumed
        self._length_scale = _validate_arg_if_not_none(
            length_scale, tf.compat.v1.assert_positive, validate_args)
    return dtype
Shared init logic for `amplitude` and `length_scale` params. Args: amplitude: `Tensor` (or convertible) or `None` to convert, validate. length_scale: `Tensor` (or convertible) or `None` to convert, validate. validate_args: If `True`, parameters are checked for validity despite possibly degrading runtime performance Returns: dtype: The common `DType` of the parameters.
23,605
def set_secure_boot_mode(irmc_info, enable):
    # The nested key literals of this BIOS-config payload were stripped in
    # extraction and are not recoverable here; the original document nested a
    # secure-boot flag (set to `enable`) several levels deep.
    bios_config_data = {'secure_boot': enable}  # flat placeholder for the stripped structure
    restore_bios_config(irmc_info=irmc_info, bios_config=bios_config_data)
Enable/Disable secure boot on the server. :param irmc_info: node info :param enable: True, if secure boot needs to be enabled for next boot, else False.
23,606
def free_symbols(self):
    return {sym for sym in self.term.free_symbols
            if sym not in self.bound_symbols}
Set of all free symbols
23,607
def _register(self, defaults=None, **kwargs):
    f = lambda: self.update_or_create(defaults=defaults, **kwargs)[0]
    ret = SimpleLazyObject(f)
    self._lazy_entries.append(ret)
    return ret
Fetch (update or create) an instance, lazily. We're doing this lazily, so that it becomes possible to define custom enums in your code, even before the Django ORM is fully initialized. Domain.objects.SHOPPING = Domain.objects.register( ref='shopping', name='Webshop') Domain.objects.USERS = Domain.objects.register( ref='users', name='User Accounts')
23,608
def fit(self, X, y=None, **kwargs):
    super(DataVisualizer, self).fit(X, y, **kwargs)
    if self.classes_ is None:
        self.classes_ = [str(label) for label in np.unique(y)]
    self.draw(X, y, **kwargs)
    return self
The fit method is the primary drawing input for the visualization since it has both the X and y data required for the viz and the transform method does not. Parameters ---------- X : ndarray or DataFrame of shape n x m A matrix of n instances with m features y : ndarray or Series of length n An array or series of target or class values kwargs : dict Pass generic arguments to the drawing method Returns ------- self : instance Returns the instance of the transformer/visualizer
23,609
def from_data(cls, data):
    if data.shape[1] != 3:
        raise ValueError("Gyroscope data must have shape (N, 3)")
    instance = cls()
    instance.data = data
    return instance
Create gyroscope stream from data array Parameters ------------------- data : (N, 3) ndarray Data array of angular velocities (rad/s) Returns ------------------- GyroStream Stream object
23,610
def initialize(self, config, context):
    self.logger.info("Initializing PulsarSpout with the following")
    self.logger.info("Component-specific config: \n%s" % str(config))
    self.logger.info("Context: \n%s" % str(context))
    self.emit_count = 0
    self.ack_count = 0
    self.fail_count = 0
    if PulsarSpout.serviceUrl not in config or PulsarSpout.topicName not in config:
        self.logger.fatal("Need to specify both serviceUrl and topicName")
    self.pulsar_cluster = str(config[PulsarSpout.serviceUrl])
    self.topic = str(config[PulsarSpout.topicName])
    mode = config[api_constants.TOPOLOGY_RELIABILITY_MODE]
    if mode == api_constants.TopologyReliabilityMode.ATLEAST_ONCE:
        self.acking_timeout = 1000 * int(config[api_constants.TOPOLOGY_MESSAGE_TIMEOUT_SECS])
    else:
        self.acking_timeout = 30000
    if PulsarSpout.receiveTimeoutMs in config:
        self.receive_timeout_ms = config[PulsarSpout.receiveTimeoutMs]
    else:
        self.receive_timeout_ms = 10
    if PulsarSpout.deserializer in config:
        self.deserializer = config[PulsarSpout.deserializer]
        if not callable(self.deserializer):
            self.logger.fatal("Pulsar Message Deserializer needs to be callable")
    else:
        self.deserializer = self.default_deserializer
    self.logConfFileName = GenerateLogConfig(context)
    self.logger.info("Generated LogConf at %s" % self.logConfFileName)
    self.client = pulsar.Client(self.pulsar_cluster,
                                log_conf_file_path=self.logConfFileName)
    self.logger.info("Setup Client with cluster %s" % self.pulsar_cluster)
    try:
        self.consumer = self.client.subscribe(
            self.topic, context.get_topology_name(),
            consumer_type=pulsar.ConsumerType.Failover,
            unacked_messages_timeout_ms=self.acking_timeout)
    except Exception as e:
        self.logger.fatal("Pulsar client subscription failed: %s" % str(e))
    self.logger.info("Subscribed to topic %s" % self.topic)
Implements Pulsar Spout's initialize method
23,611
def _set_session_ldp_stats(self, v, load=False):
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Several generated-binding literals (extensions, namespace,
        # defining_module, error strings) were stripped in extraction and are
        # left as '...' placeholders below; 'container' is recoverable from
        # the error payload.
        t = YANGDynClass(
            v, base=session_ldp_stats.session_ldp_stats,
            is_container='container', presence=False,
            yang_name="session-ldp-stats", rest_name="session-ldp-stats",
            parent=self, path_helper=self._path_helper,
            extmethods=self._extmethods, register_paths=True,
            namespace='...', defining_module='...',
            yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': '...',
            'defined-type': 'container',
            'generated-type': '...',
        })
    self.__session_ldp_stats = t
    if hasattr(self, '_set'):
        self._set()
Setter method for session_ldp_stats, mapped from YANG variable /mpls_state/ldp/ldp_session/session_ldp_stats (container). If this variable is read-only (config: false) in the source YANG file, then _set_session_ldp_stats is considered a private method. Backends looking to populate this variable should do so via calling thisObj._set_session_ldp_stats() directly. YANG Description: Session LDP stats
23,612
def _sb_short_word(self, term, r1_prefixes=None):
    return (self._sb_r1(term, r1_prefixes) == len(term)
            and self._sb_ends_in_short_syllable(term))
Return True iff term is a short word. (...according to the Porter2 specification.) Parameters ---------- term : str The term to examine r1_prefixes : set Prefixes to consider Returns ------- bool True iff term is a short word
23,613
def facts(puppet=False):
    # String literals below were stripped in extraction; they are
    # reconstructed from Salt's puppet module and should be treated as
    # assumptions.
    ret = {}
    opt_puppet = '--puppet' if puppet else ''
    cmd_ret = __salt__['cmd.run_all']('facter {0}'.format(opt_puppet))
    if cmd_ret['retcode'] != 0:
        raise CommandExecutionError(cmd_ret['stderr'])
    output = cmd_ret['stdout']
    for line in output.splitlines():
        if not line:
            continue
        fact, value = _format_fact(line)
        if not fact:
            continue
        ret[fact] = value
    return ret
Run facter and return the results CLI Example: .. code-block:: bash salt '*' puppet.facts
23,614
def query_obj(self):
    d = super().query_obj()
    # Key and option literals were stripped in extraction; the names below
    # follow the style of Superset's viz classes and are assumptions.
    d['row_limit'] = self.form_data.get(
        'row_limit', int(config.get('ROW_LIMIT')))
    numeric_columns = self.form_data.get('all_columns_x')
    if numeric_columns is None:
        raise Exception(_('Must have at least one numeric column specified'))
    self.columns = numeric_columns
    d['columns'] = numeric_columns + self.groupby
    d['groupby'] = []
    return d
Returns the query object for this visualization
23,615
def config(_config=None, **kwargs):
    if _config is None:
        _config = {}
    _config.update(kwargs)

    def wrapper(func):
        func._config = _config
        return func
    return wrapper
A decorator for setting the default kwargs of `BaseHandler.crawl`. Any self.crawl with this callback will use this config.
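A minimal, self-contained sketch of how this decorator behaves (the decorator body is reproduced from the row above; the callback name and kwargs here are hypothetical): the keyword arguments are simply attached to the function as `_config`, where the crawling framework can later read them when scheduling.

def config(_config=None, **kwargs):
    if _config is None:
        _config = {}
    _config.update(kwargs)

    def wrapper(func):
        func._config = _config  # the framework reads this attribute later
        return func
    return wrapper

@config(age=60, priority=2)
def parse_page(response):
    return response

print(parse_page._config)  # -> {'age': 60, 'priority': 2}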
23,616
def request_token():
    while True:
        email, password, captcha_solution, captcha_id = win_login()
        # Key and URL literals were stripped in extraction; the form-field
        # names below are assumed from the unpacked variables, and the login
        # URL is unrecoverable.
        options = {
            'email': email,
            'password': password,
            'captcha_solution': captcha_solution,
            'captcha_id': captcha_id,
        }
        r = requests.post('...', data=options, headers=HEADERS)  # URL stripped
        req_json = json.loads(r.text, object_hook=decode_dict)
        if req_json['r'] == 0:
            # The stripped keys here assembled token/user fields, cookies and
            # player defaults (e.g. volume 50) into post_data; the exact
            # nesting is unrecoverable.
            post_data = {'cookies': r.cookies}
            return post_data
        print(req_json['err'])  # error key assumed
        print(ERROR + req_json['err'])
Request a token using the account's email and password; returns a dict: { "user_info": { "ck": "-VQY", "play_record": { "fav_chls_count": 4, "liked": 802, "banned": 162, "played": 28368 }, "is_new_user": 0, "uid": "taizilongxu", "third_party_info": null, "url": "http://www.douban.com/people/taizilongxu/", "is_dj": false, "id": "2053207", "is_pro": false, "name": "刘小备" }, "r": 0 }
23,617
def modelSetCompleted(self, modelID, completionReason, completionMsg,
                      cpuTime=0, useConnectionID=True):
    if completionMsg is None:
        completionMsg = ''  # empty-string literal assumed
    # The multi-line SQL literal was stripped in extraction; it updates the
    # models table, setting status/completionReason/completionMsg/cpuTime for
    # the given model ID. The reconstruction below is an assumption.
    query = ('UPDATE %s SET status=%%s, completion_reason=%%s, '
             'completion_msg=%%s, cpu_time=%%s '
             'WHERE model_id=%%s') % (self.modelsTableName,)
    sqlParams = [self.STATUS_COMPLETED, completionReason, completionMsg,
                 cpuTime, modelID]
    if useConnectionID:
        query += " AND _eng_worker_conn_id=%s"
        sqlParams.append(self._connectionID)
    with ConnectionFactory.get() as conn:
        numRowsAffected = conn.cursor.execute(query, sqlParams)
    if numRowsAffected != 1:
        raise InvalidConnectionException(
            ("Tried to set modelID=%r using connectionID=%r, but this model "
             "belongs to some other worker or modelID not found; "
             "numRowsAffected=%r") % (modelID, self._connectionID, numRowsAffected))
Mark a model as completed, with the given completionReason and completionMsg. This will fail if the model does not currently belong to this client (connection_id doesn't match). Parameters: ---------------------------------------------------------------- modelID: model ID of model to modify completionReason: completionReason string completionMsg: completionMsg string cpuTime: amount of CPU time spent on this model useConnectionID: True if the connection id of the calling function must be the same as the connection that created the job. Set to True for hypersearch workers, which use this mechanism for orphaned model detection.
23,618
async def send_initial_metadata(self, *, metadata=None):
    if self._send_initial_metadata_done:
        raise ProtocolError()
    # Header literals stripped; grpclib-style values assumed.
    headers = [
        (':status', '200'),
        ('content-type', self._content_type),
    ]
    metadata = MultiDict(metadata or ())
    metadata, = await self._dispatch.send_initial_metadata(metadata)
    headers.extend(encode_metadata(metadata))
    await self._stream.send_headers(headers)
    self._send_initial_metadata_done = True
Coroutine to send headers with initial metadata to the client. In gRPC you can send initial metadata as soon as possible, because gRPC doesn't use `:status` pseudo header to indicate success or failure of the current request. gRPC uses trailers for this purpose, and trailers are sent during :py:meth:`send_trailing_metadata` call, which should be called in the end. .. note:: This coroutine will be called implicitly during first :py:meth:`send_message` coroutine call, if not called before explicitly. :param metadata: custom initial metadata, dict or list of pairs
23,619
def get_route_lines_route(self, **kwargs):
    # The format string and dict keys were stripped in extraction; a
    # DD/MM/YYYY date built from 'day'/'month'/'year' kwargs is assumed from
    # the docstring, and the endpoint literals are left as placeholders.
    select_date = '%02d/%02d/%d' % (
        kwargs.get('day', 1),
        kwargs.get('month', 1),
        kwargs.get('year', 1970),
    )
    params = {
        'SelectDate': select_date,  # key name assumed
        'Lines': util.ints_to_string(kwargs.get('lines', [])),  # key name assumed
    }
    result = self.make_request('...', '...', **params)  # endpoint literals stripped
    if not util.check_result(result):
        return False, result.get('resultDescription', '')  # key name assumed
    values = util.response_list(result, 'resultValues')  # key name assumed
    return True, [emtype.RouteLinesItem(**a) for a in values]
Obtain itinerary for one or more lines in the given date. Args: day (int): Day of the month in format DD. The number is automatically padded if it only has one digit. month (int): Month number in format MM. The number is automatically padded if it only has one digit. year (int): Year number in format YYYY. lines (list[int] | int): Lines to query, may be empty to get all the lines. Returns: Status boolean and parsed response (list[RouteLinesItem]), or message string in case of error.
23,620
def _is_viable_phone_number(number):
    if len(number) < _MIN_LENGTH_FOR_NSN:
        return False
    match = fullmatch(_VALID_PHONE_NUMBER_PATTERN, number)
    return bool(match)
Checks to see if a string could possibly be a phone number. At the moment, checks to see that the string begins with at least 2 digits, ignoring any punctuation commonly found in phone numbers. This method does not require the number to be normalized in advance - but does assume that leading non-number symbols have been removed, such as by the method _extract_possible_number. Arguments: number -- string to be checked for viability as a phone number Returns True if the number could be a phone number of some sort, otherwise False
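A rough, self-contained illustration of the viability check's shape, with a deliberately simplified stand-in for `_VALID_PHONE_NUMBER_PATTERN` (the real pattern in the phonenumbers library is far stricter; this toy regex is an assumption for demonstration only):

import re

_MIN_LENGTH_FOR_NSN = 2
# Simplified: optional leading punctuation, at least two digits, then
# digits and common phone punctuation. Not the library's real pattern.
_SIMPLE_PATTERN = re.compile(r"[-+(). ]*[0-9]{2}[-. ()0-9]*")

def is_viable(number):
    if len(number) < _MIN_LENGTH_FOR_NSN:
        return False
    return bool(_SIMPLE_PATTERN.fullmatch(number))

print(is_viable("(650) 253-0000"))  # True
print(is_viable("1"))               # False: too short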
23,621
def create(self, friendly_name, event_callback_url=values.unset,
           events_filter=values.unset, multi_task_enabled=values.unset,
           template=values.unset, prioritize_queue_order=values.unset):
    # Dict key literals were stripped; they are reconstructed from the
    # parameter names in Twilio's CamelCase form-field style and should be
    # treated as assumptions.
    data = values.of({
        'FriendlyName': friendly_name,
        'EventCallbackUrl': event_callback_url,
        'EventsFilter': events_filter,
        'MultiTaskEnabled': multi_task_enabled,
        'Template': template,
        'PrioritizeQueueOrder': prioritize_queue_order,
    })
    payload = self._version.create('POST', self._uri, data=data)
    return WorkspaceInstance(self._version, payload)
Create a new WorkspaceInstance :param unicode friendly_name: Human readable description of this workspace :param unicode event_callback_url: If provided, the Workspace will publish events to this URL. :param unicode events_filter: Use this parameter to receive webhooks on EventCallbackUrl for specific events on a workspace. :param bool multi_task_enabled: Multi tasking allows workers to handle multiple tasks simultaneously. :param unicode template: One of the available template names. :param WorkspaceInstance.QueueOrder prioritize_queue_order: Use this parameter to configure whether to prioritize LIFO or FIFO when workers are receiving Tasks from combination of LIFO and FIFO TaskQueues. :returns: Newly created WorkspaceInstance :rtype: twilio.rest.taskrouter.v1.workspace.WorkspaceInstance
23,622
def show_tooltip(self, pos, tooltip, _sender_deco=None):
    if _sender_deco is not None and _sender_deco not in self.decorations:
        return
    QtWidgets.QToolTip.showText(pos, tooltip[0:1024], self)
Show a tool tip at the specified position :param pos: Tooltip position :param tooltip: Tooltip text :param _sender_deco: TextDecoration which is the sender of the show tooltip request. (for internal use only).
23,623
def set_cores_massive(self, filename='core_masses.txt'):  # default filename stripped; assumed
    core_info = []
    minis = []
    for i in range(len(self.runs_H5_surf)):
        sefiles = se(self.runs_H5_out[i])
        mini = sefiles.get('mini')  # attribute literal stripped; 'mini' assumed
        minis.append(mini)
        incycle = int(sefiles.se.cycles[-1])
        core_info.append(sefiles.cores(incycle=incycle))
    print_info = ''
    for i in range(len(self.runs_H5_surf)):
        if i == 0:
            # Header literals were stripped in extraction.
            print(core_info[i][1])
        print_info += str(minis[i]) + ' '
        info = core_info[i][0]
        for k in range(len(info)):
            print_info += '{0:.3f} '.format(float(core_info[i][0][k]))  # format literal assumed
        print_info += '\n'
    f1 = open(filename, 'w')
    f1.write(print_info)
    f1.close()
Uses the function `cores` in nugridse.py
23,624
def read_uic1tag(fh, byteorder, dtype, count, offsetsize, planecount=None):
    # String literals reconstructed from tifffile's read_uic1tag; treat the
    # exact values ('2I', '<', '<u4', 'ZDistance', '<I') as assumptions.
    assert dtype in ('2I', '1I') and byteorder == '<'
    result = {}
    if dtype == '2I':
        values = fh.read_array('<u4', 2 * count).reshape(count, 2)
        result = {'ZDistance': values[:, 0] / values[:, 1]}
    elif planecount:
        for _ in range(count):
            tagid = struct.unpack('<I', fh.read(4))[0]
            if tagid in (28, 29, 37, 40, 41):
                fh.read(4)
                continue
            name, value = read_uic_tag(fh, tagid, planecount, offset=True)
            result[name] = value
    return result
Read MetaMorph STK UIC1Tag from file and return as dict. Return empty dictionary if planecount is unknown.
23,625
def create_space(deployment_name, space_name, security_policy='private',  # default assumed
                 events_retention_days=0, metrics_retention_days=0,
                 token_manager=None, app_url=defaults.APP_URL):
    deployment_id = get_deployment_id(deployment_name,
                                      token_manager=token_manager,
                                      app_url=app_url)
    # Payload key literals were stripped; names assumed from the parameters.
    payload = {
        'name': space_name,
        'security_policy': security_policy,
        'events_retention_days': events_retention_days,
        'metrics_retention_days': metrics_retention_days,
    }
    headers = token_manager.get_access_token_headers()
    deployment_url = environment.get_deployment_url(app_url=app_url)
    response = requests.post(
        '%s/api/deployments/%s/spaces' % (deployment_url, deployment_id),  # path assumed
        data=json.dumps(payload),
        headers=headers)
    if response.status_code == 201:
        return response.json()
    else:
        raise JutException(
            'Error %s: %s' % (response.status_code, response.text))  # message assumed
Create a space within the specified deployment, with the various retention values set.
23,626
def _clean_html(html):
    # Literals reconstructed from the docstring: replace "&#x000A;" with a
    # newline and drop the pilcrow sign; the regex substitutions strip links,
    # tags and backslashes (empty replacement strings assumed).
    content = html.replace(u'&#x000A;', u'\n').replace(u'¶', u'')
    content = _LINK_PATTERN.sub(u'', content)
    content = _HTML_TAG_PATTERN.sub(u'', content)
    content = _BACKSLASH_PATTERN.sub(u'', content)
    return content
Removes links (``<a href="...">...</a>``) from the provided HTML input. Further, it replaces "&#x000A;" with ``\n`` and removes "¶" from the texts.
23,627
def add(name, mac, mtu=1500):
    # Command and message literals were stripped in extraction; the dladm and
    # nictagadm invocations below are assumptions in the style of Salt's
    # nictagadm module.
    ret = {}
    if mtu > 9000 or mtu < 1500:
        return {'Error': 'mtu must be a value between 1500 and 9000'}
    if mac != 'etherstub':
        cmd = 'dladm show-phys -m -p -o address'  # assumed
        res = __salt__['cmd.run_all'](cmd)
        if mac.replace('00', '0') not in res['stdout'].splitlines():  # replace args assumed
            return {'Error': '{0} is not present on this system'.format(mac)}
    if mac == 'etherstub':
        cmd = 'nictagadm add -l {0}'.format(name)  # assumed
        res = __salt__['cmd.run_all'](cmd)
    else:
        cmd = 'nictagadm add -p mtu={0},mac={1} {2}'.format(mtu, mac, name)  # assumed
        res = __salt__['cmd.run_all'](cmd)
    if res['retcode'] == 0:
        return True
    else:
        return {'Error': 'failed to create nictag'
                if 'stderr' not in res and res['stderr'] == ''
                else res['stderr']}
Add a new nictag name : string name of new nictag mac : string mac of parent interface or 'etherstub' to create a ether stub mtu : int MTU (ignored for etherstubs) CLI Example: .. code-block:: bash salt '*' nictagadm.add storage0 etherstub salt '*' nictagadm.add trunk0 'DE:AD:OO:OO:BE:EF' 9000
23,628
def from_yamlfile(cls, fp, selector_handler=None, strict=False, debug=False):
    return cls.from_yamlstring(fp.read(), selector_handler=selector_handler,
                               strict=strict, debug=debug)
Create a Parselet instance from a file containing the Parsley script as a YAML object >>> import parslepy >>> with open('parselet.yml') as fp: ... parslepy.Parselet.from_yamlfile(fp) ... <parslepy.base.Parselet object at 0x2014e50> :param file fp: an open file-like pointer containing the Parsley script :rtype: :class:`.Parselet` Other arguments: same as for :class:`.Parselet` contructor
23,629
def _managePsets(configobj, section_name, task_name, iparsobj=None, input_dict=None):
    configobj[section_name] = {}
    iparsobj_cfg = teal.load(task_name)
    if input_dict is not None:
        for key in list(input_dict.keys()):
            if key in iparsobj_cfg:
                if iparsobj is not None and key in iparsobj:
                    # Second format placeholder added; the original had one
                    # placeholder for two arguments.
                    raise DuplicateKeyError(
                        "Duplicate parameter '{:s}' provided for task {:s}".format(
                            key, task_name))
                iparsobj_cfg[key] = input_dict[key]
                del input_dict[key]
    if iparsobj is not None:
        iparsobj_cfg.update(iparsobj)
    del iparsobj_cfg['_task_name_']  # key literal stripped; TEAL's task-name key assumed
    configobj[section_name].merge(iparsobj_cfg)
Read in parameter values from PSET-like configobj tasks defined for source-finding algorithms, and any other PSET-like tasks under this task, and merge those values into the input configobj dictionary.
23,630
def parse(self, stream):
    lines = re.sub("[\r\n]+", "\n", stream.read()).split("\n")
    for line in lines:
        self.parseline(line)
Parse the given stream
23,631
def packageRootPath(path):
    path = nstr(path)
    if os.path.isfile(path):
        path = os.path.dirname(path)
    parts = os.path.normpath(path).split(os.path.sep)
    package_parts = []
    for i in range(len(parts), 0, -1):
        # '__init__.py' literal reconstructed: walk up while each directory is
        # still a Python package.
        filename = os.path.sep.join(parts[:i] + ['__init__.py'])
        if not os.path.isfile(filename):
            break
        package_parts.insert(0, parts[i - 1])
    if not package_parts:
        return path
    return os.path.abspath(os.path.sep.join(parts[:-len(package_parts)]))
Returns the root file path that defines a Python package from the inputted path. :param path | <str> :return <str>
23,632
def transform(transform_func):
    def decorator(func):
        @wraps(func)
        def f(*args, **kwargs):
            return transform_func(func(*args, **kwargs))
        return f
    return decorator
Apply a transformation to a functions return value
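A self-contained usage sketch of the decorator above (the decorated function and transform are hypothetical): the decorator wraps the function so its return value is passed through the given transform before reaching the caller.

from functools import wraps

def transform(transform_func):
    def decorator(func):
        @wraps(func)
        def f(*args, **kwargs):
            return transform_func(func(*args, **kwargs))
        return f
    return decorator

@transform(sorted)
def letters():
    return "cab"

print(letters())  # ['a', 'b', 'c'] -- the return value went through sorted()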
23,633
def parse_csv_headers(dataset_id):
    data = Dataset.objects.get(pk=dataset_id)
    # Mode/delimiter/quotechar literals stripped; common defaults assumed.
    with open(data.dataset_file.path, 'r') as datasetFile:
        csvReader = reader(datasetFile, delimiter=',', quotechar='"')
        headers = next(csvReader)
    return headers
Return the first row of a CSV as a list of headers.
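The same idea without the Django model, as a runnable sketch (the sample data is hypothetical): read only the header row of a CSV with the standard library.

import csv
import io

buf = io.StringIO("name,age,city\nada,36,london\n")
headers = next(csv.reader(buf, delimiter=",", quotechar='"'))
print(headers)  # ['name', 'age', 'city']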
23,634
def populate_results_dict(self, sample, gene, total_mismatches, genome_pos,
                          amplicon_length, contig, primer_set):
    # Key literals stripped; names assumed from the parameters/docstring.
    sample[self.analysistype].result_dict[gene] = {
        'total_mismatches': total_mismatches,
        'genome_pos': genome_pos,
        'amplicon_length': amplicon_length,
        'contig': contig,
        'primer_set': primer_set,
    }
Populate the results dictionary with the required key: value pairs :param sample: type MetadataObject: Current metadata sample to process :param gene: type STR: Gene of interest :param total_mismatches: type INT: Number of mismatches between primer pairs and subject sequence :param genome_pos: type STR: Positions of 5' and 3' ends of the amplicon :param amplicon_length: type INT: Total length of the amplicon :param contig: type STR: Contig name :param primer_set: type STR: Name of primer set from the ePCR-formatted file used in the analyses
23,635
def highlights_worker(work_unit):
    # Several key and message literals were stripped in extraction; the
    # 'config' key is assumed from the lookup below, table names and messages
    # are assumptions.
    if 'config' not in work_unit.spec:
        raise coordinate.exceptions.ProgrammerError(
            'highlights_worker requires a config in work_unit.spec')
    web_conf = Config()
    unitconf = work_unit.spec['config']
    with yakonfig.defaulted_config([coordinate, kvlayer, dblogger, web_conf],
                                   config=unitconf):
        file_id = make_file_id(work_unit.key)
        web_conf.kvlclient.setup_namespace(highlights_kvlayer_tables)
        payload_strs = list(web_conf.kvlclient.get('files', file_id))  # table name assumed
        if payload_strs and payload_strs[0][1]:
            payload_str = payload_strs[0][1]
            try:
                data = json.loads(payload_str)
                maybe_store_highlights(file_id, data, web_conf.tfidf,
                                       web_conf.kvlclient)
            except Exception as exc:  # fixed Python 2 "except Exception, exc"
                logger.critical('failed to process %r', payload_str,
                                exc_info=True)
                payload = {
                    'state': ERROR,
                    'error': {
                        'code': 7,
                        'message': 'failed to process: %s' %
                                   traceback.format_exc(exc),
                    },
                }
                payload_str = json.dumps(payload)
                web_conf.kvlclient.put('files', (file_id, payload_str))  # bare kvlclient in the original looks like a bug
coordinate worker wrapper around :func:`maybe_create_highlights`
23,636
def get_closest_points(self, mesh):
    dists = numpy.array(
        [surf.get_min_distance(mesh).flatten() for surf in self.surfaces]
    )
    idx = dists == numpy.min(dists, axis=0)
    lons = numpy.empty_like(mesh.lons.flatten())
    lats = numpy.empty_like(mesh.lats.flatten())
    depths = None if mesh.depths is None else \
        numpy.empty_like(mesh.depths.flatten())
    for i, surf in enumerate(self.surfaces):
        if not idx[i, :].any():
            continue
        cps = surf.get_closest_points(mesh)
        lons[idx[i, :]] = cps.lons.flatten()[idx[i, :]]
        lats[idx[i, :]] = cps.lats.flatten()[idx[i, :]]
        if depths is not None:
            depths[idx[i, :]] = cps.depths.flatten()[idx[i, :]]
    lons = lons.reshape(mesh.lons.shape)
    lats = lats.reshape(mesh.lats.shape)
    if depths is not None:
        depths = depths.reshape(mesh.depths.shape)
    return Mesh(lons, lats, depths)
For each point in ``mesh`` find the closest surface element, and return the corresponding closest point. See :meth:`superclass method <.base.BaseSurface.get_closest_points>` for spec of input and result values.
23,637
def get_assessment_admin_session_for_bank(self, bank_id):
    if not self.supports_assessment_admin():
        raise errors.Unimplemented()
    return sessions.AssessmentAdminSession(bank_id, runtime=self._runtime)
Gets the ``OsidSession`` associated with the assessment admin service for the given bank. arg: bank_id (osid.id.Id): the ``Id`` of the bank return: (osid.assessment.AssessmentAdminSession) - ``an _assessment_admin_session`` raise: NotFound - ``bank_id`` not found raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - ``unable to complete request`` raise: Unimplemented - ``supports_assessment_admin()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_admin()`` and ``supports_visible_federation()`` are ``true``.*
23,638
def get_next_assessment_part_id(self, assessment_part_id=None):
    if assessment_part_id is None:
        part_id = self.get_id()
    else:
        part_id = assessment_part_id
    return get_next_part_id(part_id,
                            runtime=self._runtime,
                            proxy=self._proxy,
                            sequestered=True)[0]
This supports the basic simple sequence case. Can be overridden in a record for other cases.
23,639
def _lookup_generic_scalar(self, obj, as_of_date, country_code,
                           matches, missing):
    result = self._lookup_generic_scalar_helper(
        obj, as_of_date, country_code,
    )
    if result is not None:
        matches.append(result)
    else:
        missing.append(obj)
Convert asset_convertible to an asset. On success, append to matches. On failure, append to missing.
23,640
def _add_current_usage(self, value, maximum=None, resource_id=None,
                       aws_type=None):
    self._current_usage.append(
        AwsLimitUsage(
            self,
            value,
            maximum=maximum,
            resource_id=resource_id,
            aws_type=aws_type
        )
    )
Add a new current usage value for this limit. Creates a new :py:class:`~.AwsLimitUsage` instance and appends it to the internal list. If more than one usage value is given to this service, they should have ``id`` and ``aws_type`` set. This method should only be called from the :py:class:`~._AwsService` instance that created and manages this Limit. :param value: the numeric usage value :type value: :py:obj:`int` or :py:obj:`float` :param resource_id: If there can be multiple usage values for one limit, an AWS ID for the resource this instance describes :type resource_id: str :param aws_type: if ``id`` is not None, the AWS resource type that ID represents. As a convention, we use the AWS Resource Type names used by `CloudFormation <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-template-resource-type-ref.html>`_ # noqa :type aws_type: str
23,641
def get_ngrok_public_url():
    try:
        response = requests.get(
            url=NGROK_CLIENT_API_BASE_URL + "/tunnels",
            headers={"Content-Type": "application/json"})  # header literals stripped; assumed
        response.raise_for_status()
    except requests.exceptions.RequestException:
        print("Could not connect to the ngrok client API; "
              "assuming not running.")
        return None
    else:
        for tunnel in response.json()["tunnels"]:
            if tunnel.get("public_url", "").startswith("http://"):
                print("Found ngrok public HTTP URL:", tunnel["public_url"])
                return tunnel["public_url"]
Get the ngrok public HTTP URL from the local client API.
23,642
def predict(self, X):
    if X.ndim > 1 and X.shape[1] != 1:
        out = []
        for x in X:
            out += self.predict(x)
        return out
    X = X.flatten()
    if self.metric == 'minkowski':  # literal reconstructed from the error message below
        dists = np.sum(np.abs(self._data - X) ** self.p, axis=1)
    else:
        raise ValueError("Only Minkowski distance metric implemented...")
    argument = np.argsort(dists)
    labels = self._labels[argument[:self.n_neighbors]]
    if self.weights == 'distance':  # literal assumed
        weights = 1 / dists[argument[:self.n_neighbors]]
        out = np.zeros(len(self._classes))  # stripped dtype argument dropped
        for i, c in enumerate(self._classes):
            out[i] = np.sum(weights[labels == c])
        out /= np.sum(out)
        y_pred = self._labels[np.argmax(out)]
    else:
        y_pred, _ = mode(labels)
    return y_pred.tolist()
Predict the class labels for the provided data Parameters ---------- X : array-like, shape (n_query, n_features). Test samples. Returns ------- y : array of shape [n_samples] Class labels for each data sample.
23,643
def create(self, chat_id=None, name=None, owner=None, user_list=None):
    data = optionaldict(
        chatid=chat_id,
        name=name,
        owner=owner,
        userlist=user_list,
    )
    return self._post('appchat/create', data=data)  # endpoint literal stripped; assumed
Create a group chat session. For details see https://work.weixin.qq.com/api/doc#90000/90135/90245 Restrictions: only enterprise self-built apps may call this, and the app's visibility scope must be the root department; the number of group members may not exceed the limit configured in the admin console, and never more than 500; each enterprise may create at most 1000 groups per day. :param chat_id: unique identifier of the group chat; must not duplicate an existing group. String, at most 32 characters; only 0-9 and a-zA-Z are allowed. If omitted, the system generates a random group id. :param name: group chat name, at most 50 utf8 characters; longer names are truncated. :param owner: userid of the designated group owner. If omitted, the system picks a random member from user_list as the owner. :param user_list: list of group members, identified by userid; at least 2 and at most 500 members. :return: the returned JSON data package
23,644
def commit_signature(vcs, user_config, signature):
    if signature not in get_staged_signatures(vcs):
        raise NotStagedError
    evidence_path = _get_committed_history_path(vcs)
    committed_signatures = get_committed_signatures(vcs)
    if signature in committed_signatures:
        raise AlreadyCommittedError
    committed_signatures.append(signature)
    # Config key literal stripped; it bounds how much history is kept
    # ('history_limit' is an assumed name).
    string = '\n'.join(committed_signatures[-user_config['history_limit']:])
    with open(evidence_path, 'w') as f:
        f.write(string)
    unstage_signature(vcs, signature)
Add `signature` to the list of committed signatures The signature must already be staged Args: vcs (easyci.vcs.base.Vcs) user_config (dict) signature (basestring) Raises: NotStagedError AlreadyCommittedError
23,645
def delete(self):
    r = self._client.request('DELETE', self.url)  # method literal stripped; HTTP DELETE assumed
    logger.info("delete(): %s", r.status_code)
Delete this source
23,646
def lineup_user(self, userid):
    # Header, URL and selector literals were stripped in extraction; the
    # values marked "assumed" below are placeholders.
    headers = {
        "Content-type": "application/x-www-form-urlencoded",
        "Accept": "text/plain",
        "Referer": "http://" + self.domain + "/",  # header name and path assumed
        "User-Agent": user_agent,
    }
    req = self.session.get("http://" + self.domain + "/user/" + userid,  # path assumed
                           headers=headers).content
    soup = BeautifulSoup(req)
    info = []
    for i in soup.find_all('div', {'class': 'lineup'}):  # tag/attrs assumed
        info.append(i.text.strip())
    return info
Get a user's lineup using an ID.
23,647
def main():
    # Flag and mode literals ('-h', '-i', '-f', file mode) were stripped in
    # extraction and reconstructed from the docstring; the print format is an
    # assumption.
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if '-i' in sys.argv:
        while 1:
            try:
                ans = input("Input Declination: <cntrl-D to quit> ")
                Dec = float(ans)
                ans = input("Input Inclination: ")
                Inc = float(ans)
                ans = input("Input Alpha 95: ")
                a95 = float(ans)
                ans = input("Input Site Latitude: ")
                slat = float(ans)
                ans = input("Input Site Longitude: ")
                slong = float(ans)
                spitout(Dec, Inc, a95, slat, slong)
                # plong/plat/dp/dm are presumably set elsewhere in the
                # original module; format literal assumed.
                print('%7.1f %7.1f %7.1f %7.1f' % (plong, plat, dp, dm))
            except:
                print("\n Good-bye\n")
                sys.exit()
    elif '-f' in sys.argv:
        ind = sys.argv.index('-f')
        file = sys.argv[ind + 1]
        f = open(file, 'r')
        inlist = []
        for line in f.readlines():
            inlist.append([])
            for el in line.split():
                inlist[-1].append(float(el))
        spitout(inlist)
    else:
        stdin_lines = sys.stdin.readlines()  # renamed to avoid shadowing input()
        inlist = []
        for line in stdin_lines:
            inlist.append([])
            for el in line.split():
                inlist[-1].append(float(el))
        spitout(inlist)
NAME dia_vgp.py DESCRIPTION converts declination inclination alpha95 to virtual geomagnetic pole, dp and dm SYNTAX dia_vgp.py [-h] [-i] [-f FILE] [< filename] OPTIONS -h prints help message and quits -i interactive data entry -f FILE to specify file name on the command line INPUT for file entry: D I A95 SLAT SLON where: D: declination I: inclination A95: alpha_95 SLAT: site latitude (positive north) SLON: site longitude (positive east) OUTPUT PLON PLAT DP DM where: PLAT: pole latitude PLON: pole longitude (positive east) DP: 95% confidence angle in parallel DM: 95% confidence angle in meridian
23,648
def title_from_content(content):
    for end in (". ", "?", "!", "<br />", "\n", "</p>"):
        if end in content:
            content = content.split(end)[0] + end
            break
    return strip_tags(content)
Try and extract the first sentence from a block of text to use as a title.
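A runnable sketch of the truncation logic only (the original additionally runs Django's strip_tags() on the result; the sample input is hypothetical):

def first_sentence(content):
    for end in (". ", "?", "!", "<br />", "\n", "</p>"):
        if end in content:
            return content.split(end)[0] + end
    return content

print(first_sentence("First sentence. Second sentence."))  # 'First sentence. '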
23,649
def from_config(cls, cp, section, variable_args):
    tag = variable_args
    variable_args = variable_args.split(VARARGS_DELIM)
    # Option-name literals were stripped in extraction; 'polar-angle',
    # 'azimuthal-angle' and 'azimuthal_cyclic_domain' are reconstructed from
    # the docstring.
    try:
        polar_angle = cp.get_opt_tag(section, 'polar-angle', tag)
    except Error:
        polar_angle = cls._default_polar_angle
    try:
        azimuthal_angle = cp.get_opt_tag(section, 'azimuthal-angle', tag)
    except Error:
        azimuthal_angle = cls._default_azimuthal_angle
    if polar_angle not in variable_args:
        raise Error("polar-angle %s is not one of the variable args (%s)" % (
            polar_angle, ', '.join(variable_args)))
    if azimuthal_angle not in variable_args:
        raise Error("azimuthal-angle %s is not one of the variable args " % (
            azimuthal_angle) + "(%s)" % (', '.join(variable_args)))
    polar_bounds = bounded.get_param_bounds_from_config(
        cp, section, tag, polar_angle)
    azimuthal_bounds = bounded.get_param_bounds_from_config(
        cp, section, tag, azimuthal_angle)
    azimuthal_cyclic_domain = cp.has_option_tag(
        section, 'azimuthal_cyclic_domain', tag)
    return cls(polar_angle=polar_angle, azimuthal_angle=azimuthal_angle,
               polar_bounds=polar_bounds, azimuthal_bounds=azimuthal_bounds,
               azimuthal_cyclic_domain=azimuthal_cyclic_domain)
Returns a distribution based on a configuration file. The section must have the names of the polar and azimuthal angles in the tag part of the section header. For example: .. code-block:: ini [prior-theta+phi] name = uniform_solidangle If nothing else is provided, the default names and bounds of the polar and azimuthal angles will be used. To specify a different name for each angle, set the `polar-angle` and `azimuthal-angle` attributes. For example: .. code-block:: ini [prior-foo+bar] name = uniform_solidangle polar-angle = foo azimuthal-angle = bar Note that the names of the variable args in the tag part of the section name must match the names of the polar and azimuthal angles. Bounds may also be specified for each angle, as factors of pi. For example: .. code-block:: ini [prior-theta+phi] polar-angle = theta azimuthal-angle = phi min-theta = 0 max-theta = 0.5 This will return a distribution that is uniform in the upper hemisphere. By default, the domain of the azimuthal angle is `[0, 2pi)`. To make this domain cyclic, add `azimuthal_cyclic_domain =`. Parameters ---------- cp : ConfigParser instance The config file. section : str The name of the section. variable_args : str The names of the parameters for this distribution, separated by ``VARARGS_DELIM``. These must appear in the "tag" part of the section header. Returns ------- UniformSolidAngle A distribution instance from the pycbc.inference.prior module.
23,650
def _validate(self):
    # The section and key literals were stripped in extraction; the odict
    # mapped each required config section to the list of keys it must
    # contain. The message prefixes below are assumptions.
    sections = odict([])  # placeholder for the stripped section/key table
    keys = np.array(list(sections.keys()))
    found = np.in1d(keys, list(self.keys()))
    if not np.all(found):
        msg = 'Missing sections: ' + str(keys[~found])
        raise Exception(msg)
    for section, keys in sections.items():
        keys = np.array(keys)
        found = np.in1d(keys, list(self[section].keys()))
        if not np.all(found):
            msg = 'Missing keys in section %s: ' % (section) + str(keys[~found])
            raise Exception(msg)
Enforce some structure to the config file
23,651
def LoadSecondaryConfig(self, filename=None, parser=None):
    if filename:
        self.files.append(filename)
        parser_cls = self.GetParserFromFilename(filename)
        parser = parser_cls(filename=filename)
        logging.debug("Loading configuration from %s", filename)
        self.secondary_config_parsers.append(parser)
    elif parser is None:
        raise ValueError("Must provide either a filename or a parser.")
    clone = self.MakeNewConfig()
    clone.MergeData(parser.RawData())
    clone.initialized = True
    for file_to_load in clone["Config.includes"]:
        if not os.path.isabs(file_to_load):
            if not filename:
                raise ConfigFileNotFound(
                    "While loading %s: Unable to include a relative path (%s) "
                    "from a config without a filename" % (filename, file_to_load))
            file_to_load = os.path.join(os.path.dirname(filename), file_to_load)
        clone_parser = clone.LoadSecondaryConfig(file_to_load)
        if not clone_parser.parsed:
            raise ConfigFileNotFound(
                "Unable to load include file %s" % file_to_load)
    self.MergeData(clone.raw_data)
    self.files.extend(clone.files)
    return parser
Loads an additional configuration file. The configuration system has the concept of a single Primary configuration file, and multiple secondary files. The primary configuration file is the main file that is used by the program. Any writebacks will only be made to the primary configuration file. Secondary files contain additional configuration data which will be merged into the configuration system. This method adds an additional configuration file. Args: filename: The configuration file that will be loaded. For example file:///etc/grr.conf or reg://HKEY_LOCAL_MACHINE/Software/GRR. parser: An optional parser can be given. In this case, the parser's data will be loaded directly. Returns: The parser used to parse this configuration source. Raises: ValueError: if both filename and parser arguments are None. ConfigFileNotFound: If a specified included file was not found.
23,652
def limit(self, limit):
    if not isinstance(limit, int) or isinstance(limit, bool):
        raise InvalidUsage("limit size must be of type integer")
    self._sysparms['sysparm_limit'] = limit  # key literal reconstructed from the docstring
Sets `sysparm_limit` :param limit: Size limit (int)
23,653
def set_default(sld, tld):
    # API literals were stripped in extraction; the command name, opt keys
    # and XML element/attribute names below follow Namecheap's
    # domains.dns.setDefault API and are assumptions.
    opts = salt.utils.namecheap.get_opts('namecheap.domains.dns.setDefault')
    opts['SLD'] = sld
    opts['TLD'] = tld
    response_xml = salt.utils.namecheap.post_request(opts)
    if response_xml is None:
        return False
    dnsresult = response_xml.getElementsByTagName('DomainDNSSetDefaultResult')[0]
    return salt.utils.namecheap.string_to_value(dnsresult.getAttribute('Updated'))
Sets domain to use namecheap default DNS servers. Required for free services like Host record management, URL forwarding, email forwarding, dynamic DNS and other value added services. sld SLD of the domain name tld TLD of the domain name Returns ``True`` if the domain was successfully pointed at the default DNS servers. CLI Example: .. code-block:: bash salt 'my-minion' namecheap_domains_dns.set_default sld tld
23,654
def execute(self):
    self.log(u"Executing job")
    if self.job is None:
        self.log_exc(u"The job object is None", None, True,
                     ExecuteJobExecutionError)
    if len(self.job) == 0:
        self.log_exc(u"The job has no tasks", None, True,
                     ExecuteJobExecutionError)
    job_max_tasks = self.rconf[RuntimeConfiguration.JOB_MAX_TASKS]
    if (job_max_tasks > 0) and (len(self.job) > job_max_tasks):
        self.log_exc(
            u"The Job has %d Tasks, more than the maximum allowed (%d)." % (
                len(self.job), job_max_tasks),
            None, True, ExecuteJobExecutionError)
    self.log([u"Number of tasks: ", len(self.job)])
    for task in self.job.tasks:
        try:
            custom_id = task.configuration["custom_id"]
            self.log([u"Executing task ...", custom_id])
            executor = ExecuteTask(task, rconf=self.rconf, logger=self.logger)
            executor.execute()
            self.log([u"Executing task ... done", custom_id])
        except Exception as exc:
            # Format placeholder reconstructed; the original literal was stripped.
            self.log_exc(u"Error while executing task '%s'" % (custom_id),
                         exc, True, ExecuteJobExecutionError)
    self.log(u"Executing task: succeeded")
    self.log(u"Executing job: succeeded")
Execute the job, that is, execute all of its tasks. Each produced sync map will be stored inside the corresponding task object. :raises: :class:`~aeneas.executejob.ExecuteJobExecutionError`: if there is a problem during the job execution
23,655
def circuit_to_pyquil(circuit: Circuit) -> pyquil.Program:
    prog = pyquil.Program()
    for elem in circuit.elements:
        if isinstance(elem, Gate) and elem.name in QUIL_GATES:
            params = list(elem.params.values()) if elem.params else []
            prog.gate(elem.name, params, elem.qubits)
        elif isinstance(elem, Measure):
            prog.measure(elem.qubit, elem.cbit)
        else:
            # Error-message literal stripped; wording assumed.
            raise ValueError('Cannot convert operation to pyQuil')
    return prog
Convert a QuantumFlow circuit to a pyQuil program
23,656
def get_expiration_date(self, fn):
    r = self.local_renderer
    r.env.crt_fn = fn
    with hide():
        # Command and regex literals were stripped in extraction; an openssl
        # x509 call and a notAfter pattern are assumed.
        ret = r.local('openssl x509 -noout -enddate -in {crt_fn}', capture=True)
    matches = re.findall(r'notAfter=(.*)', ret, flags=re.IGNORECASE)  # pattern assumed
    if matches:
        return dateutil.parser.parse(matches[0])
Reads the expiration date of a local crt file.
23,657
def to_unicode(sorb, allow_eval=False):
    if sorb is None:
        return sorb
    if isinstance(sorb, bytes):
        sorb = sorb.decode()
    # The literal prefixes were garbled in extraction; per the docstring, this
    # detects reprs of bytes/unicode literals like b"..." or u'...'.
    for s in ("b'", 'b"', "u'", 'u"'):
        if sorb.startswith(s) and sorb.endswith(s[-1]):
            return to_unicode(eval(sorb, {'__builtins__': None}, {}))
    return sorb
r"""Ensure that strings are unicode (UTF-8 encoded). Evaluate bytes literals that are sometimes accidentally created by str(b'whatever') >>> to_unicode(b'whatever') 'whatever' >>> to_unicode(b'b"whatever"') 'whatever' >>> to_unicode(repr(b'b"whatever"')) 'whatever' >>> to_unicode(str(b'b"whatever"')) 'whatever' >>> to_unicode(str(str(b'whatever'))) 'whatever' >>> to_unicode(bytes(u'whatever', 'utf-8')) 'whatever' >>> to_unicode(b'u"whatever"') 'whatever' >>> to_unicode(u'b"whatever"') 'whatever' There seems to be a bug in python3 core: >>> str(b'whatever') # user intended str.decode(b'whatever') (str coercion) rather than python code repr "b'whatever'" >>> repr(str(b'whatever')) '"b\'whatever\'"' >>> str(repr(str(b'whatever'))) '"b\'whatever\'"' >>> repr(str(repr(str(b'whatever')))) '\'"b\\\'whatever\\\'"\'' >>> repr(repr(b'whatever')) '"b\'whatever\'"' >>> str(str(b'whatever')) "b'whatever'" >>> str(repr(b'whatever')) "b'whatever'"
23,658
def edit_txt(filename, substitutions, newname=None):
    if newname is None:
        newname = filename
    # Dict key literals were stripped; 'lRE'/'sRE'/'repl' are assumed from the
    # loop below, and the file modes / debug prefixes are assumptions.
    _substitutions = [{'lRE': re.compile(str(lRE)),
                       'sRE': re.compile(str(sRE)),
                       'repl': repl}
                      for lRE, sRE, repl in substitutions if repl is not None]
    with tempfile.TemporaryFile() as target:
        with open(filename, 'rb') as src:
            logger.info("editing txt = {0!r} ({1:d} substitutions)".format(
                filename, len(substitutions)))
            for line in src:
                line = line.decode("utf-8")
                keep_line = True
                for subst in _substitutions:
                    m = subst['lRE'].match(line)
                    if m:
                        logger.debug('match:    ' + line.rstrip())
                        if subst['repl'] is False:
                            keep_line = False
                        else:
                            line = subst['sRE'].sub(str(subst['repl']), line)
                            logger.debug('replaced: ' + line.rstrip())
                if keep_line:
                    target.write(line.encode())
                else:
                    logger.debug("Deleting line %r", line)
        target.seek(0)
        with open(newname, 'wb') as final:
            shutil.copyfileobj(target, final)
    logger.info("edited txt = {newname!r}".format(**vars()))
Primitive text file stream editor. This function can be used to edit free-form text files such as the topology file. By default it does an **in-place edit** of *filename*. If *newname* is supplied then the edited file is written to *newname*. :Arguments: *filename* input text file *substitutions* substitution commands (see below for format) *newname* output filename; if ``None`` then *filename* is changed in place [``None``] *substitutions* is a list of triplets; the first two elements are regular expression strings, the last is the substitution value. It mimics ``sed`` search and replace. The rules for *substitutions*: .. productionlist:: substitutions: "[" search_replace_tuple, ... "]" search_replace_tuple: "(" line_match_RE "," search_RE "," replacement ")" line_match_RE: regular expression that selects the line (uses match) search_RE: regular expression that is searched in the line replacement: replacement string for search_RE Running :func:`edit_txt` does pretty much what a simple :: sed /line_match_RE/s/search_RE/replacement/ with repeated substitution commands does. Special replacement values: - ``None``: the rule is ignored - ``False``: the line is deleted (even if other rules match) .. note:: * No sanity checks are performed and the substitutions must be supplied exactly as shown. * All substitutions are applied to a line; thus the order of the substitution commands may matter when one substitution generates a match for a subsequent rule. * If replacement is set to ``None`` then the whole expression is ignored and whatever is in the template is used. To unset values you must provided an empty string or similar. * Delete a matching line if replacement=``False``.
23,659
def league_info():
    league = __get_league_object()
    output = {}
    for x in league.attrib:
        output[x] = league.attrib[x]
    return output
Returns a dictionary of league information
23,660
def _bn(editor, force=False):
    eb = editor.window_arrangement.active_editor_buffer
    if not force and eb.has_unsaved_changes:
        editor.show_message(_NO_WRITE_SINCE_LAST_CHANGE_TEXT)
    else:
        editor.window_arrangement.go_to_next_buffer()
Go to next buffer.
23,661
def term(self):
    node = self.atom()
    while self.token.nature in (Nature.MUL, Nature.DIV, Nature.INT_DIV):
        token = self.token
        if token.nature == Nature.MUL:
            self._process(Nature.MUL)
        elif token.nature == Nature.DIV:
            self._process(Nature.DIV)
        elif token.nature == Nature.INT_DIV:
            self._process(Nature.INT_DIV)
        else:
            self._error()
        node = BinaryOperation(left=node, op=token, right=self.atom())
    return node
term: atom (('*' | '/' | '//') atom)*
23,662
def get_project_name(project_id, projects):
    for project in projects:
        if project_id == project.id:
            return project.name
Retrieves project name for given project id Args: projects: List of projects project_id: project id Returns: Project name or None if there is no match
23,663
def get_url(self, name, view_name, kwargs, request):
    if not view_name:
        return None
    try:
        url = self.reverse(view_name, kwargs=kwargs, request=request)
    except NoReverseMatch:
        # Message literal stripped; DRF-style wording assumed.
        msg = (
            'Could not resolve URL for hyperlinked relationship using view '
            'name "%s".'
        )
        raise ImproperlyConfigured(msg % view_name)
    if url is None:
        return None
    return Hyperlink(url, name)
Given a name, view name and kwargs, return the URL that hyperlinks to the object. May raise a `NoReverseMatch` if the `view_name` and `lookup_field` attributes are not configured to correctly match the URL conf.
23,664
def attach(self, instance_id, device):
    return self.connection.attach_volume(self.id, instance_id, device)
Attach this EBS volume to an EC2 instance. :type instance_id: str :param instance_id: The ID of the EC2 instance to which it will be attached. :type device: str :param device: The device on the instance through which the volume will be exposed (e.g. /dev/sdh) :rtype: bool :return: True if successful
23,665
def register_agent(self, host, sweep_id=None, project_name=None):
    # The GraphQL mutation literal was stripped in extraction; it registers
    # an agent for the given host/sweep. The row is also truncated after the
    # project_name fallback below.
    mutation = gql('...')
    if project_name is None:
        project_name = self.settings('project')  # settings key stripped; assumed
Register a new agent Args: host (str): hostname persistent (bool): long running or oneoff sweep (str): sweep id project_name: (str): model that contains sweep
23,666
def activate(self, profile_name=NotSet):
    if profile_name is NotSet:
        profile_name = self.profile_name
    self._active_profile_name = profile_name
Sets <PROFILE_ROOT>_PROFILE environment variable to the name of the current profile.
23,667
def _relation(self, id, join_on, join_to, level=None, featuretype=None,
              order_by=None, reverse=False, completely_within=False,
              limit=None):
    if isinstance(id, Feature):
        id = id.id
    # SQL fragment literal stripped; a relations-table join on
    # {join_on}/{join_to} is assumed from the format arguments.
    other = (
        'JOIN relations ON relations.{join_on} = features.id '
        'WHERE relations.{join_to} = ?'
    ).format(**locals())
    args = [id]
    level_clause = ''
    if level is not None:
        level_clause = 'relations.level = ?'  # literal assumed
        args.append(level)
    query, args = helpers.make_query(
        args=args,
        other=other,
        extra=level_clause,
        featuretype=featuretype,
        order_by=order_by,
        reverse=reverse,
        limit=limit,
        completely_within=completely_within,
    )
    query = query.replace("SELECT", "SELECT DISTINCT")
    for i in self._execute(query, args):
        yield self._feature_returner(**i)
Parameters ---------- id : string or a Feature object level : None or int If `level=None` (default), then return all children regardless of level. If `level` is an integer, then constrain to just that level. {_method_doc} Returns ------- A generator object that yields :class:`Feature` objects.
23,668
def _enough_time_has_passed(self, FPS):
    if FPS == 0:
        return False
    else:
        earliest_time = self.last_update_time + (1.0 / FPS)
        return time.time() >= earliest_time
For limiting how often frames are computed.
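A self-contained sketch of the same FPS gate in a tiny class (the class and demo values are hypothetical): a frame is recomputed only once at least 1/FPS seconds have elapsed since the last update.

import time

class FrameLimiter:
    """Minimal sketch of the FPS gate used above."""
    def __init__(self):
        self.last_update_time = time.time()

    def enough_time_has_passed(self, fps):
        if fps == 0:
            return False  # FPS of 0 means "never recompute"
        return time.time() >= self.last_update_time + (1.0 / fps)

limiter = FrameLimiter()
time.sleep(0.05)
print(limiter.enough_time_has_passed(30))  # True: more than 1/30 s elapsed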
23,669
def scale(config=None, name=None, replicas=None):
    rc = K8sReplicationController(config=config, name=name).get()
    rc.desired_replicas = replicas
    rc.update()
    rc._wait_for_desired_replicas()
    return rc
Scales the number of pods in the specified K8sReplicationController to the desired replica count. :param config: an instance of K8sConfig :param name: the name of the ReplicationController we want to scale. :param replicas: the desired number of replicas. :return: An instance of K8sReplicationController
23,670
def image_exists(self, id=None, tag=None):
    exists = False
    if id and self.image_by_id(id):
        exists = True
    elif tag and self.image_by_tag(tag):
        exists = True
    return exists
Check if specified image exists
23,671
def iter_transport_opts(opts):
    # Key literals reconstructed ('transport_opts', 'transport'); treat them
    # as assumptions in the style of Salt's source.
    transports = set()
    for transport, opts_overrides in six.iteritems(opts.get('transport_opts', {})):
        t_opts = dict(opts)
        t_opts.update(opts_overrides)
        t_opts['transport'] = transport
        transports.add(transport)
        yield transport, t_opts
    if opts['transport'] not in transports:
        yield opts['transport'], opts
Yield transport, opts for all master configured transports
23,672
def date_time_this_month(self, before_now=True, after_now=False, tzinfo=None):
    now = datetime.now(tzinfo)
    this_month_start = now.replace(
        day=1, hour=0, minute=0, second=0, microsecond=0)
    next_month_start = this_month_start + \
        relativedelta.relativedelta(months=1)
    if before_now and after_now:
        return self.date_time_between_dates(
            this_month_start, next_month_start, tzinfo)
    elif not before_now and after_now:
        return self.date_time_between_dates(now, next_month_start, tzinfo)
    elif not after_now and before_now:
        return self.date_time_between_dates(this_month_start, now, tzinfo)
    else:
        return now
Gets a DateTime object for the current month. :param before_now: include days in current month before today :param after_now: include days in current month after today :param tzinfo: timezone, instance of datetime.tzinfo subclass :example DateTime('2012-04-04 11:02:02') :return DateTime
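A runnable sketch of just the sampling-window arithmetic used above (it prints the bounds rather than drawing a random datetime): before_now/after_now select which side of "now" within the current month is sampled.

from datetime import datetime
from dateutil import relativedelta

now = datetime.now()
this_month_start = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
next_month_start = this_month_start + relativedelta.relativedelta(months=1)
# The defaults (before_now=True, after_now=False) sample from
# [this_month_start, now]; flipping the flags samples [now, next_month_start).
print(this_month_start, "<=", now, "<", next_month_start)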
23,673
async def register_user(self, password, **kwds):
    user = await self._create_remote_user(password=password, **kwds)
    # Key literals were stripped in extraction; the code normalizes the
    # user's id field and builds the response (exact key names are
    # assumptions).
    if 'id' not in user:
        user['id'] = user['pk']
    match_query = self.model.user == user['id']
    if self.model.select().where(match_query).count() > 0:
        raise RuntimeError('The user is already registered.')  # message assumed
    password = self.model(user=user['id'], password=password)
    password.save()
    return {
        'user': user,
        'sessionToken': self._user_session_token(user),
    }
This function is used to provide a sessionToken for later requests. Args: uid (str): The user's id.
23,674
def missing_some(data, min_required, args):
    if min_required < 1:
        return []
    found = 0
    not_found = object()
    ret = []
    for arg in args:
        if get_var(data, arg, not_found) is not_found:
            ret.append(arg)
        else:
            found += 1
        if found >= min_required:
            return []
    return ret
Implements the missing_some operator for finding missing variables.
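A self-contained demo of the operator's semantics (the `get_var` stand-in below is a minimal dotted-path lookup, an assumption about the helper the row calls): missing_some returns [] once `min_required` of the named variables are present, otherwise the list of missing names.

def get_var(data, var_name, default):
    # Minimal stand-in for json-logic's get_var: dotted-path lookup.
    for key in str(var_name).split("."):
        try:
            data = data[key]
        except (KeyError, TypeError):
            return default
    return data

def missing_some(data, min_required, args):
    if min_required < 1:
        return []
    found = 0
    not_found = object()
    ret = []
    for arg in args:
        if get_var(data, arg, not_found) is not_found:
            ret.append(arg)
        else:
            found += 1
        if found >= min_required:
            return []
    return ret

data = {"a": 1}
print(missing_some(data, 1, ["a", "b"]))  # [] -- one of the two is enough
print(missing_some(data, 2, ["a", "b"]))  # ['b'] -- need both, 'b' is missing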
23,675
def _build(self, one_hot_input_sequence):
    input_shape = one_hot_input_sequence.get_shape()
    batch_size = input_shape[1]
    batch_embed_module = snt.BatchApply(self._embed_module)
    input_sequence = batch_embed_module(one_hot_input_sequence)
    input_sequence = tf.nn.relu(input_sequence)
    initial_state = self._core.initial_state(batch_size)
    if self._use_dynamic_rnn:
        output_sequence, final_state = tf.nn.dynamic_rnn(
            cell=self._core,
            inputs=input_sequence,
            time_major=True,
            initial_state=initial_state)
    else:
        rnn_input_sequence = tf.unstack(input_sequence)
        output, final_state = tf.contrib.rnn.static_rnn(
            cell=self._core,
            inputs=rnn_input_sequence,
            initial_state=initial_state)
        output_sequence = tf.stack(output)
    batch_output_module = snt.BatchApply(self._output_module)
    output_sequence_logits = batch_output_module(output_sequence)
    return output_sequence_logits, final_state
Builds the deep LSTM model sub-graph. Args: one_hot_input_sequence: A Tensor with the input sequence encoded as a one-hot representation. Its dimensions should be `[truncation_length, batch_size, output_size]`. Returns: Tuple of the Tensor of output logits for the batch, with dimensions `[truncation_length, batch_size, output_size]`, and the final state of the unrolled core.
23,676
def get_param_type_indexes(self, data, name=None, prev=None):
    start, end = -1, -1
    # Key, list and regex literals were stripped in extraction; the values
    # marked "assumed" below are placeholders in the style of a docstring
    # parser.
    stl_type = self.opt['type'][self.style['in']]['name']  # keys assumed
    if not prev:
        _, prev = self.get_param_description_indexes(data)
    if prev >= 0:
        if self.style['in'] in self.tagstyles + ['unknown']:  # literal assumed
            idx = self.get_elem_index(data[prev:])
            if idx >= 0 and data[prev + idx:].startswith(stl_type):
                idx = prev + idx + len(stl_type)
                m = re.match(r'^([\w\.]+)\s*:(.*)', data[idx:].strip())  # pattern assumed
                if m:
                    param = m.group(1).strip()
                    if (name and param == name) or not name:
                        desc = m.group(2)
                        start = data[idx:].find(desc) + idx
                        end = self.get_elem_index(data[start:])
                        if end >= 0:
                            end += start
        if self.style['in'] in ['params', 'unknown'] and (start, end) == (-1, -1):  # literals assumed
            pass
    return (start, end)
Get from a docstring a parameter type indexes. In javadoc style it is after @type. :param data: string to parse :param name: the name of the parameter (Default value = None) :param prev: index after the previous element (param or param's description) (Default value = None) :returns: start and end indexes of found element else (-1, -1) Note: the end index is the index after the last included character or -1 if reached the end :rtype: tuple
23,677
def lifter(cepstra, L=22):
    if L > 0:
        nframes, ncoeff = numpy.shape(cepstra)
        n = numpy.arange(ncoeff)
        lift = 1 + (L / 2.) * numpy.sin(numpy.pi * n / L)
        return lift * cepstra
    else:
        return cepstra
Apply a cepstral lifter the the matrix of cepstra. This has the effect of increasing the magnitude of the high frequency DCT coeffs. :param cepstra: the matrix of mel-cepstra, will be numframes * numcep in size. :param L: the liftering coefficient to use. Default is 22. L <= 0 disables lifter.
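A worked numeric check of the lifter vector 1 + (L/2)·sin(πn/L) (the matrix shape here is hypothetical): the weight is 1 at n=0 and peaks at 1 + L/2 near n = L/2, so higher-order DCT coefficients are boosted.

import numpy

L = 22
n = numpy.arange(13)                      # 13 cepstral coefficients
lift = 1 + (L / 2.0) * numpy.sin(numpy.pi * n / L)
print(lift[0], lift.max())                # 1.0 and 12.0 (peak at n = L/2 = 11)
cepstra = numpy.ones((5, 13))             # 5 dummy frames
print((lift * cepstra).shape)             # (5, 13): lifter broadcasts per frame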
23,678
def style(self):
    LOGGER.info('Styling the layers')  # message literal stripped; wording assumed
    classes = generate_classified_legend(
        self.analysis_impacted,
        self.exposure,
        self.hazard,
        self.use_rounding,
        self.debug_mode)
    # aggregation_width/color and analysis_width/color are presumably defined
    # elsewhere in the original module.
    simple_polygon_without_brush(
        self.aggregation_summary, aggregation_width, aggregation_color)
    simple_polygon_without_brush(
        self.analysis_impacted, analysis_width, analysis_color)
    for layer in self._outputs():
        layer.saveDefaultStyle()
Function to apply some styles to the layers.
23,679
def find_frametype(self, gpstime=None, frametype_match=None, host=None,
                   port=None, return_all=False, allow_tape=True):
    return datafind.find_frametype(
        self, gpstime=gpstime, frametype_match=frametype_match,
        host=host, port=port, return_all=return_all,
        allow_tape=allow_tape)
Find the containing frametype(s) for this `Channel` Parameters ---------- gpstime : `int` a reference GPS time at which to search for frame files frametype_match : `str` a regular expression string to use to down-select from the list of all available frametypes host : `str` the name of the datafind server to use for frame file discovery port : `int` the port of the datafind server on the given host return_all: `bool`, default: `False` return all matched frame types, otherwise only the first match is returned allow_tape : `bool`, default: `True` include frame files on (slow) magnetic tape in the search Returns ------- frametype : `str`, `list` the first matching frametype containing this channel (`return_all=False`), or a `list` of all matches (`return_all=True`)
23,680
def split(self, length, vertical=True):
    [left, bottom, right, top] = self.bounds
    if vertical:
        box = [[left, bottom, left + length, top],
               [left + length, bottom, right, top]]
    else:
        box = [[left, bottom, right, bottom + length],
               [left, bottom + length, right, top]]
    return box
Returns two bounding boxes representing the current bounds split into two smaller boxes. Parameters ------------- length: float, length to split vertical: bool, if True will split box vertically Returns ------------- box: (2,4) float, two bounding boxes consisting of: [minx, miny, maxx, maxy]
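A standalone, runnable sketch of the same arithmetic on a [minx, miny, maxx, maxy] box (the free function and sample values are hypothetical):

def split_box(bounds, length, vertical=True):
    left, bottom, right, top = bounds
    if vertical:
        return [[left, bottom, left + length, top],
                [left + length, bottom, right, top]]
    return [[left, bottom, right, bottom + length],
            [left, bottom + length, right, top]]

print(split_box([0, 0, 10, 4], 3))
# [[0, 0, 3, 4], [3, 0, 10, 4]] -- a left strip of width 3, then the remainder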
23,681
def wait_for_response(client, timeout, path='/', expected_status_code=None):
    # Default path literal stripped; '/' assumed from the docstring. The
    # 'monotonic' attribute name is assumed (Python 2/3 compatibility shim).
    get_time = getattr(time, 'monotonic', time.time)
    deadline = get_time() + timeout
    while True:
        try:
            pass  # the row is truncated here in the source data
Try make a GET request with an HTTP client against a certain path and return once any response has been received, ignoring any errors. :param ContainerHttpClient client: The HTTP client to use to connect to the container. :param timeout: Timeout value in seconds. :param path: HTTP path to request. :param int expected_status_code: If set, wait until a response with this status code is received. If not set, the status code will not be checked. :raises TimeoutError: If a request fails to be made within the timeout period.
23,682
def call(self, task, decorators=None):
    if decorators is None:
        decorators = []
    task = self.apply_task_decorators(task, decorators)
    data = task.get_data()
    name = task.get_name()
    result = self._inner_call(name, data)
    task_result = RawTaskResult(task, result)
    return self.apply_task_result_decorators(task_result, decorators)
Call given task on service layer. :param task: task to be called. task will be decorated with TaskDecorator's contained in 'decorators' list :type task: instance of Task class :param decorators: list of TaskDecorator's / TaskResultDecorator's inherited classes :type decorators: list :return task_result: result of task call decorated with TaskResultDecorator's contained in 'decorators' list :rtype TaskResult instance
23,683
def parse_metric_family(buf):
    n = 0
    while n < len(buf):
        msg_len, new_pos = _DecodeVarint32(buf, n)
        n = new_pos
        msg_buf = buf[n:n + msg_len]
        n += msg_len
        message = metrics_pb2.MetricFamily()
        message.ParseFromString(msg_buf)
        yield message
Parse the binary buffer in input, searching for Prometheus messages of type MetricFamily [0] delimited by a varint32 [1]. [0] https://github.com/prometheus/client_model/blob/086fe7ca28bde6cec2acd5223423c1475a362858/metrics.proto#L76-%20%20L81 # noqa: E501 [1] https://developers.google.com/protocol-buffers/docs/reference/java/com/google/protobuf/AbstractMessageLite#writeDelimitedTo(java.io.OutputStream) # noqa: E501
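A minimal, self-contained sketch of what the varint32 framing does (this decoder is a simplified stand-in for protobuf's _DecodeVarint32, without its error handling): each message is preceded by its length encoded base-128, seven bits per byte, with the high bit as a continuation flag.

def decode_varint32(buf, pos):
    # Minimal base-128 varint decoder.
    result, shift = 0, 0
    while True:
        b = buf[pos]
        pos += 1
        result |= (b & 0x7F) << shift
        if not b & 0x80:
            return result, pos
        shift += 7

print(decode_varint32(bytes([0x96, 0x01]), 0))  # (150, 2): classic protobuf example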
23,684
def add_matches(self):
    try:
        VTE_REGEX_FLAGS = 0x40080400
        for expr in TERMINAL_MATCH_EXPRS:
            tag = self.match_add_regex(
                Vte.Regex.new_for_match(expr, len(expr), VTE_REGEX_FLAGS), 0
            )
            self.match_set_cursor_type(tag, Gdk.CursorType.HAND2)
        for _useless, match, _otheruseless in QUICK_OPEN_MATCHERS:
            tag = self.match_add_regex(
                Vte.Regex.new_for_match(match, len(match), VTE_REGEX_FLAGS), 0
            )
            self.match_set_cursor_type(tag, Gdk.CursorType.HAND2)
    except (GLib.Error, AttributeError):
        try:
            compile_flag = 0
            if (Vte.MAJOR_VERSION, Vte.MINOR_VERSION) >= (0, 44):
                compile_flag = GLib.RegexCompileFlags.MULTILINE
            for expr in TERMINAL_MATCH_EXPRS:
                tag = self.match_add_gregex(GLib.Regex.new(expr, compile_flag, 0), 0)
                self.match_set_cursor_type(tag, Gdk.CursorType.HAND2)
            for _useless, match, _otheruseless in QUICK_OPEN_MATCHERS:
                tag = self.match_add_gregex(GLib.Regex.new(match, compile_flag, 0), 0)
                self.match_set_cursor_type(tag, Gdk.CursorType.HAND2)
        except GLib.Error as e:
            # %s placeholder added so the exception text is actually logged.
            log.error(
                "ERROR: PCRE2 does not seem to be enabled on your system. "
                "Quick Edit and other Ctrl+click features are disabled. "
                "Please update your VTE package or contact your distribution "
                "to ask to enable regular expression support in VTE. "
                "Exception: %s", str(e)
            )
Adds all regular expressions declared in guake.globals.TERMINAL_MATCH_EXPRS to the terminal to make vte highlight text that matches them.
23,685
def south_field_triple(self):
    # The body was absent in the source; below is the conventional South
    # introspection pattern for a custom field, reconstructed as a sketch.
    from south.modelsinspector import introspector
    field_class = "%s.%s" % (self.__class__.__module__,
                             self.__class__.__name__)
    args, kwargs = introspector(self)
    return (field_class, args, kwargs)
Returns a suitable description of this field for South.
23,686
def get_thellier_gui_meas_mapping(input_df, output=2):
    # NOTE: the string literals in this function were stripped from the
    # source; the column names used below ('measurement', 'treat_step_num',
    # 'measurement_number') are reconstructed from the docstring and should
    # be treated as assumptions.
    if int(output) == 2:
        thellier_gui_meas3_2_meas2_map = meas_magic3_2_magic2_map.copy()
        if 'measurement' in input_df.columns:
            thellier_gui_meas3_2_meas2_map.update(
                {'treat_step_num': 'measurement_number'})
            thellier_gui_meas3_2_meas2_map.pop('measurement')
        return thellier_gui_meas3_2_meas2_map
    else:
        thellier_gui_meas2_2_meas3_map = meas_magic2_2_magic3_map.copy()
        if 'measurement' in input_df.columns:
            thellier_gui_meas2_2_meas3_map.pop('measurement_number')
        try:
            # A small integer indicates a treatment step number rather than
            # a full measurement name (assumed threshold semantics).
            res = int(input_df.iloc[0]['measurement_number'])
            if res < 100:
                thellier_gui_meas2_2_meas3_map['measurement_number'] = 'treat_step_num'
        except ValueError:
            pass
        return thellier_gui_meas2_2_meas3_map
Get the appropriate mapping for translating measurements in Thellier GUI.
This requires special handling for treat_step_num / measurement /
measurement_number.

Parameters
----------
input_df : pandas DataFrame
    MagIC records
output : int
    output to this MagIC data model (2 or 3)

Returns
-------
mapping : dict
    (used in convert_meas_df_thellier_gui)
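A short sketch of how such a mapping is typically applied; pandas' rename is standard, while 'meas_df' and the surrounding setup are assumed:

mapping = get_thellier_gui_meas_mapping(meas_df, output=2)
meas_df_2 = meas_df.rename(columns=mapping)   # translate the column headers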
23,687
def parse_import_directory(self, rva, size):
    # Python 2 ``except X, e`` syntax modernized; the warning-message
    # literals were stripped from the source and the texts below are
    # reconstructions of their evident intent.
    import_descs = []
    while True:
        try:
            data = self.get_data(
                rva,
                Structure(self.__IMAGE_IMPORT_DESCRIPTOR_format__).sizeof())
        except PEFormatError as e:
            self.__warnings.append(
                'Error parsing the import directory at RVA: 0x%x' % rva)
            break
        import_desc = self.__unpack_data__(
            self.__IMAGE_IMPORT_DESCRIPTOR_format__, data,
            file_offset=self.get_offset_from_rva(rva))
        if not import_desc or import_desc.all_zeroes():
            break
        rva += import_desc.sizeof()
        try:
            import_data = self.parse_imports(
                import_desc.OriginalFirstThunk,
                import_desc.FirstThunk,
                import_desc.ForwarderChain)
        except PEFormatError as excp:
            self.__warnings.append(
                'Error parsing the import directory. '
                'Invalid import data at RVA: 0x%x (%s)' % (rva, str(excp)))
            break
        if not import_data:
            continue
        dll = self.get_string_at_rva(import_desc.Name)
        if not is_valid_dos_filename(dll):
            dll = '*invalid*'  # placeholder literal reconstructed
        if dll:
            import_descs.append(ImportDescData(
                struct=import_desc, imports=import_data, dll=dll))
    # Heuristic: very few imported symbols, but dynamic-loading entries
    # among them, is typical of packed executables.
    suspicious_imports = set(['LoadLibrary', 'GetProcAddress'])
    suspicious_imports_count = 0
    total_symbols = 0
    for imp_dll in import_descs:
        for symbol in imp_dll.imports:
            for suspicious_symbol in suspicious_imports:
                if symbol and symbol.name and symbol.name.startswith(suspicious_symbol):
                    suspicious_imports_count += 1
                    break
            total_symbols += 1
    if suspicious_imports_count == len(suspicious_imports) and total_symbols < 20:
        self.__warnings.append(
            'Imported symbols contain entries typical of packed executables.')
    return import_descs
Walk and parse the import directory.
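This parser feeds pefile's public import listing; a typical consumer looks like the following (the file path is a placeholder):

import pefile

pe = pefile.PE('sample.exe')   # placeholder path
for entry in pe.DIRECTORY_ENTRY_IMPORT:
    print(entry.dll)
    for imp in entry.imports:
        print('  ', hex(imp.address), imp.name)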
23,688
def _succeed(self, request_id, reply, duration):
    self.listeners.publish_command_success(
        duration, reply, self.name, request_id,
        self.sock_info.address, self.op_id)
Publish a CommandSucceededEvent.
23,689
async def get_resource(self, resource_id: int) -> dict:
    resource = await self.request.get(
        join_path(self._base_path, str(resource_id)))
    self._sanitize_resource(self._get_to_actual_data(resource))
    return resource
Get a single resource.

:raises PvApiError: when a hub connection error occurs.
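An illustrative async caller; 'api' stands in for the hub client instance and the resource id is a placeholder:

import asyncio

async def show_resource(api):
    resource = await api.get_resource(1234)  # hypothetical resource id
    print(resource)

# asyncio.run(show_resource(api))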
23,690
def _get_item(self, package, flavor):
    # The key literals were stripped from the source; 'items' and 'keyName'
    # are the usual SoftLayer package fields and are assumed here.
    for item in package['items']:
        if item['keyName'] == flavor:
            return item
    # '%s' placeholder restored so the flavor is interpolated into the message
    raise SoftLayer.SoftLayerError("Could not find valid item for: '%s'" % flavor)
Returns the item for ordering a dedicated host.
23,691
def apply_T7(word):
    WORD = _split_consonants_and_vowels(word)
    for k, v in WORD.items():  # iteritems() in the original Python 2 code
        if len(v) == 3 and is_vowel(v[0]):
            # Insert a syllable boundary between the second and third vowels;
            # the '.' literal was stripped from the source and is restored
            # here based on the docstring's examples ([kau.an], [leu.an]).
            WORD[k] = v[:2] + '.' + v[2:]
    word = _compile_dict_into_word(WORD)
    return word
If a VVV-sequence does not contain a potential /i/-final diphthong, there is a syllable boundary between the second and third vowels, e.g. [kau.an], [leu.an], [kiu.as].
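Illustrating the rule on the docstring's own examples, assuming the module's helpers behave as their names suggest:

print(apply_T7('kauan'))   # expected: 'kau.an'
print(apply_T7('leuan'))   # expected: 'leu.an'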
23,692
def make_target(url, extra_opts=None):
    parts = compat.urlparse(url, allow_fragments=False)
    scheme = parts.scheme.lower()
    if scheme in ["ftp", "ftps"]:
        creds = parts.username, parts.password
        tls = scheme == "ftps"
        from ftpsync import ftp_target
        target = ftp_target.FtpTarget(
            parts.path, parts.hostname, parts.port,
            username=creds[0], password=creds[1],
            tls=tls, timeout=None, extra_opts=extra_opts,
        )
    else:
        target = FsTarget(url, extra_opts)
    return target
Factory that creates `_Target` objects from URLs.

FTP targets must begin with the scheme ``ftp://``, or ``ftps://`` for TLS.

Note: TLS is only supported on Python 2.7/3.2+.

Args:
    url (str):
    extra_opts (dict, optional): Passed to Target constructor. Default: None.

Returns:
    :class:`_Target`
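Typical calls, with placeholder host, credentials, and paths — a local path yields an FsTarget, an ftp/ftps URL an FtpTarget:

local = make_target('/tmp/sync_folder')
remote = make_target('ftps://user:secret@example.com/www', extra_opts={})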
23,693
def delete(self, save=True):
    for thumb in self.field.thumbs:
        thumb_name, thumb_options = thumb
        thumb_filename = self._calc_thumb_filename(thumb_name)
        self.storage.delete(thumb_filename)
    super(ImageWithThumbsFieldFile, self).delete(save)
Deletes the original, plus any thumbnails. Fails silently if there are errors deleting the thumbnails.
23,694
def create(self):
    # Key and header literals were stripped from the source; '_rev', the
    # JSON content type, and the 'id'/'rev' response fields are restored
    # from the docstring and the CouchDB API, as assumptions.
    doc = dict(self)
    # A brand-new document must not carry a revision
    if doc.get('_rev') is not None:
        doc.__delitem__('_rev')
    headers = {'Content-Type': 'application/json'}
    resp = self.r_session.post(
        self._database.database_url,
        headers=headers,
        data=json.dumps(doc, cls=self.encoder)
    )
    resp.raise_for_status()
    data = response_to_json_dict(resp)
    super(Document, self).__setitem__('_id', data['id'])
    super(Document, self).__setitem__('_rev', data['rev'])
Creates the current document in the remote database and if successful, updates the locally cached Document object with the ``_id`` and ``_rev`` returned as part of the successful response.
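In cloudant's public API this is usually reached as follows; 'database' is assumed to be an existing database object:

from cloudant.document import Document

doc = Document(database)
doc['name'] = 'julia'   # illustrative field
doc.create()            # POSTs the document; '_id' and '_rev' are now set
print(doc['_id'], doc['_rev'])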
23,695
def _get_register_specs(bit_labels):
    # Group consecutive labels by register name, then size = max index + 1
    it = itertools.groupby(bit_labels, operator.itemgetter(0))
    for register_name, sub_it in it:
        yield register_name, max(ind[1] for ind in sub_it) + 1
Get the number and size of unique registers from bit_labels list.

Args:
    bit_labels (list): this list is of the form::

        [['reg1', 0], ['reg1', 1], ['reg2', 0]]

    which indicates a register named "reg1" of size 2 and a register named
    "reg2" of size 1. This is the format of classical and quantum bit labels
    in the qobj header.

Yields:
    tuple: iterator of register_name:size pairs.
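Tracing the docstring's example through the generator:

bit_labels = [['reg1', 0], ['reg1', 1], ['reg2', 0]]
print(list(_get_register_specs(bit_labels)))
# -> [('reg1', 2), ('reg2', 1)]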
23,696
def _render_log():
    # The towncrier config keys below were stripped from the source; the
    # names used here ('types', 'directory', 'sections', 'template',
    # 'issue_format', 'underlines', 'utf-8') are the standard towncrier
    # ones and should be treated as a reconstruction.
    config = load_config(ROOT)
    definitions = config['types']
    fragments, fragment_filenames = find_fragments(
        pathlib.Path(config['directory']).absolute(),
        config['sections'],
        None,
        definitions,
    )
    rendered = render_fragments(
        pathlib.Path(config['template']).read_text(encoding='utf-8'),
        config['issue_format'],
        split_fragments(fragments, definitions),
        definitions,
        config['underlines'][1:],
    )
    return rendered
Totally tap into Towncrier internals to get an in-memory result.
23,697
def sort_against(list1, list2, reverse=False):
    try:
        return [item for _, item in
                sorted(zip(list2, list1), key=lambda x: x[0], reverse=reverse)]
    except (TypeError, ValueError):
        # Keys are not mutually comparable: fall back to the original order
        # (the source used a bare ``except``; narrowed here).
        return list1
Arrange items of list1 in the same order as sorted(list2). In other words, apply to list1 the permutation which takes list2 to sorted(list2, reverse=reverse).
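A quick worked example:

values = ['b', 'c', 'a']
keys = [2, 3, 1]
print(sort_against(values, keys))                 # ['a', 'b', 'c']
print(sort_against(values, keys, reverse=True))   # ['c', 'b', 'a']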
23,698
def copy(self, memo=None, which=None):
    if memo is None:
        memo = {}
    import copy as _copy  # local import, renamed to avoid shadowing below
    parents = []
    if which is None:
        which = self
    which.traverse_parents(parents.append)  # collect parents
    for p in parents:
        if id(p) not in memo:
            memo[id(p)] = None  # parents are not copied: connections get cut
    if id(self.gradient) not in memo:
        memo[id(self.gradient)] = None  # reset the gradient
    if id(self._fixes_) not in memo:
        memo[id(self._fixes_)] = None  # fixes are reset on the copy
    copied = _copy.deepcopy(self, memo)
    copied._parent_index_ = None
    copied._trigger_params_changed()
    return copied
Returns a (deep) copy of the current parameter handle.

All connections to parents of the copy will be cut.

:param dict memo: memo for deepcopy
:param Parameterized which: parameterized object which started the copy
    process [default: self]
23,699
def _balance_braces(tokens, filename=None):
    # The brace and message literals were stripped from the source; they are
    # restored here from the function's evident logic (depth < 0 can only
    # mean an unmatched closing brace).
    depth = 0
    for token, line, quoted in tokens:
        if token == '}' and not quoted:
            depth -= 1
        elif token == '{' and not quoted:
            depth += 1
        # raise an error if we ever see more right braces than left
        if depth < 0:
            reason = 'unexpected "}"'
            raise NgxParserSyntaxError(reason, filename, line)
        else:
            yield (token, line, quoted)
    # raise an error if there are unclosed braces at end of input
    if depth > 0:
        reason = 'unexpected end of file, expecting "}"'
        raise NgxParserSyntaxError(reason, filename, line)
Raises syntax errors if braces aren't balanced
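A small trace with hand-built (token, line, quoted) tuples; NgxParserSyntaxError comes from the surrounding parser module:

tokens = [('server', 1, False), ('{', 1, False), ('}', 2, False)]
print(list(_balance_braces(tokens)))   # balanced: all tokens pass through

try:
    list(_balance_braces([('}', 1, False)]))
except NgxParserSyntaxError as err:
    print(err)   # reports the unexpected "}"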