Dataset columns: Unnamed: 0 (int64, 0–389k), code (string, lengths 26–79.6k), docstring (string, lengths 1–46.9k)
7,600
def _expand_scheduledict(scheduledict):
    result = []
    def f(d):
        nonlocal result
        d2 = {}
        for k, v in d.items():
            if isinstance(v, str) and _cronslash(v, k) is not None:
                d[k] = _cronslash(v, k)
        for k, v in d.items():
            if isinstance(v, Iterable):
                continue
            else:
                d2[k] = v
        if len(d2.keys()) == len(d.keys()):
            result.append(d2)
            return
        for k, v in d.items():
            if isinstance(v, Iterable):
                for i in v:
                    dprime = dict(**d)
                    dprime[k] = i
                    f(dprime)
                break
    f(scheduledict)
    return result
Converts a dict of items, some of which are scalar and some of which are lists, to a list of dicts with scalar items.
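A minimal usage sketch of the expansion above, with hypothetical schedule keys (no cron strings, so _cronslash is never consulted):

    # Hypothetical input: 'minute' is a list, 'hour' is a scalar.
    _expand_scheduledict({'hour': 3, 'minute': [0, 30]})
    # -> [{'hour': 3, 'minute': 0}, {'hour': 3, 'minute': 30}]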
7,601
def get_ecf_props(ep_id, ep_id_ns, rsvc_id=None, ep_ts=None):
    results = {}
    if not ep_id:
        raise ArgumentError("ep_id", "ep_id must be a valid endpoint id")
    results[ECF_ENDPOINT_ID] = ep_id
    if not ep_id_ns:
        raise ArgumentError("ep_id_ns", "ep_id_ns must be a valid namespace")
    results[ECF_ENDPOINT_CONTAINERID_NAMESPACE] = ep_id_ns
    if not rsvc_id:
        rsvc_id = get_next_rsid()
    results[ECF_RSVC_ID] = rsvc_id
    if not ep_ts:
        ep_ts = time_since_epoch()
    results[ECF_ENDPOINT_TIMESTAMP] = ep_ts
    return results
Prepares the ECF properties :param ep_id: Endpoint ID :param ep_id_ns: Namespace of the Endpoint ID :param rsvc_id: Remote service ID :param ep_ts: Timestamp of the endpoint :return: A dictionary of ECF properties
7,602
def inspect(self, tab_width=2, ident_char=): startpath = self.path output = [] for (root, dirs, files) in os.walk(startpath): level = root.replace(startpath, ).count(os.sep) indent = ident_char * tab_width * (level) if level == 0: output.append(.format(indent, os.path.basename(root))) else: output.append(.format(indent, os.path.basename(root))) subindent = ident_char * tab_width * (level + 1) [output.append(.format(subindent, f)) for f in files] return .join(output)
Inspects a project file structure based on the instance folder property. :param tab_width: width size for subfolders and files. :param ident_char: char to be used to show indentation level Returns A string containing the project structure.
7,603
def hclust_linearize(U):
    from scipy.cluster import hierarchy
    Z = hierarchy.ward(U)
    return hierarchy.leaves_list(hierarchy.optimal_leaf_ordering(Z, U))
Sorts the rows of a matrix by hierarchical clustering. Parameters: U (ndarray) : matrix of data Returns: prm (ndarray) : permutation of the rows
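A short usage sketch, assuming SciPy is available; the returned permutation can be applied directly to reorder the rows:

    import numpy as np
    U = np.random.rand(10, 4)          # hypothetical data matrix
    row_order = hclust_linearize(U)    # permutation of range(10)
    U_sorted = U[row_order]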
7,604
def calc_surfdist(surface, labels, annot, reg, origin, target): import nibabel as nib import numpy as np import os from surfdist import load, utils, surfdist import csv surf = nib.freesurfer.read_geometry(surface) cort = np.sort(nib.freesurfer.read_label(labels)) src = load.load_freesurfer_label(annot, origin, cort) dist = surfdist.dist_calc(surf, cort, src) trg = nib.freesurfer.read_geometry(target)[0] native = nib.freesurfer.read_geometry(reg)[0] idx_trg_to_native = utils.find_node_match(trg, native)[0] distt = dist[idx_trg_to_native] filename = os.path.join(os.getcwd(),) distt.tofile(filename,sep=",") return filename
inputs: surface - surface file (e.g. lh.pial, with full path) labels - label file (e.g. lh.cortex.label, with full path) annot - annot file (e.g. lh.aparc.a2009s.annot, with full path) reg - registration file (lh.sphere.reg) origin - the label from which we calculate distances target - target surface (e.g. fsaverage4)
7,605
def signOp(self, op: Dict, identifier: Identifier=None) -> Request:
    request = Request(operation=op, protocolVersion=CURRENT_PROTOCOL_VERSION)
    return self.signRequest(request, identifier)
Signs the message if a signer is configured :param identifier: signing identifier; if not supplied the default for the wallet is used. :param op: Operation to be signed :return: a signed Request object
7,606
def add_wic(self, old_wic, wic): new_wic = + old_wic[-1] self.node[][new_wic] = wic
Convert the old style WIC slot to a new style WIC slot and add the WIC to the node properties :param str old_wic: Old WIC slot :param str wic: WIC name
7,607
def ckchol(M):
    from numpy import linalg, matrix, eye, size
    try:
        output = linalg.cholesky(M)
    except:
        print()
        output = matrix(eye(size(M, 0)))
    return output
CKCHOL This function computes the Cholesky decomposition of the matrix if it's positive-definite; else it returns the identity matrix. It was written to handle the "matrix must be positive definite" error in linalg.cholesky. Version: 2011may03
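A behaviour sketch of the fallback described above: a positive-definite input yields its Cholesky factor, anything else yields the identity matrix of the same size.

    from numpy import matrix
    ckchol(matrix([[4.0, 0.0], [0.0, 9.0]]))   # matrix([[2., 0.], [0., 3.]])
    ckchol(matrix([[0.0, 1.0], [1.0, 0.0]]))   # not positive-definite -> 2x2 identity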
7,608
def set_monitoring_transaction_name(name, group=None, priority=None):
    if not newrelic:
        return
    newrelic.agent.set_transaction_name(name, group, priority)
Sets the transaction name for monitoring. This is not cached, and only supports reporting to New Relic.
7,609
def spoolable(*, pre_condition=True, body_params=()):
    def decorator(func):
        context_name = None
        keyword_kinds = {inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY}
        invalid_body_params = set(body_params)
        for name, parameter in inspect.signature(func).parameters.items():
            if parameter.kind not in keyword_kinds:
                continue
            if not context_name and parameter.annotation is Context:
                context_name = name
            elif name in invalid_body_params:
                invalid_body_params.remove(name)
        if invalid_body_params:
            raise TypeError()
        task = Task(func, context_name=context_name, pre_condition=pre_condition, body_params=body_params)
        spooler.register(task)
        return task
    return decorator
Decorates a function to make it spoolable using uWSGI, but if no spooling mechanism is available, the function is called synchronously. All decorated function arguments must be picklable and the first annotated with `Context` will receive an object that defines the current execution state. Return values are always ignored and all exceptions are caught in spooled mode. :param pre_condition: additional condition needed to use spooler :param body_params: parameter names that can have large values and should use spooler body
7,610
def _advance_window(self):
    x_to_remove, y_to_remove = self._x_in_window[0], self._y_in_window[0]
    self._window_bound_lower += 1
    self._update_values_in_window()
    x_to_add, y_to_add = self._x_in_window[-1], self._y_in_window[-1]
    self._remove_observation(x_to_remove, y_to_remove)
    self._add_observation(x_to_add, y_to_add)
Update values in current window and the current window means and variances.
7,611
def add_stack_frame(self, stack_frame):
    if len(self.stack_frames) >= MAX_FRAMES:
        self.dropped_frames_count += 1
    else:
        self.stack_frames.append(stack_frame.format_stack_frame_json())
Add StackFrame to frames list.
7,612
def get_insertions(aln_df): insertion_df = aln_df[aln_df[] == ] to_append = (insertion_region, insertion_length) insertions.append(to_append) return insertions
Get a list of tuples indicating the first and last residues of a insertion region, as well as the length of the insertion. If the first tuple is: (-1, 1) that means the insertion is at the beginning of the original protein (X, Inf) where X is the length of the original protein, that means the insertion is at the end of the protein Examples: # Insertion at beginning, length 3 >>> test = {'id_a': {0: 'a', 1: 'a', 2: 'a', 3: 'a'}, 'id_a_aa': {0: np.nan, 1: np.nan, 2: np.nan, 3: 'M'}, 'id_a_pos': {0: np.nan, 1: np.nan, 2: np.nan, 3: 1.0}, 'id_b': {0: 'b', 1: 'b', 2: 'b', 3: 'b'}, 'id_b_aa': {0: 'M', 1: 'M', 2: 'L', 3: 'M'}, 'id_b_pos': {0: 1, 1: 2, 2: 3, 3: 4}, 'type': {0: 'insertion', 1: 'insertion', 2: 'insertion', 3: 'match'}} >>> my_alignment = pd.DataFrame.from_dict(test) >>> get_insertions(my_alignment) [((-1, 1.0), 3)] Args: aln_df (DataFrame): Alignment DataFrame Returns: list: A list of tuples with the format ((insertion_start_resnum, insertion_end_resnum), insertion_length)
7,613
def on_update(self, value, *args, **kwargs):
    parent_value = self._parent_min
    if self._max != self._min:
        sub_progress = (value - self._min) / (self._max - self._min)
        parent_value = self._parent_min + sub_progress * (self._parent_max - self._parent_min)
    self._parent.update(parent_value, *args, **kwargs)
Inform the parent of progress. :param value: The value of this subprogresscallback :param args: Extra positional arguments :param kwargs: Extra keyword arguments
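As a worked example of the mapping above: with a sub-range of 0–10 and a parent range of 20–40, an update with value = 5 gives sub_progress = 0.5 and reports 20 + 0.5 * (40 - 20) = 30 to the parent.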
7,614
def getExperimentDescriptionInterfaceFromModule(module):
    result = module.descriptionInterface
    assert isinstance(result, exp_description_api.DescriptionIface), \
        "expected DescriptionIface-based instance, but got %s" % type(result)
    return result
:param module: imported description.py module :returns: (:class:`nupic.frameworks.opf.exp_description_api.DescriptionIface`) represents the experiment description
7,615
def colorize(text=, opts=(), **kwargs): code_list = [] if text == and len(opts) == 1 and opts[0] == : return % RESET for k, v in kwargs.iteritems(): if k == : code_list.append(foreground[v]) elif k == : code_list.append(background[v]) for o in opts: if o in opt_dict: code_list.append(opt_dict[o]) if not in opts: text = text + % RESET return ( % .join(code_list)) + text
Returns your text, enclosed in ANSI graphics codes. Depends on the keyword arguments 'fg' and 'bg', and the contents of the opts tuple/list. Returns the RESET code if no parameters are given. Valid colors: 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white' Valid options: 'bold' 'underscore' 'blink' 'reverse' 'conceal' 'noreset' - string will not be auto-terminated with the RESET code Examples: colorize('hello', fg='red', bg='blue', opts=('blink',)) colorize() colorize('goodbye', opts=('underscore',)) print colorize('first line', fg='red', opts=('noreset',)) print 'this should be red too' print colorize('and so should this') print 'this should not be red'
7,616
def import_patch(self, patch_name, new_name=None): if new_name: dir_name = os.path.dirname(new_name) name = os.path.basename(new_name) dest_dir = self.quilt_patches + Directory(dir_name) dest_dir.create() else: name = os.path.basename(patch_name) dest_dir = self.quilt_patches patch_file = File(patch_name) dest_file = dest_dir + File(name) patch_file.copy(dest_file) self._import_patches([name])
Import patch into the patch queue The patch is inserted as the next unapplied patch.
7,617
def print_hex(self, value, justify_right=True): if value < 0 or value > 0xFFFF: return self.print_str(.format(value), justify_right)
Print a numeric value in hexadecimal. Value should be from 0 to FFFF.
7,618
def interp(mapping, x):
    mapping = sorted(mapping)
    if len(mapping) == 1:
        xa, ya = mapping[0]
        if xa == x:
            return ya
        return x
    for (xa, ya), (xb, yb) in zip(mapping[:-1], mapping[1:]):
        if xa <= x <= xb:
            return ya + float(x - xa) / (xb - xa) * (yb - ya)
    return x
Compute the piecewise linear interpolation given by mapping for input x. >>> interp(((1, 1), (2, 4)), 1.5) 2.5
7,619
def append_sibling_field(self, linenum, indent, field_name, field_value):
    frame = self.current_frame()
    assert frame.indent is not None and frame.indent == indent
    self.pop_frame()
    self.append_child_field(linenum, indent, field_name, field_value)
:param linenum: The line number of the frame. :type linenum: int :param indent: The indentation level of the frame. :type indent: int :param path: :type path: Path :param field_name: :type field_name: str :param field_value: :type field_value: str
7,620
def create_external_table(self, external_project_dataset_table, schema_fields, source_uris, source_format=, autodetect=False, compression=, ignore_unknown_values=False, max_bad_records=0, skip_leading_rows=0, field_delimiter=, quote_character=None, allow_quoted_newlines=False, allow_jagged_rows=False, src_fmt_configs=None, labels=None ): if src_fmt_configs is None: src_fmt_configs = {} project_id, dataset_id, external_table_id = \ _split_tablename(table_input=external_project_dataset_table, default_project_id=self.project_id, var_name=) )
Creates a new external table in the dataset with the data in Google Cloud Storage. See here: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource for more details about these parameters. :param external_project_dataset_table: The dotted ``(<project>.|<project>:)<dataset>.<table>($<partition>)`` BigQuery table name to create external table. If ``<project>`` is not included, project will be the project defined in the connection json. :type external_project_dataset_table: str :param schema_fields: The schema field list as defined here: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource :type schema_fields: list :param source_uris: The source Google Cloud Storage URI (e.g. gs://some-bucket/some-file.txt). A single wild per-object name can be used. :type source_uris: list :param source_format: File format to export. :type source_format: str :param autodetect: Try to detect schema and format options automatically. Any option specified explicitly will be honored. :type autodetect: bool :param compression: [Optional] The compression type of the data source. Possible values include GZIP and NONE. The default value is NONE. This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups and Avro formats. :type compression: str :param ignore_unknown_values: [Optional] Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. :type ignore_unknown_values: bool :param max_bad_records: The maximum number of bad records that BigQuery can ignore when running the job. :type max_bad_records: int :param skip_leading_rows: Number of rows to skip when loading from a CSV. :type skip_leading_rows: int :param field_delimiter: The delimiter to use when loading from a CSV. :type field_delimiter: str :param quote_character: The value that is used to quote data sections in a CSV file. :type quote_character: str :param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false). :type allow_quoted_newlines: bool :param allow_jagged_rows: Accept rows that are missing trailing optional columns. The missing values are treated as nulls. If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. Only applicable when soure_format is CSV. :type allow_jagged_rows: bool :param src_fmt_configs: configure optional fields specific to the source format :type src_fmt_configs: dict :param labels: a dictionary containing labels for the table, passed to BigQuery :type labels: dict
7,621
def get_notebook_status(self, name):
    context = comm.get_context(self.get_pid(name))
    if not context:
        return None
    return context
Get the running named Notebook status. :return: None if no notebook is running, otherwise context dictionary
7,622
def fit(self, X, y=None, **fit_params): self.opt_ = None self.cputime_ = None self.iters_ = None self.duality_gap_ = None self.path_ = None self.sample_covariance_ = None self.lam_scale_ = None self.lam_ = None self.is_fitted_ = False X = check_array(X, ensure_min_features=2, estimator=self) X = as_float_array(X, copy=False, force_all_finite=False) self.init_coefs(X) lam_1 = self.lam_scale_ lam_0 = 1e-2 * lam_1 if self.path is None: self.path_ = np.logspace(np.log10(lam_0), np.log10(lam_1), 100)[::-1] elif isinstance(self.path, int): self.path_ = np.logspace(np.log10(lam_0), np.log10(lam_1), self.path)[::-1] else: self.path_ = self.path self.path_ = _validate_path(self.path_) if self.method == "quic": (self.precision_, self.covariance_, _, _, _, _) = quic( self.sample_covariance_, self.lam * self.lam_scale_, mode="path", tol=self.tol, max_iter=self.max_iter, Theta0=self.Theta0, Sigma0=self.Sigma0, path=self.path_, msg=self.verbose, ) self.is_fitted_ = True else: raise NotImplementedError("Only method= has been implemented.") best_lam_idx = self.ebic_select(gamma=self.gamma) self.lam_ = self.lam * self.lam_scale_ * self.path_[best_lam_idx] self.precision_ = self.precision_[best_lam_idx] self.covariance_ = self.covariance_[best_lam_idx] self.is_fitted_ = True return self
Fits the inverse covariance model according to the given training data and parameters. Parameters ----------- X : 2D ndarray, shape (n_features, n_features) Input data. Returns ------- self
7,623
def save_config(self): self.set_option(, self.recent_projects) self.set_option(, self.explorer.treewidget.get_expanded_state()) self.set_option(, self.explorer.treewidget.get_scrollbar_position()) if self.current_active_project and self.dockwidget: self.set_option(, self.dockwidget.isVisible())
Save configuration: opened projects & tree widget state. Also save whether dock widget is visible if a project is open.
7,624
def task(ft): ft.pack(expand = True, fill = BOTH, side = TOP) pb_hD = ttk.Progressbar(ft, orient = , mode = ) pb_hD.pack(expand = True, fill = BOTH, side = TOP) pb_hD.start(50) ft.mainloop()
Create a loading progress bar.
7,625
def saml_provider_absent(name, region=None, key=None, keyid=None, profile=None): ret = {: name, : True, : , : {}} provider = __salt__[](region=region, key=key, keyid=keyid, profile=profile) if not provider: ret[] = .format(name) return ret if __opts__[]: ret[] = .format(name) ret[] = None return ret deleted = __salt__[](name, region=region, key=key, keyid=keyid, profile=profile) if deleted is not False: ret[] = .format(name) ret[][] = name return ret ret[] = False ret[] = .format(name) return ret
.. versionadded:: 2016.11.0 Ensure the SAML provider with the specified name is absent. name (string) The name of the SAML provider. saml_metadata_document (string) The xml document of the SAML provider. region (string) Region to connect to. key (string) Secret key to be used. keyid (string) Access key to be used. profile (dict) A dict with region, key and keyid, or a pillar key (string) that contains a dict with region, key and keyid.
7,626
def start(self): if self._status == TransferState.PREPARING: super(Upload, self).start() else: raise SbgError( )
Starts the upload. :raises SbgError: If upload is not in PREPARING state.
7,627
async def create(self, query, *, dc=None):
    if "Token" in query:
        query["Token"] = extract_attr(query["Token"], keys=["ID"])
    response = await self._api.post("/v1/query", params={"dc": dc}, data=query)
    return response.body
Creates a new prepared query Parameters: Query (Object): Query definition dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. Returns: Object: New query ID The create operation expects a body that defines the prepared query, like this example:: { "Name": "my-query", "Session": "adf4238a-882b-9ddc-4a9d-5b6758e4159e", "Token": "", "Near": "node1", "Service": { "Service": "redis", "Failover": { "NearestN": 3, "Datacenters": ["dc1", "dc2"] }, "OnlyPassing": False, "Tags": ["master", "!experimental"] }, "DNS": { "TTL": timedelta(seconds=10) } } Only the **Service** field inside the **Service** structure is mandatory, all other fields will take their default values if they are not included. **Name** is an optional friendly name that can be used to execute a query instead of using its ID. **Session** provides a way to automatically remove a prepared query when the given session is invalidated. This is optional, and if not given the prepared query must be manually removed when no longer needed. **Token**, if specified, is a captured ACL Token that is reused as the ACL Token every time the query is executed. This allows queries to be executed by clients with lesser or even no ACL Token, so this should be used with care. The token itself can only be seen by clients with a management token. If the **Token** field is left blank or omitted, the client's ACL Token will be used to determine if they have access to the service being queried. If the client does not supply an ACL Token, the anonymous token will be used. **Near** allows specifying a particular node to sort near based on distance sorting using Network Coordinates. The nearest instance to the specified node will be returned first, and subsequent nodes in the response will be sorted in ascending order of estimated round-trip times. If the node given does not exist, the nodes in the response will be shuffled. Using the magic **_agent** value is supported, and will automatically return results nearest the agent servicing the request. If unspecified, the response will be shuffled by default. The set of fields inside the **Service** structure define the query's behavior. **Service** is the name of the service to query. This is required. **Failover** contains two fields, both of which are optional, and determine what happens if no healthy nodes are available in the local datacenter when the query is executed. It allows the use of nodes in other datacenters with very little configuration. If **NearestN** is set to a value greater than zero, then the query will be forwarded to up to **NearestN** other datacenters based on their estimated network round trip time using Network Coordinates from the WAN gossip pool. The median round trip time from the server handling the query to the servers in the remote datacenter is used to determine the priority. The default value is zero. All Consul servers must be running version 0.6.0 or above in order for this feature to work correctly. If any servers are not running the required version of Consul they will be considered last since they won't have any available network coordinate information. **Datacenters** contains a fixed list of remote datacenters to forward the query to if there are no healthy nodes in the local datacenter. Datacenters are queried in the order given in the list. If this option is combined with **NearestN**, then the **NearestN** queries will be performed first, followed by the list given by **Datacenters**. 
A given datacenter will only be queried one time during a failover, even if it is selected by both **NearestN** and is listed in **Datacenters**. The default value is an empty list. **OnlyPassing** controls the behavior of the query's health check filtering. If this is set to false, the results will include nodes with checks in the passing as well as the warning states. If this is set to true, only nodes with checks in the passing state will be returned. The default value is False. **Tags** provides a list of service tags to filter the query results. For a service to pass the tag filter it must have all of the required tags, and none of the excluded tags (prefixed with ``!``). The default value is an empty list, which does no tag filtering. **TTL** in the **DNS** structure is a duration string that can use "s" as a suffix for seconds. It controls how the TTL is set when query results are served over DNS. If this isn't specified, then the Consul agent configuration for the given service will be used (see DNS Caching). If this is specified, it will take precedence over any Consul agent-specific configuration. If no TTL is specified here or at the Consul agent level, then the TTL will default to 0. It returns the ID of the created query:: { "ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05" }
7,628
def Set(self, interface_name, property_name, value, *args, **kwargs): self.log( % (interface_name, property_name, self.format_args((value,)))) try: iface_props = self.props[interface_name] except KeyError: raise dbus.exceptions.DBusException( + interface_name, name=self.interface + ) if property_name not in iface_props: raise dbus.exceptions.DBusException( + property_name, name=self.interface + ) iface_props[property_name] = value self.EmitSignal(, , , [interface_name, dbus.Dictionary({property_name: value}, signature=), dbus.Array([], signature=) ])
Standard D-Bus API for setting a property value
7,629
def poke(self, context): self.log.info(, self.channels) message = self.pubsub.get_message() self.log.info(, message, self.channels) if message and message[] == : context[].xcom_push(key=, value=message) self.pubsub.unsubscribe(self.channels) return True return False
Check for message on subscribed channels and write to xcom the message with key ``message`` An example of message ``{'type': 'message', 'pattern': None, 'channel': b'test', 'data': b'hello'}`` :param context: the context object :type context: dict :return: ``True`` if message (with type 'message') is available or ``False`` if not
7,630
def parse_plotProfile(self): self.deeptools_plotProfile = dict() for f in self.find_log_files(, filehandles=False): parsed_data, bin_labels, converted_bin_labels = self.parsePlotProfileData(f) for k, v in parsed_data.items(): if k in self.deeptools_plotProfile: log.warning("Replacing duplicate sample {}.".format(k)) self.deeptools_plotProfile[k] = v if len(parsed_data) > 0: self.add_data_source(f, section=) if len(self.deeptools_plotProfile) > 0: config = { : , : , : , : None, : 100, : [ {: converted_bin_labels[bin_labels.index()], : converted_bin_labels[-1], : }, {: converted_bin_labels[bin_labels.index()], : converted_bin_labels[bin_labels.index()], : }, {: converted_bin_labels[0], : converted_bin_labels[bin_labels.index()], : }, ], : [ {: 1, : converted_bin_labels[bin_labels.index()], : , : }, {: 1, : converted_bin_labels[bin_labels.index()], : , : }, ], } self.add_section ( name = , anchor = , description="Accumulated view of the distribution of sequence reads related to the closest annotated gene. All annotated genes have been normalized to the same size. Green: {} upstream of gene to {}; Yellow: {} to {}; Pink: {} to {} downstream of gene".format(list(filter(None,bin_labels))[0], list(filter(None,bin_labels))[1], list(filter(None,bin_labels))[1], list(filter(None,bin_labels))[2], list(filter(None,bin_labels))[2], list(filter(None,bin_labels))[3]), plot=linegraph.plot(self.deeptools_plotProfile, config) ) return len(self.deeptools_bamPEFragmentSizeDistribution)
Find plotProfile output
7,631
def Policy(self, data=None, subset=None): return self.factory.get_object(jssobjects.Policy, data, subset)
{dynamic_docstring}
7,632
def make_sentences(self, stream_item): self.make_label_index(stream_item) sentences = [] token_num = 0 new_mention_id = 0 for sent_start, sent_end, sent_str in self._sentences( stream_item.body.clean_visible): assert isinstance(sent_str, unicode) sent = Sentence() sentence_pos = 0 for start, end in self.word_tokenizer.span_tokenize(sent_str): token_str = sent_str[start:end].encode() tok = Token( token_num=token_num, token=token_str, sentence_pos=sentence_pos, ) tok.offsets[OffsetType.CHARS] = Offset( type=OffsetType.CHARS, first=sent_start + start, length=end - start, ) try: label = self.label_index.find_le(sent_start + start) except ValueError: label = None if label: off = label.offsets[OffsetType.CHARS] if off.first + off.length > sent_start + start: streamcorpus.add_annotation(tok, label) logger.debug(, tok.token, label.target.target_id) if label in self.label_to_mention_id: mention_id = self.label_to_mention_id[label] else: mention_id = new_mention_id new_mention_id += 1 self.label_to_mention_id[label] = mention_id tok.mention_id = mention_id token_num += 1 sentence_pos += 1 sent.tokens.append(tok) sentences.append(sent) return sentences
assemble Sentence and Token objects
7,633
def uppercase(self, value):
    if not isinstance(value, bool):
        raise TypeError()
    self._uppercase = value
Validate and set the uppercase flag.
7,634
def _parse_current_network_settings(): opts = salt.utils.odict.OrderedDict() opts[] = if os.path.isfile(_DEB_NETWORKING_FILE): with salt.utils.files.fopen(_DEB_NETWORKING_FILE) as contents: for line in contents: salt.utils.stringutils.to_unicode(line) if line.startswith(): continue elif line.startswith(): opts[] = line.split(, 1)[1].strip() hostname = _parse_hostname() domainname = _parse_domainname() searchdomain = _parse_searchdomain() opts[] = hostname opts[] = domainname opts[] = searchdomain return opts
Parse /etc/default/networking and return current configuration
7,635
def get_semester_title(self, node: BaseNode):
    log.debug("Getting Semester Title for %s" % node.course.id)
    return self._get_semester_from_id(node.course.semester)
get the semester of a node
7,636
def read(self):
    "Read and interpret data from the daemon."
    status = gpscommon.read(self)
    if status <= 0:
        return status
    if self.response.startswith("{") and self.response.endswith("}\r\n"):
        self.unpack(self.response)
        self.__oldstyle_shim()
        self.newstyle = True
        self.valid |= PACKET_SET
    elif self.response.startswith("GPSD"):
        self.__oldstyle_unpack(self.response)
        self.valid |= PACKET_SET
    return 0
Read and interpret data from the daemon.
7,637
def get_concept(self, conceptId, lang=): url = urljoin(self.concept_service + , conceptId) res, status_code = self.get(url, params={: lang}) if status_code != 200: logger.debug() return self.decode(res), status_code
Fetch the concept from the Knowledge base Args: id (str): The concept id to be fetched, it can be a Wikipedia page id or a Wikidata id. Returns: dict, int: A dict containing the concept information; an integer representing the response code.
7,638
def _double_centered_imp(a, out=None):
    out = _float_copy_to_out(out, a)
    dim = np.size(a, 0)
    mu = np.sum(a) / (dim * dim)
    sum_cols = np.sum(a, 0, keepdims=True)
    sum_rows = np.sum(a, 1, keepdims=True)
    mu_cols = sum_cols / dim
    mu_rows = sum_rows / dim
    out -= mu_rows
    out -= mu_cols
    out += mu
    return out
Real implementation of :func:`double_centered`. This function is used to make parameter ``out`` keyword-only in Python 2.
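A worked sketch of the double-centering identity, assuming the unseen helper _float_copy_to_out simply copies `a` when `out` is None: each entry becomes a[i, j] - row_mean[i] - col_mean[j] + grand_mean, so every row and column of the result sums to zero.

    import numpy as np
    a = np.array([[1.0, 2.0], [3.0, 5.0]])
    _double_centered_imp(a)
    # -> [[ 0.25, -0.25],
    #     [-0.25,  0.25]]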
7,639
def ReadAllFlowObjects(
    self,
    client_id=None,
    min_create_time=None,
    max_create_time=None,
    include_child_flows=True,
):
    res = []
    for flow in itervalues(self.flows):
        if ((client_id is None or flow.client_id == client_id) and
                (min_create_time is None or flow.create_time >= min_create_time) and
                (max_create_time is None or flow.create_time <= max_create_time) and
                (include_child_flows or not flow.parent_flow_id)):
            res.append(flow.Copy())
    return res
Returns all flow objects.
7,640
def languages(self, key, value): languages = self.get(, []) values = force_list(value.get()) for value in values: for language in RE_LANGUAGE.split(value): try: name = language.strip().capitalize() languages.append(pycountry.languages.get(name=name).alpha_2) except KeyError: pass return languages
Populate the ``languages`` key.
7,641
def res_from_en(pst,enfile): converters = {"name": str_con, "group": str_con} try: obs=pst.observation_data if isinstance(enfile,str): df=pd.read_csv(enfile,converters=converters) df.columns=df.columns.str.lower() df = df.set_index().T.rename_axis().rename_axis(None, 1) else: df = enfile.T if in df.columns: df[]=df[] df[]=df.std(axis=1) else: df[]=df.mean(axis=1) df[]=df.std(axis=1) res_df=df[[,]].copy() res_df[]=obs.loc[:,].copy() res_df[]=obs[].copy() res_df[]=obs[].copy() res_df[]=res_df[]-res_df[] except Exception as e: raise Exception("Pst.res_from_en:{0}".format(str(e))) return res_df
load ensemble file for residual into a pandas.DataFrame Parameters ---------- enfile : str ensemble file name Returns ------- pandas.DataFrame : pandas.DataFrame
7,642
def change_execution_time(self, job, date_time): with self.connection.pipeline() as pipe: while 1: try: pipe.watch(self.scheduled_jobs_key) if pipe.zscore(self.scheduled_jobs_key, job.id) is None: raise ValueError() pipe.zadd(self.scheduled_jobs_key, {job.id: to_unix(date_time)}) break except WatchError: if pipe.zscore(self.scheduled_jobs_key, job.id) is None: raise ValueError() continue
Change a job's execution time.
7,643
def media_new(self, mrl, *options): if in mrl and mrl.index() > 1: m = libvlc_media_new_location(self, str_to_bytes(mrl)) else: m = libvlc_media_new_path(self, str_to_bytes(os.path.normpath(mrl))) for o in options: libvlc_media_add_option(m, str_to_bytes(o)) m._instance = self return m
Create a new Media instance. If mrl contains a colon (:) preceded by more than 1 letter, it will be treated as a URL. Else, it will be considered as a local path. If you need more control, directly use media_new_location/media_new_path methods. Options can be specified as supplementary string parameters, but note that many options cannot be set at the media level, and rather at the Instance level. For instance, the marquee filter must be specified when creating the vlc.Instance or vlc.MediaPlayer. Alternatively, options can be added to the media using the Media.add_options method (with the same limitation). @param options: optional media option=value strings
7,644
def url(self): url = self.xml.find() if url is not None: url = url.get() return url
URL to the affiliation's profile page.
7,645
def add_jira_status(test_key, test_status, test_comment): global attachments if test_key and enabled: if test_key in jira_tests_status: previous_status = jira_tests_status[test_key] test_status = if previous_status[1] == and test_status == else if previous_status[2] and test_comment: test_comment = .format(previous_status[2], test_comment) elif previous_status[2] and not test_comment: test_comment = previous_status[2] attachments += previous_status[3] jira_tests_status[test_key] = (test_key, test_status, test_comment, attachments)
Save test status and comments to update Jira later :param test_key: test case key in Jira :param test_status: test case status :param test_comment: test case comments
7,646
def get_inverses(self, keys): return tuple([as_index(k, axis=0).inverse for k in keys])
Returns ------- Tuple of inverse indices
7,647
def get_jids(): ret = {} for returner_ in __opts__[CONFIG_KEY]: ret.update(_mminion().returners[.format(returner_)]()) return ret
Return all job data from all returners
7,648
def delete_sp_template_for_vlan(self, vlan_id):
    with self.session.begin(subtransactions=True):
        try:
            self.session.query(
                ucsm_model.ServiceProfileTemplate).filter_by(
                    vlan_id=vlan_id).delete()
        except orm.exc.NoResultFound:
            return
Deletes SP Template for a vlan_id if it exists.
7,649
def visit_raise(self, node):
    if node.exc is None:
        return
    expr = node.exc
    if self._check_raise_value(node, expr):
        return
    try:
        value = next(astroid.unpack_infer(expr))
    except astroid.InferenceError:
        return
    self._check_raise_value(node, value)
Visit a raise statement and check for raising strings or old-raise-syntax.
7,650
def get_details(self): data = [] if self.deployment == : data.append(VmDetailsProperty(key=,value= self.dep_attributes.get(,))) if self.deployment == : template = self.dep_attributes.get(,) snapshot = self.dep_attributes.get(,) data.append(VmDetailsProperty(key=,value= .format(template, snapshot))) if self.deployment == : data.append(VmDetailsProperty(key=,value= self.dep_attributes.get(,).split()[-1])) if self.deployment == : data.append(VmDetailsProperty(key=,value= self.dep_attributes.get(,))) return data
:rtype list[VmDataField]
7,651
def save_model(self, request, obj, form, change):
    if change and obj._old_slug != obj.slug:
        new_slug = obj.slug or obj.generate_unique_slug()
        obj.slug = obj._old_slug
        obj.set_slug(new_slug)
    parent = request.GET.get("parent")
    if parent is not None and not change:
        obj.parent_id = parent
        obj.save()
    super(PageAdmin, self).save_model(request, obj, form, change)
Set the ID of the parent page if passed in via querystring, and make sure the new slug propagates to all descendant pages.
7,652
def rsdl(self): return np.linalg.norm((self.X - self.Yprv).ravel())
Compute fixed point residual.
7,653
def to_bqstorage(self):
    if bigquery_storage_v1beta1 is None:
        raise ValueError(_NO_BQSTORAGE_ERROR)
    table_ref = bigquery_storage_v1beta1.types.TableReference()
    table_ref.project_id = self._project
    table_ref.dataset_id = self._dataset_id
    table_id = self._table_id
    if "@" in table_id:
        table_id = table_id.split("@")[0]
    if "$" in table_id:
        table_id = table_id.split("$")[0]
    table_ref.table_id = table_id
    return table_ref
Construct a BigQuery Storage API representation of this table. Install the ``google-cloud-bigquery-storage`` package to use this feature. If the ``table_id`` contains a partition identifier (e.g. ``my_table$201812``) or a snapshot identifier (e.g. ``mytable@1234567890``), it is ignored. Use :class:`google.cloud.bigquery_storage_v1beta1.types.TableReadOptions` to filter rows by partition. Use :class:`google.cloud.bigquery_storage_v1beta1.types.TableModifiers` to select a specific snapshot to read from. Returns: google.cloud.bigquery_storage_v1beta1.types.TableReference: A reference to this table in the BigQuery Storage API. Raises: ValueError: If the :mod:`google.cloud.bigquery_storage_v1beta1` module cannot be imported.
7,654
def _make_single_subvolume(self, only_one=True, **args): if only_one and self.volumes: return self.volumes[0] if self.parent.index is None: index = else: index = .format(self.parent.index) volume = self._make_subvolume(index=index, **args) return volume
Creates a subvolume, adds it to this class, sets the volume index to 0 and returns it. :param bool only_one: if this volume system already has at least one volume, it is returned instead.
7,655
def get_message(self, timeout=0.5):
    try:
        return self.buffer.get(block=not self.is_stopped, timeout=timeout)
    except Empty:
        return None
Attempts to retrieve the latest message received by the instance. If no message is available it blocks for the given timeout or until a message is received, or else returns None (whichever is shorter). This method does not block after :meth:`can.BufferedReader.stop` has been called. :param float timeout: The number of seconds to wait for a new message. :rtype: can.Message or None :return: the message if there is one, or None if there is not.
7,656
def dice(edge=15, fn=32): edge = float(edge) c = ops.Cube(edge, center=True) s = ops.Sphere(edge * 3 / 4, center=True) dice = c & s c = ops.Circle(edge / 12, _fn=fn) h = 0.7 point = c.linear_extrude(height=h) point1 = point.translate([0, 0, edge / 2 - h / 2]) point2_1 = point1.rotate(a=90, v=[1, 0, 0]).translate([edge / 6, 0, edge / 6]) point2_2 = point2_1.mirror([-edge / 6, 0, -edge / 6]) point2 = point2_1 + point2_2 point3 = point2.rotate(a=90, v=[0, 0, 1]) + point1.rotate(a=90, v=[0, 1, 0]) point4_12 = point2.rotate(a=-90, v=[0, 0, 1]) point4 = point4_12 + point4_12.mirror([0, 1, 0]) point5_123 = point3.rotate(a=90, v=[0, 0, 1]) point5 = point5_123 + point5_123.mirror([1, 0, 0]) point6_1 = point.translate([0, 0, -(edge / 2 + h / 2)]).translate([0, edge / 6, 0]) point6_2 = point6_1.translate([edge / 4, 0, 0]) point6_3 = point6_1.translate([-edge / 4, 0, 0]) point6_123 = point6_1 + point6_2 + point6_3 point6_456 = point6_123.mirror([0, 1, 0]) point6 = point6_123 + point6_456 dice_with_holes = dice - point1 - point2 - point3 - point4 - point5 - point6 dice_with_holes = dice_with_holes.mirror([0, 0, 1]) return(dice_with_holes)
dice
7,657
def gather(obj):
    if hasattr(obj, '__distob_gather__'):
        return obj.__distob_gather__()
    elif (isinstance(obj, collections.Sequence) and
            not isinstance(obj, string_types)):
        return [gather(subobj) for subobj in obj]
    else:
        return obj
Retrieve objects that have been distributed, making them local again
7,658
def to_xml(self):
    element = etree.Element(self._tag_name)
    struct_to_xml(element, [
        {"author": self.handle},
        {"target_guid": self.target_guid},
        {"target_type": DiasporaRetraction.entity_type_to_remote(self.entity_type)},
    ])
    return element
Convert to XML message.
7,659
async def play_url(self, url, position=0): headers = {: , : } body = {: url, : position} address = self._url(self.port, ) _LOGGER.debug(, url, address) resp = None try: resp = await self.session.post( address, headers=headers, data=plistlib.dumps(body, fmt=plistlib.FMT_BINARY), timeout=TIMEOUT) await self._wait_for_media_to_end() finally: if resp is not None: resp.close()
Play media from an URL on the device.
7,660
def lazy_reverse_binmap(f, xs):
    return (f(y, x) for x, y in zip(xs, xs[1:]))
Same as lazy_binmap, except the parameters are flipped for the binary function
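A quick usage sketch with a standard-library binary function; because the arguments are flipped, each pair is evaluated as f(next, current):

    import operator
    list(lazy_reverse_binmap(operator.sub, [1, 4, 9, 16]))   # [3, 5, 7]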
7,661
def exists(self, regex): return self.search_full(regex, return_string=False, advance_pointer=False)
See what :meth:`skip_until` would return without advancing the pointer. >>> s = Scanner("test string") >>> s.exists(' ') 5 >>> s.pos 0 Returns the number of characters matched if it does exist, or ``None`` otherwise.
7,662
def _dialect(self, filepath): with open(filepath, self.read_mode) as csvfile: sample = csvfile.read(1024) dialect = csv.Sniffer().sniff(sample) if self.has_header == None: self.has_header = csv.Sniffer().has_header(sample) csvfile.seek(0) return dialect
returns detected dialect of filepath and sets self.has_header if not passed in __init__ kwargs Arguments: filepath (str): filepath of target csv file
7,663
def _defineVariables(self): logger.info(%(len(self.data))) mc_source_id_field = self.config[][] if mc_source_id_field is not None: if mc_source_id_field not in self.data.dtype.names: array = np.zeros(len(self.data),dtype=) self.data = mlab.rec_append_fields(self.data, names=mc_source_id_field, arrs=array) logger.info(%(np.sum(self.mc_source_id>0)))
Helper function to define pertinent variables from catalog data. ADW (20170627): This has largely been replaced by properties.
7,664
def libvlc_vlm_add_vod(p_instance, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux): f = _Cfunctions.get(, None) or \ _Cfunction(, ((1,), (1,), (1,), (1,), (1,), (1,), (1,),), None, ctypes.c_int, Instance, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_int, ListPOINTER(ctypes.c_char_p), ctypes.c_int, ctypes.c_char_p) return f(p_instance, psz_name, psz_input, i_options, ppsz_options, b_enabled, psz_mux)
Add a vod, with one input. @param p_instance: the instance. @param psz_name: the name of the new vod media. @param psz_input: the input MRL. @param i_options: number of additional options. @param ppsz_options: additional options. @param b_enabled: boolean for enabling the new vod. @param psz_mux: the muxer of the vod media. @return: 0 on success, -1 on error.
7,665
def close(self):
    if self._backend is not None and not self._closed:
        self._closed = True
        self.events.close()
        self._backend._vispy_close()
        forget_canvas(self)
Close the canvas Notes ----- This will usually destroy the GL context. For Qt, the context (and widget) will be destroyed only if the widget is top-level. To avoid having the widget destroyed (more like standard Qt behavior), consider making the widget a sub-widget.
7,666
def from_any(cls, obj, bucket): if isinstance(obj, cls): return cls(obj.raw) return cls({ : , : bucket, : obj if obj else N1QL_PRIMARY_INDEX, : })
Ensure the current object is an index. Always returns a new object :param obj: string or IndexInfo object :param bucket: The bucket name :return: A new IndexInfo object
7,667
def running(self): r = self._client._redis flag = .format(self._queue) if bool(r.exists(flag)): return r.ttl(flag) is None return False
Returns true if job still in running state :return:
7,668
def read(self, line, f, data):
    line = f.readline()
    assert(line == " $HESS\n")
    while line != " $END\n":
        line = f.readline()
See :meth:`PunchParser.read`
7,669
def _arm_thumb_filter_jump_successors(self, addr, size, successors, get_ins_addr, get_exit_stmt_idx): if not successors: return [ ] it_counter = 0 conc_temps = {} can_produce_exits = set() bb = self._lift(addr, size=size, thumb=True, opt_level=0) for stmt in bb.vex.statements: if stmt.tag == : if it_counter > 0: it_counter -= 1 can_produce_exits.add(stmt.addr + stmt.delta) elif stmt.tag == : val = stmt.data if val.tag == : conc_temps[stmt.tmp] = val.con.value elif stmt.tag == : if stmt.offset == self.project.arch.registers[][0]: val = stmt.data if val.tag == : if val.tmp in conc_temps: it_counter = 0 itstate = conc_temps[val.tmp] while itstate != 0: it_counter += 1 itstate >>= 8 if it_counter != 0: l.debug(, addr) THUMB_BRANCH_INSTRUCTIONS = (, , , , , , , , , , , , , , , , , ) for cs_insn in bb.capstone.insns: if cs_insn.mnemonic.split()[0] in THUMB_BRANCH_INSTRUCTIONS: can_produce_exits.add(cs_insn.address) successors_filtered = [suc for suc in successors if get_ins_addr(suc) in can_produce_exits or get_exit_stmt_idx(suc) == DEFAULT_STATEMENT] return successors_filtered
Filter successors for THUMB mode basic blocks, and remove those successors that won't be taken normally. :param int addr: Address of the basic block / SimIRSB. :param int size: Size of the basic block. :param list successors: A list of successors. :param func get_ins_addr: A callable that returns the source instruction address for a successor. :param func get_exit_stmt_idx: A callable that returns the source statement ID for a successor. :return: A new list of successors after filtering. :rtype: list
7,670
def father(self):
    if self._father == []:
        self._father = self.sub_tag("FAMC/HUSB")
    return self._father
Parent of this individual
7,671
def match(pattern):
    regex = re.compile(pattern)
    def validate(value):
        if not regex.match(value):
            return e("{} does not match the pattern {}", value, pattern)
    return validate
Validates that a field value matches the regex given to this validator.
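A usage sketch, assuming the surrounding module's error helper `e` formats the failure message as shown in the code:

    validate_zip = match(r'^\d{5}$')
    validate_zip('90210')   # returns None -> value is accepted
    validate_zip('abc')     # returns the formatted error from e(...)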
7,672
def generic_http_header_parser_for(header_name): def parser(): request_id = request.headers.get(header_name, ).strip() if not request_id: return None return request_id return parser
A parser factory to extract the request id from an HTTP header :return: A parser that can be used to extract the request id from the current request context :rtype: ()->str|None
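A usage sketch; the factory simply closes over the header name, so a parser for a hypothetical request-id header looks like this:

    request_id_parser = generic_http_header_parser_for('X-Request-Id')
    # Inside a request context, request_id_parser() returns the stripped header
    # value, or None when the header is missing or empty.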
7,673
def copy_and_verify(path, source_path, sha256): if os.path.exists(path): if sha256 is None: print(.format( path, compute_sha256(path))) return path if not os.path.exists(source_path): return None unverified_path = path + dir_path = os.path.dirname(path) if not os.path.exists(dir_path): os.makedirs(dir_path) shutil.copy(source_path, unverified_path) if os.path.exists(unverified_path): if verify_file(unverified_path, sha256): os.rename(unverified_path, path) return path else: print(.format(source_path)) os.remove(unverified_path) return None
Copy a file to a given path from a given path, if it does not exist. After copying it, verify it integrity by checking the SHA-256 hash. Parameters ---------- path: str The (destination) path of the file on the local filesystem source_path: str The path from which to copy the file sha256: str The expected SHA-256 hex digest of the file, or `None` to print the digest of the file to the console Returns ------- str or None The path of the file if successfully downloaded otherwise `None`
7,674
def plot(self, vertices, show=False): if vertices.shape[1] != 2: raise ValueError() import matplotlib.pyplot as plt angle = np.degrees(self.angle(vertices)) plt.text(*vertices[self.origin], s=self.text, rotation=angle, ha=self.align[0], va=self.align[1], size=18) if show: plt.show()
Plot the text using matplotlib. Parameters -------------- vertices : (n, 2) float Vertices in space show : bool If True, call plt.show()
7,675
def run(self, *args, **kwargs):
    if self._source is not None:
        return self._source.run(*args, **kwargs)
    else:
        self.queue(*args, **kwargs)
        return self.wait()
Queue a first item to execute, then wait for the queue to be empty before returning. This should be the default way of starting any scraper.
7,676
def isconnected(mask):
    nodes_to_check = list((np.where(mask[0, :])[0])[1:])
    seen = [True] + [False] * (len(mask) - 1)
    while nodes_to_check and not all(seen):
        node = nodes_to_check.pop()
        reachable = np.where(mask[node, :])[0]
        for i in reachable:
            if not seen[i]:
                nodes_to_check.append(i)
                seen[i] = True
    return all(seen)
Checks that all nodes are reachable from the first node - i.e. that the graph is fully connected.
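A small sketch: in this hypothetical 3-node mask, node 2 cannot be reached from node 0, so the graph is reported as not fully connected.

    import numpy as np
    mask = np.array([[1, 1, 0],
                     [1, 1, 0],
                     [0, 0, 1]], dtype=bool)
    isconnected(mask)   # False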
7,677
def _shared_features(adense, bdense):
    a_indices = set(nonzero(adense))
    b_indices = set(nonzero(bdense))
    shared = list(a_indices & b_indices)
    diff = list(a_indices - b_indices)
    Ndiff = len(diff)
    return Ndiff
Number of features in ``adense`` that are also in ``bdense``.
7,678
def _log_prob_with_logsf_and_logcdf(self, y):
    logsf_y = self.log_survival_function(y)
    logsf_y_minus_1 = self.log_survival_function(y - 1)
    logcdf_y = self.log_cdf(y)
    logcdf_y_minus_1 = self.log_cdf(y - 1)
    big = tf.where(logsf_y < logcdf_y, logsf_y_minus_1, logcdf_y)
    small = tf.where(logsf_y < logcdf_y, logsf_y, logcdf_y_minus_1)
    return _logsum_expbig_minus_expsmall(big, small)
Compute log_prob(y) using log survival_function and cdf together.
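Written out, the identity being evaluated for an integer-valued y is P(Y = y) = SF(y - 1) - SF(y) = CDF(y) - CDF(y - 1); the code selects the survival-function pair when SF(y) < CDF(y) (the right tail) and the CDF pair otherwise, then computes log(exp(big) - exp(small)) in a numerically stable way:

    log P(Y = y) = log(exp(log SF(y-1)) - exp(log SF(y)))     if SF(y) < CDF(y)
                 = log(exp(log CDF(y)) - exp(log CDF(y-1)))   otherwise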
7,679
def getAPIKey(self, keyID=None): kcfg = self.getKeyConfig(keyID) if not in kcfg: raise ConfigException() return kcfg[]
Retrieve the NS1 API Key for the given keyID :param str keyID: optional keyID to retrieve, or current if not passed :return: API Key for the given keyID
7,680
def rollback(self): commands = [] commands.append() commands.append() self.device.run_commands(commands)
Implementation of NAPALM method rollback.
7,681
def set_user_password(environment, parameter, password): username = % (environment, parameter) return password_set(username, password)
Sets a user's password in the keyring storage
7,682
def flush_job(self, job_id, body=None, params=None): if job_id in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument .") return self.transport.perform_request( "POST", _make_path("_ml", "anomaly_detectors", job_id, "_flush"), params=params, body=body, )
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/ml-flush-job.html>`_ :arg job_id: The name of the job to flush :arg body: Flush parameters :arg advance_time: Advances time to the given value generating results and updating the model for the advanced interval :arg calc_interim: Calculates interim results for the most recent bucket or all buckets within the latency period :arg end: When used in conjunction with calc_interim, specifies the range of buckets on which to calculate interim results :arg skip_time: Skips time to the given value without generating results or updating the model for the skipped interval :arg start: When used in conjunction with calc_interim, specifies the range of buckets on which to calculate interim results
7,683
def create_channel(current): channel = Channel(name=current.input[], description=current.input[], owner=current.user, typ=15).save() with BlockSave(Subscriber): Subscriber.objects.get_or_create(user=channel.owner, channel=channel, can_manage=True, can_leave=False) current.input[] = channel.key show_channel(current) current.output.update({ : , : 201 })
Create a public channel. Can be a broadcast channel or normal chat room. Chat room and broadcast distinction will be made at user subscription phase. .. code-block:: python # request: { 'view':'_zops_create_channel', 'name': string, 'description': string, } # response: { 'description': string, 'name': string, 'no_of_members': int, 'member_list': [ {'name': string, 'is_online': bool, 'avatar_url': string, }], 'last_messages': [MSG_DICT] 'status': 'Created', 'code': 201, 'key': key, # of just created channel }
7,684
def get_conn(self): if self.conn is None: cnopts = pysftp.CnOpts() if self.no_host_key_check: cnopts.hostkeys = None cnopts.compression = self.compress conn_params = { : self.remote_host, : self.port, : self.username, : cnopts } if self.password and self.password.strip(): conn_params[] = self.password if self.key_file: conn_params[] = self.key_file if self.private_key_pass: conn_params[] = self.private_key_pass self.conn = pysftp.Connection(**conn_params) return self.conn
Returns an SFTP connection object
7,685
def weighted_pixel_signals_from_images(pixels, signal_scale, regular_to_pix, galaxy_image):
    pixel_signals = np.zeros((pixels,))
    pixel_sizes = np.zeros((pixels,))
    for regular_index in range(galaxy_image.shape[0]):
        pixel_signals[regular_to_pix[regular_index]] += galaxy_image[regular_index]
        pixel_sizes[regular_to_pix[regular_index]] += 1
    pixel_signals /= pixel_sizes
    pixel_signals /= np.max(pixel_signals)
    return pixel_signals ** signal_scale
Compute the (scaled) signal in each pixel, where the signal is the sum of its datas_-pixel fluxes. \ These pixel-signals are used to compute the effective regularization weight of each pixel. The pixel signals are scaled in the following ways: 1) Divided by the number of datas_-pixels in the pixel, to ensure all pixels have the same \ 'relative' signal (i.e. a pixel with 10 regular-pixels doesn't have x2 the signal of one with 5). 2) Divided by the maximum pixel-signal, so that all signals vary between 0 and 1. This ensures that the \ regularizations weights are defined identically for any datas_ units or signal-to-noise_map ratio. 3) Raised to the power of the hyper-parameter *signal_scale*, so the method can control the relative \ contribution regularization in different regions of pixelization. Parameters ----------- pixels : int The total number of pixels in the pixelization the regularization scheme is applied to. signal_scale : float A factor which controls how rapidly the smoothness of regularization varies from high signal regions to \ low signal regions. regular_to_pix : ndarray A 1D array mapping every pixel on the regular-grid to a pixel on the pixelization. galaxy_image : ndarray The image of the galaxy which is used to compute the weigghted pixel signals.
7,686
def odinweb_node_formatter(path_node): args = [path_node.name] if path_node.type: args.append(path_node.type.name) if path_node.type_args: args.append(path_node.type_args) return "{{{}}}".format(.join(args))
Format a node to be consumable by the `UrlPath.parse`.
7,687
def get_storyline(self, timezone_offset, first_date, start=0.0, end=0.0, track_points=False): headerscodeerrorurlhttps://api.moves-app.com/api/1.1/user/storyline/dailyjson title = % self.__class__.__name__ if {, } - set(self.service_scope): raise ValueError( % title) url_string = % self.endpoint parameters = self._process_dates(timezone_offset, first_date, start, end, title, track_points) if track_points: parameters[] = storyline_details = self._get_request(url_string, params=parameters) return storyline_details
a method to retrieve storyline details for a period of time NOTE: start and end must be no more than 30 days, 1 second apart NOTE: if track_points=True, start and end must be no more than 6 days, 1 second apart :param timezone_offset: integer with timezone offset from user profile details :param first_date: string with ISO date from user profile details firstDate :param start: [optional] float with starting datetime for daily summaries :param end: [optional] float with ending datetime for daily summaries :param track_points: [optional] boolean to provide detailed tracking of user movement :return: dictionary of response details with storyline list inside json key { 'headers': { ... }, 'code': 200, 'error': '', 'url': 'https://api.moves-app.com/api/1.1/user/storyline/daily' 'json': [ SEE RESPONSE in https://dev.moves-app.com/docs/api_storyline ] }
7,688
def check_proxy_setting(): try: http_proxy = os.environ[] except KeyError: return if not http_proxy.startswith(): match = re.match(, http_proxy) os.environ[] = % (match.group(2), match.group(3)) return
If the environmental variable 'HTTP_PROXY' is set, it will most likely be in one of these forms: proxyhost:8080 http://proxyhost:8080 urlllib2 requires the proxy URL to start with 'http://' This routine does that, and returns the transport for xmlrpc.
7,689
def convert_nonParametricSeismicSource(self, node): trt = node.attrib.get() rup_pmf_data = [] rups_weights = None if in node.attrib: tmp = node.attrib.get() rups_weights = numpy.array([float(s) for s in tmp.split()]) for i, rupnode in enumerate(node): probs = pmf.PMF(valid.pmf(rupnode[])) rup = RuptureConverter.convert_node(self, rupnode) rup.tectonic_region_type = trt rup.weight = None if rups_weights is None else rups_weights[i] rup_pmf_data.append((rup, probs)) nps = source.NonParametricSeismicSource( node[], node[], trt, rup_pmf_data) nps.splittable = not in node.attrib return nps
Convert the given node into a non parametric source object. :param node: a node with tag areaGeometry :returns: a :class:`openquake.hazardlib.source.NonParametricSeismicSource` instance
7,690
def check_meta_tag(domain, prefix, code): url = .format(domain) for proto in (, ): try: req = Request(proto + url, headers={: }) res = urlopen(req, timeout=2) if res.code == 200: content = str(res.read(100000)) res.close() return search_meta_tag(content, prefix, code) else: res.close() except: logger.debug(, exc_info=True) return False
Validates a domain by checking the existance of a <meta name="{prefix}" content="{code}"> tag in the <head> of the home page of the domain using either HTTP or HTTPs protocols. Returns true if verification suceeded.
7,691
def _create_PmtInf_node(self): ED = dict() ED[] = ET.Element("PmtInf") ED[] = ET.Element("PmtInfId") ED[] = ET.Element("PmtMtd") ED[] = ET.Element("BtchBookg") ED[] = ET.Element("NbOfTxs") ED[] = ET.Element("CtrlSum") ED[] = ET.Element("PmtTpInf") ED[] = ET.Element("SvcLvl") ED[] = ET.Element("Cd") ED[] = ET.Element("ReqdExctnDt") ED[] = ET.Element("Dbtr") ED[] = ET.Element("Nm") ED[] = ET.Element("DbtrAcct") ED[] = ET.Element("Id") ED[] = ET.Element("IBAN") ED[] = ET.Element("DbtrAgt") ED[] = ET.Element("FinInstnId") if in self._config: ED[] = ET.Element("BIC") ED[] = ET.Element("ChrgBr") return ED
Method to create the blank payment information nodes as a dict.
7,692
def validate(self, instance, value): if isinstance(value, string_types): if ( value.upper() not in VECTOR_DIRECTIONS or value.upper() in (, , , ) ): self.error(instance, value) value = VECTOR_DIRECTIONS[value.upper()][:2] return super(Vector2, self).validate(instance, value)
Check shape and dtype of vector validate also coerces the vector from valid strings (these include ZERO, X, Y, -X, -Y, EAST, WEST, NORTH, and SOUTH) and scales it to the given length.
7,693
def _rdsignal(fp, file_size, header_size, n_sig, bit_width, is_signed, cut_end): fp.seek(header_size) signal_size = file_size - header_size byte_width = int(bit_width / 8) dtype = str(byte_width) if is_signed: dtype = + dtype else: dtype = + dtype dtype = + dtype max_samples = int(signal_size / byte_width) max_samples = max_samples - max_samples % n_sig signal = np.empty(max_samples, dtype=dtype) markers = [] triggers = [] sample_num = 0 if cut_end: stop_byte = file_size - n_sig * byte_width + 1 while fp.tell() < stop_byte: chunk = fp.read(2) sample_num = _get_sample(fp, chunk, n_sig, dtype, signal, markers, triggers, sample_num) else: while True: chunk = fp.read(2) if not chunk: break sample_num = _get_sample(fp, chunk, n_sig, dtype, signal, markers, triggers, sample_num) signal = signal[:sample_num] signal = signal.reshape((-1, n_sig)) markers = np.array(markers, dtype=) triggers = np.array(triggers, dtype=) return signal, markers, triggers
Read the signal Parameters ---------- cut_end : bool, optional If True, enables reading the end of files which appear to terminate with the incorrect number of samples (ie. sample not present for all channels), by checking and skipping the reading the end of such files. Checking this option makes reading slower.
7,694
def lock(self, lease_time=-1):
    return self._encode_invoke(lock_lock_codec, invocation_timeout=MAX_SIZE,
                               lease_time=to_millis(lease_time),
                               thread_id=thread_id(),
                               reference_id=self.reference_id_generator.get_and_increment())
Acquires the lock. If a lease time is specified, lock will be released after this lease time. If the lock is not available, the current thread becomes disabled for thread scheduling purposes and lies dormant until the lock has been acquired. :param lease_time: (long), time to wait before releasing the lock (optional).
7,695
def distance(p0, p1):
    return math.sqrt(dist_2(p0[0], p0[1], p1[0], p1[1]))
Return the distance between two points. Parameters ---------- p0: (X,Y) ndarray Starting coordinate p1: (X,Y) ndarray Ending coordinate Returns ------- d: float distance See Also -------- dist_2
7,696
def plot_spikes(spikes, view=False, filename=None, title=None): t_values = [t for t, I, v, u, f in spikes] v_values = [v for t, I, v, u, f in spikes] u_values = [u for t, I, v, u, f in spikes] I_values = [I for t, I, v, u, f in spikes] f_values = [f for t, I, v, u, f in spikes] fig = plt.figure() plt.subplot(4, 1, 1) plt.ylabel("Potential (mv)") plt.xlabel("Time (in ms)") plt.grid() plt.plot(t_values, v_values, "g-") if title is None: plt.title("Izhikevichs spiking neuron model ({0!s})".format(title)) plt.subplot(4, 1, 2) plt.ylabel("Fired") plt.xlabel("Time (in ms)") plt.grid() plt.plot(t_values, f_values, "r-") plt.subplot(4, 1, 3) plt.ylabel("Recovery (u)") plt.xlabel("Time (in ms)") plt.grid() plt.plot(t_values, u_values, "r-") plt.subplot(4, 1, 4) plt.ylabel("Current (I)") plt.xlabel("Time (in ms)") plt.grid() plt.plot(t_values, I_values, "r-o") if filename is not None: plt.savefig(filename) if view: plt.show() plt.close() fig = None return fig
Plots the trains for a single spiking neuron.
7,697
def _fix_review_dates(self, item): for date_field in [, , ]: if date_field in item.keys(): date_ts = item[date_field] item[date_field] = unixtime_to_datetime(date_ts).isoformat() if in item.keys(): for patch in item[]: pdate_ts = patch[] patch[] = unixtime_to_datetime(pdate_ts).isoformat() if in patch: for approval in patch[]: adate_ts = approval[] approval[] = unixtime_to_datetime(adate_ts).isoformat() if in item.keys(): for comment in item[]: cdate_ts = comment[] comment[] = unixtime_to_datetime(cdate_ts).isoformat()
Convert dates so ES detect them
7,698
def add_module_definition(self, module_definition):
    if module_definition.identity not in self._module_definitions.keys():
        self._module_definitions[module_definition.identity] = module_definition
    else:
        raise ValueError("{} has already been defined".format(module_definition.identity))
Add a ModuleDefinition to the document
7,699
def lookup_field_orderable(self, field):
    try:
        self.model._meta.get_field_by_name(field)
        return True
    except Exception:
        return False
Returns whether the passed in field is sortable or not, by default all 'raw' fields, that is fields that are part of the model are sortable.