387,100
def _make_entities_from_ids(entity_cls, entity_objs_and_ids, server_config): return [ _make_entity_from_id(entity_cls, entity_or_id, server_config) for entity_or_id in entity_objs_and_ids ]
Given an iterable of entities and/or IDs, return a list of entities. :param entity_cls: An :class:`Entity` subclass. :param entity_objs_and_ids: An iterable of :class:`nailgun.entity_mixins.Entity` objects and/or entity IDs. All of the entities in this iterable should be of type ``entity_cls``. :returns: A list of ``entity_cls`` objects.
387,101
def find(self, name):
    if not isinstance(name, basestring):
        raise TypeError("name can only be an instance of type basestring")
    return_data = self._call("find", in_p=[name])
    return_data = IExtPack(return_data)
    return return_data
Returns the extension pack with the specified name if found. in name of type str The name of the extension pack to locate. return return_data of type :class:`IExtPack` The extension pack if found. raises :class:`VBoxErrorObjectNotFound` No extension pack matching @a name was found.
387,102
def _compute_mean(self, C, mag, rhypo, hypo_depth, mean, idx):
    # Coefficient-table keys were lost in extraction; 'C1'..'C6' are assumed placeholders.
    mean[idx] = (C['C1'] + C['C2'] * mag +
                 C['C3'] * np.log(rhypo[idx] + C['C4'] * np.exp(C['C5'] * mag)) +
                 C['C6'] * hypo_depth)
Compute mean value according to equations 10 and 11 page 226.
387,103
def delete(self, client=None): return self.taskqueue.delete_task(self.id, client=client)
Deletes a task from Task Queue. :type client: :class:`gcloud.taskqueue.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the task's taskqueue. :rtype: :class:`Task` :returns: The task that was just deleted. :raises: :class:`gcloud.exceptions.NotFound` (propagated from :meth:`gcloud.taskqueue.taskqueue.Taskqueue.delete_task`).
387,104
def RegisterProtoDescriptors(db, *additional_descriptors):
    db.RegisterFileDescriptor(artifact_pb2.DESCRIPTOR)
    db.RegisterFileDescriptor(client_pb2.DESCRIPTOR)
    db.RegisterFileDescriptor(config_pb2.DESCRIPTOR)
    db.RegisterFileDescriptor(cron_pb2.DESCRIPTOR)
    db.RegisterFileDescriptor(flow_pb2.DESCRIPTOR)
    db.RegisterFileDescriptor(hunt_pb2.DESCRIPTOR)
    db.RegisterFileDescriptor(output_plugin_pb2.DESCRIPTOR)
    db.RegisterFileDescriptor(reflection_pb2.DESCRIPTOR)
    db.RegisterFileDescriptor(stats_pb2.DESCRIPTOR)
    db.RegisterFileDescriptor(user_pb2.DESCRIPTOR)
    db.RegisterFileDescriptor(vfs_pb2.DESCRIPTOR)
    db.RegisterFileDescriptor(checks_pb2.DESCRIPTOR)
    db.RegisterFileDescriptor(deprecated_pb2.DESCRIPTOR)
    db.RegisterFileDescriptor(flows_pb2.DESCRIPTOR)
    db.RegisterFileDescriptor(jobs_pb2.DESCRIPTOR)
    db.RegisterFileDescriptor(osquery_pb2.DESCRIPTOR)
    db.RegisterFileDescriptor(wrappers_pb2.DESCRIPTOR)
    for d in additional_descriptors:
        db.RegisterFileDescriptor(d)
Registers all API-related descriptors in a given symbol DB.
387,105
def get_condarc_channels(self, normalize=False, conda_url='https://conda.anaconda.org', channels=None):
    # String literals below were lost in extraction; keys and defaults are assumed from context.
    default_channels = self.load_rc(system=True).get('channels', self.DEFAULT_CHANNELS)
    normalized_channels = []
    if channels is None:
        condarc = self.load_rc()
        channels = condarc.get('channels')
        if channels is None:
            channels = []
    if normalize:
        template = '{0}/{1}' if conda_url[-1] != '/' else '{0}{1}'
        for channel in channels:
            if channel == 'defaults':
                normalized_channels += default_channels
            elif channel.startswith('http'):
                normalized_channels.append(channel)
            else:
                normalized_channels.append(template.format(conda_url, channel))
        channels = normalized_channels
    return channels
Return all the channel urls defined in .condarc. If no condarc file is found, use the default channels. The `default_channel_alias` key is ignored and only the anaconda client `url` key is used.
387,106
def layout(self, dimensions=None, **kwargs): dimensions = self._valid_dimensions(dimensions) if len(dimensions) == self.ndims: with item_check(False): return NdLayout(self, **kwargs).reindex(dimensions) return self.groupby(dimensions, container_type=NdLayout, **kwargs)
Group by supplied dimension(s) and lay out groups Groups data by supplied dimension(s) laying the groups along the dimension(s) out in a NdLayout. Args: dimensions: Dimension(s) to group by Returns: NdLayout with supplied dimensions
387,107
def cli(env, package_keyname): manager = ordering.OrderingManager(env.client) table = formatting.Table(COLUMNS) locations = manager.package_locations(package_keyname) for region in locations: for datacenter in region[]: table.add_row([ datacenter[][], datacenter[][], region[], region[] ]) env.fout(table)
List Datacenters a package can be ordered in. Use the location Key Name to place orders
387,108
def _check_cargs(self, cargs):
    if not all(isinstance(i, tuple) and
               isinstance(i[0], ClassicalRegister) and
               isinstance(i[1], int) for i in cargs):
        raise QiskitError("carg not (ClassicalRegister, int) tuple")
    if not all(self.has_register(i[0]) for i in cargs):
        raise QiskitError("register not in this circuit")
    for clbit in cargs:
        clbit[0].check_range(clbit[1])
Raise exception if clbit is not in this circuit or bad format.
387,109
def combobox_set_model_from_list(cb, items):
    cb.clear()
    model = gtk.ListStore(str)
    for i in items:
        model.append([i])
    cb.set_model(model)
    if type(cb) == gtk.ComboBoxEntry:
        cb.set_text_column(0)
    elif type(cb) == gtk.ComboBox:
        cell = gtk.CellRendererText()
        cb.pack_start(cell, True)
        cb.add_attribute(cell, 'text', 0)  # attribute name lost in extraction; 'text' is the standard choice
Set up a ComboBox or ComboBoxEntry based on a list of strings.
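A hypothetical PyGTK 2 usage sketch (widget construction assumed; a running GTK main loop is needed to actually display anything):

import gtk

cb = gtk.ComboBox()
combobox_set_model_from_list(cb, ["red", "green", "blue"])
cb.set_active(0)  # select the first entry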
387,110
def fastaIterator(fn, useMutableString=False, verbose=False): fh = fn if type(fh).__name__ == "str": fh = open(fh) if verbose: try: pind = __build_progress_indicator(fh) except ProgressIndicatorError as e: sys.stderr.write("Warning: unable to show progress for stream. " + "Reason: " + str(e)) verbose = False prev_line = None while True: seqHeader = __read_seq_header(fh, prev_line) name = seqHeader[1:].strip() seq_data, prev_line = __read_seq_data(fh) if verbose: pind.done = fh.tell() pind.showProgress(to_strm=sys.stderr) yield Sequence(name, seq_data, useMutableString) if prev_line == "": break
A generator function which yields Sequence objects from a fasta-format file or stream. :param fn: a file-like stream or a string; if this is a string, it's treated as a filename, else it's treated as a file-like object, which must have a readline() method. :param useMutableString: if True, construct sequences from lists of chars, rather than python string objects, to allow more efficient editing. Use with caution. :param verbose: if True, output additional status messages to stderr about progress
387,111
def substitute_variables(cls, configuration, value, ref): if isinstance(value, str): while True: match = cls.REF_PATTERN.search(value) if match is None: break path = os.path.join(os.path.dirname(ref), match.group()) try: value = value.replace( match.group(0), str(util.get_value(configuration, path))) except KeyError: raise KeyError(path) while True: match = cls.VAR_PATTERN.search(value) if match is None: break value = value.replace( match.group(0), str(util.get_value(cls.VARIABLES, match.group(), ))) return value
Substitute variables in `value` from `configuration` where any path reference is relative to `ref`. Parameters ---------- configuration : dict configuration (required to resolve intra-document references) value : value to resolve substitutions for ref : str path to `value` in the `configuration` Returns ------- value : value after substitution
387,112
def plot_best_worst_fits(assignments_df, data, modality_col=, score=): ncols = 2 nrows = len(assignments_df.groupby(modality_col).groups.keys()) fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(nrows*4, ncols*6)) axes_iter = axes.flat fits = , for modality, df in assignments_df.groupby(modality_col): df = df.sort_values(score) color = MODALITY_TO_COLOR[modality] for fit in fits: if fit == : ids = df[][-10:] else: ids = df[][:10] fit_psi = data[ids] tidy_fit_psi = fit_psi.stack().reset_index() tidy_fit_psi = tidy_fit_psi.rename(columns={: , : , 0: }) if tidy_fit_psi.empty: continue ax = six.next(axes_iter) violinplot(x=, y=, data=tidy_fit_psi, color=color, ax=ax) ax.set(title=.format(fit, score, modality), xticks=[]) sns.despine() fig.tight_layout()
Violinplots of the highest and lowest scoring of each modality
387,113
def build_opener(self): http_handler = urllib2.HTTPHandler() if util.empty(self.transport.proxy_url): return urllib2.build_opener(http_handler) proxy_handler = urllib2.ProxyHandler( {self.transport.proxy_url[:4]: self.transport.proxy_url}) return urllib2.build_opener(http_handler, proxy_handler)
Builds url opener, initializing proxy. @return: OpenerDirector
387,114
def int_filter(text):
    res = list()
    for char in text:
        if char.isdigit():
            res.append(char)
    return int("".join(res))
Extract integer from text. **Chinese documentation** Extract the integers contained in the text.
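For example, the filter keeps digit characters only and joins them into a single integer:

print(int_filter("Price: 1,234 USD"))  # -> 1234
print(int_filter("room A-301"))        # -> 301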
387,115
def regularrun(
    shell,
    prompt_template="default",
    aliases=None,
    envvars=None,
    extra_commands=None,
    speed=1,
    test_mode=False,
    commentecho=False,
):
    loop_again = True
    command_string = regulartype(prompt_template)
    if command_string == TAB:
        loop_again = False
        return loop_again
    run_command(
        command_string,
        shell,
        aliases=aliases,
        envvars=envvars,
        extra_commands=extra_commands,
        test_mode=test_mode,
    )
    return loop_again
Allow user to run their own live commands until CTRL-Z is pressed again.
387,116
def delete_device(name, safety_on=True): hostname-101.mycompany.comhostname-101hostname-1 config = _get_vistara_configuration() if not config: return False access_token = _get_oath2_access_token(config[], config[]) if not access_token: return query_string = .format(name) devices = _search_devices(query_string, config[], access_token) if not devices: return "No devices found" device_count = len(devices) if safety_on and device_count != 1: return "Expected to delete 1 device and found {0}. "\ "Set safety_on=False to override.".format(device_count) delete_responses = [] for device in devices: device_id = device[] log.debug(device_id) delete_response = _delete_resource(device_id, config[], access_token) if not delete_response: return False delete_responses.append(delete_response) return delete_responses
Deletes a device from Vistara based on DNS name or partial name. By default, delete_device will only perform the delete if a single host is returned. Set safety_on=False to delete all matches (up to default API search page size) CLI Example: .. code-block:: bash salt-run vistara.delete_device 'hostname-101.mycompany.com' salt-run vistara.delete_device 'hostname-101' salt-run vistara.delete_device 'hostname-1' safety_on=False
387,117
def get_folder(service_instance, datacenter, placement, base_vm_name=None): log.trace() if base_vm_name: vm_object = get_vm_by_property(service_instance, base_vm_name, vm_properties=[]) vm_props = salt.utils.vmware.get_properties_of_managed_object(vm_object, properties=[]) if in vm_props: folder_object = vm_props[] else: raise salt.exceptions.VMwareObjectRetrievalError(.join([ , ])) elif in placement: folder_objects = salt.utils.vmware.get_folders(service_instance, [placement[]], datacenter) if len(folder_objects) > 1: raise salt.exceptions.VMwareMultipleObjectsError(.join([ , .format(placement[])])) folder_object = folder_objects[0] elif datacenter: datacenter_object = salt.utils.vmware.get_datacenter(service_instance, datacenter) dc_props = salt.utils.vmware.get_properties_of_managed_object(datacenter_object, properties=[]) if in dc_props: folder_object = dc_props[] else: raise salt.exceptions.VMwareObjectRetrievalError() return folder_object
Returns a Folder Object service_instance Service instance object datacenter Name of the datacenter placement Placement dictionary base_vm_name Existing virtual machine name (for cloning)
387,118
def random_jpath(depth=3):
    chunks = []
    while depth > 0:
        length = random.randint(5, 15)
        ident = ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase) for _ in range(length))
        if random.choice((True, False)):
            index = random.randint(0, 10)
            ident = "{:s}[{:d}]".format(ident, index)
        chunks.append(ident)
        depth -= 1
    return ".".join(chunks)
Generate random JPath with given node depth.
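A usage sketch (the printed paths are random; the shapes shown are illustrative):

import random, string

random.seed(0)
print(random_jpath())         # e.g. 'KqTmvB[3].XwpLnRcdYz.GsHj' -- three dot-separated nodes
print(random_jpath(depth=5))  # five nodes, some carrying a random [index] suffix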
387,119
def reinit(self):
    log.debug("Reinitializing socket connection for %s:%d" % (self.host, self.port))
    if self._sock:
        self.close()
    try:
        self._sock = socket.create_connection((self.host, self.port), self.timeout)
    except socket.error:
        # Log message lost in extraction; assumed wording.
        log.exception('Unable to connect to %s:%d' % (self.host, self.port))
        self._raise_connection_error()
Re-initialize the socket connection: close the current socket (if open) and start a fresh connection. Raises ConnectionError on error.
387,120
def _setup_freqs(self):
    # Header key lost in extraction; b'foff' (channel bandwidth) assumed from the filterbank format.
    if self.header[b'foff'] > 0:
        self.f_start = self.f_begin + self.chan_start_idx * abs(self.header[b'foff'])
        self.f_stop = self.f_begin + self.chan_stop_idx * abs(self.header[b'foff'])
    else:
        self.f_start = self.f_end - self.chan_stop_idx * abs(self.header[b'foff'])
        self.f_stop = self.f_end - self.chan_start_idx * abs(self.header[b'foff'])
Updating frequency borders from channel values
387,121
def make_optimize_tensor(self, model, session=None, var_list=None, **kwargs):
    session = model.enquire_session(session)
    objective = model.objective
    full_var_list = self._gen_var_list(model, var_list)
    with session.as_default():
        minimize = self.optimizer.minimize(objective, var_list=full_var_list, **kwargs)
        model.initialize(session=session)
        self._initialize_optimizer(session)
    return minimize
Make Tensorflow optimization tensor. This method builds optimization tensor and initializes all necessary variables created by optimizer. :param model: GPflow model. :param session: Tensorflow session. :param var_list: List of variables for training. :param kwargs: Dictionary of extra parameters passed to Tensorflow optimizer's minimize method. :return: Tensorflow optimization tensor or operation.
387,122
def get_service_definitions(self, service_type=None): route_values = {} if service_type is not None: route_values[] = self._serialize.url(, service_type, ) response = self._send(http_method=, location_id=, version=, route_values=route_values) return self._deserialize(, self._unwrap_collection(response))
GetServiceDefinitions. [Preview API] :param str service_type: :rtype: [ServiceDefinition]
387,123
def parse_line(self, line, lineno):
    # Prefix literal lost in extraction; '[taskcluster' assumed from the attribute it sets.
    if line.startswith('[taskcluster'):
        self.is_taskcluster = True
    if self.is_taskcluster:
        line = re.sub(self.RE_TASKCLUSTER_NORMAL_PREFIX, "", line)
    if self.is_error_line(line):
        self.add(line, lineno)
Check a single line for an error. Keeps track of the line number.
387,124
def parse_commandline(argv): ap = ArgumentParser( prog=, description=DESCRIPTION, epilog=EPILOG, ) ap.add_argument( , action=, version=.format(version), help="shows version and exits" ) ap.add_argument( , metavar=, help="original file" ) ap.add_argument( , metavar=, help="changed file" ) g_html = ap.add_argument_group( , "Without these settings, only the `wdiff` output is returned (with INS " "and DEL tags). Here are some options to wrap the output in a HTML " "document." ) g_html.add_argument( , , action=, help="wrap the diff with a HTML document" ) g_html.add_argument( , , action=, help="allow INS and DEL tags to span linebraks" ) g_html.add_argument( , , action=, help="replace line breaks with BR tags" ) g_context = ap.add_argument_group( , "With these options you can add additional information to the HTML " "output (means these only work alongside the `--wrap-with-html` option)." ) g_context.add_argument( , , metavar=, help="add a revision tag or version number to the output" ) x_stamp = g_context.add_mutually_exclusive_group() x_stamp.add_argument( , , action=, help="add a date to the output (UTC now)" ) x_stamp.add_argument( , , action=, help="add date and time to the output (UTC now)" ) g_files = ap.add_argument_group( , "Instead of using the default templates, you can use your own files. " "These only work alongside the `--wrap-with-html` option" ) g_files.add_argument( , , type=FileType(), metavar=, help="load the Jinja2 template from this file" ) g_files.add_argument( , , type=FileType(), metavar=, help="load CSS from this file" ) g_files.add_argument( , , type=FileType(), metavar=, help="load Javascript from this file" ) g_files.add_argument( , , type=FileType(), metavar=, help="load another Javascript from this file (like Zepto)" ) args = ap.parse_args(argv) if not args.wrap_with_html: for group in (g_context, g_files): args_to_check = [opt.dest for opt in group._group_actions] if any([getattr(args, attr) for attr in args_to_check]): msg = "the options require that `--wrap-with-html` is used" ap.error(msg) return args
Returns the arguments parsed from *argv* as a namespace.
387,125
def on_post(self): request = self.environ[] try: return self.process_request(request) except ClientError as exc: return self.on_client_error(exc) except BadGateway as exc: return self.on_bad_gateway(exc) except InvalidConfig: raise except Exception as exc: logging.error(, exc_info=exc) return self.on_internal_error()
Extracts the request, feeds the module, and returns the response.
387,126
def random_word(length, dictionary=False):
    # Several string literals were lost in extraction; the dictionary path,
    # regex and letter sets below are assumed reconstructions.
    if dictionary:
        try:
            with open('/usr/share/dict/words') as fp:
                words = [word.lower()[:-1] for word in fp.readlines()
                         if re.match('[a-z]{' + str(length) + '}$', word)]
            return random.choice(words)
        except FileNotFoundError:
            pass
    vowels = list('aeiou')
    consonants = list('bcdfghjklmnpqrstvwxyz')
    pairs = [(random.choice(consonants), random.choice(vowels)) for _ in range(length // 2 + 1)]
    return ''.join([l for p in pairs for l in p])[:length]
Creates random lowercase words from dictionary or by alternating vowels and consonants The second method chooses from 85**length words. The dictionary method chooses from 3000--12000 words for 3<=length<=12 (though this of course depends on the available dictionary) :param length: word length :param dictionary: Try reading from dictionary, else fall back to artificial words
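A usage sketch of the artificial (non-dictionary) branch; the printed word is illustrative:

import random, re

random.seed(1)
print(random_word(6))  # e.g. 'bapode' -- consonant/vowel pairs, truncated to 6 characters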
387,127
def mkdir_command(endpoint_plus_path):
    endpoint_id, path = endpoint_plus_path
    client = get_client()
    autoactivate(client, endpoint_id, if_expires_in=60)
    res = client.operation_mkdir(endpoint_id, path=path)
    formatted_print(res, text_format=FORMAT_TEXT_RAW, response_key="message")
Executor for `globus mkdir`
387,128
def info(self, *msg):
    label = colors.blue("INFO")
    self._msg(label, *msg)
Prints a message with an info prefix
387,129
def optimize(population, toolbox, ngen, archive=None, stats=None, verbose=False, history=None): start = time.time() if history is not None: history.update(population) logbook = tools.Logbook() logbook.header = [, , ] + (stats.fields if stats else []) render_fitness(population, toolbox, history) record_information(population, stats, start, archive, logbook, verbose) for gen in range(1, ngen + 1): offspring = generate_next_population(population, toolbox) render_fitness(offspring, toolbox, history) population = offspring record_information(population, stats, start, archive, logbook, verbose) return population, logbook, history
Optimize a population of individuals. :param population: :param toolbox: :param ngen: :param archive: :param stats: :param verbose: :param history: :return:
387,130
def unimapping(arg, level): if not isinstance(arg, collections.Mapping): raise TypeError( .format(type(arg).__name__) ) result = [] for i in arg.items(): result.append( pretty_spaces(level) + u.join(map(functools.partial(convert, level=level), i)) ) string = join_strings(result, level) if level is not None: string += pretty_spaces(level - 1) return u.format(string)
Mapping object to unicode string. :type arg: collections.Mapping :param arg: mapping object :type level: int :param level: deep level :rtype: unicode :return: mapping object as unicode string
387,131
def store(self, text, tier):
    store = self._stores.get(tier, None)
    if not store:
        store = AutoSplittingFile(self._dir, self._lines_per_store, self._file_name, tier)
        self._stores[tier] = store
    store.write(text)
Writes text to the underlying store mapped at tier. If the store doesn't exist yet, it creates it. :param text: the text to write :param tier: the tier used to identify the store :return:
387,132
def check_file_for_tabs(cls, filename, verbose=True):
    filename = path_expand(filename)
    file_contains_tabs = False
    with open(filename, 'r') as f:
        lines = f.read().split("\n")
    line_no = 1
    for line in lines:
        if "\t" in line:
            file_contains_tabs = True
            location = [i for i in range(len(line)) if line.startswith('\t', i)]
            if verbose:
                print("Tab found in line", line_no, "and column(s)", location)
        line_no += 1
    return file_contains_tabs
Identifies whether the file contains tabs and returns True if it does. It also prints the lines and columns where tabs occur. If verbose is set to False, the locations are not printed. :param verbose: if true prints issues :param filename: the filename :type filename: str :rtype: True if there are tabs in the file
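A call sketch, with `FileUtil` standing in for whatever class actually owns this classmethod:

if FileUtil.check_file_for_tabs("config.yaml"):
    print("Tabs found -- replace them with spaces before parsing as YAML")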
387,133
def remove_repositories(repositories, default_repositories):
    repos = []
    for repo in repositories:
        if repo in default_repositories:
            repos.append(repo)
    return repos
Remove non-default repositories.
387,134
def combine_mv_and_lv(mv, lv): combined = { c: pd.concat([mv[c], lv[c]], axis=0) for c in list(lv.keys()) } combined[] = mv[] return combined
Combine MV and LV grid topology in PyPSA format
387,135
def is_deletion(self): return (len(self.ref) > len(self.alt)) and self.ref.startswith(self.alt)
Does this variant represent the deletion of nucleotides from the reference genome?
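A minimal self-contained illustration of the test, using a stand-in class with the same ref/alt fields:

class Variant:
    def __init__(self, ref, alt):
        self.ref, self.alt = ref, alt

    def is_deletion(self):
        return (len(self.ref) > len(self.alt)) and self.ref.startswith(self.alt)

print(Variant("ATG", "A").is_deletion())  # True: 'TG' is deleted after the shared 'A'
print(Variant("A", "ATG").is_deletion())  # False: this is an insertion
print(Variant("AT", "GC").is_deletion())  # False: a substitution, not a deletion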
387,136
def continue_abort(self, root_pipeline_key, cursor=None, max_to_notify=_MAX_ABORTS_TO_BEGIN): if not isinstance(root_pipeline_key, db.Key): root_pipeline_key = db.Key(root_pipeline_key) url=self.fanout_abort_handler_path, params=dict(root_pipeline_key=root_pipeline_key, cursor=query.cursor()))) if task_list: try: taskqueue.Queue(self.queue_name).add(task_list) except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError): pass
Sends the abort signal to all children for a root pipeline. Args: root_pipeline_key: db.Key of the root pipeline to abort. cursor: The query cursor for enumerating _PipelineRecords when inserting tasks to cause child pipelines to terminate. max_to_notify: Used for testing.
387,137
def from_array(array):
    if array is None or not array:
        return None
    assert_type_or_raise(array, dict, parameter_name="array")
    data = {}
    # Field names lost in extraction; 'label' and 'amount' assumed from the Telegram LabeledPrice type.
    data['label'] = u(array.get('label'))
    data['amount'] = int(array.get('amount'))
    instance = LabeledPrice(**data)
    instance._raw = array
    return instance
Deserialize a new LabeledPrice from a given dictionary. :return: new LabeledPrice instance. :rtype: LabeledPrice
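A hypothetical round trip, assuming the Telegram-style 'label'/'amount' fields restored above map to attributes of the same name:

price = LabeledPrice.from_array({"label": "Subscription", "amount": 500})
print(price.label, price.amount)  # 'Subscription' 500 (amount in the smallest currency unit)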
387,138
def guinierplot(*args, **kwargs):
    ret = plotsascurve(*args, **kwargs)
    # Scale names lost in extraction; a Guinier plot is log(I) vs q^2, hence the assumed values.
    plt.xscale('power', exponent=2)
    plt.yscale('log')
    return ret
Make a Guinier plot. This is simply a wrapper around plotsascurve().
387,139
def handle_exists(self, spec, checkable):
    if not isinstance(spec, bool):
        # Message text lost in extraction; assumed wording.
        msg = '$exists requires a boolean operand'
        raise InvalidQuery(msg)
    return spec
The implementation of this one is weird. By the time the {'$exists': True} spec gets to the dispatched handler, the key presumably exists. So we just parrot the assertion the spec makes. If it asserts the key exists, we return True. If it asserts the key doesn't exist, we return False, because that can't be true.
387,140
def find(self, which, param):
    for i, layer in enumerate(self.layers):
        if which == i or which == layer.name:
            return layer.find(param)
    raise KeyError(which)
Get a parameter from a layer in the network. Parameters ---------- which : int or str The layer that owns the parameter to return. If this is an integer, then 0 refers to the input layer, 1 refers to the first hidden layer, 2 to the second, and so on. If this is a string, the layer with the corresponding name, if any, will be used. param : int or str Name of the parameter to retrieve from the specified layer, or its index in the parameter list of the layer. Raises ------ KeyError If there is no such layer, or if there is no such parameter in the specified layer. Returns ------- param : Theano shared variable A shared parameter variable from the indicated layer.
387,141
def user_filter(config, message, fasnick=None, *args, **kw):
    fasnick = kw.get('fasnick', fasnick)
    if fasnick:
        return fasnick in fmn.rules.utils.msg2usernames(message, **config)
A particular user Use this rule to include messages that are associated with a specific user.
387,142
def remove_core_element(self, model): gv_name = model if self.global_variable_is_editable(gv_name, "Deletion"): try: self.model.global_variable_manager.delete_variable(gv_name) except AttributeError as e: logger.warning("The respective global variable couldn't be removed. -> {0}" "".format(e, model))
Remove respective core element of handed global variable name :param str model: String that is the key/gv_name of core element which should be removed :return:
387,143
def get_tree(cls, *condition, **kwargs):
    # Keyword names, defaults and mode literals below were lost in extraction
    # and are reconstructed from the docstring.
    parent_field = kwargs.pop('parent_field', 'parent')
    parent = kwargs.pop('parent', None)
    parent_order_by = kwargs.pop('parent_order_by', None)
    current = kwargs.pop('current', None)
    order_by = kwargs.pop('order_by', None)
    id_field = kwargs.pop('id_field', 'id')
    mode = kwargs.pop('mode', 'wide')
    if mode not in ('wide', 'deep'):
        raise Exception("mode parameter should be 'wide' or 'deep', but '{}' found.".format(mode))
    def _f(parent):
        query = cls.filter(cls.c[parent_field] == parent, *condition)
        if order_by is not None:
            query.order_by(order_by)
        for row in query:
            if mode == 'wide':
                yield row
            for _row in _f(getattr(row, id_field)):
                yield _row
            if mode == 'deep':
                yield row
    if current:
        query = cls.filter(cls.c[id_field] == current)
    else:
        if is_condition(parent):
            query = cls.filter(parent)
        else:
            query = cls.filter(cls.c[parent_field] == parent)
    if parent_order_by is not None:
        query.order_by(parent_order_by)
    for row in query:
        if mode == 'wide':
            yield row
        for r in _f(getattr(row, id_field)):
            yield r
        if mode == 'deep':
            yield row
parent is root parent value, default is None current is current value condition is extra condition for select root records mode is search method, value is 'wide' or 'deep'
387,144
def get_is_value(tag):
    # VR literals lost in extraction; 'OB'/'UN' assumed (raw bytes under an implicit transfer syntax).
    if tag.VR == 'OB' or tag.VR == 'UN':
        value = int(tag.value.decode("ascii").replace(" ", ""))
        return value
    return int(tag.value)
Getter for IS (integer string) data that also works with an implicit transfer syntax. :param tag: the tag to read
387,145
def get_obs_route(value):
    # Punctuation literals and messages were lost in extraction; reconstructed per the
    # obs-route grammar in the docstring (commas, '@' components and a closing ':').
    obs_route = ObsRoute()
    while value and (value[0] == ',' or value[0] in CFWS_LEADER):
        if value[0] in CFWS_LEADER:
            token, value = get_cfws(value)
            obs_route.append(token)
        elif value[0] == ',':
            obs_route.append(ListSeparator)
            value = value[1:]
    if not value or value[0] != '@':
        raise errors.HeaderParseError(
            "expected obs-route domain but found '{}'".format(value))
    obs_route.append(RouteComponentMarker)
    token, value = get_domain(value[1:])
    obs_route.append(token)
    while value and value[0] == ',':
        obs_route.append(ListSeparator)
        value = value[1:]
        if not value:
            break
        if value[0] in CFWS_LEADER:
            token, value = get_cfws(value)
            obs_route.append(token)
        if value[0] == '@':
            obs_route.append(RouteComponentMarker)
            token, value = get_domain(value[1:])
            obs_route.append(token)
    if not value:
        raise errors.HeaderParseError("end of header while parsing obs-route")
    if value[0] != ':':
        raise errors.HeaderParseError(
            "expected ':' marking end of obs-route but found '{}'".format(value))
    obs_route.append(ValueTerminal(':', 'end-of-obs-route-marker'))
    return obs_route, value[1:]
obs-route = obs-domain-list ":" obs-domain-list = *(CFWS / ",") "@" domain *("," [CFWS] ["@" domain]) Returns an obs-route token with the appropriate sub-tokens (that is, there is no obs-domain-list in the parse tree).
387,146
def _recv(self):
    prefix = self._read(self.prefix_len)
    msg = self._read(self._extract_len(prefix))
    return prefix + msg
Receives and returns a message from Scratch
387,147
def extract_notebook_metatab(nb_path: Path): from metatab.rowgenerators import TextRowGenerator import nbformat with nb_path.open() as f: nb = nbformat.read(f, as_version=4) lines = .join([] + [get_cell_source(nb, tag) for tag in [, , ]]) doc = MetapackDoc(TextRowGenerator(lines)) doc[].get_or_new_term().value = get_cell_source(nb, ).strip().strip() doc[].get_or_new_term().value = get_cell_source(nb, ) doc[].get_or_new_term().value = get_cell_source(nb, ) return doc
Extract the metatab lines from a notebook and return a Metapack doc
387,148
def read_from_cache(self, domains=None):
    logger.info('Reading extraction results from cache')  # log message lost in extraction; assumed
    if domains is not None and isinstance(domains, list):
        dfs = {domain: self.read_entry(domain) for domain in domains}
    else:
        dfs = {name: self.read_entry(name) for name in os.listdir(self.EXTRACTION_CACHE_PATH)}
    return dfs
Returns: dict: Dict[str, DataFrame]
387,149
def _python_rpath(self):
    # Body lost in extraction; a plausible reconstruction given the docstring
    # (Windows keeps the interpreter under Scripts/, POSIX under bin/).
    if sys.platform == 'win32':
        return os.path.join('Scripts', 'python.exe')
    return os.path.join('bin', 'python')
The relative path (from environment root) to python.
387,150
def compute_checksum(self, payload_offset: Optional[int]=None):
    # Field-name and format literals were lost in extraction; reconstructed from the
    # WARC spec ('WARC-Block-Digest', 'WARC-Payload-Digest', 'Content-Length').
    if not self.block_file:
        self.fields['Content-Length'] = '0'
        return
    block_hasher = hashlib.sha1()
    payload_hasher = hashlib.sha1()
    with wpull.util.reset_file_offset(self.block_file):
        if payload_offset is not None:
            data = self.block_file.read(payload_offset)
            block_hasher.update(data)
        while True:
            data = self.block_file.read(4096)
            if data == b'':
                break
            block_hasher.update(data)
            payload_hasher.update(data)
        content_length = self.block_file.tell()
    content_hash = block_hasher.digest()
    self.fields['WARC-Block-Digest'] = 'sha1:{0}'.format(
        base64.b32encode(content_hash).decode())
    if payload_offset is not None:
        payload_hash = payload_hasher.digest()
        self.fields['WARC-Payload-Digest'] = 'sha1:{0}'.format(
            base64.b32encode(payload_hash).decode())
    self.fields['Content-Length'] = str(content_length)
Compute and add the checksum data to the record fields. This function also sets the content length.
387,151
def _recipients_from_cloud(self, recipients, field=None):
    recipients_data = []
    for recipient in recipients:
        recipients_data.append(self._recipient_from_cloud(recipient, field=field))
    return Recipients(recipients_data, parent=self, field=field)
Transform a recipient from cloud data to object data
387,152
def _get_event_id(object_type: str) -> str:
    key = _keys.event_counter(object_type)
    DB.watch(key, pipeline=True)
    count = DB.get_value(key)
    DB.increment(key)
    DB.execute()
    if count is None:
        count = 0
    # Format string lost in extraction; a '<type>_event_<counter>' shape is assumed.
    return '{}_event_{:08d}'.format(object_type, int(count))
Return an event key for the event on the object type. This must be a unique event id for the object. Args: object_type (str): Type of object Returns: str, event id
387,153
def ParseOptions(cls, options, configuration_object): if not isinstance(configuration_object, tools.CLITool): raise errors.BadConfigObject( ) hashers = cls._ParseStringOption( options, , default_value=cls._DEFAULT_HASHER_STRING) hasher_file_size_limit = cls._ParseNumericOption( options, , default_value=0) if hasher_file_size_limit < 0: raise errors.BadConfigOption( ) setattr(configuration_object, , hashers) setattr( configuration_object, , hasher_file_size_limit)
Parses and validates options. Args: options (argparse.Namespace): parser options. configuration_object (CLITool): object to be configured by the argument helper. Raises: BadConfigObject: when the configuration object is of the wrong type. BadConfigOption: when a configuration parameter fails validation.
387,154
def get_parser(self): parser = self.parser_cls(prog=self.prog_name, usage=self.get_usage(), stream=self.stderr) subparsers = parser.add_subparsers( title=, ) for name, command in self.registry.items(): cmdparser = subparsers.add_parser(name, help=command.help) for argument in command.get_args(): cmdparser.add_argument(*argument.args, **argument.kwargs) command.setup_parser(parser, cmdparser) cmdparser.set_defaults(func=command.handle) return parser
Returns :class:`monolith.cli.Parser` instance for this *ExecutionManager*.
387,155
def from_conversation_event(conversation, conv_event, prev_conv_event, datetimefmt, watermark_users=None): user = conversation.get_user(conv_event.user_id) if prev_conv_event is not None: is_new_day = (conv_event.timestamp.astimezone(tz=None).date() != prev_conv_event.timestamp.astimezone(tz=None).date()) else: is_new_day = False if isinstance(conv_event, hangups.ChatMessageEvent): return MessageWidget(conv_event.timestamp, conv_event.text, datetimefmt, user, show_date=is_new_day, watermark_users=watermark_users) elif isinstance(conv_event, hangups.RenameEvent): if conv_event.new_name == : text = ( .format(user.first_name)) else: text = ( .format(user.first_name, conv_event.new_name)) return MessageWidget(conv_event.timestamp, text, datetimefmt, show_date=is_new_day, watermark_users=watermark_users) elif isinstance(conv_event, hangups.MembershipChangeEvent): event_users = [conversation.get_user(user_id) for user_id in conv_event.participant_ids] names = .join([user.full_name for user in event_users]) if conv_event.type_ == hangups.MEMBERSHIP_CHANGE_TYPE_JOIN: text = ( .format(user.first_name, names)) else: text = (.format(names)) return MessageWidget(conv_event.timestamp, text, datetimefmt, show_date=is_new_day, watermark_users=watermark_users) elif isinstance(conv_event, hangups.HangoutEvent): text = { hangups.HANGOUT_EVENT_TYPE_START: ( ), hangups.HANGOUT_EVENT_TYPE_END: ( ), hangups.HANGOUT_EVENT_TYPE_ONGOING: ( ), }.get(conv_event.event_type, ) return MessageWidget(conv_event.timestamp, text, datetimefmt, show_date=is_new_day, watermark_users=watermark_users) elif isinstance(conv_event, hangups.GroupLinkSharingModificationEvent): status_on = hangups.GROUP_LINK_SHARING_STATUS_ON status_text = ( if conv_event.new_status == status_on else ) text = .format(user.first_name, status_text) return MessageWidget(conv_event.timestamp, text, datetimefmt, show_date=is_new_day, watermark_users=watermark_users) else: text = return MessageWidget(conv_event.timestamp, text, datetimefmt, show_date=is_new_day, watermark_users=watermark_users)
Return MessageWidget representing a ConversationEvent. Returns None if the ConversationEvent does not have a widget representation.
387,156
def create_ip_arp_reply(srchw, dsthw, srcip, targetip):
    pkt = create_ip_arp_request(srchw, srcip, targetip)
    pkt[0].dst = dsthw
    pkt[1].operation = ArpOperation.Reply
    pkt[1].targethwaddr = dsthw
    return pkt
Create an ARP reply (just change what needs to be changed from a request)
387,157
def layers(self):
    try:
        response = self.d.history(self.image_id)
    except docker.errors.NotFound:
        raise NotAvailableAnymore()
    layers = []
    for l in response:
        layer_id = l["Id"]
        if layer_id == "<missing>":
            layers.append(DockerImage(l, self.docker_backend))
        else:
            layers.append(self.docker_backend.get_image_by_id(layer_id))
    return layers
Similar to parent images, except that it uses the /history API endpoint. :return:
387,158
def parse_relations( belstr: str, char_locs: CharLocs, parsed: Parsed, errors: Errors ) -> Tuple[Parsed, Errors]: quotes = char_locs["quotes"] quoted_range = set([i for start, end in quotes.items() for i in range(start, end)]) for match in relations_pattern_middle.finditer(belstr): (start, end) = match.span(1) end = end - 1 if start != end: test_range = set(range(start, end)) else: test_range = set(start) if test_range.intersection(quoted_range): continue span_key = (start, end) parsed[span_key] = { "type": "Relation", "name": match.group(1), "span": (start, end), } for match in relations_pattern_end.finditer(belstr): (start, end) = match.span(1) log.debug(f"Relation-end {match}") end = end - 1 if start != end: test_range = set(range(start, end)) else: test_range = set(start) if test_range.intersection(quoted_range): continue span_key = (start, end) parsed[span_key] = { "type": "Relation", "name": match.group(1), "span": (start, end), } return parsed, errors
Parse relations from BEL string Args: belstr: BEL string as one single string (not list of chars) char_locs: paren, comma and quote char locations parsed: data structure for parsed functions, relations, nested errors: error messages Returns: (parsed, errors):
387,159
def set_channel_created(self, channel_link, channel_id):
    self.channel_link = channel_link
    self.channel_id = channel_id
    self.__record_progress(Status.PUBLISH_CHANNEL if config.PUBLISH else Status.DONE)
set_channel_created: records progress after creating channel on Kolibri Studio Args: channel_link (str): link to uploaded channel channel_id (str): id of channel that has been uploaded Returns: None
387,160
def apply_sfr_seg_parameters(seg_pars=True, reach_pars=False): if not seg_pars and not reach_pars: raise Exception("gw_utils.apply_sfr_pars() error: both seg_pars and reach_pars are False") import flopy bak_sfr_file,pars = None,None if seg_pars: assert os.path.exists("sfr_seg_pars.config") with open("sfr_seg_pars.config",) as f: pars = {} for line in f: line = line.strip().split() pars[line[0]] = line[1] bak_sfr_file = pars["nam_file"]+"_backup_.sfr" m = flopy.modflow.Modflow.load(pars["nam_file"], load_only=[], check=False) sfr = flopy.modflow.ModflowSfr2.load(os.path.join(bak_sfr_file), m) sfrfile = pars["sfr_filename"] mlt_df = pd.read_csv(pars["mult_file"], delim_whitespace=False, index_col=0) time_mlt_df = None if "time_mult_file" in pars: time_mult_file = pars["time_mult_file"] time_mlt_df = pd.read_csv(pars["time_mult_file"], delim_whitespace=False,index_col=0) idx_cols = [, , , , , ] present_cols = [c for c in idx_cols if c in mlt_df.columns] mlt_cols = mlt_df.columns.drop(present_cols) for key, val in m.sfr.segment_data.items(): df = pd.DataFrame.from_records(val) df.loc[:, mlt_cols] *= mlt_df.loc[:, mlt_cols] val = df.to_records(index=False) sfr.segment_data[key] = val if reach_pars: assert os.path.exists("sfr_reach_pars.config") with open("sfr_reach_pars.config", ) as f: r_pars = {} for line in f: line = line.strip().split() r_pars[line[0]] = line[1] if bak_sfr_file is None: bak_sfr_file = r_pars["nam_file"]+"_backup_.sfr" m = flopy.modflow.Modflow.load(r_pars["nam_file"], load_only=[], check=False) sfr = flopy.modflow.ModflowSfr2.load(os.path.join(bak_sfr_file), m) sfrfile = r_pars["sfr_filename"] r_mlt_df = pd.read_csv(r_pars["mult_file"],sep=,index_col=0) r_idx_cols = ["node", "k", "i", "j", "iseg", "ireach", "reachID", "outreach"] r_mlt_cols = r_mlt_df.columns.drop(r_idx_cols) r_df = pd.DataFrame.from_records(m.sfr.reach_data) r_df.loc[:, r_mlt_cols] *= r_mlt_df.loc[:, r_mlt_cols] sfr.reach_data = r_df.to_records(index=False) if pars is not None and "time_mult_file" in pars: time_mult_file = pars["time_mult_file"] time_mlt_df = pd.read_csv(time_mult_file, delim_whitespace=False, index_col=0) for kper, sdata in m.sfr.segment_data.items(): assert kper in time_mlt_df.index, "gw_utils.apply_sfr_seg_parameters() error: kper " + \ "{0} not in time_mlt_df index".format(kper) for col in time_mlt_df.columns: sdata[col] *= time_mlt_df.loc[kper, col] sfr.write_file(filename=sfrfile) return sfr
apply the SFR segment multiplier parameters. Expected to be run in the same dir as the model exists Parameters ---------- reach_pars : bool if reach parameters need to be applied Returns ------- sfr : flopy.modflow.ModflowSfr instance Note ---- expects "sfr_seg_pars.config" to exist expects <nam_file>+"_backup_.sfr" to exist
387,161
def resetAndRejoin(self, timeout): print % self.port print timeout try: self._sendline() self.isPowerDown = True time.sleep(timeout) if self.deviceRole == Thread_Device_Role.SED: self.setPollingRate(self.sedPollingRate) self.__startOpenThread() time.sleep(3) if self.__sendCommand()[0] == : print return False return True except Exception, e: ModuleHelper.WriteIntoDebugLogger("resetAndRejoin() Error: " + str(e))
reset and join back Thread Network with a given timeout delay Args: timeout: a timeout interval before rejoin Thread Network Returns: True: successful to reset and rejoin Thread Network False: fail to reset and rejoin the Thread Network
387,162
def generateSplines(self):
    _ = returnSplineList(self.dependentVar, self.independentVar,
                         subsetPercentage=self.splineSubsetPercentage,
                         cycles=self.splineCycles,
                         minKnotPoints=self.splineMinKnotPoins,
                         initialKnots=self.splineInitialKnots,
                         splineOrder=self.splineOrder,
                         terminalExpansion=self.splineTerminalExpansion)
    self.splines = _
#TODO: docstring
387,163
def _equalizeHistogram(img):
    # dtype keys were lost in extraction; the float/int mappings below are assumed.
    intType = None
    if 'f' not in img.dtype.str:
        TO_FLOAT_TYPES = {np.dtype('uint8'): np.float16,
                          np.dtype('uint16'): np.float32,
                          np.dtype('uint32'): np.float64,
                          np.dtype('uint64'): np.float64}
        intType = img.dtype
        img = img.astype(TO_FLOAT_TYPES[intType], copy=False)
    DEPTH_TO_NBINS = {np.dtype('float16'): 256,
                      np.dtype('float32'): 32768,
                      np.dtype('float64'): 2147483648}
    nBins = DEPTH_TO_NBINS[img.dtype]
    mn, mx = np.amin(img), np.amax(img)
    if abs(mn) > abs(mx):
        mx = mn
    img /= mx
    img = exposure.equalize_hist(img, nbins=nBins)
    img *= mx
    if intType:
        img = img.astype(intType)
    return img
Histogram equalisation not bounded to int() or an image depth of 8 bit; also works with negative numbers.
387,164
def check_purge_status(self, purge_id):
    content = self._fetch("/purge?id=%s" % purge_id)
    return map(lambda x: FastlyPurgeStatus(self, x), content)
Get the status and times of a recently completed purge.
387,165
def iptag_clear(self, iptag, x, y): self._send_scp(x, y, 0, SCPCommands.iptag, int(consts.IPTagCommands.clear) << 16 | iptag)
Clear an IPTag. Parameters ---------- iptag : int Index of the IPTag to clear.
387,166
def mix(self, color1, color2, weight=50, *args):
    if color1 and color2:
        if isinstance(weight, string_types):
            weight = float(weight.strip())
        weight = ((weight / 100.0) * 2) - 1
        rgb1 = self._hextorgb(color1)
        rgb2 = self._hextorgb(color2)
        alpha = 0
        w1 = (((weight if weight * alpha == -1 else weight + alpha) /
               (1 + weight * alpha)) + 1)
        w1 = w1 / 2.0
        w2 = 1 - w1
        rgb = [
            rgb1[0] * w1 + rgb2[0] * w2,
            rgb1[1] * w1 + rgb2[1] * w2,
            rgb1[2] * w1 + rgb2[2] * w2,
        ]
        return self._rgbatohex(rgb)
    raise ValueError()
This algorithm factors in both the user-provided weight and the difference between the alpha values of the two colors to decide how to perform the weighted average of the two RGB values. It works by first normalizing both parameters to be within [-1, 1], where 1 indicates "only use color1", -1 indicates "only use color2", and all values in between indicate a proportionately weighted average. Once we have the normalized variables w and a, we apply the formula (w + a)/(1 + w*a) to get the combined weight (in [-1, 1]) of color1. This formula has two especially nice properties: * When either w or a are -1 or 1, the combined weight is also that number (cases where w * a == -1 are undefined, and handled as a special case). * When a is 0, the combined weight is w, and vice versa. Finally, the weight of color1 is renormalized to be within [0, 1] and the weight of color2 is given by 1 minus the weight of color1. Copyright (c) 2006-2009 Hampton Catlin, Nathan Weizenbaum, and Chris Eppstein http://sass-lang.com args: color1 (str): first color color2 (str): second color weight (int/str): weight raises: ValueError returns: str
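A standalone sanity check of the weighting math on a single channel (the real method operates on hex colors via the class helpers):

def mix_channel(c1, c2, weight=50, alpha=0):
    w = (weight / 100.0) * 2 - 1                    # normalize weight to [-1, 1]
    w1 = ((w + alpha) / (1 + w * alpha) + 1) / 2.0  # combined weight of color1, in [0, 1]
    return c1 * w1 + c2 * (1 - w1)

print(mix_channel(255, 0))  # 127.5 -- an even blend, matching mix('#ff0000', '#0000ff') per channel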
387,167
def _create_archive_table(self, table_name): if table_name in self._get_table_names(): raise KeyError(.format(table_name)) try: table = self._resource.create_table( TableName=table_name, KeySchema=[{: , : }], AttributeDefinitions=[ {: , : }], ProvisionedThroughput={ : 123, : 123}) table.meta.client.get_waiter().wait( TableName=table_name) except ValueError: msg = assert table_name in self._get_table_names(), msg
Dynamo implementation of BaseDataManager create_archive_table. A waiter object is used to ensure the table exists before moving on; this will slow down table creation. However, since we are only creating the table once, this should not impact users. Parameters ---------- table_name: str Returns ------- None
387,168
def serialize_gen( obj_pyxb, encoding=, pretty=False, strip_prolog=False, xslt_url=None ): assert d1_common.type_conversions.is_pyxb(obj_pyxb) assert encoding in (None, , ) try: obj_dom = obj_pyxb.toDOM() except pyxb.ValidationError as e: raise ValueError( .format(e.details()) ) except pyxb.PyXBException as e: raise ValueError(.format(str(e))) if xslt_url: xslt_processing_instruction = obj_dom.createProcessingInstruction( , .format(xslt_url) ) root = obj_dom.firstChild obj_dom.insertBefore(xslt_processing_instruction, root) if pretty: xml_str = obj_dom.toprettyxml(indent=, encoding=encoding) if encoding is None: xml_str = re.sub(r, r, xml_str, flags=re.MULTILINE) else: xml_str = re.sub(b, b, xml_str, flags=re.MULTILINE) else: xml_str = obj_dom.toxml(encoding) if strip_prolog: if encoding is None: xml_str = re.sub(r, r, xml_str) else: xml_str = re.sub(b, b, xml_str) return xml_str.strip()
Serialize PyXB object to XML. Args: obj_pyxb: PyXB object PyXB object to serialize. encoding: str Encoding to use for XML doc bytes pretty: bool True: Use pretty print formatting for human readability. strip_prolog: True: remove any XML prolog (e.g., ``<?xml version="1.0" encoding="utf-8"?>``), from the resulting XML doc. xslt_url: str If specified, add a processing instruction to the XML doc that specifies the download location for an XSLT stylesheet. Returns: XML document
387,169
def toLily(self): lilystring = "" if not self.autoBeam: lilystring += "\\autoBeamOff" children = self.SortedChildren() if not hasattr(self, "transpose"): self.transpose = None for child in range(len(children)): measureNode = self.GetChild(children[child]) measureNode.autoBeam = self.autoBeam lilystring += " % measure " + str(children[child]) + "\n" lilystring += measureNode.toLily() + "\n\n" return lilystring
Method which converts the object instance, its attributes and children to a string of lilypond code :return: str of lilypond code
387,170
def basename_without_extension(self):
    ret = self.basename.rsplit('.', 1)[0]
    # Suffix literal lost in extraction; '.tar' assumed (handles '.tar.gz'-style names).
    if ret.endswith('.tar'):
        ret = ret[0:len(ret) - 4]
    return ret
Get the ``os.path.basename`` of the local file, if any, with extension removed.
387,171
def insert_paulis(self, indices=None, paulis=None, pauli_labels=None): if pauli_labels is not None: if paulis is not None: raise QiskitError("Please only provide either `paulis` or `pauli_labels`") if isinstance(pauli_labels, str): pauli_labels = list(pauli_labels) paulis = Pauli.from_label(pauli_labels[::-1]) if indices is None: self._z = np.concatenate((self._z, paulis.z)) self._x = np.concatenate((self._x, paulis.x)) else: if not isinstance(indices, list): indices = [indices] self._z = np.insert(self._z, indices, paulis.z) self._x = np.insert(self._x, indices, paulis.x) return self
Insert or append pauli to the targeted indices. If indices is None, it means append at the end. Args: indices (list[int]): the qubit indices to be inserted paulis (Pauli): the to-be-inserted or appended pauli pauli_labels (list[str]): the to-be-inserted or appended pauli label Note: the indices refer to the location of the original paulis, e.g. if indices = [0, 2], pauli_labels = ['Z', 'I'] and original pauli = 'ZYXI' the pauli will be updated to ZY'I'XI'Z' 'Z' and 'I' are inserted before the qubits at 0 and 2. Returns: Pauli: self Raises: QiskitError: provide both `paulis` and `pauli_labels` at the same time
387,172
def sparse_to_unmasked_sparse(self):
    return mapping_util.sparse_to_unmasked_sparse_from_mask_and_pixel_centres(
        total_sparse_pixels=self.total_sparse_pixels,
        mask=self.regular_grid.mask,
        unmasked_sparse_grid_pixel_centres=self.unmasked_sparse_grid_pixel_centres).astype('int')
The 1D index mappings between the masked sparse-grid and unmasked sparse grid.
387,173
def rotate(a, th): return np.sum(a[..., np.newaxis] * R_rot(th), axis=-2)
Return cartesian vectors, after rotation by specified angles about each degree of freedom. Parameters ---------- a: array, shape (n, d) Input d-dimensional cartesian vectors, left unchanged. th: array, shape (n, m) Angles by which to rotate about each m rotational degree of freedom (m=1 in 2 dimensions, m=3 in 3 dimensions). Returns ------- ar: array, shape of a Rotated cartesian vectors.
387,174
def find_converting_reactions(model, pair):
    first = set(find_met_in_model(model, pair[0]))
    second = set(find_met_in_model(model, pair[1]))
    hits = list()
    for rxn in model.reactions:
        if len(first & set(rxn.reactants)) > 0 and len(second & set(rxn.products)) > 0:
            hits.append(rxn)
        elif len(first & set(rxn.products)) > 0 and len(second & set(rxn.reactants)) > 0:
            hits.append(rxn)
    return frozenset(hits)
Find all reactions which convert a given metabolite pair. Parameters ---------- model : cobra.Model The metabolic model under investigation. pair: tuple or list A pair of metabolite identifiers without compartment suffix. Returns ------- frozenset The set of reactions that have one of the pair on their left-hand side and the other on the right-hand side.
387,175
def combine_tax_scales(node): combined_tax_scales = None for child_name in node: child = node[child_name] if not isinstance(child, AbstractTaxScale): log.info(.format(child_name, child)) continue if combined_tax_scales is None: combined_tax_scales = MarginalRateTaxScale(name = child_name) combined_tax_scales.add_bracket(0, 0) combined_tax_scales.add_tax_scale(child) return combined_tax_scales
Combine all the MarginalRateTaxScales in the node into a single MarginalRateTaxScale.
387,176
def _distort_color(image, color_ordering=0, scope=None): with tf.name_scope(scope, "distort_color", [image]): if color_ordering == 0: image = tf.image.random_brightness(image, max_delta=32. / 255.) image = tf.image.random_saturation(image, lower=0.5, upper=1.5) image = tf.image.random_hue(image, max_delta=0.2) image = tf.image.random_contrast(image, lower=0.5, upper=1.5) elif color_ordering == 1: image = tf.image.random_saturation(image, lower=0.5, upper=1.5) image = tf.image.random_brightness(image, max_delta=32. / 255.) image = tf.image.random_contrast(image, lower=0.5, upper=1.5) image = tf.image.random_hue(image, max_delta=0.2) elif color_ordering == 2: image = tf.image.random_contrast(image, lower=0.5, upper=1.5) image = tf.image.random_hue(image, max_delta=0.2) image = tf.image.random_brightness(image, max_delta=32. / 255.) image = tf.image.random_saturation(image, lower=0.5, upper=1.5) elif color_ordering == 3: image = tf.image.random_hue(image, max_delta=0.2) image = tf.image.random_saturation(image, lower=0.5, upper=1.5) image = tf.image.random_contrast(image, lower=0.5, upper=1.5) image = tf.image.random_brightness(image, max_delta=32. / 255.) else: raise ValueError("color_ordering must be in [0, 3]") return tf.clip_by_value(image, 0.0, 1.0)
Distort the color of a Tensor image. Each color distortion is non-commutative and thus ordering of the color ops matters. Ideally we would randomly permute the ordering of the color ops. Rather then adding that level of complication, we select a distinct ordering of color ops for each preprocessing thread. Args: image: 3-D Tensor containing single image in [0, 1]. color_ordering: Python int, a type of distortion (valid values: 0-3). scope: Optional scope for name_scope. Returns: 3-D Tensor color-distorted image on range [0, 1] Raises: ValueError: if color_ordering not in [0, 3]
387,177
def from_text(cls, text, mapping='mapping'):
    # The default kwarg and the frequency column name were lost in extraction; assumed here.
    graphemes = Counter(grapheme_pattern.findall(text))
    specs = [
        OrderedDict([
            (cls.GRAPHEME_COL, grapheme),
            ('frequency', frequency),
            (mapping, grapheme)])
        for grapheme, frequency in graphemes.most_common()]
    return cls(*specs)
Create a Profile instance from the Unicode graphemes found in `text`. Parameters ---------- text mapping Returns ------- A Profile instance.
387,178
def index_agreement(s, o):
    ia = 1 - (np.sum((o - s)**2)) / \
        (np.sum((np.abs(s - np.mean(o)) + np.abs(o - np.mean(o)))**2))
    return ia
index of agreement input: s: simulated o: observed output: ia: index of agreement
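A worked example; the index approaches 1.0 as the simulation matches the observations:

import numpy as np

o = np.array([1.0, 2.0, 3.0, 4.0])   # observed
s = np.array([1.1, 1.9, 3.2, 3.8])   # simulated
ia = 1 - np.sum((o - s)**2) / np.sum((np.abs(s - np.mean(o)) + np.abs(o - np.mean(o)))**2)
print(ia)  # close to 1.0 here; identical arrays give exactly 1.0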
387,179
def random(cls, num_qubits, seed=None):
    if seed is not None:
        np.random.seed(seed)
    z = np.random.randint(2, size=num_qubits).astype(np.bool)
    x = np.random.randint(2, size=num_qubits).astype(np.bool)
    return cls(z, x)
Return a random Pauli on number of qubits. Args: num_qubits (int): the number of qubits seed (int): Optional. To set a random seed. Returns: Pauli: the random pauli
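A usage sketch, assuming the qiskit-style Pauli class this classmethod belongs to is available:

pauli = Pauli.random(4, seed=42)
print(pauli.to_label())  # a reproducible random 4-qubit Pauli label, e.g. 'ZXIY' (illustrative)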
387,180
def _get_directives_and_roles_from_sphinx(): if SPHINX_INSTALLED: sphinx_directives = list(sphinx.domains.std.StandardDomain.directives) sphinx_roles = list(sphinx.domains.std.StandardDomain.roles) for domain in [sphinx.domains.c.CDomain, sphinx.domains.cpp.CPPDomain, sphinx.domains.javascript.JavaScriptDomain, sphinx.domains.python.PythonDomain]: sphinx_directives += list(domain.directives) + [ .format(domain.name, item) for item in list(domain.directives)] sphinx_roles += list(domain.roles) + [ .format(domain.name, item) for item in list(domain.roles)] else: sphinx_roles = [ , , , , , , , , , , , , , , , , , , , , , , , ] sphinx_directives = [ , , , , , , , , , , , , , , , , , , , , ] return (sphinx_directives, sphinx_roles)
Return a tuple of Sphinx directive and roles.
387,181
def download_image(self, img_url):
    img_request = None
    try:
        # HTTP method literal lost in extraction; 'GET' assumed.
        img_request = requests.request('GET', img_url, stream=True, proxies=self.proxies)
        if img_request.status_code != 200:
            raise ImageDownloadError(img_request.status_code)
    except:
        raise ImageDownloadError()
    if img_url[-3:] == "svg" or (int(img_request.headers['content-length']) > self.min_filesize and
                                 int(img_request.headers['content-length']) < self.max_filesize):
        img_content = img_request.content
        with open(os.path.join(self.download_path, img_url.split('/')[-1]), 'wb') as f:
            byte_image = bytes(img_content)
            f.write(byte_image)
    else:
        raise ImageSizeError(img_request.headers['content-length'])
    return True
Downloads a single image. Downloads img_url using self.page_url as base. Also, raises the appropriate exception if required.
387,182
def load_gffutils_db(f):
    import gffutils
    db = gffutils.FeatureDB(f, keep_order=True)
    return db
Load database for gffutils. Parameters ---------- f : str Path to database. Returns ------- db : gffutils.FeatureDB gffutils feature database.
387,183
def get_compatible_generator_action(self, filename):
    for action in self.__generator_actions:
        if action.act_on_file(filename):
            return action
    return None
Return the **first** compatible :class:`GeneratorAction` for a given filename or ``None`` if none is found. Args: filename (str): The filename of the template to process.
387,184
def rotate_content(day=None):
    for main in Main.objects.all():
        site = main.sites_rooted_here.all().first()
        main_lang = Languages.for_site(site).languages.filter(is_main_language=True).first()
        index = SectionIndexPage.objects.live().child_of(main).first()
        site_settings = SiteSettings.for_site(site)
        if day is None:
            day = timezone.now().weekday()
        if main and index:
            rotate_latest(main_lang, index, main, site_settings, day)
            rotate_featured_in_homepage(main_lang, day, main)
this method gets the parameters that are needed for rotate_latest and rotate_featured_in_homepage methods, and calls them both
387,185
def read_from_LSQ(self, LSQ_file): cont = self.user_warning( "LSQ import only works if all measurements are present and not averaged during import from magnetometer files to magic format. Do you wish to continue reading interpretations?") if not cont: return self.clear_interpretations( message=) old_s = self.s for specimen in self.specimens: self.select_specimen(specimen) for i in range(len(self.Data[specimen][])): self.mark_meas_good(i) self.select_specimen(old_s) print("Reading LSQ file") interps = read_LSQ(LSQ_file) for interp in interps: specimen = interp[] if specimen not in self.specimens: print( ("specimen %s has no registered measurement data, skipping interpretation import" % specimen)) continue PCA_type = interp[].split()[0] tmin = self.Data[specimen][][interp[]] tmax = self.Data[specimen][][interp[]] if in list(interp.keys()): name = interp[] else: name = None new_fit = self.add_fit(specimen, name, tmin, tmax, PCA_type) if in list(interp.keys()): old_s = self.s self.select_specimen(specimen) for bmi in interp["bad_measurement_index"]: try: self.mark_meas_bad(bmi) except IndexError: print( "Magic Measurments length does not match that recorded in LSQ file") self.select_specimen(old_s) if self.ie_open: self.ie.update_editor() self.update_selection()
Clears all current interpretations and replaces them with interpretations read from LSQ file. Parameters ---------- LSQ_file : path to LSQ file to read in
387,186
def concatenate_not_none(l, axis=0):
    mask = []
    for i in range(len(l)):
        if l[i] is not None:
            mask.append(i)
    l_stacked = np.concatenate([l[i] for i in mask], axis=axis)
    return l_stacked
Construct a numpy array by stacking not-None arrays in a list Parameters ---------- data : list of arrays The list of arrays to be concatenated. Arrays have same shape in all but one dimension or are None, in which case they are ignored. axis : int, default = 0 Axis for the concatenation Returns ------- data_stacked : array The resulting concatenated array.
387,187
def get_gtf_db(gtf, in_memory=False):
    # File-suffix and message literals were lost in extraction; '.db', '.gz' and
    # ':memory:' assumed from gffutils conventions.
    db_file = gtf + '.db'
    if gtf.endswith('.gz'):
        db_file = gtf[:-3] + '.db'
    if file_exists(db_file):
        return gffutils.FeatureDB(db_file)
    db_file = ':memory:' if in_memory else db_file
    if in_memory or not file_exists(db_file):
        debug('Building gffutils GTF database...')
        infer_extent = guess_infer_extent(gtf)
        db = gffutils.create_db(gtf, dbfn=db_file, infer_gene_extent=infer_extent)
        return db
    else:
        return gffutils.FeatureDB(db_file)
create a gffutils DB
387,188
def remove(self, flag, extra): self.flag = flag self.extra = extra self.dep_path = self.meta.log_path + "dep/" dependencies, rmv_list = [], [] self.removed = self._view_removed() if not self.removed: print("") else: msg = "package" if len(self.removed) > 1: msg = msg + "s" try: if self.meta.default_answer in ["y", "Y"]: remove_pkg = self.meta.default_answer else: remove_pkg = raw_input( "\nAre you sure to remove {0} {1} [y/N]? ".format( str(len(self.removed)), msg)) except EOFError: print("") raise SystemExit() if remove_pkg in ["y", "Y"]: self._check_if_used(self.binary) for rmv in self.removed: if (os.path.isfile(self.dep_path + rmv) and self.meta.del_deps in ["on", "ON"] or os.path.isfile(self.dep_path + rmv) and "--deps" in self.extra): dependencies = self._view_deps(self.dep_path, rmv) if dependencies and self._rmv_deps_answer() in ["y", "Y"]: rmv_list += self._rmv_deps(dependencies, rmv) else: rmv_list += self._rmv_pkg(rmv) else: rmv_list += self._rmv_pkg(rmv) self._reference_rmvs(rmv_list)
Remove Slackware binary packages
387,189
def get_uuid(type=4):
    import uuid
    name = 'uuid' + str(type)
    u = getattr(uuid, name)
    return u().hex
Get uuid value
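For example:

print(get_uuid())   # 32-char hex digest of a random uuid4
print(get_uuid(1))  # time-based uuid1 instead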
387,190
def encode(self, uuid, pad_length=22): return self._num_to_string(uuid.int, pad_to_length=pad_length)
Encodes a UUID into a string (LSB first) according to the alphabet. If the leftmost (MSB) bits are 0, the string might be shorter.
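A minimal sketch of the underlying idea (the alphabet and helper name are assumptions, not the class's actual internals): repeatedly divide the UUID's 128-bit integer by the alphabet size, emitting the least significant symbol first.

import uuid

ALPHABET = "23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"  # assumed 57-char alphabet

def num_to_string(n, pad_to_length=22):
    out = []
    while n:
        n, rem = divmod(n, len(ALPHABET))
        out.append(ALPHABET[rem])   # least significant symbol first (LSB first)
    while len(out) < pad_to_length:
        out.append(ALPHABET[0])     # pad so the result has a fixed length
    return "".join(out)

print(num_to_string(uuid.uuid4().int))  # a 22-character string for a 128-bit UUID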
387,191
def create_configuration(self, node, ports):
    target_raid_config = node.get('target_raid_config', {}).copy()
    return hpssa_manager.create_configuration(raid_config=target_raid_config)
Create RAID configuration on the bare metal. This method creates the desired RAID configuration as read from node['target_raid_config']. :param node: A dictionary of the node object :param ports: A list of dictionaries containing information of ports for the node :returns: The current RAID configuration of the below format. raid_config = { 'logical_disks': [{ 'size_gb': 100, 'raid_level': 1, 'physical_disks': [ '5I:0:1', '5I:0:2'], 'controller': 'Smart array controller' }, ] }
387,192
def get_energies(atoms_list):
    if len(atoms_list) == 1:
        return atoms_list[0].get_potential_energy()
    elif len(atoms_list) > 1:
        energies = []
        for atoms in atoms_list:
            energies.append(atoms.get_potential_energy())
        return energies
Potential energy for a list of atoms objects
387,193
def get_thin_rect_vertices(ox, oy, dx, dy, r): if ox < dx: leftx = ox rightx = dx xco = 1 elif ox > dx: leftx = ox * -1 rightx = dx * -1 xco = -1 else: return [ ox - r, oy, ox + r, oy, ox + r, dy, ox - r, dy ] if oy < dy: boty = oy topy = dy yco = 1 elif oy > dy: boty = oy * -1 topy = dy * -1 yco = -1 else: return [ ox, oy - r, dx, oy - r, dx, oy + r, ox, oy + r ] rise = topy - boty run = rightx - leftx theta = atan(rise/run) theta_prime = ninety - theta xoff = cos(theta_prime) * r yoff = sin(theta_prime) * r x1 = leftx + xoff y1 = boty - yoff x2 = rightx + xoff y2 = topy - yoff x3 = rightx - xoff y3 = topy + yoff x4 = leftx - xoff y4 = boty + yoff return [ x1 * xco, y1 * yco, x2 * xco, y2 * yco, x3 * xco, y3 * yco, x4 * xco, y4 * yco ]
Given the starting point, ending point, and width, return a list of vertex coordinates at the corners of the line segment (really a thin rectangle).
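For example, the horizontal special case (oy == dy) returns the four corners directly:

print(get_thin_rect_vertices(0, 0, 10, 0, 2))
# -> [0, -2, 10, -2, 10, 2, 0, 2]: a 10-wide rectangle of half-width 2 around the x-axis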
387,194
def get_arguments(self): args = loads(self.grid_arguments)[] if isinstance(self.grid_arguments, bytes) else loads(self.grid_arguments.encode())[] retval = {} if in args: retval[] = args[] if in args and args[] is not None: retval[] = args[] if in args and args[] is not None: retval[] = args[] if in args and args[] is not None: retval[] = args[] if in args and len(args[]) > 0: retval[] = args[] if in args and args[]: retval[] = True if self.queue_name is not None: retval[] = str(self.queue_name) return retval
Returns the additional options for the grid (such as the queue, memory requirements, ...).
387,195
def set_common_fields(self, warc_type: str, content_type: str):
    self.fields[self.WARC_TYPE] = warc_type
    self.fields[self.CONTENT_TYPE] = content_type
    self.fields[self.WARC_DATE] = wpull.util.datetime_str()
    # Format string lost in extraction; '<urn>' angle-bracket wrapping assumed per the WARC spec.
    self.fields[self.WARC_RECORD_ID] = '<{0}>'.format(uuid.uuid4().urn)
Set the required fields for the record.
387,196
def load_modes(node):
    if isinstance(node, list):
        values = [load_mode(child) for child in node]
        keys = [mode.key for mode in values]
        return dict(zip(keys, values))
    elif isinstance(node, dict):
        # iterating key/value pairs requires .items()
        values = {key: load_mode(child) for key, child in node.items()}
        return values
    else:
        raise NotImplementedError
Load all observing modes
387,197
def load_modules(self):
    if self.INTERFACES_MODULE is None:
        raise NotImplementedError("A module containing interfaces modules "
                                  "should be setup in INTERFACES_MODULE !")
    else:
        for module, permission in self.modules.items():
            i = getattr(self.INTERFACES_MODULE, module).Interface(self, permission)
            self.interfaces[module] = i
Should instantiate interfaces and store them in `interfaces`, following `modules`.
387,198
def update_asset(self, asset_form=None): if asset_form is None: raise NullArgument() if not isinstance(asset_form, abc_repository_objects.AssetForm): raise InvalidArgument() if not asset_form.is_for_update(): raise InvalidArgument() try: if self._forms[asset_form.get_id().get_identifier()] == UPDATED: raise IllegalState() except KeyError: raise Unsupported() if not asset_form.is_valid(): raise InvalidArgument() url_path = construct_url(, bank_id=self._catalog_idstr) try: result = self._put_request(url_path, asset_form._my_map) except Exception: raise self._forms[asset_form.get_id().get_identifier()] = UPDATED return objects.Asset(result)
Updates an existing asset. :param asset_form: the form containing the elements to be updated :type asset_form: ``osid.repository.AssetForm`` :raise: ``IllegalState`` -- ``asset_form`` already used in an update transaction :raise: ``InvalidArgument`` -- the form contains an invalid value :raise: ``NullArgument`` -- ``asset_form`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``PermissionDenied`` -- authorization failure :raise: ``Unsupported`` -- ``asset_form`` did not originate from ``get_asset_form_for_update()`` *compliance: mandatory -- This method must be implemented.*
387,199
def downsample(self, factor):
    if int(factor) != factor or factor < 1:
        raise ValueError("Argument `factor` must be a positive integer greater than or equal to 1. Got: <{}>({})", type(factor), factor)
    paths = self.interjoint_paths()
    for i, path in enumerate(paths):
        paths[i] = np.concatenate((path[0::factor, :], path[-1:, :]))
    ds_skel = PrecomputedSkeleton.simple_merge(
        [PrecomputedSkeleton.from_path(path) for path in paths]).consolidate()
    ds_skel.id = self.id
    index = {}
    for i, vert in enumerate(self.vertices):
        vert = tuple(vert)
        index[vert] = i
    for i, vert in enumerate(ds_skel.vertices):
        vert = tuple(vert)
        ds_skel.radii[i] = self.radii[index[vert]]
        ds_skel.vertex_types[i] = self.vertex_types[index[vert]]
    return ds_skel
Compute a downsampled version of the skeleton by striding while preserving endpoints. factor: stride length for downsampling the saved skeleton paths. Returns: downsampled PrecomputedSkeleton