Dataset columns: "Unnamed: 0" (int64, values 0–389k), "code" (string, lengths 26–79.6k), "docstring" (string, lengths 1–46.9k).
def obj_res(data, fail_on=['type', 'obj', 'res']):
    # NOTE: the string literals below were stripped in extraction; the keys and
    # error messages are plausible reconstructions, not the original text.
    errors = []
    if not data.get('type', None) and 'type' in fail_on:
        errors += ['You must provide a role type.']
    # Find the grantee (actor) of the role.
    obj = None
    obj_type = None
    for fd in ACTOR_FIELDS:
        if data.get(fd, False):
            if not obj:
                obj = data[fd]
                obj_type = fd
            else:
                errors += ['You can not give a role to multiple actors at once.']
                break
    if not obj and 'obj' in fail_on:
        errors += ['You must provide either a user or a team.']
    # Find the resource the role applies to.
    res = None
    res_type = None
    for fd in RESOURCE_FIELDS:
        if data.get(fd, False):
            if not res:
                res = data[fd]
                res_type = fd
                if res_type == 'target_team':
                    res_type = 'team'
            else:
                errors += ['You can only give a role to one type of resource at a time.']
                break
    if not res and 'res' in fail_on:
        errors += ['You must provide a resource the role applies to.']
    if errors:
        raise exc.UsageError("\n".join(errors))
    return obj, obj_type, res, res_type
Given some CLI input data, Returns the following and their types: obj - the role grantee res - the resource that the role applies to
3,501
def _sentence_context(match, language=, case_insensitive=True): language_punct = {: r, : r} assert language in language_punct.keys(), \ .format(language_punct.keys()) start = match.start() end = match.end() window = 1000 snippet_left = match.string[start - window:start + 1] snippet_right = match.string[end:end + window] re_match = match.string[match.start():match.end()] comp_sent_boundary = regex.compile(language_punct[language], flags=regex.VERSION1) left_punct = [] for punct in comp_sent_boundary.finditer(snippet_left): end = punct.end() left_punct.append(end) try: last_period = left_punct.pop() + 1 except IndexError: last_period = 0 right_punct = [] for punct in comp_sent_boundary.finditer(snippet_right): end = punct.end() right_punct.append(end) try: first_period = right_punct.pop(0) except IndexError: first_period = 0 sentence = snippet_left[last_period:-1] + + re_match + + snippet_right[0:first_period] return sentence
Take one incoming regex match object and return the sentence in which the match occurs. :rtype : str :param match: regex.match :param language: str
3,502
def _graphite_url(self, query, raw_data=False, graphite_url=None):
    query = escape.url_escape(query)
    # option key assumed: 'graphite_url' (original literal was stripped)
    graphite_url = graphite_url or self.reactor.options.get('graphite_url')
    url = "{base}/render/?target={query}&from=-{from_time}&until=-{until}".format(
        base=graphite_url, query=query,
        from_time=self.from_time.as_graphite(),
        until=self.until.as_graphite(),
    )
    if raw_data:
        url = "{}&format=raw".format(url)
    return url
Build Graphite URL.
3,503
def create_graph_rules(address_mapper): @rule(AddressMapper, []) def address_mapper_singleton(): return address_mapper return [ address_mapper_singleton, hydrate_struct, parse_address_family, addresses_from_address_families, RootRule(Address), RootRule(BuildFileAddress), RootRule(BuildFileAddresses), RootRule(Specs), ]
Creates tasks used to parse Structs from BUILD files. :param address_mapper_key: The subject key for an AddressMapper instance. :param symbol_table: A SymbolTable instance to provide symbols for Address lookups.
3,504
def _repack_options(options): return dict( [ (six.text_type(x), _normalize(y)) for x, y in six.iteritems(salt.utils.data.repack_dictlist(options)) ] )
Repack the options data
3,505
def calculate_leapdays(init_date, final_date): leap_days = (final_date.year - 1) // 4 - (init_date.year - 1) // 4 leap_days -= (final_date.year - 1) // 100 - (init_date.year - 1) // 100 leap_days += (final_date.year - 1) // 400 - (init_date.year - 1) // 400 return datetime.timedelta(days=leap_days)
Currently unsupported, it only works for differences in years.
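A quick illustrative call for the leap-day count above (a hedged sketch; the dates and expected result are my own example, not from the original source). The formula counts leap years y with init_date.year <= y < final_date.year:

import datetime

# leap years 2000, 2004 and 2008 fall in [2000, 2010)
init_date = datetime.date(2000, 1, 1)
final_date = datetime.date(2010, 1, 1)
print(calculate_leapdays(init_date, final_date))  # 3 days, 0:00:00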
3,506
def get_auth_header(self, user_payload):
    auth_token = self.get_auth_token(user_payload)
    # format string assumed; the original literal was stripped
    return '{auth_header_prefix} {auth_token}'.format(
        auth_header_prefix=self.auth_header_prefix,
        auth_token=auth_token
    )
Returns the value for authorization header Args: user_payload(dict, required): A `dict` containing required information to create authentication token
3,507
def to_cloudformation(self): rest_api = self._construct_rest_api() deployment = self._construct_deployment(rest_api) swagger = None if rest_api.Body is not None: swagger = rest_api.Body elif rest_api.BodyS3Location is not None: swagger = rest_api.BodyS3Location stage = self._construct_stage(deployment, swagger) permissions = self._construct_authorizer_lambda_permission() return rest_api, deployment, stage, permissions
Generates CloudFormation resources from a SAM API resource :returns: a tuple containing the RestApi, Deployment, and Stage for an empty Api. :rtype: tuple
3,508
def filterMapNames(regexText, records=getIndex(), excludeRegex=False, closestMatch=True): bestScr = 99999 regex = re.compile(regexText, flags=re.IGNORECASE) ret = [] if excludeRegex: if regexText and closestMatch: for m in list(records): if re.search(regex, m.name): continue score = len(m.name) if score == bestScr: bestScr = score ret.append(m) elif score < bestScr: bestScr = score ret = [m] else: for m in list(records): if re.search(regex, m.name): continue ret.append(m) else: if regexText and closestMatch: for m in records: if not re.search(regex, m.name): continue score = len(m.name) if score == bestScr: bestScr = score ret.append(m) elif score < bestScr: bestScr = score ret = [m] else: for m in records: if not re.search(regex, m.name): continue ret.append(m) return ret
matches each record against regexText according to parameters NOTE: the code could be written more simply, but this is loop-optimized to scale better with a large number of map records
3,509
def parse(cls, fptr, offset, length): nbytes = offset + length - fptr.tell() fptr.read(nbytes) return cls(length=length, offset=offset)
Parse JPX free box. Parameters ---------- fptr : file Open file object. offset : int Start position of box in bytes. length : int Length of the box in bytes. Returns ------- FreeBox Instance of the current free box.
3,510
def config(config, fork_name="", origin_name=""): state = read(config.configfile) any_set = False if fork_name: update(config.configfile, {"FORK_NAME": fork_name}) success_out("fork-name set to: {}".format(fork_name)) any_set = True if origin_name: update(config.configfile, {"ORIGIN_NAME": origin_name}) success_out("origin-name set to: {}".format(origin_name)) any_set = True if not any_set: info_out("Fork-name: {}".format(state["FORK_NAME"]))
Setting various configuration options
3,511
def cmd(command, *args, **kwargs):
    # proxy option key and '__pub_' prefix assumed; the original literals were stripped
    proxy_prefix = __opts__['proxy']['proxytype']
    proxy_cmd = '.'.join([proxy_prefix, command])
    if proxy_cmd not in __proxy__:
        return False
    for k in list(kwargs):
        if k.startswith('__pub_'):
            kwargs.pop(k)
    return __proxy__[proxy_cmd](*args, **kwargs)
run commands from __proxy__ :mod:`salt.proxy.onyx<salt.proxy.onyx>` command function from `salt.proxy.onyx` to run args positional args to pass to `command` function kwargs key word arguments to pass to `command` function .. code-block:: bash salt '*' onyx.cmd sendline 'show ver' salt '*' onyx.cmd show_run salt '*' onyx.cmd check_password username=admin password='$5$lkjsdfoi$blahblahblah' encrypted=True
3,512
def _read_set(ctx: ReaderContext) -> lset.Set: start = ctx.reader.advance() assert start == "{" def set_if_valid(s: Collection) -> lset.Set: if len(s) != len(set(s)): raise SyntaxError("Duplicated values in set") return lset.set(s) return _read_coll(ctx, set_if_valid, "}", "set")
Return a set from the input stream.
3,513
def mkstemp(suffix="", prefix=template, dir=None, text=False): if dir is None: dir = gettempdir() if text: flags = _text_openflags else: flags = _bin_openflags return _mkstemp_inner(dir, prefix, suffix, flags)
User-callable function to create and return a unique temporary file. The return value is a pair (fd, name) where fd is the file descriptor returned by os.open, and name is the filename. If 'suffix' is specified, the file name will end with that suffix, otherwise there will be no suffix. If 'prefix' is specified, the file name will begin with that prefix, otherwise a default prefix is used. If 'dir' is specified, the file will be created in that directory, otherwise a default directory is used. If 'text' is specified and true, the file is opened in text mode. Else (the default) the file is opened in binary mode. On some operating systems, this makes no difference. The file is readable and writable only by the creating user ID. If the operating system uses permission bits to indicate whether a file is executable, the file is executable by no one. The file descriptor is not inherited by children of this process. Caller is responsible for deleting the file when done with it.
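A minimal usage sketch for the function above (assuming the standard-library tempfile semantics it mirrors; the file contents are illustrative):

import os

fd, name = mkstemp(suffix=".txt", prefix="demo_")
try:
    os.write(fd, b"hello")   # fd is a raw OS-level file descriptor
finally:
    os.close(fd)
    os.remove(name)          # caller is responsible for deleting the file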
3,514
def username_user_password(self, **kwargs):
    # kwargs key names assumed; the original literals were stripped
    config = ET.Element("config")
    username = ET.SubElement(config, "username", xmlns="urn:brocade.com:mgmt:brocade-aaa")
    name_key = ET.SubElement(username, "name")
    name_key.text = kwargs.pop('name')
    user_password = ET.SubElement(username, "user-password")
    user_password.text = kwargs.pop('user_password')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
3,515
def get_properties(self, feature): if not isinstance(feature, b2.build.feature.Feature): feature = b2.build.feature.get(feature) assert isinstance(feature, b2.build.feature.Feature) result = [] for p in self.all_: if p.feature == feature: result.append(p) return result
Returns all contained properties associated with 'feature'.
3,516
def hflip(img):
    if not _is_pil_image(img):
        # error message assumed; the original literal was stripped
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    return img.transpose(Image.FLIP_LEFT_RIGHT)
Horizontally flip the given PIL Image. Args: img (PIL Image): Image to be flipped. Returns: PIL Image: Horizontally flipped image.
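For illustration, a small hedged usage sketch (it assumes the _is_pil_image helper accepts a plain Pillow image; everything else is standard PIL):

from PIL import Image

img = Image.new("RGB", (4, 2), color="white")
img.putpixel((0, 0), (255, 0, 0))      # red pixel at the left edge
flipped = hflip(img)
print(flipped.getpixel((3, 0)))        # (255, 0, 0): now at the right edge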
3,517
def table_add(tab, data, col):
    x = []
    for i in range(len(data)):
        if col not in data[i]:
            temp = ''   # fill value assumed; the original literal was stripped
        else:
            temp = data[i][col]
        if temp is None:
            temp = ''
        x.append(temp)
    # status message assumed
    print('Adding column {}'.format(col))
    tab.add_column(Column(x, name=col))
Function to parse dictionary list **data** and add the data to table **tab** for column **col** Parameters ---------- tab: Table class Table to store values data: list Dictionary list from the SQL query col: str Column name (ie, dictionary key) for the column to add
3,518
def lintersects(self, span): if isinstance(span, list): return [sp for sp in span if self._lintersects(sp)] return self._lintersects(span)
If this span intersects the left (starting) side of the given span.
3,519
def reordi(iorder, ndim, array): iorder = stypes.toIntVector(iorder) ndim = ctypes.c_int(ndim) array = stypes.toIntVector(array) libspice.reordi_c(iorder, ndim, array) return stypes.cVectorToPython(array)
Re-order the elements of an integer array according to a given order vector. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/reordi_c.html :param iorder: Order vector to be used to re-order array. :type iorder: Array of ints :param ndim: Dimension of array. :type ndim: int :param array: Array to be re-ordered. :type array: Array of ints :return: Re-ordered Array. :rtype: Array of ints
3,520
def unpublish(scm, published_branch, verbose, fake):
    # message strings assumed; the original literals were stripped
    scm.fake = fake
    scm.verbose = fake or verbose
    scm.repo_check(require_remote=True)
    branch = scm.fuzzy_match_branch(published_branch)
    if not branch:
        scm.display_available_branches()
        raise click.BadArgumentUsage('Please specify a branch to unpublish.')
    branch_names = scm.get_branch_names(local=False)
    if branch not in branch_names:
        raise click.BadArgumentUsage(
            "Branch {0} is not published. Use a branch that is published."
            .format(crayons.yellow(branch)))
    status_log(scm.unpublish_branch,
               'Unpublishing {0}.'.format(crayons.yellow(branch)), branch)
Removes a published branch from the remote repository.
3,521
def share(track_id=None, url=None, users=None): client = get_client() if url: track_id = client.get(, url=url).id if not users: return client.get( % track_id) permissions = {: []} for username in users: user = settings.users.get(username, None) if user: permissions[].append(user[]) else: user = client.get(, url= % username) permissions[].append(user.id) settings.users[username] = user.obj settings.save() return client.put( % track_id, permissions=permissions)
Returns list of users track has been shared with. Either track or url need to be provided.
3,522
def delete_host(zone, name, nameserver='127.0.0.1', timeout=5, port=53, **kwargs):
    # string literals assumed; the originals were stripped in extraction
    fqdn = '{0}.{1}'.format(name, zone)
    request = dns.message.make_query(fqdn, 'A')
    answer = dns.query.udp(request, nameserver, timeout, port)
    try:
        ips = [i.address for i in answer.answer[0].items]
    except IndexError:
        ips = []
    res = delete(zone, name, nameserver=nameserver, timeout=timeout,
                 port=port, **kwargs)
    fqdn = fqdn + '.'
    for ip in ips:
        parts = ip.split('.')[::-1]
        popped = []
        # Iterate over possible reverse zones, most specific first.
        while len(parts) > 1:
            p = parts.pop(0)
            popped.append(p)
            zone = '{0}.{1}'.format('.'.join(parts), 'in-addr.arpa.')
            name = '.'.join(popped)
            ptr = delete(zone, name, 'PTR', fqdn, nameserver=nameserver,
                         timeout=timeout, port=port, **kwargs)
            if ptr:
                res = True
    return res
Delete the forward and reverse records for a host. Returns true if any records are deleted. CLI Example: .. code-block:: bash salt ns1 ddns.delete_host example.com host1
3,523
def add_multiifo_input_list_opt(self, opt, inputs):
    # separator strings assumed from the documented "--opt ifo1:input1 ifo2:input2" format
    self.add_raw_arg(opt)
    self.add_raw_arg(' ')
    for infile in inputs:
        self.add_raw_arg(infile.ifo)
        self.add_raw_arg(':')
        self.add_raw_arg(infile.name)
        self.add_raw_arg(' ')
        self._add_input(infile)
Add an option that determines a list of inputs from multiple detectors. Files will be supplied as --opt ifo1:input1 ifo2:input2 .....
3,524
def watch_from_file(connection, file_name):
    # file mode and field separator assumed; the original literals were stripped
    with open(file_name, 'r') as filehandle:
        for line in filehandle.xreadlines():
            volume, interval, retention = line.rstrip().split(',')
            watch(
                connection,
                get_volume_id(connection, volume),
                interval, retention)
Start watching a new volume :type connection: boto.ec2.connection.EC2Connection :param connection: EC2 connection object :type file_name: str :param file_name: path to config file :returns: None
3,525
def plot_precision_recall_curve(y_true, y_probas, title=, curves=(, ), ax=None, figsize=None, cmap=, title_fontsize="large", text_fontsize="medium"): y_true = np.array(y_true) y_probas = np.array(y_probas) classes = np.unique(y_true) probas = y_probas if not in curves and not in curves: raise ValueError( ) precision = dict() recall = dict() average_precision = dict() for i in range(len(classes)): precision[i], recall[i], _ = precision_recall_curve( y_true, probas[:, i], pos_label=classes[i]) y_true = label_binarize(y_true, classes=classes) if len(classes) == 2: y_true = np.hstack((1 - y_true, y_true)) for i in range(len(classes)): average_precision[i] = average_precision_score(y_true[:, i], probas[:, i]) micro_key = i = 0 while micro_key in precision: i += 1 micro_key += str(i) precision[micro_key], recall[micro_key], _ = precision_recall_curve( y_true.ravel(), probas.ravel()) average_precision[micro_key] = average_precision_score(y_true, probas, average=) if ax is None: fig, ax = plt.subplots(1, 1, figsize=figsize) ax.set_title(title, fontsize=title_fontsize) if in curves: for i in range(len(classes)): color = plt.cm.get_cmap(cmap)(float(i) / len(classes)) ax.plot(recall[i], precision[i], lw=2, label= .format(classes[i], average_precision[i]), color=color) if in curves: ax.plot(recall[micro_key], precision[micro_key], label= .format(average_precision[micro_key]), color=, linestyle=, linewidth=4) ax.set_xlim([0.0, 1.0]) ax.set_ylim([0.0, 1.05]) ax.set_xlabel() ax.set_ylabel() ax.tick_params(labelsize=text_fontsize) ax.legend(loc=, fontsize=text_fontsize) return ax
Generates the Precision Recall Curve from labels and probabilities Args: y_true (array-like, shape (n_samples)): Ground truth (correct) target values. y_probas (array-like, shape (n_samples, n_classes)): Prediction probabilities for each class returned by a classifier. curves (array-like): A listing of which curves should be plotted on the resulting plot. Defaults to `("micro", "each_class")` i.e. "micro" for micro-averaged curve ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to plot the curve. If None, the plot is drawn on a new set of axes. figsize (2-tuple, optional): Tuple denoting figure size of the plot e.g. (6, 6). Defaults to ``None``. cmap (string or :class:`matplotlib.colors.Colormap` instance, optional): Colormap used for plotting the projection. View Matplotlib Colormap documentation for available options. https://matplotlib.org/users/colormaps.html title_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "large". text_fontsize (string or int, optional): Matplotlib-style fontsizes. Use e.g. "small", "medium", "large" or integer-values. Defaults to "medium". Returns: ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was drawn. Example: >>> import scikitplot.plotters as skplt >>> nb = GaussianNB() >>> nb = nb.fit(X_train, y_train) >>> y_probas = nb.predict_proba(X_test) >>> skplt.plot_precision_recall_curve(y_test, y_probas) <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490> >>> plt.show() .. image:: _static/examples/plot_precision_recall_curve.png :align: center :alt: Precision Recall Curve
3,526
def assign(self, value, termenc):
    if self._termenc != termenc:
        # errors= value assumed; the original literal was stripped
        self._decoder = codecs.getincrementaldecoder(termenc)(errors='replace')
        self._termenc = termenc
    self._data = self._decoder.decode(value)
>>> scanner = DefaultScanner() >>> scanner.assign("01234", "ascii") >>> scanner._data u'01234'
3,527
def active_tcp(): * if __grains__[] == : return salt.utils.network.active_tcp() elif __grains__[] == : ret = {} for connection in _netstat_sunos(): if not connection[].startswith(): continue if connection[] != : continue ret[len(ret)+1] = { : .join(connection[].split()[:-1]), : .join(connection[].split()[-1:]), : .join(connection[].split()[:-1]), : .join(connection[].split()[-1:]) } return ret elif __grains__[] == : ret = {} for connection in _netstat_aix(): if not connection[].startswith(): continue if connection[] != : continue ret[len(ret)+1] = { : .join(connection[].split()[:-1]), : .join(connection[].split()[-1:]), : .join(connection[].split()[:-1]), : .join(connection[].split()[-1:]) } return ret else: return {}
Return a dict containing information on all of the running TCP connections (currently linux and solaris only) .. versionchanged:: 2015.8.4 Added support for SunOS CLI Example: .. code-block:: bash salt '*' network.active_tcp
3,528
def to_bool(s):
    # accepted literal lists assumed from the docstring; the originals were stripped
    if isinstance(s, bool):
        return s
    elif s.lower() in ['true', '1']:
        return True
    elif s.lower() in ['false', '0']:
        return False
    else:
        raise ValueError("Can't convert '%s' to bool" % (s))
Convert string `s` into a boolean. `s` can be 'true', 'True', 1, 'false', 'False', 0. Examples: >>> to_bool("true") True >>> to_bool("0") False >>> to_bool(True) True
3,529
def _update(self, layer=None): meta = getattr(self, ModelBase._meta_attr) if not layer: layers = self.layers else: layers = _listify(layer) for layer in layers: path = os.path.abspath(os.path.join(meta.modelpath, layer)) getattr(self, layer).load(path)
Update layers in model.
3,530
def cmd_set(context): if context.value in EFConfig.SPECIAL_VERSIONS and context.env_short not in EFConfig.SPECIAL_VERSION_ENVS: fail("special version: {} not allowed in env: {}".format(context.value, context.env_short)) if context.value in EFConfig.SPECIAL_VERSIONS and context.stable: fail("special versions such as: {} cannot be marked ".format(context.value)) if context.value == "=prod": context.value = context.versionresolver.lookup("{},{}/{}".format(context.key, "prod", context.service_name)) elif context.value == "=staging": context.value = context.versionresolver.lookup("{},{}/{}".format(context.key, "staging", context.service_name)) elif context.value == "=latest": if not EFConfig.VERSION_KEYS[context.key]["allow_latest"]: fail("=latest cannot be used with key: {}".format(context.key)) func_name = "_getlatest_" + context.key.replace("-", "_") if func_name in globals() and isfunction(globals()[func_name]): context.value = globals()[func_name](context) else: raise RuntimeError("{} version for {}/{} is but cancurrentt do it context.limit = 1 current_version = get_versions(context) ) print("set key: {} with value: {} {} {} {} {}".format( s3_key, context.value, context.build_number, context.commit_hash, context.location, s3_version_status))
Set the new "current" value for a key. If the existing current version and the new version have identical /value/ and /status, then nothing is written, to avoid stacking up redundant entreis in the version table. Args: context: a populated EFVersionContext object
3,531
def get_offset_range(self, row_offset, column_offset): return self._get_range(, rowOffset=row_offset, columnOffset=column_offset)
Gets an object which represents a range that's offset from the specified range. The dimension of the returned range will match this range. If the resulting range is forced outside the bounds of the worksheet grid, an exception will be thrown. :param int row_offset: The number of rows (positive, negative, or 0) by which the range is to be offset. :param int column_offset: The number of columns (positive, negative, or 0) by which the range is to be offset. :return: Range
3,532
def Async(f, n=None, timeout=None): return threads(n=n, timeout=timeout)(f)
Concise usage for pool.submit. Basic Usage Asnyc & threads :: from torequests.main import Async, threads import time def use_submit(i): time.sleep(i) result = 'use_submit: %s' % i print(result) return result @threads() def use_decorator(i): time.sleep(i) result = 'use_decorator: %s' % i print(result) return result new_use_submit = Async(use_submit) tasks = [new_use_submit(i) for i in (2, 1, 0) ] + [use_decorator(i) for i in (2, 1, 0)] print([type(i) for i in tasks]) results = [i.x for i in tasks] print(results) # use_submit: 0 # use_decorator: 0 # [<class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>, <class 'torequests.main.NewFuture'>] # use_submit: 1 # use_decorator: 1 # use_submit: 2 # use_decorator: 2 # ['use_submit: 2', 'use_submit: 1', 'use_submit: 0', 'use_decorator: 2', 'use_decorator: 1', 'use_decorator: 0']
3,533
def bus_inspector(self, bus, message): if message.get_interface() != self.screensaver_uri: return True member = message.get_member() if member in ("SessionIdleChanged", "ActiveChanged"): logger.debug("%s -> %s" % (member, message.get_args_list())) idle_state = message.get_args_list()[0] if idle_state: self.idle_from = dt.datetime.now() if member == "SessionIdleChanged": delay_key = "/apps/gnome-screensaver/idle_delay" else: delay_key = "/desktop/gnome/session/idle_delay" client = gconf.Client.get_default() self.timeout_minutes = client.get_int(delay_key) else: self.screen_locked = False self.idle_from = None if member == "ActiveChanged": def dispatch_active_changed(idle_state): if not self.idle_was_there: self.emit(, idle_state) self.idle_was_there = False gobject.timeout_add_seconds(1, dispatch_active_changed, idle_state) else: self.idle_was_there = True self.emit(, idle_state) elif member == "Lock": logger.debug("Screen Lock Requested") self.screen_locked = True return
Inspect the bus for screensaver messages of interest
3,534
def _session(self): if self._http_session is None: self._http_session = requests.Session() self._http_session.headers.update(self._get_headers()) self._http_session.verify = self._verify_https_request() if all(self._credentials): username, password = self._credentials self._http_session.auth = requests_ntlm.HttpNtlmAuth( username=username, password=password) return self._http_session
The current session used by the client. The Session object allows you to persist certain parameters across requests. It also persists cookies across all requests made from the Session instance, and will use urllib3's connection pooling. So if you're making several requests to the same host, the underlying TCP connection will be reused, which can result in a significant performance increase.
3,535
def tag_remove(self, *tags):
    # spec key assumed: 'tags' (original literal was stripped)
    return View({**self.spec, 'tags': list(set(self.tags) - set(tags))})
Return a view with the specified tags removed
3,536
def google_poem(self, message, topic): r = requests.get("http://www.google.com/complete/search?output=toolbar&q=" + topic + "%20") xmldoc = minidom.parseString(r.text) item_list = xmldoc.getElementsByTagName("suggestion") context = {"topic": topic, "lines": [x.attributes["data"].value for x in item_list[:4]]} self.say(rendered_template("gpoem.html", context), message, html=True)
make a poem about __: show a google poem about __
3,537
def find_module(self, fullname, path=None):
    # 'pygal.maps' prefix assumed from the docstring; the original literals were stripped
    if fullname.startswith('pygal.maps.') and hasattr(
            maps, fullname.split('.')[2]):
        return self
    return None
Tell if the module to load can be loaded by the load_module function, ie: if it is a ``pygal.maps.*`` module.
3,538
def savefig(writekey, dpi=None, ext=None):
    # rcParams key, warning text and the final save call are reconstructions;
    # the original string literals were stripped and the tail was truncated
    if dpi is None:
        if not isinstance(rcParams['savefig.dpi'], str) and rcParams['savefig.dpi'] < 150:
            if settings._low_resolution_warning:
                logg.warn('You are using a low resolution (dpi < 150); '
                          'consider raising rcParams["savefig.dpi"].')
                settings._low_resolution_warning = False
        else:
            dpi = rcParams['savefig.dpi']
    if not os.path.exists(settings.figdir):
        os.makedirs(settings.figdir)
    if settings.figdir[-1] != '/':
        settings.figdir += '/'
    if ext is None:
        ext = settings.file_format_figs
    filename = settings.figdir + writekey + settings.plot_suffix + '.' + ext
    pl.savefig(filename, dpi=dpi)
Save current figure to file. The `filename` is generated as follows: filename = settings.figdir + writekey + settings.plot_suffix + '.' + settings.file_format_figs
3,539
def set_mode(self, mode):
    # key sequences assumed; the original literals were stripped
    keys = '\033'  # ESC first, to return to normal mode
    if mode == 'normal':
        pass
    elif mode == 'insert':
        keys += 'i'
    elif mode == 'command':
        keys += ':'
    elif mode == 'visual':
        keys += 'v'
    elif mode == 'visual-block':
        keys += 'V'
    else:
        raise ValueError('mode {0} is not supported'.format(mode))
    self.send_keys(keys)
Set *Vim* mode to ``mode``. Supported modes: * ``normal`` * ``insert`` * ``command`` * ``visual`` * ``visual-block`` This method behave as setter-only property. Example: >>> import headlessvim >>> with headlessvim.open() as vim: ... vim.set_mode('insert') ... vim.mode = 'normal' # also accessible as property ... :param string mode: *Vim* mode to set :raises ValueError: if ``mode`` is not supported
3,540
def insert(self, tag, identifier, parent, data): if self.global_plate_definitions.contains(identifier): raise KeyError("Identifier {} already exists in tree".format(identifier)) self.global_plate_definitions.create_node(tag=tag, identifier=identifier, parent=parent, data=data) with switch_db(MetaDataModel, ): meta_data = MetaDataModel(tag=tag, parent=parent, data=data) meta_data.save() logging.info("Meta data {} inserted".format(identifier))
Insert the given meta data into the database :param tag: The tag (equates to meta_data_id) :param identifier: The identifier (a combination of the meta_data_id and the plate value) :param parent: The parent plate identifier :param data: The data (plate value) :return: None
3,541
def _get_args_for_reloading():
    rv = [sys.executable]
    py_script = sys.argv[0]
    if os.name == 'nt' and not os.path.exists(py_script) and \
            os.path.exists(py_script + '.exe'):
        py_script += '.exe'
    rv.append(py_script)
    rv.extend(sys.argv[1:])
    return rv
Returns the executable. This contains a workaround for windows if the executable is incorrectly reported to not have the .exe extension which can cause bugs on reloading.
3,542
def _validate_row_label(dataset, label=None, default_label='__id'):
    # The default label name and the add-row-number branch are reconstructed
    # from the docstring; the original code here was truncated.
    if not label:
        label = default_label
        while label in dataset.column_names():
            label += '_'
        dataset = dataset.add_row_number(column_name=label)
    else:
        if not dataset[label].dtype in (str, int):
            raise TypeError("Row labels must be integers or strings.")
    return dataset, label
Validate a row label column. If the row label is not specified, a column is created with row numbers, named with the string in the `default_label` parameter. Parameters ---------- dataset : SFrame Input dataset. label : str, optional Name of the column containing row labels. default_label : str, optional The default column name if `label` is not specified. A column with row numbers is added to the output SFrame in this case. Returns ------- dataset : SFrame The input dataset, but with an additional row label column, *if* there was no input label. label : str The final label column name.
3,543
def get_view_attr(view, key, default=None, cls_name=None): ns = view_namespace(view, cls_name) if ns: if ns not in _views_attr: return default return _views_attr[ns].get(key, default) return default
Get the attributes that was saved for the view :param view: object (class or instance method) :param key: string - the key :param default: mixed - the default value :param cls_name: str - To pass the class name associated to the view in the case of decorators that may not give the real class name :return: mixed
3,544
def add_columns(self, layers: Union[np.ndarray, Dict[str, np.ndarray], loompy.LayerManager], col_attrs: Dict[str, np.ndarray], *, row_attrs: Dict[str, np.ndarray] = None, fill_values: Dict[str, np.ndarray] = None) -> None: if self._file.mode != "r+": raise IOError("Cannot add columns when connected in read-only mode") is_new = self.shape == (0, 0) if is_new: if row_attrs is None: raise ValueError("row_attrs must be provided when adding to an empty (new) Loom file") for k, v in row_attrs.items(): self.ra[k] = v self.shape = (self.ra[k].shape[0], self.shape[1]) if len(self.ca) == 0: for k, v in col_attrs.items(): self.ca[k] = np.zeros(0, v.dtype) layers_dict: Dict[str, np.ndarray] = {} if isinstance(layers, np.ndarray): layers_dict = {"": layers} elif isinstance(layers, loompy.LayerManager): layers_dict = {k: v[:, :] for k, v in layers.items()} elif isinstance(layers, dict): layers_dict = layers else: raise ValueError("Invalid type for layers argument") n_cols = 0 for layer, matrix in layers_dict.items(): if not is_new and layer not in self.layers.keys(): raise ValueError(f"Layer {layer} does not exist in the target loom file") if matrix.shape[0] != self.shape[0]: raise ValueError(f"Layer {layer} has {matrix.shape[0]} rows but file has {self.shape[0]}") if n_cols == 0: n_cols = matrix.shape[1] elif matrix.shape[1] != n_cols: raise ValueError(f"Layer {layer} has {matrix.shape[1]} columns but the first layer had {n_cols}") did_remove = False todel = [] for key, vals in col_attrs.items(): if key not in self.col_attrs: if fill_values is not None: if fill_values == "auto": fill_with = np.zeros(1, dtype=col_attrs[key].dtype)[0] else: fill_with = fill_values[key] self.ca[key] = np.array([fill_with] * self.shape[1]) else: did_remove = True todel.append(key) if len(vals) != n_cols: raise ValueError(f"Each column attribute must have exactly {n_cols} values, but {key} had {len(vals)}") for key in todel: del col_attrs[key] if did_remove: logging.debug("Some column attributes were removed: " + ",".join(todel)) todel = [] did_remove = False for key in self.col_attrs.keys(): if key not in col_attrs: if fill_values is not None: if fill_values == "auto": fill_with = np.zeros(1, dtype=self.col_attrs[key].dtype)[0] else: fill_with = fill_values[key] col_attrs[key] = np.array([fill_with] * n_cols) else: did_remove = True todel.append(key) for key in todel: del self.ca[key] if did_remove: logging.debug("Some column attributes were removed: " + ",".join(todel)) if is_new: for k, v in layers_dict.items(): self.layers[k] = v for k, v in col_attrs.items(): self.ca[k] = v else: n_cols = n_cols + self.shape[1] old_n_cols = self.shape[1] self.shape = (self.shape[0], n_cols) todel = [] for key, vals in col_attrs.items(): if vals.shape[1:] != self.col_attrs[key].shape[1:]: logging.debug(f"Removing attribute {key} because shape {vals.shape} did not match existing shape {self.col_attrs[key].shape} beyond first dimension") todel.append(key) else: self.ca[key] = np.concatenate([self.ca[key], vals]) for key in todel: del self.ca[key] for key in self.layers.keys(): self.layers[key]._resize(n_cols, axis=1) self.layers[key][:, old_n_cols:n_cols] = layers_dict[key] self._file.flush()
Add columns of data and attribute values to the dataset. Args: layers (dict or numpy.ndarray or LayerManager): Either: 1) A N-by-M matrix of float32s (N rows, M columns) in this case columns are added at the default layer 2) A dict {layer_name : matrix} specified so that the matrix (N, M) will be added to layer `layer_name` 3) A LayerManager object (such as what is returned by view.layers) col_attrs (dict): Column attributes, where keys are attribute names and values are numpy arrays (float or string) of length M row_attrs (dict): Optional row attributes, where keys are attribute names and values are numpy arrays (float or string) of length M fill_values: dictionary of values to use if a column attribute is missing, or "auto" to fill with zeros or empty strings Returns: Nothing. Notes ----- - This will modify the underlying HDF5 file, which will interfere with any concurrent readers. - Column attributes in the file that are NOT provided, will be deleted (unless fill value provided). - Array with Nan should not be provided
3,545
def unload(module):
    # Salt call names and dict keys assumed; the original literals were stripped
    ret = {}
    fmadm = _check_fmadm()
    cmd = '{cmd} unload {module}'.format(
        cmd=fmadm,
        module=module
    )
    res = __salt__['cmd.run_all'](cmd)
    retcode = res['retcode']
    result = {}
    if retcode != 0:
        result['Error'] = res['stderr']
    else:
        result = True
    return result
Unload specified fault manager module module: string module to unload CLI Example: .. code-block:: bash salt '*' fmadm.unload software-response
3,546
def _already_resized_on_flickr(self,fn,pid,_megapixels): logger.debug("%s - resize requested"%(fn)) width_flickr,height_flickr=self._getphoto_originalsize(pid) new_width,new_height=pusher_utils.resize_compute_width_height(\ fn,_megapixels) if width_flickr==new_width and height_flickr==new_height: return True elif not new_width: return True return False
Checks if image file (fn) with photo_id (pid) has already been resized on flickr. If so, returns True
3,547
def from_pubsec_file(cls: Type[SigningKeyType], path: str) -> SigningKeyType:
    # file mode and exception messages assumed; the original literals were stripped
    with open(path, 'r') as fh:
        pubsec_content = fh.read()
    regex_pubkey = compile("pub: ([1-9A-HJ-NP-Za-km-z]{43,44})", MULTILINE)
    regex_signkey = compile("sec: ([1-9A-HJ-NP-Za-km-z]{88,90})", MULTILINE)
    match = search(regex_pubkey, pubsec_content)
    if not match:
        raise Exception('Error: bad format, public key not found')
    match = search(regex_signkey, pubsec_content)
    if not match:
        raise Exception('Error: bad format, secret key not found')
    signkey_hex = match.groups()[0]
    seed = bytes(Base58Encoder.decode(signkey_hex)[0:32])
    return cls(seed)
Return SigningKey instance from Duniter WIF file :param path: Path to WIF file
3,548
def lml(self): reml = (self._logdetXX() - self._logdetH()) / 2 if self._optimal["scale"]: lml = self._lml_optimal_scale() else: lml = self._lml_arbitrary_scale() return lml + reml
Log of the marginal likelihood. Returns ------- lml : float Log of the marginal likelihood. Notes ----- The log of the marginal likelihood is given by :: 2⋅log(p(𝐲)) = -n⋅log(2π) - n⋅log(s) - log|D| - (Qᵀ𝐲)ᵀs⁻¹D⁻¹(Qᵀ𝐲) + (Qᵀ𝐲)ᵀs⁻¹D⁻¹(QᵀX𝜷)/2 - (QᵀX𝜷)ᵀs⁻¹D⁻¹(QᵀX𝜷). By using the optimal 𝜷, the log of the marginal likelihood can be rewritten as:: 2⋅log(p(𝐲)) = -n⋅log(2π) - n⋅log(s) - log|D| + (Qᵀ𝐲)ᵀs⁻¹D⁻¹Qᵀ(X𝜷-𝐲). In the extreme case where 𝜷 is such that 𝐲 = X𝜷, the maximum is attained as s→0. For optimals 𝜷 and s, the log of the marginal likelihood can be further simplified to :: 2⋅log(p(𝐲; 𝜷, s)) = -n⋅log(2π) - n⋅log s - log|D| - n.
3,549
def _send_request(self, enforce_json, method, raise_for_status, url, **kwargs):
    # header names assumed; the original literals were stripped
    r = self.session.request(method, url, **kwargs)
    if raise_for_status:
        r.raise_for_status()
    if enforce_json:
        if 'application/json' in self.session.headers.get('Accept', ''):
            try:
                r.json()
            except ValueError as e:
                raise PanCloudError(
                    "Invalid JSON: {}".format(e)
                )
    return r
Send HTTP request. Args: enforce_json (bool): Require properly-formatted JSON or raise :exc:`~pancloud.exceptions.PanCloudError`. Defaults to ``False``. method (str): HTTP method. raise_for_status (bool): If ``True``, raises :exc:`~pancloud.exceptions.HTTPError` if status_code not in 2XX. Defaults to ``False``. url (str): Request URL. **kwargs (dict): Re-packed key-word arguments. Returns: requests.Response: Requests Response() object
3,550
def __set_private_key(self, pk): self.__private_key = pk self.__public_key = pk.public_key()
Internal method that sets the specified private key :param pk: private key to set :return: None
3,551
def setActiveState(self, active): st = DISABLED if active: st = NORMAL self.entry.configure(state=st) self.inputLabel.configure(state=st) self.promptLabel.configure(state=st)
Use this to enable or disable (grey out) a parameter.
3,552
def read_local_conf(local_conf): log = logging.getLogger(__name__) log.info(, local_conf) try: config = read_config(os.path.dirname(local_conf), ) except HandledError: log.warning() return dict() return {k[4:]: v for k, v in config.items() if k.startswith() and not k[4:].startswith()}
Search for conf.py in any rel_source directory in CWD and if found read it and return. :param str local_conf: Path to conf.py to read. :return: Loaded conf.py. :rtype: dict
3,553
def get_element_types(obj, **kwargs): max_iterable_length = kwargs.get(, 10000) consume_generator = kwargs.get(, False) if not isiterable(obj): return None if isgenerator(obj) and not consume_generator: return None t = get_types(obj, **kwargs) if not t[]: if t[]: return "Element types: {}".format(.join([extract_type(t) for t in t[]])) else: return None else: return "Element types: {}".format(.join([extract_type(t) for t in t[]])) + " (based on first {} elements.)".format(max_iterable_length)
Get element types as a set.
3,554
def disconnect(self, forced=False):
    # log message and the per-piper disconnect call are reconstructed from the
    # docstring; the original body was truncated.
    reversed_postorder = reversed(self.postorder())
    self.log.debug('%s trying to disconnect in the order: %s' %
                   (repr(self), repr(reversed_postorder)))
    for piper in reversed_postorder:
        if piper.connected:
            piper.disconnect(forced)
Given the pipeline topology disconnects ``Pipers`` in the order output -> input. This also disconnects inputs. See ``Dagger.connect``, ``Piper.connect`` and ``Piper.disconnect``. If "forced" is ``True`` ``NuMap`` instances will be emptied. Arguments: - forced(``bool``) [default: ``False``] If set ``True`` all tasks from all ``NuMaps`` instances used in the ``Dagger`` will be removed even if they did not belong to this ``Dagger``.
3,555
def data(self, **query): objects = self.cache[] data = self.api.data.get(**query)[] data_objects = [] for d in data: _id = d[] if _id in objects: objects[_id].update(d) else: objects[_id] = GenData(d, self) data_objects.append(objects[_id]) for d in data_objects: count += 1 while True: ref_annotation = {} remove_annotation = [] for path, ann in d.annotation.items(): if ann[].startswith(): _id = ann[] if _id not in objects: try: d_tmp = self.api.data(_id).get() except slumber.exceptions.HttpClientError as ex: if ex.response.status_code == 404: continue else: raise ex objects[_id] = GenData(d_tmp, self) annotation = objects[_id].annotation ref_annotation.update({path + + k: v for k, v in annotation.items()}) remove_annotation.append(path) if ref_annotation: d.annotation.update(ref_annotation) for path in remove_annotation: del d.annotation[path] else: break return data_objects
Query for Data object annotation.
3,556
def sshpull(host, maildir, localmaildir, noop=False, verbose=False, filterfile=None): store = _SSHStore(host, maildir) _pull(store, localmaildir, noop, verbose, filterfile)
Pull a remote maildir to the local one.
3,557
def add_zoom_buttons(viewer, canvas=None, color=): def zoom(box, canvas, event, pt, viewer, n): zl = viewer.get_zoom() zl += n if zl == 0.0: zl += n viewer.zoom_to(zl + n) def add_buttons(viewer, canvas, tag): objs = [] wd, ht = viewer.get_window_size() SquareBox = canvas.get_draw_class() Text = canvas.get_draw_class() Compound = canvas.get_draw_class() x1, y1 = wd - 20, ht // 2 + 20 zoomin = SquareBox(x1, y1, 15, color=, fill=True, fillcolor=, fillalpha=0.5, coord=) zoomin.editable = False zoomin.pickable = True zoomin.add_callback(, zoom, viewer, 1) objs.append(zoomin) x2, y2 = wd - 20, ht // 2 - 20 zoomout = SquareBox(x2, y2, 15, color=, fill=True, fillcolor=, fillalpha=0.5, coord=) zoomout.editable = False zoomout.pickable = True zoomout.add_callback(, zoom, viewer, -1) objs.append(zoomout) objs.append(Text(x1 - 4, y1 + 6, text=, fontsize=18, color=color, coord=)) objs.append(Text(x2 - 4, y2 + 6, text=, fontsize=18, color=color, coord=)) obj = Compound(*objs) obj.opaque = False canvas.add(obj, tag=tag) def zoom_resize(viewer, width, height, canvas, tag): try: canvas.get_object_by_tag(tag) except KeyError: return False canvas.delete_object_by_tag(tag) add_buttons(viewer, canvas, tag) tag = if canvas is None: canvas = viewer.get_private_canvas() canvas.ui_set_active(True) canvas.register_for_cursor_drawing(viewer) canvas.set_draw_mode() viewer.add_callback(, zoom_resize, canvas, tag) add_buttons(viewer, canvas, tag)
Add zoom buttons to a canvas. Parameters ---------- viewer : an ImageView subclass instance If True, show the color bar; else remove it if present. canvas : a DrawingCanvas instance The canvas to which the buttons should be added. If not supplied defaults to the private canvas of the viewer. color : str A color name, hex triplet. The default is 'black'.
3,558
def to_json(obj):
    try:
        # separators assumed; the original literal was stripped
        return json.dumps(obj,
                          default=lambda obj: {k.lower(): v for k, v in obj.__dict__.items()},
                          sort_keys=False, separators=(',', ':')).encode()
    except Exception as e:
        logger.info("to_json: %s %s", e, obj)
Convert obj to json. Used mostly to convert the classes in json_span.py until we switch to nested dicts (or something better) :param obj: the object to serialize to json :return: json string
3,559
def simple_attention(memory, att_size, mask, keep_prob=1.0, scope="simple_attention"): with tf.variable_scope(scope): BS, ML, MH = tf.unstack(tf.shape(memory)) memory_do = tf.nn.dropout(memory, keep_prob=keep_prob, noise_shape=[BS, 1, MH]) logits = tf.layers.dense(tf.layers.dense(memory_do, att_size, activation=tf.nn.tanh), 1, use_bias=False) logits = softmax_mask(tf.squeeze(logits, [2]), mask) att_weights = tf.expand_dims(tf.nn.softmax(logits), axis=2) res = tf.reduce_sum(att_weights * memory, axis=1) return res
Simple attention without any conditions. Computes weighted sum of memory elements.
3,560
def default_styles(): styles = {} def _add_style(name, **kwargs): styles[name] = _create_style(name, **kwargs) _add_style(, family=, fontsize=, fontweight=, ) _add_style(, family=, fontsize=, fontweight=, ) _add_style(, family=, fontsize=, fontweight=, ) _add_style(, family=, fontsize=, fontweight=, ) _add_style(, family=, fontsize=, fontweight=, ) _add_style(, family=, fontsize=, fontweight=, ) _add_style(, family=, fontsize=, marginbottom=, ) _add_style(, family=, fontsize=, fontweight=, fontfamily=, color=, ) _add_style(, family=, fontsize=, fontstyle=, ) _add_style(, family=, fontsize=, marginbottom=, ) _add_style(, family=, fontsize=, marginbottom=, ) _add_style(, family=, fontsize=, marginbottom=, ) _add_style(, family=, fontsize=, ) _add_style(, family=, fontstyle=, fontsize=, ) _add_style(, family=, fontweight=, fontsize=, ) _add_style(, family=, fontsize=, fontweight=, fontfamily=, ) _add_style(, family=, fontsize=, fontweight=, fontfamily=, color=, ) styles[] = _numbered_style() return styles
Generate default ODF styles.
3,561
def build_parser(self, context): context.parser, context.max_level = self._create_parser(context)
Create the final argument parser. This method creates the non-early (full) argparse argument parser. Unlike the early counterpart it is expected to have knowledge of the full command tree. This method relies on ``context.cmd_tree`` and produces ``context.parser``. Other ingredients can interact with the parser up until :meth:`parse()` is called.
3,562
def resolve_response_data(head_key, data_key, data): new_data = [] if isinstance(data, list): for data_row in data: if head_key in data_row and data_key in data_row[head_key]: if isinstance(data_row[head_key][data_key], list): new_data += data_row[head_key][data_key] else: new_data.append(data_row[head_key][data_key]) elif data_key in data_row: return data_row[data_key] else: if head_key in data and data_key in data[head_key]: new_data += data[head_key][data_key] elif data_key in data: return data[data_key] return new_data
Resolves the responses you get from billomat If you have done a get_one_element request then you will get a dictionary If you have done a get_all_elements request then you will get a list with all elements in it :param head_key: the head key e.g: CLIENTS :param data_key: the data key e.g: CLIENT :param data: the responses you got :return: dict or list
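A small worked example of the resolution logic described above (the payload is invented for illustration and mirrors the head-key/data-key nesting the code expects):

pages = [
    {"CLIENTS": {"CLIENT": [{"id": 1}, {"id": 2}]}},
    {"CLIENTS": {"CLIENT": {"id": 3}}},
]
print(resolve_response_data("CLIENTS", "CLIENT", pages))
# [{'id': 1}, {'id': 2}, {'id': 3}]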
3,563
def uncomment_or_update_or_append_line(filename, prefix, new_line, comment='#',
                                       keep_backup=True,
                                       update_or_append_line=update_or_append_line):
    # default comment character assumed; the original literal was stripped
    uncommented = update_or_append_line(filename, prefix=comment + prefix,
                                        new_line=new_line,
                                        keep_backup=keep_backup, append=False)
    if not uncommented:
        update_or_append_line(filename, prefix, new_line,
                              keep_backup=keep_backup, append=True)
Remove the comment of an commented out line and make the line "active". If such an commented out line not exists it would be appended.
3,564
import difflib

def autocorrect(query, possibilities, delta=0.75):
    # The matching step was truncated in extraction; this reconstruction uses
    # difflib.get_close_matches as described in the docstring.
    possibilities = [possibility.lower() for possibility in possibilities]
    matches = difflib.get_close_matches(query.lower(), possibilities, cutoff=delta)
    assert matches, 'No matches found for query: {}'.format(query)
    return matches[0]
Attempts to figure out what possibility the query is This autocorrect function is rather simple right now with plans for later improvement. Right now, it just attempts to finish spelling a word as much as possible, and then determines which possibility is closest to said word. Args: query (unicode): query to attempt to complete possibilities (list): list of unicodes of possible answers for query delta (float): Minimum delta similarity between query and any given possibility for possibility to be considered. Delta used by difflib.get_close_matches(). Returns: unicode: best guess of correct answer Raises: AssertionError: raised if no matches found Example: .. code-block:: Python >>> autocorrect('bowtei', ['bowtie2', 'bot']) 'bowtie2'
3,565
def _filter_by_moys_slow(self, moys): _filt_values = [] _filt_datetimes = [] for i, d in enumerate(self.datetimes): if d.moy in moys: _filt_datetimes.append(d) _filt_values.append(self._values[i]) return _filt_values, _filt_datetimes
Filter the Data Collection with a slow method that always works.
3,566
def run_linters(files): data = {} for file_type, file_list in list(files.items()): linter = LintFactory.get_linter(file_type) if linter is not None: data[file_type] = linter.run(file_list) return data
Run through file list, and try to find a linter that matches the given file type. If it finds a linter, it will run it, and store the resulting data in a dictionary (keyed to file_type). :param files: :return: {file_extension: lint_data}
3,567
def query_relations(self, environment_id, collection_id, entities=None, context=None, sort=None, filter=None, count=None, evidence_count=None, **kwargs): if environment_id is None: raise ValueError() if collection_id is None: raise ValueError() if entities is not None: entities = [ self._convert_model(x, QueryRelationsEntity) for x in entities ] if context is not None: context = self._convert_model(context, QueryEntitiesContext) if filter is not None: filter = self._convert_model(filter, QueryRelationsFilter) headers = {} if in kwargs: headers.update(kwargs.get()) sdk_headers = get_sdk_headers(, , ) headers.update(sdk_headers) params = {: self.version} data = { : entities, : context, : sort, : filter, : count, : evidence_count } url = .format( *self._encode_path_vars(environment_id, collection_id)) response = self.request( method=, url=url, headers=headers, params=params, json=data, accept_json=True) return response
Knowledge Graph relationship query. See the [Knowledge Graph documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-kg#kg) for more details. :param str environment_id: The ID of the environment. :param str collection_id: The ID of the collection. :param list[QueryRelationsEntity] entities: An array of entities to find relationships for. :param QueryEntitiesContext context: Entity text to provide context for the queried entity and rank based on that association. For example, if you wanted to query the city of London in England your query would look for `London` with the context of `England`. :param str sort: The sorting method for the relationships, can be `score` or `frequency`. `frequency` is the number of unique times each entity is identified. The default is `score`. This parameter cannot be used in the same query as the **bias** parameter. :param QueryRelationsFilter filter: :param int count: The number of results to return. The default is `10`. The maximum is `1000`. :param int evidence_count: The number of evidence items to return for each result. The default is `0`. The maximum number of evidence items per query is 10,000. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse
3,568
def patch(self, *args, **kwargs): return super(Deposit, self).patch(*args, **kwargs)
Patch only drafts. Status required: ``'draft'``. Meta information inside `_deposit` are preserved.
3,569
def _CheckCollation(cursor): cur_collation_connection = _ReadVariable("collation_connection", cursor) if cur_collation_connection != COLLATION: logging.warning("Require MySQL collation_connection of %s, got %s.", COLLATION, cur_collation_connection) cur_collation_database = _ReadVariable("collation_database", cursor) if cur_collation_database != COLLATION: logging.warning( "Require MySQL collation_database of %s, got %s." " To create your database, use: %s", COLLATION, cur_collation_database, CREATE_DATABASE_QUERY)
Checks MySQL collation and warns if misconfigured.
3,570
def built_datetime(self): from datetime import datetime try: return datetime.fromtimestamp(self.state.build_done) except TypeError: return None
Return the built time as a datetime object
3,571
def add_minutes(self, datetimestr, n): a_datetime = self.parse_datetime(datetimestr) return a_datetime + timedelta(seconds=60 * n)
Returns a time that n minutes after a time. :param datetimestr: a datetime object or a datetime str :param n: number of minutes, value can be negative **中文文档** 返回给定日期N分钟之后的时间。
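An illustrative call (hedged: "obj" is a hypothetical instance of the surrounding class, and parse_datetime is assumed to accept ISO-style strings):

print(obj.add_minutes("2014-06-01 12:00:00", 5))   # 2014-06-01 12:05:00
print(obj.add_minutes("2014-06-01 12:00:00", -5))  # negative n goes backwards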
3,572
def merge_ids(self, token, channel, ids, delete=False):
    # URL path and error message assumed; the original literals were stripped
    url = self.url() + "/merge/{}/".format(','.join([str(i) for i in ids]))
    req = self.remote_utils.get_url(url)
    if req.status_code != 200:
        raise RemoteDataUploadError('Could not merge ids {}'.format(
            ','.join([str(i) for i in ids])))
    if delete:
        self.delete_ramon(token, channel, ids[1:])
    return True
Call the restful endpoint to merge two RAMON objects into one. Arguments: token (str): The token to inspect channel (str): The channel to inspect ids (int[]): the list of the IDs to merge delete (bool : False): Whether to delete after merging. Returns: json: The ID as returned by ndstore
3,573
def get_ttext(value):
    m = _non_token_end_matcher(value)
    if not m:
        raise errors.HeaderParseError(
            "expected ttext but found '{}'".format(value))
    ttext = m.group()
    value = value[len(ttext):]
    ttext = ValueTerminal(ttext, 'ttext')
    _validate_xtext(ttext)
    return ttext, value
ttext = <matches _ttext_matcher> We allow any non-TOKEN_ENDS in ttext, but add defects to the token's defects list if we find non-ttext characters. We also register defects for *any* non-printables even though the RFC doesn't exclude all of them, because we follow the spirit of RFC 5322.
3,574
def _add_secondary_if_exists(secondary, out, get_retriever): secondary = [_file_local_or_remote(y, get_retriever) for y in secondary] secondary = [z for z in secondary if z] if secondary: out["secondaryFiles"] = [{"class": "File", "path": f} for f in secondary] return out
Add secondary files only if present locally or remotely.
3,575
def Chen_Friedel(m, x, rhol, rhog, mul, mug, sigma, D, roughness=0, L=1): r v_lo = m/rhol/(pi/4*D**2) Re_lo = Reynolds(V=v_lo, rho=rhol, mu=mul, D=D) fd_lo = friction_factor(Re=Re_lo, eD=roughness/D) dP_lo = fd_lo*L/D*(0.5*rhol*v_lo**2) v_go = m/rhog/(pi/4*D**2) Re_go = Reynolds(V=v_go, rho=rhog, mu=mug, D=D) fd_go = friction_factor(Re=Re_go, eD=roughness/D) F = x**0.78*(1-x)**0.224 H = (rhol/rhog)**0.91*(mug/mul)**0.19*(1 - mug/mul)**0.7 E = (1-x)**2 + x**2*(rhol*fd_go/(rhog*fd_lo)) rho_h = 1./(x/rhog + (1-x)/rhol) Q_h = m/rho_h v_h = Q_h/(pi/4*D**2) Fr = Froude(V=v_h, L=D, squared=True) We = Weber(V=v_h, L=D, rho=rho_h, sigma=sigma) phi_lo2 = E + 3.24*F*H/(Fr**0.0454*We**0.035) dP = phi_lo2*dP_lo Bo = Bond(rhol=rhol, rhog=rhog, sigma=sigma, L=D)/4 if Bo < 2.5: v_g = m*x/rhog/(pi/4*D**2) Re_g = Reynolds(V=v_g, rho=rhog, mu=mug, D=D) Omega = 0.0333*Re_lo**0.45/(Re_g**0.09*(1 + 0.5*exp(-Bo))) else: Omega = We**0.2/(2.5 + 0.06*Bo) return dP*Omega
r'''Calculates two-phase pressure drop with the Chen modification of the Friedel correlation, as given in [1]_ and also shown in [2]_ and [3]_. .. math:: \Delta P = \Delta P_{Friedel}\Omega For Bo < 2.5: .. math:: \Omega = \frac{0.0333Re_{lo}^{0.45}}{Re_g^{0.09}(1 + 0.4\exp(-Bo))} For Bo >= 2.5: .. math:: \Omega = \frac{We^{0.2}}{2.5 + 0.06Bo} Parameters ---------- m : float Mass flow rate of fluid, [kg/s] x : float Quality of fluid, [-] rhol : float Liquid density, [kg/m^3] rhog : float Gas density, [kg/m^3] mul : float Viscosity of liquid, [Pa*s] mug : float Viscosity of gas, [Pa*s] sigma : float Surface tension, [N/m] D : float Diameter of pipe, [m] roughness : float, optional Roughness of pipe for use in calculating friction factor, [m] L : float, optional Length of pipe, [m] Returns ------- dP : float Pressure drop of the two-phase flow, [Pa] Notes ----- Applicable ONLY to mini/microchannels; yields drastically too low pressure drops for larger channels. For more details, see the `Friedel` correlation. It is not explicitly stated in [1]_ how to calculate the liquid mixture density for use in calculation of Weber number; the homogeneous model is assumed as it is used in the Friedel model. The bond number used here is 1/4 the normal value, i.e.: .. math:: Bo = \frac{g(\rho_l-\rho_g)D^2}{4\sigma} Examples -------- >>> Chen_Friedel(m=.0005, x=0.9, rhol=950., rhog=1.4, mul=1E-3, mug=1E-5, ... sigma=0.02, D=0.003, roughness=0, L=1) 6249.247540588871 References ---------- .. [1] Chen, Ing Youn, Kai-Shing Yang, Yu-Juei Chang, and Chi-Chung Wang. "Two-Phase Pressure Drop of Air–water and R-410A in Small Horizontal Tubes." International Journal of Multiphase Flow 27, no. 7 (July 2001): 1293-99. doi:10.1016/S0301-9322(01)00004-0. .. [2] Kim, Sung-Min, and Issam Mudawar. "Universal Approach to Predicting Two-Phase Frictional Pressure Drop for Adiabatic and Condensing Mini/ Micro-Channel Flows." International Journal of Heat and Mass Transfer 55, no. 11–12 (May 2012): 3246-61. doi:10.1016/j.ijheatmasstransfer.2012.02.047. .. [3] Choi, Kwang-Il, A. S. Pamitran, Chun-Young Oh, and Jong-Taek Oh. "Two-Phase Pressure Drop of R-410A in Horizontal Smooth Minichannels." International Journal of Refrigeration 31, no. 1 (January 2008): 119-29. doi:10.1016/j.ijrefrig.2007.06.006.
3,576
def verify_files(files, user): if salt.utils.platform.is_windows(): return True import pwd try: pwnam = pwd.getpwnam(user) uid = pwnam[2] except KeyError: err = ( ).format(user) sys.stderr.write(err) sys.exit(salt.defaults.exitcodes.EX_NOUSER) for fn_ in files: dirname = os.path.dirname(fn_) try: if dirname: try: os.makedirs(dirname) except OSError as err: if err.errno != errno.EEXIST: raise if not os.path.isfile(fn_): with salt.utils.files.fopen(fn_, ): pass except IOError as err: if os.path.isfile(dirname): msg = .format(fn_, dirname) raise SaltSystemExit(msg=msg) if err.errno != errno.EACCES: raise msg = .format(fn_) raise SaltSystemExit(msg=msg) except OSError as err: msg = .format(fn_, err) raise SaltSystemExit(msg=msg) stats = os.stat(fn_) if uid != stats.st_uid: try: os.chown(fn_, uid, -1) except OSError: pass return True
Verify that the named files exist and are owned by the named user
3,577
def session(self): if not self.__session: self.__session = dal.get_default_session() return self.__session
Returns the current db session
3,578
def contains_remove(self, item):
    try:
        self._lens_contains_remove
    except AttributeError:
        message = "Don't know how to remove an item from {}"
        raise NotImplementedError(message.format(type(self)))
    else:
        return self._lens_contains_remove(item)
Takes a collection and an item and returns a new collection of the same type with that item removed. The notion of "contains" is defined by the object itself; the following must be ``True``: .. code-block:: python item not in contains_remove(obj, item) This function is used by some lenses (particularly ContainsLens) to remove items from containers when necessary. The corresponding method call for this hook is ``obj._lens_contains_remove(item)``. There is no default implementation.
3,579
def load_instackenv(self): self.add_environment_file(user=, filename=) self.run(, user=) ironic_node_nbr = 0 count_cmd = for f in [, ]: try: ironic_node_nbr = int( self.run(count_cmd.format(filter=f), user=)[0]) except ValueError: pass if ironic_node_nbr > 0: break self._wait_for_ironic_nodes(ironic_node_nbr) self.baremetal_factory.set_ironic_uuid(self.list_nodes()) self.run(, user=)
Load the instackenv.json file and wait till the ironic nodes are ready. TODO(Gonéri): should be splitted, write_instackenv() to generate the instackenv.json and instackenv_import() for the rest.
3,580
def get_dataset(self, key, info): if self._polarization != key.polarization: return logger.debug(, key.name) if key.name in [, ]: logger.debug() if self.lons is None or self.lats is None: self.lons, self.lats, self.alts = self.get_lonlatalts() if key.name == : data = self.lats else: data = self.lons data.attrs.update(info) else: calibration = key.calibration or if calibration == : calibration = elif calibration == : calibration = data = self.read_band() chunks = CHUNK_SIZE logger.debug() noise = self.noise.get_noise_correction(data.shape, chunks=chunks).fillna(0) logger.debug() cal = self.calibration.get_calibration(calibration, data.shape, chunks=chunks) cal_constant = self.calibration.get_calibration_constant() logger.debug() data = data.where(data > 0) data = data.astype(np.float64) dn = data * data data = ((dn - noise).clip(min=0) + cal_constant) data = (np.sqrt(data) / cal).clip(min=0) data.attrs.update(info) del noise, cal data.attrs[] = calibration return data
Load a dataset.
3,581
def get_summary(self):
    return (self.contract.name,
            self.full_name,
            self.visibility,
            [str(x) for x in self.modifiers],
            [str(x) for x in self.state_variables_read + self.solidity_variables_read],
            [str(x) for x in self.state_variables_written],
            [str(x) for x in self.internal_calls],
            [str(x) for x in self.external_calls_as_expressions])
Return the function summary Returns: (str, str, str, list(str), list(str), list(str), list(str), list(str)): contract_name, name, visibility, modifiers, vars read, vars written, internal_calls, external_calls_as_expressions
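A hedged usage sketch; `func` stands for an already-constructed function object exposing the method above.

(contract_name, name, visibility, modifiers,
 vars_read, vars_written, internal_calls, external_calls) = func.get_summary()
print('%s.%s (%s): reads %d vars, writes %d vars' % (
    contract_name, name, visibility, len(vars_read), len(vars_written)))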
3,582
def branches(self): local_branches = self.repo.branches data = [[x.name, True] for x in list(local_branches)] remote_branches = self.repo.git.branch(all=True).split() if sys.version_info.major == 2: remote_branches = set([x.split()[-1] for x in remote_branches if in x]) else: remote_branches = {x.split()[-1] for x in remote_branches if in x} data += [[x, False] for x in remote_branches] df = DataFrame(data, columns=[, ]) df[] = self._repo_name() return df
Returns a data frame of all branches in origin. The DataFrame will have the columns: * repository * branch * local :returns: DataFrame
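A hedged usage sketch based on the column names listed in the docstring; the Repository wrapper name and the path are placeholders.

repo = Repository('/path/to/a/git/clone')     # hypothetical wrapper class
df = repo.branches()
print(df[df['local']])                        # branches that exist locally
print(df[~df['local']])                       # remote-only branches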
3,583
def return_values_ssa(self):
    from slither.core.cfg.node import NodeType
    from slither.slithir.operations import Return
    from slither.slithir.variables import Constant

    if self._return_values_ssa is None:
        return_values_ssa = list()
        returns = [n for n in self.nodes if n.type == NodeType.RETURN]
        for node in returns:
            for ir in node.irs_ssa:
                if isinstance(ir, Return):
                    return_values_ssa.extend(ir.values)
        self._return_values_ssa = list(set(
            [x for x in return_values_ssa if not isinstance(x, Constant)]))
    return self._return_values_ssa
list(Return Values in SSA form): List of the return values in ssa form
3,584
import numpy as np
import scipy.spatial

def compute_geometric_median(X, eps=1e-5):
    # Weiszfeld's algorithm: iteratively re-weight the points by the inverse
    # of their distance to the current estimate until the estimate converges.
    y = np.mean(X, 0)
    while True:
        D = scipy.spatial.distance.cdist(X, [y])
        nonzeros = (D != 0)[:, 0]
        Dinv = 1 / D[nonzeros]
        Dinvs = np.sum(Dinv)
        W = Dinv / Dinvs
        T = np.sum(W * X[nonzeros], 0)
        num_zeros = len(X) - np.sum(nonzeros)
        if num_zeros == 0:
            y1 = T
        elif num_zeros == len(X):
            return y
        else:
            R = (T - y) * Dinvs
            r = np.linalg.norm(R)
            rinv = 0 if r == 0 else num_zeros / r
            y1 = max(0, 1 - rinv) * T + min(1, rinv) * y
        if scipy.spatial.distance.euclidean(y, y1) < eps:
            return y1
        y = y1
Estimate the geometric median of points in 2D. Code from https://stackoverflow.com/a/30305181 Parameters ---------- X : (N,2) ndarray Points in 2D. Second axis must be given in xy-form. eps : float, optional Distance threshold when to return the median. Returns ------- (2,) ndarray Geometric median as xy-coordinate.
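A small usage sketch, assuming the function above is importable; the sample points are made up.

import numpy as np
pts = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0], [50.0, 50.0]])
median_xy = compute_geometric_median(pts, eps=1e-6)
# Unlike np.mean(pts, 0), the result stays near the four clustered points
# because the single outlier at (50, 50) has little influence.
print(median_xy)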
3,585
def normalizeGroupValue(value):
    if not isinstance(value, (tuple, list)):
        raise TypeError("Group value must be a list, not %s." % type(value).__name__)
    value = [normalizeGlyphName(v) for v in value]
    return tuple([unicode(v) for v in value])
Normalizes group value. * **value** must be a ``list``. * **value** items must normalize as glyph names with :func:`normalizeGlyphName`. * Returned value will be a ``tuple`` of unencoded ``unicode`` strings.
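Expected behaviour implied by the docstring, assuming the glyph names below are valid:

normalizeGroupValue(['A', 'B.alt'])   # -> (u'A', u'B.alt')
normalizeGroupValue('A')              # raises TypeError: value must be a list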
3,586
def recommendations(self, **kwargs): path = self._get_id_path() response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get a list of recommended movies for a movie. Args: language: (optional) ISO 639-1 code. page: (optional) Minimum value of 1. Expected value is an integer. Returns: A dict representation of the JSON returned from the API.
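A hedged usage sketch in the style of the TMDb wrapper this appears to belong to; the `Movies` class name and the movie id are assumptions, and the keys iterated below depend on the JSON the API returns.

movie = Movies(603)                                  # hypothetical wrapper object
response = movie.recommendations(language='en-US', page=1)
for rec in response.get('results', []):
    print(rec.get('title'))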
3,587
def emit(self, record): if record.name.startswith(): return data, header = self._prepPayload(record) try: self.session.post(self._getEndpoint(), data=data, headers={: header}) except Exception: self.handleError(record)
Override the emit() method of the parent logging handler to send log records to a RESTful API
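A hedged wiring example; `RestApiHandler` is a placeholder name for whatever handler class defines the emit() above, and its constructor signature and the endpoint URL are assumptions.

import logging

handler = RestApiHandler('https://logs.example.com/ingest')   # hypothetical ctor
log = logging.getLogger('myapp')
log.addHandler(handler)
log.error('something went wrong')   # emit() POSTs this record to the endpoint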
3,588
def _nested_cwl_record(xs, want_attrs, input_files):
    if isinstance(xs, (list, tuple)):
        return [_nested_cwl_record(x, want_attrs, input_files) for x in xs]
    else:
        assert isinstance(xs, dict), pprint.pformat(xs)
        return _collapse_to_cwl_record_single(xs, want_attrs, input_files)
Convert arbitrarily nested samples into a nested list of dictionaries. Nests only at the record level, rather than within records. For batching, the top-level list holds all of the batches and the sub-lists are the samples within each batch.
3,589
def parse(self, response): content_raw = response.body.decode() self.logger.debug(.format(content_raw)) content = json.loads(content_raw, encoding=) self.logger.debug(content) date = datetime.datetime.strptime(content[], ) strftime = date.strftime("%Y-%m-%d") self.logger.info(.format(strftime)) if in content: self.logger.info() for item in content[]: for story in content[]: if item[] == story[]: story[] = 1 break self.logger.debug(item) post_num = len(content[]) self.logger.info(.format(post_num)) for item in content[]: self.logger.info(item) post_num = 0 if post_num < 0 else post_num pub_time = date + datetime.timedelta(minutes=post_num) post_num -= 1 url = .format(item[]) request = scrapy.Request(url, callback=self.parse_post) post_dict = { : ZhihuDailySpider.name, : pub_time.strftime("%Y-%m-%d %H:%M:%S"), : { : str(item.get(, )) } } if item.get(): post_dict[][] = \ str(item.get(, 0)) request.meta[] = post_dict self.item_list.append(post_dict) yield request
Parse the content of the responses returned for the links provided in ``start_urls``, and generate the requests for the individual article links. :param Response response: the response object passed in by ``Scrapy`` when invoking this callback
3,590
def __set_window_title(self):
    if self.has_editor_tab():
        windowTitle = "{0} - {1}".format(self.__default_window_title,
                                         self.get_current_editor().file)
    else:
        windowTitle = "{0}".format(self.__default_window_title)
    LOGGER.debug("> Setting window title to '{0}'.".format(windowTitle))
    self.setWindowTitle(windowTitle)
Sets the Component window title.
3,591
def from_scf_task(cls, scf_task, ddk_tolerance=None, ph_tolerance=None, manager=None):
    new = cls(manager=manager)
    new.add_becs_from_scf_task(scf_task, ddk_tolerance, ph_tolerance)
    return new
Build tasks for the computation of Born effective charges from a ground-state task. Args: scf_task: ScfTask object. ddk_tolerance: tolerance used in the DDK run if with_becs. None to use AbiPy default. ph_tolerance: dict {"varname": value} with the tolerance used in the phonon run. None to use AbiPy default. manager: :class:`TaskManager` object.
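A hedged sketch; `BecWork` stands in for whichever class defines the classmethod above (the real name may differ), `scf_task` is assumed to be an existing ground-state ScfTask, and the tolerance values are examples rather than defaults.

work = BecWork.from_scf_task(
    scf_task,                              # previously completed ScfTask
    ddk_tolerance={"tolwfr": 1e-22},       # example DDK tolerance
    ph_tolerance={"tolvrs": 1e-10},        # example phonon tolerance
)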
3,592
def estimate_clock_model(params): if assure_tree(params, tmp_dir=): return 1 dates = utils.parse_dates(params.dates) if len(dates)==0: return 1 outdir = get_outdir(params, ) aln, ref, fixed_pi = read_if_vcf(params) is_vcf = True if ref is not None else False if params.aln is None and params.sequence_length is None: print("one of arguments and is required.", file=sys.stderr) return 1 basename = get_basename(params, outdir) myTree = TreeTime(dates=dates, tree=params.tree, aln=aln, gtr=, verbose=params.verbose, seq_len=params.sequence_length, ref=ref) myTree.tip_slack=params.tip_slack if myTree.tree is None: print("ERROR: tree loading failed. exiting...") return 1 if params.clock_filter: n_bad = [n.name for n in myTree.tree.get_terminals() if n.bad_branch] myTree.clock_filter(n_iqd=params.clock_filter, reroot=params.reroot or ) n_bad_after = [n.name for n in myTree.tree.get_terminals() if n.bad_branch] if len(n_bad_after)>len(n_bad): print("The following leaves donleast-squaresMLML-rough\nThe R^2 value indicates the fraction of variation in\nroot-to-tip distance explained by the sampling times.\nHigher values corresponds more clock-like behavior (max 1.0).\nThe rate is the slope of the best fit of the date to\nthe root-to-tip distance and provides an estimate of\nthe substitution rate. The rate needs to be positive!\nNegative rates suggest an inappropriate root.\n\nThe estimated rate and tree correspond to a root date:interceptslopeslopecov\n--- root-date:\t %3.2f +/- %1.2f (one std-dev)\n\n\n--- root-date:\t %3.2f\n\nrerooted.newicknewickrtt.csvwt have a specified date are inferred from the root-to-tip regression.\n") for n in myTree.tree.get_terminals(): if hasattr(n, "raw_date_constraint") and (n.raw_date_constraint is not None): if np.isscalar(n.raw_date_constraint): tmp_str = str(n.raw_date_constraint) elif len(n.raw_date_constraint): tmp_str = str(n.raw_date_constraint[0])++str(n.raw_date_constraint[1]) else: tmp_str = ofile.write("%s, %s, %f\n"%(n.name, tmp_str, n.dist2root)) else: ofile.write("%s, %f, %f\n"%(n.name, d2d.numdate_from_dist2root(n.dist2root), n.dist2root)) for n in myTree.tree.get_nonterminals(order=): ofile.write("%s, %f, %f\n"%(n.name, d2d.numdate_from_dist2root(n.dist2root), n.dist2root)) print("--- wrote dates and root-to-tip distances to \n\t%s\n"%table_fname) plot_rtt(myTree, outdir+params.plot_rtt) return 0
Implement the treetime clock command: a root-to-tip regression that estimates the substitution rate and the implied root date.
3,593
def approx(x, y, xout, method=, rule=1, f=0, yleft=None, yright=None, ties=): if method not in VALID_APPROX: raise ValueError( % VALID_APPROX) xout = c(xout).astype(np.float64) method_key = method method = get_callable(method_key, VALID_APPROX) x, y = _regularize(x, y, ties) nx = x.shape[0] if yleft is None: yleft = y[0] if rule != 1 else np.nan if yright is None: yright = y[-1] if rule != 1 else np.nan yout = C_Approx(x, y, xout, method, f, yleft, yright) return xout, np.asarray(yout)
Linearly interpolate points. Return a list of points which (linearly) interpolate given data points, or a function performing the linear (or constant) interpolation. Parameters ---------- x : array-like, shape=(n_samples,) Numeric vector giving the coordinates of the points to be interpolated. y : array-like, shape=(n_samples,) Numeric vector giving the coordinates of the points to be interpolated. xout : int, float or iterable A scalar or iterable of numeric values specifying where interpolation is to take place. method : str, optional (default='linear') Specifies the interpolation method to be used. Choices are "linear" or "constant". rule : int, optional (default=1) An integer describing how interpolation is to take place outside the interval ``[min(x), max(x)]``. If ``rule`` is 1 then np.nans are returned for such points and if it is 2, the value at the closest data extreme is used. f : int, optional (default=0) For ``method`` = "constant" a number between 0 and 1 inclusive, indicating a compromise between left- and right-continuous step functions. If y0 and y1 are the values to the left and right of the point then the value is y0 if f == 0, y1 if f == 1, and y0*(1-f)+y1*f for intermediate values. In this way the result is right-continuous for f == 0 and left-continuous for f == 1, even for non-finite ``y`` values. yleft : float, optional (default=None) The value to be returned when input ``x`` values are less than ``min(x)``. The default is defined by the value of rule given below. yright : float, optional (default=None) The value to be returned when input ``x`` values are greater than ``max(x)``. The default is defined by the value of rule given below. ties : str, optional (default='mean') Handling of tied ``x`` values. Choices are "mean" or "ordered".
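A hedged usage sketch, assuming approx() is importable from its module; the data points are arbitrary.

x = [1.0, 2.0, 3.0, 4.0]
y = [10.0, 20.0, 30.0, 40.0]
xout, yout = approx(x, y, xout=[1.5, 2.5, 10.0], method='linear', rule=2)
# yout is roughly [15., 25., 40.]; rule=2 clamps the out-of-range 10.0
# to the value at the nearest data extreme instead of returning NaN.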
3,594
def add_peer(self, peer_addr):
    "Build a connection to the Hub at a given ``(host, port)`` address"
    peer = connection.Peer(
        self._ident, self._dispatcher, peer_addr, backend.Socket())
    peer.start()
    self._started_peers[peer_addr] = peer
Build a connection to the Hub at a given ``(host, port)`` address
3,595
def main(): org = Organization(name=).create() pprint(org.get_values()) org.delete()
Create an organization, print out its attributes and delete it.
3,596
def subscribe_to_address_webhook(callback_url, subscription_address, event=, confirmations=0, confidence=0.00, coin_symbol=, api_key=None): assert is_valid_coin_symbol(coin_symbol) assert is_valid_address_for_coinsymbol(subscription_address, coin_symbol) assert api_key, url = make_url(coin_symbol, ) params = {: api_key} data = { : event, : callback_url, : subscription_address, } if event == and confirmations: data[] = confirmations elif event == and confidence: data[] = confidence r = requests.post(url, json=data, params=params, verify=True, timeout=TIMEOUT_IN_SECONDS) response_dict = get_valid_json(r) return response_dict[]
Subscribe to transaction webhooks on a given address. Webhooks for transaction broadcast and each confirmation (up to 6). Returns the blockcypher ID of the subscription
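A hedged usage sketch; the token, callback URL and address are placeholders, and the event name follows BlockCypher's public webhook API rather than anything stated above.

webhook_id = subscribe_to_address_webhook(
    callback_url='https://example.com/blockcypher/callback',
    subscription_address='1ExampleAddressxxxxxxxxxxxxxxxxxx',
    event='tx-confirmation',      # assumed event name; fires on each confirmation
    confirmations=6,
    coin_symbol='btc',
    api_key='YOUR_BLOCKCYPHER_TOKEN',
)
print(webhook_id)                 # id needed later to unsubscribe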
3,597
def logs(ctx, services, num, follow):
    logger.debug("running command %s (%s)", ctx.command.name, ctx.params,
                 extra={"command": ctx.command.name, "params": ctx.params})
    home = ctx.obj["HOME"]
    services_path = os.path.join(home, SERVICES)
    tail_threads = []
    for service in services:
        logpath = os.path.join(services_path, service, LOGS_DIR, STDOUTLOG)
        if os.path.exists(logpath):
            logger.debug("tailing %s", logpath)
            t = threading.Thread(target=Tailer,
                                 kwargs={"name": service,
                                         "nlines": num,
                                         "filepath": logpath,
                                         "follow": follow})
            t.daemon = True
            t.start()
            tail_threads.append(t)
    if tail_threads:
        while tail_threads[0].isAlive():
            tail_threads[0].join(0.1)
Show logs of daemonized service.
3,598
def purge(self):
    try:
        self._device.setblocking(0)
        while self._device.recv(1):
            pass
    except socket.error as err:
        pass
    finally:
        self._device.setblocking(1)
Purges read/write buffers.
3,599
def update_type_lookups(self):
    self.type_to_typestring = dict(zip(self.types,
                                       self.python_type_strings))
    self.typestring_to_type = dict(zip(self.python_type_strings,
                                       self.types))
Update type and typestring lookup dicts. Must be called once the ``types`` and ``python_type_strings`` attributes are set so that ``type_to_typestring`` and ``typestring_to_type`` are constructed. .. versionadded:: 0.2 Notes ----- Subclasses need to call this function explicitly.
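A hedged sketch of the subclass pattern described above; `MarshallerBase` is a stand-in name rather than the package's real base class, and the Fraction marshaller is purely illustrative.

from fractions import Fraction

class FractionMarshaller(MarshallerBase):        # hypothetical base class
    def __init__(self):
        self.types = [Fraction]
        self.python_type_strings = ['fractions.Fraction']
        self.update_type_lookups()               # builds both lookup dicts

m = FractionMarshaller()
assert m.type_to_typestring[Fraction] == 'fractions.Fraction'
assert m.typestring_to_type['fractions.Fraction'] is Fraction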