Unnamed: 0 | code | docstring |
---|---|---|
387,600 | def _get_assessment_taken(self, assessment_taken_id):
if assessment_taken_id not in self._assessments_taken:
mgr = self._get_provider_manager()
lookup_session = mgr.get_assessment_taken_lookup_session(proxy=self._proxy)
lookup_session.use_federated_bank_view()
self._assessments_taken[assessment_taken_id] = (
lookup_session.get_assessment_taken(assessment_taken_id))
return self._assessments_taken[assessment_taken_id] | Helper method for getting an AssessmentTaken object given an Id. |
387,601 | def get_next_first(intersection, intersections, to_end=True):
along_edge = None
index_first = intersection.index_first
s = intersection.s
for other_int in intersections:
other_s = other_int.s
if other_int.index_first == index_first and other_s > s:
if along_edge is None or other_s < along_edge.s:
along_edge = other_int
if along_edge is None:
if to_end:
return _intersection_helpers.Intersection(
index_first,
1.0,
None,
None,
interior_curve=CLASSIFICATION_T.FIRST,
)
else:
return None
else:
return along_edge | Gets the next node along the current (first) edge.
.. note::
This is a helper used only by :func:`get_next`, which in
turn is only used by :func:`basic_interior_combine`, which itself
is only used by :func:`combine_intersections`.
Along with :func:`get_next_second`, this function does the majority of the
heavy lifting in :func:`get_next`. **Very** similar to
:func:`get_next_second`, but this works with the first curve while the
other function works with the second.
Args:
intersection (.Intersection): The current intersection.
intersections (List[.Intersection]): List of all detected
intersections, provided as a reference for potential
points to arrive at.
to_end (Optional[bool]): Indicates if the next node should just be
the end of the first edge or :data:`None`.
Returns:
Optional[.Intersection]: The "next" point along a surface of
intersection. This will produce the next intersection along the
current (first) edge or the end of the same edge. If ``to_end`` is
:data:`False` and there are no other intersections along the current
edge, will return :data:`None` (rather than the end of the same edge). |
387,602 | def psd(self):
if not self._psd:
errMsg = "The PSD has not been set in the metricParameters "
errMsg += "instance."
raise ValueError(errMsg)
return self._psd | A pyCBC FrequencySeries holding the appropriate PSD.
Return the PSD used in the metric calculation. |
387,603 | def save_ds9(output, filename):
ds9_file = open(filename, 'wt')
ds9_file.write(output)
ds9_file.close() | Save ds9 region output into filename.
Parameters
----------
output : str
String containing the full output to be exported as a ds9 region
file.
filename : str
Output file name. |
387,604 | def to_dict(self):
data = {}
data["name"] = self.name
data["description"] = self.description if self.description and len(self.description) else None
data["type"] = self.type if self.type and len(self.type) else None
data["allowed_chars"] = self.allowed_chars if self.allowed_chars and len(self.allowed_chars) else None
data["allowed_choices"] = self.allowed_choices
data["autogenerated"] = self.autogenerated
data["channel"] = self.channel if self.channel and len(self.channel) else None
data["creation_only"] = self.creation_only
data["default_order"] = self.default_order
data["default_value"] = self.default_value if self.default_value and len(self.default_value) else None
data["deprecated"] = self.deprecated
data["exposed"] = self.exposed
data["filterable"] = self.filterable
data["format"] = self.format if self.format and len(self.format) else None
data["max_length"] = int(self.max_length) if self.max_length is not None else None
data["max_value"] = int(self.max_value) if self.max_value is not None else None
data["min_length"] = int(self.min_length) if self.min_length is not None else None
data["min_value"] = int(self.min_value) if self.min_value is not None else None
data["orderable"] = self.orderable
data["read_only"] = self.read_only
data["required"] = self.required
data["transient"] = self.transient
data["unique"] = self.unique
data["uniqueScope"] = self.unique_scope if self.unique_scope and len(self.unique_scope) else None
data["subtype"] = self.subtype if self.subtype and len(self.subtype) else None
data["userlabel"] = self.userlabel if self.userlabel and len(self.userlabel) else None
return data | Transform an attribute to a dict |
387,605 | def process_blast(
blast_dir,
org_lengths,
fraglengths=None,
mode="ANIb",
identity=0.3,
coverage=0.7,
logger=None,
):
blastfiles = pyani_files.get_input_files(blast_dir, ".blast_tab")
results = ANIResults(list(org_lengths.keys()), mode)
for org, length in list(org_lengths.items()):
results.alignment_lengths[org][org] = length
for blastfile in blastfiles:
qname, sname = os.path.splitext(os.path.split(blastfile)[-1])[0].split("_vs_")
if qname not in list(org_lengths.keys()):
if logger:
logger.warning(
"Query name %s not in input " % qname
+ "sequence list, skipping %s" % blastfile
)
continue
if sname not in list(org_lengths.keys()):
if logger:
logger.warning(
"Subject name %s not in input " % sname
+ "sequence list, skipping %s" % blastfile
)
continue
resultvals = parse_blast_tab(blastfile, fraglengths, identity, coverage, mode)
query_cover = float(resultvals[0]) / org_lengths[qname]
results.add_tot_length(qname, sname, resultvals[0], sym=False)
results.add_sim_errors(qname, sname, resultvals[1], sym=False)
results.add_pid(qname, sname, 0.01 * resultvals[2], sym=False)
results.add_coverage(qname, sname, query_cover)
return results | Returns a tuple of ANIb results for .blast_tab files in the output dir.
- blast_dir - path to the directory containing .blast_tab files
- org_lengths - the base count for each input sequence
- fraglengths - dictionary of query sequence fragment lengths, only
needed for BLASTALL output
- mode - parsing BLASTN+ or BLASTALL output?
- logger - a logger for messages
Returns the following pandas dataframes in an ANIResults object;
query sequences are rows, subject sequences are columns:
- alignment_lengths - non-symmetrical: total length of alignment
- percentage_identity - non-symmetrical: ANIb (Goris) percentage identity
- alignment_coverage - non-symmetrical: coverage of query
- similarity_errors - non-symmetrical: count of similarity errors
May throw a ZeroDivisionError if one or more BLAST runs failed, or a
very distant sequence was included in the analysis. |
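A minimal usage sketch (the directory name and genome lengths here are hypothetical, not from the source):

org_lengths = {"genomeA": 4641652, "genomeB": 4215606}  # base counts per input sequence
results = process_blast("anib_output/", org_lengths, mode="ANIb")
print(results.percentage_identity)  # query-by-subject DataFrame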
387,606 | def get_users(self, fetch=True):
return Users(self.resource.users, self.client, populate=fetch) | Return this Application's users object, populating it if fetch
is True. |
387,607 | def _reset_cache(self, key=None):
if getattr(self, '_cache', None) is None:
return
if key is None:
self._cache.clear()
else:
self._cache.pop(key, None) | Reset cached properties. If ``key`` is passed, only clears that key. |
387,608 | def read(self, ncfile, timegrid_data) -> None:
array = query_array(ncfile, self.name)
idxs: Tuple[Any] = (slice(None),)
subdev2index = self.query_subdevice2index(ncfile)
for devicename, seq in self.sequences.items():
if seq.NDIM:
if self._timeaxis:
subshape = (array.shape[1],) + seq.shape
else:
subshape = (array.shape[0],) + seq.shape
subarray = numpy.empty(subshape)
temp = devicename + '_'
for prod in self._product(seq.shape):
station = temp + '_'.join(str(idx) for idx in prod)
idx0 = subdev2index.get_index(station)
subarray[idxs+prod] = array[self.get_timeplaceslice(idx0)]
else:
idx = subdev2index.get_index(devicename)
subarray = array[self.get_timeplaceslice(idx)]
seq.series = seq.adjust_series(timegrid_data, subarray) | Read the data from the given NetCDF file.
The argument `timegrid_data` defines the data period of the
given NetCDF file.
See the general documentation on class |NetCDFVariableFlat|
for some examples. |
387,609 | def resolve_links(self):
for resource in self.items_mapped['entries'].values():
for dct in [getattr(resource, 'sys', {}), resource.fields]:
for k, v in dct.items():
if isinstance(v, ResourceLink):
resolved = self._resolve_resource_link(v)
if resolved is not None:
dct[k] = resolved
elif isinstance(v, (MultipleAssets, MultipleEntries, list)):
for idx, ele in enumerate(v):
if not isinstance(ele, ResourceLink):
break
resolved = self._resolve_resource_link(ele)
if resolved is not None:
v[idx] = resolved | Attempt to resolve all internal links (locally).
In case the linked resources are found either as members of the array or within
the `includes` element, those will be replaced and reference the actual resources.
No network calls will be performed. |
387,610 | def QA_indicator_WR(DataFrame, N, N1):
HIGH = DataFrame['high']
LOW = DataFrame['low']
CLOSE = DataFrame['close']
WR1 = 100 * (HHV(HIGH, N) - CLOSE) / (HHV(HIGH, N) - LLV(LOW, N))
WR2 = 100 * (HHV(HIGH, N1) - CLOSE) / (HHV(HIGH, N1) - LLV(LOW, N1))
DICT = {'WR1': WR1, 'WR2': WR2}
return pd.DataFrame(DICT) | Williams %R (WR) indicator. |
387,611 | def _import_all_modules():
import traceback
import os
global results
globals_, locals_ = globals(), locals()
def load_module(modulename, package_module):
try:
names = []
module = __import__(package_module, globals_, locals_, [modulename])
for name in module.__dict__:
if not name.startswith('_'):
globals_[name] = module.__dict__[name]
names.append(name)
except Exception:
traceback.print_exc()
raise
return module, names
def load_dir(abs_dirpath, rel_dirpath=''):
results = []
for filename in os.listdir(abs_dirpath):
rel_filepath = os.path.join(rel_dirpath, filename)
abs_filepath = os.path.join(abs_dirpath, filename)
if filename[0] != '_' and os.path.isfile(abs_filepath) and filename.split('.')[-1] in ('py', 'pyc'):
modulename = '.'.join(os.path.normpath(os.path.splitext(rel_filepath)[0]).split(os.sep))
package_module = '.'.join([__name__, modulename])
module, names = load_module(modulename, package_module)
results += names
elif os.path.isdir(abs_filepath):
results += load_dir(abs_filepath, rel_filepath)
return results
return load_dir(os.path.dirname(__file__)) | dynamically imports all modules in the package |
387,612 | def add_console_logger(logger, level='info'):
logger.setLevel(getattr(logging, level.upper()))
if not logger.handlers:
color = False
if curses and sys.stderr.isatty():
try:
curses.setupterm()
if curses.tigetnum("colors") > 0:
color = True
except:
pass
console = logging.StreamHandler()
console.setFormatter(_LogFormatter(color=color))
logger.addHandler(console)
return logger | Attach a console handler to the logger for log output. |
387,613 | def remove(self):
lib.gp_camera_folder_remove_dir(
self._cam._cam, self.parent.path.encode(), self.name.encode(),
self._cam._ctx) | Remove the directory. |
387,614 | def transform(self, fn, dtype=None, *args, **kwargs):
rdd = self._rdd.map(fn)
if dtype is None:
return self.__class__(rdd, noblock=True, **self.get_params())
if dtype is np.ndarray:
return ArrayRDD(rdd, bsize=self.bsize, noblock=True)
elif dtype is sp.spmatrix:
return SparseRDD(rdd, bsize=self.bsize, noblock=True)
else:
return BlockRDD(rdd, bsize=self.bsize, dtype=dtype, noblock=True) | Equivalent to map, compatibility purpose only.
Column parameter ignored. |
387,615 | def get_all_sources(self):
if self.__all_sources is None:
self.__all_sources = OrderedDict()
self.walk(self.__add_one)
return self.__all_sources | Returns:
OrderedDict: all source file names in the hierarchy, paired with
the names of their subpages. |
387,616 | def parse_baxter(reading):
initial = ''
medial = ''
final = ''
tone = ''
inienv = True
medienv = False
finenv = False
tonenv = False
inichars = "pbmrtdnkgnsyhzljwXHptkRPjyjuww'" + medial
return initial,medial,final,tone | Parse a Baxter string and render it with all its contents, namely
initial, medial, final, and tone. |
387,617 | def _app_cache_deepcopy(obj):
if isinstance(obj, defaultdict):
return deepcopy(obj)
elif isinstance(obj, dict):
return type(obj)((_app_cache_deepcopy(key), _app_cache_deepcopy(val)) for key, val in obj.items())
elif isinstance(obj, list):
return list(_app_cache_deepcopy(val) for val in obj)
elif isinstance(obj, AppConfig):
app_conf = Empty()
app_conf.__class__ = AppConfig
app_conf.__dict__ = _app_cache_deepcopy(obj.__dict__)
return app_conf
return obj | A helper that correctly deepcopies model cache state |
387,618 | def fence_point_encode(self, target_system, target_component, idx, count, lat, lng):
return MAVLink_fence_point_message(target_system, target_component, idx, count, lat, lng) | A fence point. Used to set a point when from GCS -> MAV. Also used to
return a point from MAV -> GCS
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
idx : point index (first point is 1, 0 is for return point) (uint8_t)
count : total number of points (for sanity checking) (uint8_t)
lat : Latitude of point (float)
lng : Longitude of point (float) |
387,619 | def Var(self, mu=None):
if mu is None:
mu = self.Mean()
var = 0.0
for x, p in self.d.iteritems():
var += p * (x - mu) ** 2
return var | Computes the variance of a PMF.
Args:
mu: the point around which the variance is computed;
if omitted, computes the mean
Returns:
float variance |
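For intuition, the same variance computation on a plain dict of value-to-probability pairs:

d = {0: 0.25, 1: 0.5, 2: 0.25}
mu = sum(x * p for x, p in d.items())               # mean = 1.0
var = sum(p * (x - mu) ** 2 for x, p in d.items())  # variance = 0.5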
387,620 | def model(self, value):
if value == self._defaults['model'] and 'model' in self._values:
del self._values['model']
else:
self._values['model'] = value | The model property.
Args:
value (string). the property value. |
387,621 | def migrate(connection, dsn):
all_migrations = _get_all_migrations()
logger.debug('Collected migrations: {0}'.format(all_migrations))
for version, modname in all_migrations:
if _is_missed(connection, version) and version <= SCHEMA_VERSION:
logger.info('Applying migration {0}'.format(version))
module = __import__(modname, fromlist=['Migration'])
trans = connection.begin()
try:
module.Migration().migrate(connection)
_update_version(connection, version)
trans.commit()
except:
trans.rollback()
logger.error("Failed to migrate on {} ".format(version, dsn))
raise | Collects all migrations and applies missed.
Args:
connection (sqlalchemy connection): |
387,622 | def _calculate_status(self, target_freshness, freshness):
| Calculate the status of a run.
:param dict target_freshness: The target freshness dictionary. It must
match the freshness spec.
:param timedelta freshness: The actual freshness of the data, as
calculated from the database's timestamps |
387,623 | def from_content(cls, content):
parsed_content = parse_tibiacom_content(content)
tables = cls._parse_tables(parsed_content)
filters = tables.get("Highscores Filter")
if filters is None:
raise InvalidContent("content does is not from the highscores section of Tibia.com")
world_filter, vocation_filter, category_filter = filters
world = world_filter.find("option", {"selected": True})["value"]
if world == "":
return None
category = category_filter.find("option", {"selected": True})["value"]
vocation_selected = vocation_filter.find("option", {"selected": True})
vocation = int(vocation_selected["value"]) if vocation_selected else 0
highscores = cls(world, category, vocation=vocation)
entries = tables.get("Highscores")
if entries is None:
return None
_, header, *rows = entries
info_row = rows.pop()
highscores.results_count = int(results_pattern.search(info_row.text).group(1))
for row in rows:
cols_raw = row.find_all('td')
highscores._parse_entry(cols_raw)
return highscores | Creates an instance of the class from the html content of a highscores page.
Notes
-----
Tibia.com only shows up to 25 entries per page, so in order to obtain the full highscores, all 12 pages must
be parsed and merged into one.
Parameters
----------
content: :class:`str`
The HTML content of the page.
Returns
-------
:class:`Highscores`
The highscores results contained in the page.
Raises
------
InvalidContent
If content is not the HTML of a highscore's page. |
387,624 | def carrysave_adder(a, b, c, final_adder=ripple_add):
a, b, c = libutils.match_bitwidth(a, b, c)
partial_sum = a ^ b ^ c
shift_carry = (a | b) & (a | c) & (b | c)
return pyrtl.concat(final_adder(partial_sum[1:], shift_carry), partial_sum[0]) | Adds three wirevectors up in an efficient manner
:param WireVector a, b, c : the three wires to add up
:param function final_adder : The adder to use to do the final addition
:return: a wirevector with length 2 longer than the largest input |
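As a plain-integer sanity check of the carry-save identity used above (the majority form (a|b)&(a|c)&(b|c) equals (a&b)|(a&c)|(b&c)):

a, b, c = 5, 3, 6
partial_sum = a ^ b ^ c                    # bitwise sum without carries
shift_carry = (a | b) & (a | c) & (b | c)  # majority function: the carry bits
assert partial_sum + (shift_carry << 1) == a + b + c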
387,625 | def __cache(self, file, content, document):
self.__files_cache.add_content(**{file: CacheData(content=content, document=document)}) | Caches given file.
:param file: File to cache.
:type file: unicode
:param content: File content.
:type content: list
:param document: File document.
:type document: QTextDocument |
387,626 | def valuefrompostdata(self, postdata):
if self.multi:
found = False
if self.id in postdata:
found = True
passedvalues = postdata[self.id].split()
values = []
for choicekey in [x[0] for x in self.choices]:
if choicekey in passedvalues:
found = True
values.append(choicekey)
else:
values = []
for choicekey in [x[0] for x in self.choices]:
if self.id + '[' + choicekey + ']' in postdata:
found = True
if postdata[self.id + '[' + choicekey + ']']:
values.append(choicekey)
if not found:
return None
else:
return values
else:
if self.id in postdata:
return postdata[self.id]
else:
return None | This parameter method searches the POST data and retrieves the values it needs. It does not set the value yet though, but simply returns it. Needs to be explicitly passed to parameter.set() |
387,627 | def setup(level='info', output=None):
output = output or settings.LOG['file']
level = level.upper()
handlers = [
logbook.NullHandler()
]
if output == 'stdout':
handlers.append(
logbook.StreamHandler(sys.stdout,
format_string=settings.LOG['format'],
level=level))
else:
handlers.append(
logbook.FileHandler(output,
format_string=settings.LOG['format'],
level=level))
sentry_dns = settings.LOG['sentry_dns']
if sentry_dns:
handlers.append(SentryHandler(sentry_dns, level='ERROR'))
return logbook.NestedSetup(handlers) | Hivy formatted logger |
387,628 | def gen_token(cls):
token = os.urandom(16)
token_time = int(time.time())
return {'token': token, 'token_time': token_time} | Generate an access_token. |
387,629 | def format_result(result):
instance = None
error = None
if result["instance"] is not None:
instance = format_instance(result["instance"])
if result["error"] is not None:
error = format_error(result["error"])
result = {
"success": result["success"],
"plugin": format_plugin(result["plugin"]),
"instance": instance,
"error": error,
"records": format_records(result["records"]),
"duration": result["duration"]
}
if os.getenv("PYBLISH_SAFE"):
schema.validate(result, "result")
return result | Serialise Result |
387,630 | def __audioread_load(path, offset, duration, dtype):
y = []
with audioread.audio_open(path) as input_file:
sr_native = input_file.samplerate
n_channels = input_file.channels
s_start = int(np.round(sr_native * offset)) * n_channels
if duration is None:
s_end = np.inf
else:
s_end = s_start + (int(np.round(sr_native * duration))
* n_channels)
n = 0
for frame in input_file:
frame = util.buf_to_float(frame, dtype=dtype)
n_prev = n
n = n + len(frame)
if n < s_start:
continue
if s_end < n_prev:
break
if s_end < n:
frame = frame[:s_end - n_prev]
if n_prev <= s_start <= n:
frame = frame[(s_start - n_prev):]
y.append(frame)
if y:
y = np.concatenate(y)
if n_channels > 1:
y = y.reshape((-1, n_channels)).T
else:
y = np.empty(0, dtype=dtype)
return y, sr_native | Load an audio buffer using audioread.
This loads one block at a time, and then concatenates the results. |
387,631 | def find(cls, key=None, **kwargs):
if not key:
return super(Asset, cls).find(**kwargs)
params = {"asset[key]": key}
params.update(kwargs)
theme_id = params.get("theme_id")
path_prefix = "%s/themes/%s" % (cls.site, theme_id) if theme_id else cls.site
resource = cls.find_one("%s/assets.%s" % (path_prefix, cls.format.extension), **params)
if theme_id and resource:
resource._prefix_options["theme_id"] = theme_id
return resource | Find an asset by key
E.g.
shopify.Asset.find('layout/theme.liquid', theme_id=99) |
387,632 | def ip():
ok, err = _hack_ip()
if not ok:
click.secho(click.style(err, fg='red'))
sys.exit(1)
click.secho(click.style(err, fg='green')) | Show ip address. |
387,633 | def _mapped_std_streams(lookup_paths, streams=('stdin', 'stdout', 'stderr')):
standard_inos = {}
for stream in streams:
try:
stream_stat = os.fstat(getattr(sys, stream).fileno())
key = stream_stat.st_dev, stream_stat.st_ino
standard_inos[key] = stream
except Exception:
pass
def stream_inos(paths):
for path in paths:
try:
stat = os.stat(path)
key = (stat.st_dev, stat.st_ino)
if key in standard_inos:
yield standard_inos[key], path
except FileNotFoundError:
pass
return dict(stream_inos(lookup_paths)) if standard_inos else {} | Get a mapping of standard streams to given paths. |
387,634 | @contextmanager
def sys_version(version_tuple):
old_version = sys.version_info
sys.version_info = version_tuple
yield
sys.version_info = old_version | Set a temporary sys.version_info tuple
:param version_tuple: a fake sys.version_info tuple |
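A usage sketch (relies on the contextmanager decorator restored above):

with sys_version((2, 7, 15)):
    assert sys.version_info == (2, 7, 15)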
387,635 | def get_content_commit_date(extensions, acceptance_callback=None,
root_dir='.'):
logger = logging.getLogger(__name__)
def _null_callback(_):
return True
if acceptance_callback is None:
acceptance_callback = _null_callback
root_dir = os.path.abspath(root_dir)
repo = git.repo.base.Repo(path=root_dir, search_parent_directories=True)
newest_datetime = None
iters = [_iter_filepaths_with_extension(ext, root_dir=root_dir)
for ext in extensions]
for content_path in itertools.chain(*iters):
content_path = os.path.abspath(os.path.join(root_dir, content_path))
if acceptance_callback(content_path):
logger.debug('Checking content path %s', content_path)
try:
commit_datetime = read_git_commit_timestamp_for_file(
content_path, repo=repo)
logger.debug('Commit timestamp of %s is %s',
content_path, commit_datetime)
except IOError:
logger.warning(
'Could not get commit timestamp for %s; skipping',
content_path)
continue
if not newest_datetime or commit_datetime > newest_datetime:
newest_datetime = commit_datetime
logger.debug('Updated newest datetime to %s', newest_datetime)
logger.debug('Final commit datetime is %s', newest_datetime)
if newest_datetime is None:
raise RuntimeError('No content files found in {}'.format(root_dir))
return newest_datetime | Get the datetime for the most recent commit to a project that
affected certain types of content.
Parameters
----------
extensions : sequence of 'str'
Extensions of files to consider in getting the most recent commit
date. For example, ``('rst', 'svg', 'png')`` are content extensions
for a Sphinx project. **Extension comparison is case sensitive.** Add
uppercase variants to match uppercase extensions.
acceptance_callback : callable
Callable function whose sole argument is a file path, and returns
`True` or `False` depending on whether the file's commit date should
be considered or not. This callback is only run on files that are
included by ``extensions``. Thus this callback is a way to exclude
specific files that would otherwise be included by their extension.
root_dir : `str`, optional
Only content contained within this root directory is considered.
This directory must be, or be contained by, a Git repository. This is
the current working directory by default.
Returns
-------
commit_date : `datetime.datetime`
Datetime of the most recent content commit.
Raises
------
RuntimeError
Raised if no content files are found. |
387,636 | def syllabify(word):
compound = bool(re.search(r'-', word))  # hyphen marks a compound boundary
syllabify = _syllabify_compound if compound else _syllabify_simplex
syllabifications = list(syllabify(word))
for word, rules in rank(syllabifications):
word = str(replace_umlauts(word, put_back=True))
rules = rules[1:]
yield word, rules | Syllabify the given word, whether simplex or complex. |
387,637 | def list_vnets(access_token, subscription_id):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/providers/Microsoft.Network',
'/virtualNetworks?api-version=', NETWORK_API])
return do_get(endpoint, access_token) | List the VNETs in a subscription .
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body of VNets list with properties. |
387,638 | def get_hex_color(layer_type):
COLORS = ['#4A88B3', '#98C1DE', '#6CA2C8', '#3173A2', '#17649B',
'#FFBB60', '#FFDAA9', '#FFC981', '#FCAC41', '#F29416',
'#C54AAA', '#E698D4', '#D56CBE', '#B72F99', '#B0108D',
'#75DF54', '#B3F1A0', '#91E875', '#5DD637', '#3FCD12']
hashed = int(hash(layer_type)) % 5
if "conv" in layer_type.lower():
return COLORS[:5][hashed]
if layer_type in lasagne.layers.pool.__all__:
return COLORS[5:10][hashed]
if layer_type in lasagne.layers.recurrent.__all__:
return COLORS[10:15][hashed]
else:
return COLORS[15:20][hashed] | Determines the hex color for a layer.
:parameters:
- layer_type : string
Class name of the layer
:returns:
- color : string containing a hex color for filling block. |
387,639 | def _get_fuzzy_tc_matches(text, full_text, options):
print("text: {}, full: {}, options: {}".format(text, full_text, options))
matching_options = _get_fuzzy_matches(full_text, options)
input_tokens = full_text.split()
input_tokens.remove(text)  # drop the token currently being completed
final_matches = []
for option in options:
option_tokens = option.split()
matches = [t for t in option_tokens if text in t]
input_tokens_which_match = [t for t in input_tokens for m in matches if t in m]
for token in input_tokens_which_match:
token_matches = [t for t in option_tokens if token in t]
if len(token_matches) == 1:
match = token_matches[0]
if match in matches:
matches.remove(match)
if match not in final_matches:
final_matches.append(match)
return final_matches | Get the options that match the full text, then from each option
return only the individual words which have not yet been matched
which also match the text being tab-completed. |
387,640 | def remove(self):
try:
self.phase = PHASE.REMOVE
self.logger.info("Removing environment %s..." % self.namespace)
self.instantiate_features()
self._specialize()
for feature in self.features.run_order:
try:
self.run_action(feature, 'remove')
except FormulaException:
pass
self.clear_all()
self.directory.remove()
self.injections.commit()
if self.error_occured:
self.logger.error(warning_template)
self.logger.error(REMOVE_WARNING)
except Exception:
self.logger.debug("", exc_info=sys.exc_info())
et, ei, tb = sys.exc_info()
reraise(et, ei, tb) | remove the environment |
387,641 | def tai(self, year=None, month=1, day=1, hour=0, minute=0, second=0.0,
jd=None):
if jd is not None:
tai = jd
else:
tai = julian_date(
_to_array(year), _to_array(month), _to_array(day),
_to_array(hour), _to_array(minute), _to_array(second),
)
return self.tai_jd(tai) | Build a `Time` from a TAI calendar date.
Supply the International Atomic Time (TAI) as a proleptic
Gregorian calendar date:
>>> t = ts.tai(2014, 1, 18, 1, 35, 37.5)
>>> t.tai
2456675.56640625
>>> t.tai_calendar()
(2014, 1, 18, 1, 35, 37.5) |
387,642 | def read_composite_array(fname, sep=','):
with open(fname) as f:
header = next(f)
if header.startswith('#'):
attrs = dict(parse_comment(header[1:]))
header = next(f)
else:
attrs = {}
transheader = htranslator.read(header.split(sep))
fields, dtype = parse_header(transheader)
ts_pairs = []
for name in fields:
dt = dtype.fields[name][0]
ts_pairs.append((dt.subdtype[0].type if dt.subdtype else dt.type,
dt.shape))
col_ids = list(range(1, len(ts_pairs) + 1))
num_columns = len(col_ids)
records = []
col, col_id = '', 0
for i, line in enumerate(f, 2):
row = line.split(sep)
if len(row) != num_columns:
raise InvalidFile(
'expected %d columns, found %d in file %s, line %d' %
(num_columns, len(row), fname, i))
try:
record = []
for (ntype, shape), col, col_id in zip(ts_pairs, row, col_ids):
record.append(_cast(col, ntype, shape, i, fname))
records.append(tuple(record))
except Exception as e:
raise InvalidFile(
'could not cast %r in file %s, line %d, column %d, dtype %s: %s'
% (col, fname, i, col_id,
(ntype.__name__,) + shape, e))
return ArrayWrapper(numpy.array(records, dtype), attrs) | r"""
Convert a CSV file with header into an ArrayWrapper object.
>>> from openquake.baselib.general import gettemp
>>> fname = gettemp('PGA:3,PGV:2,avg:1\n'
... '.1 .2 .3,.4 .5,.6\n')
>>> print(read_composite_array(fname).array) # array of shape (1,)
[([0.1, 0.2, 0.3], [0.4, 0.5], [0.6])] |
387,643 | def add_btn_ok(self,label_ok):
self.wbtn_ok = button.Button("btn_ok",self,self.window,self.peng,
pos=lambda sw,sh, bw,bh: (sw/2-bw/2,sh/2-bh/2-bh*2),
size=[0,0],
label=label_ok,
borderstyle=self.borderstyle
)
self.wbtn_ok.size = lambda sw,sh: (self.wbtn_ok._label.font_size*8,self.wbtn_ok._label.font_size*2)
self.addWidget(self.wbtn_ok)
def f():
self.doAction("click_ok")
self.exitDialog()
self.wbtn_ok.addAction("click",f) | Adds an OK button to allow the user to exit the dialog.
This widget can be triggered by setting the label ``label_ok`` to a string.
This widget will be mostly centered on the screen, but below the main label
by the double of its height. |
387,644 | def convert_csv_with_dialog_paths(csv_file):
def convert_line_to_path(line):
file, dir = map(lambda x: x.strip(), line.split(","))
return os.path.join(dir, file)
return map(convert_line_to_path, csv_file) | Converts CSV file with comma separated paths to filesystem paths.
:param csv_file:
:return: |
387,645 | def _parse_scram_response(response):
return dict(item.split(b"=", 1) for item in response.split(b",")) | Split a scram response into key, value pairs. |
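For example, on a SCRAM server-first message:

_parse_scram_response(b'r=fyko+d2lbbFgONRv,s=QSXCR+Q6sek8bf92,i=4096')
# -> {b'r': b'fyko+d2lbbFgONRv', b's': b'QSXCR+Q6sek8bf92', b'i': b'4096'}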
387,646 | def _format_msg(text, width, indent=0, prefix=""):
text = repr(text).replace("`", "\\`").replace("\\n", " ``\\n`` ")
sindent = " " * indent if not prefix else prefix
wrapped_text = textwrap.wrap(text, width, subsequent_indent=sindent)
return ("\n".join(wrapped_text))[1:-1].rstrip() | r"""
Format exception message.
Replace newline characters \n with ``\n``, ` with \` and then wrap text as
needed |
387,647 | def insert(self, fields, typecast=False):
return self._post(self.url_table, json_data={"fields": fields, "typecast": typecast}) | Inserts a record
>>> record = {'Name': 'John'}
>>> airtable.insert(record)
Args:
fields(``dict``): Fields to insert.
Must be dictionary with Column names as Key.
typecast(``boolean``): Automatic data conversion from string values.
Returns:
record (``dict``): Inserted record |
387,648 | def from_args(cls, args, project_profile_name=None):
cli_vars = parse_cli_vars(getattr(args, 'vars', '{}'))
threads_override = getattr(args, , None)
target_override = getattr(args, , None)
raw_profiles = read_profile(args.profiles_dir)
profile_name = cls.pick_profile_name(args.profile,
project_profile_name)
return cls.from_raw_profiles(
raw_profiles=raw_profiles,
profile_name=profile_name,
cli_vars=cli_vars,
target_override=target_override,
threads_override=threads_override
) | Given the raw profiles as read from disk and the name of the desired
profile if specified, return the profile component of the runtime
config.
:param args argparse.Namespace: The arguments as parsed from the cli.
:param project_profile_name Optional[str]: The profile name, if
specified in a project.
:raises DbtProjectError: If there is no profile name specified in the
project or the command line arguments, or if the specified profile
is not found
:raises DbtProfileError: If the profile is invalid or missing, or the
target could not be found.
:returns Profile: The new Profile object. |
387,649 | def start(self):
if self._running:
raise RuntimeError('{} is already running'.format(
self._owner.__class__.__name__))
self._running = True
self.queue_command(self._owner.start_event)
while self._incoming:
self.queue_command(self._incoming.popleft()) | Start the component's event loop (thread-safe).
After the event loop is started the Qt thread calls the
component's :py:meth:`~Component.start_event` method, then calls
its :py:meth:`~Component.new_frame_event` and
:py:meth:`~Component.new_config_event` methods as required until
:py:meth:`~Component.stop` is called. Finally the component's
:py:meth:`~Component.stop_event` method is called before the
event loop terminates. |
387,650 | def format_command(
command_args,
command_output,
):
text = 'Command arguments: {}\n'.format(command_args)
if not command_output:
text += 'Command output: None'
elif logger.getEffectiveLevel() > logging.DEBUG:
text += 'Command output: [use --verbose to show]'
else:
if not command_output.endswith('\n'):
command_output += '\n'
text += (
'Command output:\n{}'
).format(command_output)
return text | Format command information for logging. |
387,651 | def subset(args):
p = OptionParser(subset.__doc__)
p.add_option("--qchrs", default=None,
help="query chrs to extract, comma sep [default: %default]")
p.add_option("--schrs", default=None,
help="subject chrs to extract, comma sep [default: %default]")
p.add_option("--convert", default=False, action="store_true",
help="convert accns to chr_rank [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
blastfile, qbedfile, sbedfile = args
qchrs = opts.qchrs
schrs = opts.schrs
assert qchrs or schrs, p.print_help()
convert = opts.convert
outfile = blastfile + "."
if qchrs:
outfile += qchrs + "."
qchrs = set(qchrs.split(","))
else:
qchrs = set(Bed(qbedfile).seqids)
if schrs:
schrs = set(schrs.split(","))
if qbedfile != sbedfile or qchrs != schrs:
outfile += ",".join(schrs) + "."
else:
schrs = set(Bed(sbedfile).seqids)
outfile += "blast"
qo = Bed(qbedfile).order
so = Bed(sbedfile).order
fw = must_open(outfile, "w")
for b in Blast(blastfile):
q, s = b.query, b.subject
if qo[q][1].seqid in qchrs and so[s][1].seqid in schrs:
if convert:
b.query = qo[q][1].seqid + "_" + "{0:05d}".format(qo[q][0])
b.subject = so[s][1].seqid + "_" + "{0:05d}".format(so[s][0])
print(b, file=fw)
fw.close()
logging.debug("Subset blastfile written to `{0}`".format(outfile)) | %prog subset blastfile qbedfile sbedfile
Extract blast hits between given query and subject chrs.
If --qchrs or --schrs is not given, then all chrs from q/s genome will
be included. However one of --qchrs and --schrs must be specified.
Otherwise the script will do nothing. |
387,652 | def write(self, chunk, offset):
return lib.zfile_write(self._as_parameter_, chunk, offset) | Write chunk to file at specified position
Return 0 if OK, else -1 |
387,653 | def pywt_wavelet(wavelet):
if isinstance(wavelet, pywt.Wavelet):
return wavelet
else:
return pywt.Wavelet(wavelet) | Convert ``wavelet`` to a `pywt.Wavelet` instance. |
387,654 | def get_stream_url(self, session_id, stream_id=None):
url = self.api_url + '/v2/project/' + self.api_key + '/session/' + session_id + '/stream'
if stream_id:
url = url + '/' + stream_id
return url | this method returns the url to get streams information |
387,655 | def insert(self, index, p_object):
validated_value = self.get_validated_object(p_object)
if validated_value is not None:
self.__modified_data__.insert(index, validated_value) | Insert an element into the list. |
387,656 | def _clone(self, cid):
try:
iid = self.client.commit(
container=cid,
conf={
'Labels': {
'io.projectatomic.Temporary': 'true'
}
}
)['Id']
except docker.errors.APIError as ex:
raise MountError(str(ex))
self.tmp_image = iid
return self._create_temp_container(iid) | Create a temporary image snapshot from a given cid.
Temporary image snapshots are marked with a sentinel label
so that they can be cleaned on unmount. |
387,657 | def wrap_rtx(packet, payload_type, sequence_number, ssrc):
rtx = RtpPacket(
payload_type=payload_type,
marker=packet.marker,
sequence_number=sequence_number,
timestamp=packet.timestamp,
ssrc=ssrc,
payload=pack('!H', packet.sequence_number) + packet.payload)
rtx.csrc = packet.csrc
rtx.extensions = packet.extensions
return rtx | Create a retransmission packet from a lost packet. |
387,658 | def change_type(self, bucket, key, storage_type):
resource = entry(bucket, key)
return self.__rs_do('chtype', resource, 'type/{0}'.format(storage_type)) | Change the storage type of a file.
Change the file's storage type to standard or infrequent-access (low-frequency) storage. See:
https://developer.qiniu.com/kodo/api/3710/modify-the-file-type
Args:
bucket: bucket holding the target resource
key: file name of the target resource
storage_type: target storage type; 0 for standard storage, 1 for infrequent-access storage |
387,659 | def receive_external(self, http_verb, host, url, http_headers):
if http_verb == 'GET':
return self.http.get(host + url, headers=http_headers, stream=True)
else:
raise ValueError("Unsupported http_verb:" + http_verb) | Retrieve a streaming request for a file.
:param http_verb: str GET is only supported right now
:param host: str host we are requesting the file from
:param url: str url to ask the host for
:param http_headers: object headers to send with the request
:return: requests.Response containing the successful result |
387,660 | def cheat(num):
solution = click.style(Problem(num).solution, bold=True)
click.confirm("View answer to problem %i?" % num, abort=True)
click.echo("The answer to problem {} is {}.".format(num, solution)) | View the answer to a problem. |
387,661 | def write_tsv(self, path):
with open(path, 'w') as ofh:
writer = csv.writer(
ofh, dialect='excel-tab',
quoting=csv.QUOTE_NONE, lineterminator=os.linesep
)
for gs in self._gene_sets.values():
writer.writerow(gs.to_list()) | Write the database to a tab-delimited text file.
Parameters
----------
path: str
The path name of the file.
Returns
-------
None |
387,662 | def external_commands(self):
res = []
with self.app.external_commands_lock:
for cmd in self.app.get_external_commands():
res.append(cmd.serialize())
return res | Get the external commands from the daemon
Use a lock for this function to protect
:return: serialized external command list
:rtype: str |
387,663 | def linspace2(a, b, n, dtype=None):
a = linspace(a, b, n + 1, dtype=dtype)[:-1]
if len(a) > 1:
diff01 = ((a[1] - a[0]) / 2).astype(a.dtype)
a += diff01
return a | similar to numpy.linspace but excluding the boundaries
this is the normal numpy.linspace:
>>> print linspace(0,1,5)
[ 0. 0.25 0.5 0.75 1. ]
and this gives excludes the boundaries:
>>> print linspace2(0,1,5)
[ 0.1 0.3 0.5 0.7 0.9] |
387,664 | def serialize(
self,
value,
state
):
if self._nested is None:
state.raise_error(InvalidRootProcessor,
'Cannot directly serialize the non-root element "{}"'.format(self.alias))
if not value and self.required:
state.raise_error(MissingValue, 'Missing required value "{}"'.format(
self.alias))
start_element, end_element = _element_path_create_new(self._nested)
self._serialize(end_element, value, state)
return start_element | Serialize the value into a new Element object and return it. |
387,665 | def clean_axis(axis):
axis.get_xaxis().set_ticks([])
axis.get_yaxis().set_ticks([])
for spine in list(axis.spines.values()):
spine.set_visible(False) | Remove ticks, tick labels, and frame from axis |
387,666 | def load_from_stream(self, stream, container, **opts):
root = ET.parse(stream).getroot()
path = anyconfig.utils.get_path_from_stream(stream)
nspaces = _namespaces_from_file(path)
return root_to_container(root, container=container,
nspaces=nspaces, **opts) | :param stream: XML file or file-like object
:param container: callable to make a container object
:param opts: optional keyword parameters to be sanitized
:return: Dict-like object holding config parameters |
387,667 | def square_distance(a: Square, b: Square) -> int:
return max(abs(square_file(a) - square_file(b)), abs(square_rank(a) - square_rank(b))) | Gets the distance (i.e., the number of king steps) from square *a* to *b*. |
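For instance, using python-chess square constants (an assumption for illustration; any 0-63 square indices work):

import chess
assert square_distance(chess.A1, chess.H8) == 7  # corner to corner: seven king steps
assert square_distance(chess.E4, chess.E5) == 1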
387,668 | def script_current_send(self, seq, force_mavlink1=False):
return self.send(self.script_current_encode(seq), force_mavlink1=force_mavlink1) | This message informs about the currently active SCRIPT.
seq : Active Sequence (uint16_t) |
387,669 | def chat_update_message(self, channel, text, timestamp, **params):
method = 'chat.update'
if self._channel_is_name(channel):
channel = self.channel_name_to_id(channel)
params.update({
'channel': channel,
'text': text,
'ts': timestamp,
})
return self._make_request(method, params) | chat.update
This method updates a message.
Required parameters:
`channel`: Channel containing the message to be updated. (e.g: "C1234567890")
`text`: New text for the message, using the default formatting rules. (e.g: "Hello world")
`timestamp`: Timestamp of the message to be updated (e.g: "1405894322.002768")
https://api.slack.com/methods/chat.update |
387,670 | def _as_rdf_xml(self, ns):
self.rdf_identity = self._get_identity(ns)
elements = []
elements.append(ET.Element(NS('sbol', 'persistentIdentity'),
attrib={NS('rdf', 'resource'):
self._get_persistent_identitity(ns)}))
if self.name is not None:
name = ET.Element(NS('dcterms', 'title'))
name.text = self.name
elements.append(name)
if self.display_id is not None:
display_id = ET.Element(NS('sbol', 'displayId'))
display_id.text = self.display_id
elements.append(display_id)
if self.version is not None:
version = ET.Element(NS('sbol', 'version'))
version.text = self.version
elements.append(version)
if self.was_derived_from is not None:
elements.append(ET.Element(NS('prov', 'wasDerivedFrom'),
attrib={NS('rdf', 'resource'): self.was_derived_from}))
if self.description is not None:
description = ET.Element(NS('dcterms', 'description'))
description.text = self.description
elements.append(description)
for a in self.annotations:
elements.append(a._as_rdf_xml(ns))
return elements | Return identity details for the element as XML nodes |
387,671 | def dump(self):
result = list(self.output_lines())
if self.locked:
result.append('locked')
if self.choices:
for choice in self.choices:
result.append('* ' + choice)
if self.explanation:
result.append('# ' + self.explanation)
return '\n'.join(result) | Serialize a test case to a string. |
387,672 | def password_hash(password):
try:
return bcrypt_sha256.encrypt(password)
except TypeError:
return bcrypt_sha256.encrypt(password.decode()) | Hash the password, using bcrypt+sha256.
.. versionchanged:: 1.1.0
:param str password: Password in plaintext
:return: password hash
:rtype: str |
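A usage sketch, assuming bcrypt_sha256 comes from passlib (whose modern API spells encrypt as hash):

h = password_hash('hunter2')
from passlib.hash import bcrypt_sha256
assert bcrypt_sha256.verify('hunter2', h)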
387,673 | def sill(self):
sill = self.nugget
for v in self.variograms:
sill += v.contribution
return sill | get the sill of the GeoStruct
Return
------
sill : float
the sill of the (nested) GeoStruct, including nugget and contribution
from each variogram |
387,674 | def getByTime(self, startTime=None, endTime=None):
collections = self.get_data_collections()
if startTime is not None:
startTime = float(startTime)
if endTime is not None:
endTime = float(endTime)
if startTime is not None and endTime is not None:
timeQuery = {"$and": [{"timestamps": {"$gt": startTime}},
{"timestamps": {"$lt": endTime}}]}
elif startTime is not None and endTime is None:
timeQuery = {"timestamps": {"$gt": startTime}}
elif startTime is None and endTime is not None:
timeQuery = {"timestamps": {"$lt": endTime}}
IDs = []
for coll in collections:
docs = self.noteDB[coll].find(timeQuery, {"ID": 1, "_id": 0})
for doc in docs:
IDs.append(doc['ID'])
return IDs | :desc: Get all the notes in the given time window
:param int startTime: The begining of the window
:param int endTime: The end of the window
:returns: A list of IDs
:rtype: list |
387,675 | def firsttime(self):
self.config.set('DEFAULTS', 'firsttime', 'no')
if self.cli_config.getboolean('core', 'collect_telemetry', fallback=False):
print(PRIVACY_STATEMENT)
else:
self.cli_config.set_value('core', 'collect_telemetry', ask_user_for_telemetry())
self.update() | sets it as already done |
387,676 | def GetRemainder(self):
ret = libxml2mod.xmlTextReaderGetRemainder(self._o)
if ret is None: raise treeError('xmlTextReaderGetRemainder() failed')
__tmp = inputBuffer(_obj=ret)
return __tmp | Method to get the remainder of the buffered XML. this
method stops the parser, set its state to End Of File and
return the input stream with what is left that the parser
did not use. The implementation is not good, the parser
certainly progressed past what's left in reader->input,
and there is an allocation problem. Best would be to
rewrite it differently. |
387,677 | def get_ticker(self, symbol=None):
data = {}
tick_path = 'market/allTickers'
if symbol is not None:
tick_path = 'market/orderbook/level1'
data = {
'symbol': symbol
}
return self._get(tick_path, False, data=data) | Get symbol tick
https://docs.kucoin.com/#get-ticker
:param symbol: (optional) Name of symbol e.g. KCS-BTC
:type symbol: string
.. code:: python
all_ticks = client.get_ticker()
ticker = client.get_ticker('ETH-BTC')
:returns: ApiResponse
.. code:: python
{
"sequence": "1545825031840", # now sequence
"price": "3494.367783", # last trade price
"size": "0.05027185", # last trade size
"bestBid": "3494.367783", # best bid price
"bestBidSize": "2.60323254", # size at best bid price
"bestAsk": "3499.12", # best ask price
"bestAskSize": "0.01474011" # size at best ask price
}
:raises: KucoinResponseException, KucoinAPIException |
387,678 | def _disconnect(self, mqttc, userdata, rc):
self.connected = False
if rc != 0:
LOGGER.info("MQTT Unexpected disconnection. Trying reconnect.")
try:
self._mqttc.reconnect()
except Exception as ex:
template = "An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
LOGGER.error("MQTT Connection error: " + message)
else:
LOGGER.info("MQTT Graceful disconnection.") | The callback for when a DISCONNECT occurs.
:param mqttc: The client instance for this callback
:param userdata: The private userdata for the mqtt client. Not used in Polyglot
:param rc: Result code of connection, 0 = Graceful, anything else is unclean |
387,679 | def get_requests(self):
requests = self.derived_get_requests()
for request in requests:
request.url = URLHelper.remove_hash(request.url)
return requests | Get all the new requests that were found in the response.
Returns:
list(:class:`nyawc.http.Request`): A list of new requests that were found. |
387,680 | def build_request(self, input_data=None, *args, **kwargs):
if input_data is not None:
self.input_data = input_data
if self.input_data is None:
raise ValueError('input data is required')
if self.uo is None:
raise ValueError('user object (uo) is required')
self.request = RequestHolder()
self.request.nonce = get_random_vector(EBConsts.FRESHNESS_NONCE_LEN)
self.request.api_object = EBUtils.build_api_object(self.uo)
self.request.endpoint = self.uo.resolve_endpoint()
self.request.configuration = self.configuration
self.request.api_method = EBConsts.REQUEST_PROCESS_DATA
plain_buffer = \
to_bytes(31, 1) + to_bytes(self.uo.uo_id, 4) + \
to_bytes(self.request.nonce, EBConsts.FRESHNESS_NONCE_LEN) + to_bytes(self.input_data)
plain_buffer = PKCS7.pad(plain_buffer)
ciphertext = aes_enc(self.uo.enc_key, plain_buffer)
mac = cbc_mac(self.uo.mac_key, ciphertext)
self.request.body = {
"data": "Packet0_%s_0000%s" % (EBUtils.get_request_type(self.uo), to_hex(ciphertext + mac))}
return self.request | Builds request
:param input_data:
:param args:
:param kwargs:
:return: |
387,681 | def chunks(iterable, size):
it = iter(iterable)
item = list(islice(it, size))
while item:
yield item
item = list(islice(it, size)) | Splits a very large list into evenly sized chunks.
Returns an iterator of lists that are no more than the size passed in. |
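For example:

list(chunks(range(10), 3))
# -> [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]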
387,682 | def spectrum(self, function='geoid', lmax=None, unit='per_l', base=10.):
if function.lower() not in ('potential', 'geoid', 'radial', 'total'):
raise ValueError(
"function must be of type 'potential', 'geoid', 'radial', or "
"'total'. Provided value was {:s}".format(repr(function))
)
s = _spectrum(self.coeffs, normalization=self.normalization,
convention='power', unit=unit, base=base, lmax=lmax)
if self.errors is not None:
es = _spectrum(self.errors, normalization=self.normalization,
convention='power', unit=unit, base=base, lmax=lmax)
if function.lower() == 'potential':
s *= (self.gm / self.r0)**2
if self.errors is not None:
es *= (self.gm / self.r0)**2
elif function.lower() == 'geoid':
s *= self.r0**2
if self.errors is not None:
es *= self.r0**2
elif function.lower() == 'radial':
degrees = _np.arange(len(s))
s *= (self.gm * (degrees + 1) / self.r0**2)**2
if self.errors is not None:
es *= (self.gm * (degrees + 1) / self.r0**2)**2
elif function.lower() == 'total':
degrees = _np.arange(len(s))
s *= (self.gm / self.r0**2)**2 * (degrees + 1) * (2 * degrees + 1)
if self.errors is not None:
es *= (self.gm / self.r0**2)**2 * (degrees + 1) * \
(2 * degrees + 1)
if self.errors is not None:
return s, es
else:
return s | Return the spectrum as a function of spherical harmonic degree.
Usage
-----
spectrum, [error_spectrum] = x.spectrum([function, lmax, unit, base])
Returns
-------
spectrum : ndarray, shape (lmax+1)
1-D numpy ndarray of the spectrum, where lmax is the maximum
spherical harmonic degree.
error_spectrum : ndarray, shape (lmax+1)
1-D numpy ndarray of the error_spectrum (if the attribute errors
is not None).
Parameters
----------
function : str, optional, default = 'geoid'
The type of power spectrum to return: 'potential' for the
gravitational potential in m2/s2, 'geoid' for the geoid in m,
'radial' for the radial gravity in m/s2, or 'total' for the total
gravitational field in m/s2.
lmax : int, optional, default = x.lmax
Maximum spherical harmonic degree of the spectrum to return.
unit : str, optional, default = 'per_l'
If 'per_l', return the total contribution to the spectrum for each
spherical harmonic degree l. If 'per_lm', return the average
contribution to the spectrum for each coefficient at spherical
harmonic degree l. If 'per_dlogl', return the spectrum per log
interval dlog_a(l).
base : float, optional, default = 10.
The logarithm base when calculating the 'per_dlogl' spectrum.
Description
-----------
This method returns the power spectrum of the class instance, where the
type of function is defined by the function parameter: 'potential' for
the gravitational potential, 'geoid' for the geoid, 'radial' for
the radial gravity, or 'total' for the total gravitational field. In
all cases, the total power of the function is defined as the integral
of the function squared over all space, divided by the area the
function spans. If the mean of the function is zero, this is equivalent
to the variance of the function.
The output spectrum can be expresed using one of three units. 'per_l'
returns the contribution to the total spectrum from all angular orders
at degree l. 'per_lm' returns the average contribution to the total
spectrum from a single coefficient at degree l, which is equal to the
'per_l' spectrum divided by (2l+1). 'per_dlogl' returns the
contribution to the total spectrum from all angular orders over an
infinitessimal logarithmic degree band. The contrubution in the band
dlog_a(l) is spectrum(l, 'per_dlogl')*dlog_a(l), where a is the base,
and where spectrum(l, 'per_dlogl) is equal to
spectrum(l, 'per_l')*l*log(a). |
387,683 | def _clean_up_columns(
self):
self.log.debug()
sqlQueries = [
"update tcs_helper_catalogue_tables_info set old_table_name = table_name where old_table_name is null;",
"update tcs_helper_catalogue_tables_info set version_number = where table_name like and version_number is null;",
,
,
"update tcs_helper_catalogue_views_info set old_view_name = view_name where old_view_name is null;",
]
for sqlQuery in sqlQueries:
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
sqlQuery = u"select view_name from tcs_helper_catalogue_views_info where object_type is null" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
for row in rows:
view_name = row["view_name"]
object_type = view_name.replace("tcs_view_", "").split("_")[0]
sqlQuery = u"update tcs_helper_catalogue_views_info set object_type = '%(object_type)s' where view_name = '%(view_name)s'" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
sqlQuery = u"select view_name from tcs_helper_catalogue_views_info where table_name is null" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
for row in rows:
view_name = row["view_name"]
table_name = view_name.replace("tcs_view_", "").split("_")[1:]
table_name = ("_").join(table_name)
table_name = "tcs_cat_%(table_name)s" % locals()
sqlQuery = u"update tcs_helper_catalogue_views_info set table_name = '%(table_name)s' where view_name = '%(view_name)s'" % locals()
writequery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
)
self.log.debug('completed the ``_clean_up_columns`` method')
return None | clean up columns
.. todo ::
- update key arguments values and definitions with defaults
- update return values and definitions
- update usage examples and text
- update docstring text
- check sublime snippet exists
- clip any useful text to docs mindmap
- regenerate the docs and check redendering of this docstring |
387,684 | def lpc(blk, order=None):
if order < 100:
return lpc.nautocor(blk, order)
try:
return lpc.kautocor(blk, order)
except ParCorError:
return lpc.nautocor(blk, order) | Find the Linear Predictive Coding (LPC) coefficients as a ZFilter object,
the analysis whitening filter. This implementation uses the autocorrelation
method, using the Levinson-Durbin algorithm or Numpy pseudo-inverse for
linear system solving, when needed.
Parameters
----------
blk :
An iterable with well-defined length. Don't use this function with Stream
objects!
order :
The order of the resulting ZFilter object. Defaults to ``len(blk) - 1``.
Returns
-------
A FIR filter, as a ZFilter object. The mean squared error over the given
block is in its "error" attribute.
Hint
----
See ``lpc.kautocor`` example, which should apply equally for this strategy.
See Also
--------
levinson_durbin :
Levinson-Durbin algorithm for solving Yule-Walker equations (Toeplitz
matrix linear system).
lpc.nautocor:
LPC coefficients from linear system solved with Numpy pseudo-inverse.
lpc.kautocor:
LPC coefficients obtained with Levinson-Durbin algorithm. |
387,685 | def upgradeShare1to2(oldShare):
"Upgrader from Share version 1 to version 2."
sharedInterfaces = []
attrs = set(oldShare.sharedAttributeNames.split(u','))
for iface in implementedBy(oldShare.sharedItem.__class__):
if set(iface) == attrs or attrs == set():
sharedInterfaces.append(iface)
newShare = oldShare.upgradeVersion('sharing_share', 1, 2,
shareID=oldShare.shareID,
sharedItem=oldShare.sharedItem,
sharedTo=oldShare.sharedTo,
sharedInterfaces=sharedInterfaces)
return newShare | Upgrader from Share version 1 to version 2. |
387,686 | def setObsoletedBy(self, pid, obsoletedByPid, serialVersion, vendorSpecific=None):
response = self.setObsoletedByResponse(
pid, obsoletedByPid, serialVersion, vendorSpecific
)
return self._read_boolean_response(response) | See Also: setObsoletedByResponse()
Args:
pid:
obsoletedByPid:
serialVersion:
vendorSpecific:
Returns: |
387,687 | def set_roots(self, uproot_with=None):
self.treedir = os.environ.get('TREE_DIR', None) if not uproot_with else uproot_with
if not self.treedir:
treefilepath = os.path.dirname(os.path.abspath(__file__))
if 'python' in treefilepath:
self.treedir = treefilepath.rsplit('/', 2)[0]
else:
self.treedir = treefilepath
os.environ['TREE_DIR'] = self.treedir
if 'SAS_BASE_DIR' in os.environ:
self.sasbasedir = os.environ["SAS_BASE_DIR"]
else:
self.sasbasedir = os.path.expanduser('~/sas')
if not os.path.isdir(self.sasbasedir):
os.makedirs(self.sasbasedir) | Set the roots of the tree in the os environment
Parameters:
uproot_with (str):
A new TREE_DIR path used to override an existing TREE_DIR environment variable |
387,688 | def write(self, args):
ShellProgressView.done = False
message = args.get('message', '')
percent = args.get('percent', None)
if percent:
ShellProgressView.progress_bar = _format_value(message, percent)
if int(percent) == 1:
ShellProgressView.progress_bar = None
ShellProgressView.progress = message | writes the progress |
387,689 | def like_shared_file(self, sharekey=None):
if not sharekey:
raise Exception(
"You must specify a sharekey of the file you "
"want to 'like'.")
endpoint = '/api/sharedfile/{sharekey}/like'.format(sharekey=sharekey)
data = self._make_request("POST", endpoint=endpoint, data=None)
try:
sf = SharedFile.NewFromJSON(data)
sf.liked = True
return sf
except:
raise Exception("{0}".format(data[])) | 'Like' a SharedFile. mlkshk doesn't allow you to unlike a
sharedfile, so this is ~~permanent~~.
Args:
sharekey (str): Sharekey for the file you want to 'like'.
Returns:
Either a SharedFile on success, or an exception on error. |
387,690 | def complete_info(self, text, line, begidx, endidx):
opts = self.INFO_OPTS
if not text:
completions = opts
else:
completions = [f
for f in opts
if f.startswith(text)
]
return completions | completion for info command |
387,691 | def dlabfs(handle):
handle = ctypes.c_int(handle)
descr = stypes.SpiceDLADescr()
found = ctypes.c_int()
libspice.dlabfs_c(handle, ctypes.byref(descr), ctypes.byref(found))
return descr, bool(found.value) | Begin a forward segment search in a DLA file.
https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dlabfs_c.html
:param handle: Handle of open DLA file.
:type handle: int
:return: Descriptor of next segment in DLA file
:rtype: spiceypy.utils.support_types.SpiceDLADescr |
387,692 | def extended_analog(self, pin, data):
task = asyncio.ensure_future(self.core.extended_analog(pin, data))
self.loop.run_until_complete(task) | This method will send an extended-data analog write command
to the selected pin.
:param pin: 0 - 127
:param data: 0 - 0-0x4000 (14 bits)
:returns: No return value |
387,693 | def serialize_formula(formula):
rPd(NH3)4+3H12N4Pd+3
charge = charge_from_formula(formula)
element_dict = nested_formula_parser(formula)
base = atoms_to_Hill(element_dict)
if charge == 0:
pass
elif charge > 0:
if charge == 1:
base += '+'
else:
base += '+' + str(charge)
elif charge < 0:
if charge == -1:
base += '-'
else:
base += str(charge)
return base | r'''Basic formula serializer to construct a consistently-formatted formula.
This is necessary for handling user-supplied formulas, which are not always
well formatted.
Performs no sanity checking that elements are actually elements.
Parameters
----------
formula : str
Formula string as parseable by the method nested_formula_parser, [-]
Returns
-------
formula : str
A consistently formatted formula to describe a molecular formula, [-]
Notes
-----
Examples
--------
>>> serialize_formula('Pd(NH3)4+3')
'H12N4Pd+3' |
387,694 | def optional(validator):
if isinstance(validator, list):
return _OptionalValidator(_AndValidator(validator))
return _OptionalValidator(validator) | A validator that makes an attribute optional. An optional attribute is one
which can be set to ``None`` in addition to satisfying the requirements of
the sub-validator.
:param validator: A validator (or a list of validators) that is used for
non-``None`` values.
:type validator: callable or :class:`list` of callables.
.. versionadded:: 15.1.0
.. versionchanged:: 17.1.0 *validator* can be a list of validators. |
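A usage sketch with the attrs library:

import attr
@attr.s
class C(object):
    x = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(int)))
C(x=None)  # accepted
C(x=42)    # accepted; C(x='no') would raise TypeError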
387,695 | def equivalent(kls, first, second):
if first.empty() and second.empty():
return True
elif first.vertices.shape[0] != second.vertices.shape[0]:
return False
elif first.edges.shape[0] != second.edges.shape[0]:
return False
EPSILON = 1e-7
vertex1, inv1 = np.unique(first.vertices, axis=0, return_inverse=True)
vertex2, inv2 = np.unique(second.vertices, axis=0, return_inverse=True)
vertex_match = np.all(np.abs(vertex1 - vertex2) < EPSILON)
if not vertex_match:
return False
remapping = {}
for i in range(len(inv1)):
remapping[inv1[i]] = inv2[i]
remap = np.vectorize(lambda idx: remapping[idx])
edges1 = np.sort(np.unique(first.edges, axis=0), axis=1)
edges1 = edges1[np.lexsort(edges1[:,::-1].T)]
edges2 = remap(second.edges)
edges2 = np.sort(np.unique(edges2, axis=0), axis=1)
edges2 = edges2[np.lexsort(edges2[:,::-1].T)]
edges_match = np.all(edges1 == edges2)
if not edges_match:
return False
second_verts = {}
for i, vert in enumerate(second.vertices):
second_verts[tuple(vert)] = i
for i in range(len(first.radii)):
i2 = second_verts[tuple(first.vertices[i])]
if first.radii[i] != second.radii[i2]:
return False
if first.vertex_types[i] != second.vertex_types[i2]:
return False
return True | Tests that two skeletons are the same in form not merely that
their array contents are exactly the same. This test can be
made more sophisticated. |
387,696 | def check_var_units(self, ds):
results = []
for variable in self.get_applicable_variables(ds):
msgs = []
unit_check = hasattr(ds.variables[variable], 'units')
no_dim_check = (getattr(ds.variables[variable], 'dimensions') == tuple())
if no_dim_check:
continue
if not unit_check:
msgs.append("units")
results.append(Result(BaseCheck.HIGH, unit_check, self._var_header.format(variable), msgs))
return results | Checks each applicable variable for the units attribute
:param netCDF4.Dataset ds: An open netCDF dataset |
387,697 | def get_histogram_bins(min_, max_, std, count):
width = _get_bin_width(std, count)
count = int(round((max_ - min_) / width) + 1)
if count:
bins = [i * width + min_ for i in xrange(1, count + 1)]
else:
bins = [min_]
return bins | Return optimal bins given the input parameters |
387,698 | def node_validate(node_dict, node_num, cmd_name):
req_lu = {"run": ["stopped", "Already Running"],
"stop": ["running", "Already Stopped"],
"connect": ["running", "Can't Connect, Node Not Running"],
"details": [node_dict[node_num].state, ""]}
tm = {True: ("Node {1}{2}{0} ({5}{3}{0} on {1}{4}{0})".
format(C_NORM, C_WARN, node_num,
node_dict[node_num].name,
node_dict[node_num].cloud_disp, C_TI)),
False: req_lu[cmd_name][1]}
node_valid = bool(req_lu[cmd_name][0] == node_dict[node_num].state)
node_info = tm[node_valid]
return node_valid, node_info | Validate that command can be performed on target node. |
387,699 | def NRMSE_sliding(data, pred, windowSize):
halfWindowSize = int(round(float(windowSize)/2))
window_center = range(halfWindowSize, len(data)-halfWindowSize, int(round(float(halfWindowSize)/5.0)))
nrmse = []
for wc in window_center:
nrmse.append(NRMSE(data[wc-halfWindowSize:wc+halfWindowSize],
pred[wc-halfWindowSize:wc+halfWindowSize]))
return (window_center, nrmse) | Computing NRMSE in a sliding window
:param data:
:param pred:
:param windowSize:
:return: (window_center, NRMSE) |
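A minimal driving sketch, assuming an NRMSE(data, pred) helper defined elsewhere in the module (the definition below is an illustrative stand-in, not the source's):

import numpy as np
def NRMSE(data, pred):  # assumed: RMSE normalized by the data range
    data, pred = np.asarray(data), np.asarray(pred)
    return np.sqrt(np.mean((data - pred) ** 2)) / (data.max() - data.min())

t = np.linspace(0, 10, 500)
centers, scores = NRMSE_sliding(np.sin(t), np.sin(t) + 0.1, windowSize=50)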