| Unnamed: 0 (int64, 0-389k) | code (stringlengths 26-79.6k) | docstring (stringlengths 1-46.9k) |
---|---|---|
28,100 |
def set_section_order(self, section_name_list):
self.section_headings = section_name_list[:]
for section_name in self.sections.keys():
if section_name not in section_name_list:
self.section_headings.append(section_name)
return
|
Set the order of the sections, which are by default unordered.
Any unlisted sections that exist will be placed at the end of the
document in no particular order.
|
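A standalone sketch of the reordering behaviour described above (the function and section names are illustrative, not part of the original class): requested sections keep their order, and any remaining sections are appended at the end.

def order_sections(requested, existing):
    # Keep the requested order, then append any existing section
    # that was not listed (in no particular order).
    ordered = list(requested)
    for name in existing:
        if name not in requested:
            ordered.append(name)
    return ordered

# order_sections(["Intro", "Methods"], ["Appendix", "Methods", "Intro"])
# -> ["Intro", "Methods", "Appendix"]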
28,101 |
def _parse_binary(v, header_d):
v = nullify(v)
if v is None:
return None
if six.PY2:
try:
return six.binary_type(v).strip()
except UnicodeEncodeError:
return six.text_type(v).strip()
else:
try:
return six.binary_type(v, ).strip()
except UnicodeEncodeError:
return six.text_type(v).strip()
|
Parses binary string.
Note:
<str> for py2 and <binary> for py3.
|
28,102 |
def copy(self):
return self.__class__(self._data.copy(), self._sensitive.copy(),
self._cwd)
|
Retrieve a copy of the Environment. Note that this is a shallow
copy.
|
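The shallow-copy caveat can be illustrated with a short, self-contained sketch (independent of the Environment class itself): a nested value mutated through the shallow copy is also visible in the original, while copy.deepcopy isolates it.

import copy

original = {"data": {"PATH": "/usr/bin"}}
shallow = copy.copy(original)      # the nested dict is shared
deep = copy.deepcopy(original)     # the nested dict is duplicated

shallow["data"]["PATH"] = "/opt/bin"
print(original["data"]["PATH"])    # '/opt/bin' -- the change leaks through
print(deep["data"]["PATH"])        # '/usr/bin' -- the deep copy is unaffected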
28,103 |
def delete(self, key, **opts):
key, store = self._expand_opts(key, opts)
try:
del store[key]
except KeyError:
pass
|
Remove a key from the cache.
|
28,104 |
def ordered(self):
active, inactive = self.active_inactive
order = active + inactive
return UnitCell(self.matrix[:,order], self.active[order])
|
An equivalent unit cell with the active cell vectors coming first
|
28,105 |
def rebuildtable(cls):
cls._closure_model.objects.all().delete()
cls._closure_model.objects.bulk_create([cls._closure_model(
parent_id=x["pk"],
child_id=x["pk"],
depth=0
) for x in cls.objects.values("pk")])
for node in cls.objects.all():
node._closure_createlink()
|
Regenerate the entire closuretree.
|
28,106 |
def get_new_links(self, url, resp):
links_on_page = resp.xpath()
links = [utils.clean_url(u, url) for u in links_on_page]
links = [x for x in links if utils.check_protocol(x)]
if not self.args[]:
domain = utils.get_domain(url)
links = [x for x in links if utils.get_domain(x) == domain]
if self.args[]:
links = utils.re_filter(links, self.args[])
return links
|
Get new links from a URL and filter them.
|
28,107 |
def import_csv(filepath: str, currency: str):
logger.debug(f"currency = {currency}")
currency = currency.upper()
app = PriceDbApplication()
app.logger = logger
app.import_prices(filepath, currency)
|
Import prices from CSV file
|
28,108 |
def unregister_widget(self, widget_cls):
if widget_cls.__name__ in self.widgets:
del self.widgets[widget_cls().get_name()]
|
Unregisters the given widget.
|
28,109 |
def vert_dpi(self):
pHYs = self._chunks.pHYs
if pHYs is None:
return 72
return self._dpi(pHYs.units_specifier, pHYs.vert_px_per_unit)
|
Integer dots per inch for the height of this image. Defaults to 72
when not present in the file, as is often the case.
|
28,110 |
def terminate(self, nowait=False):
logger.debug("Acquiring lock for service termination")
with self.lock:
logger.debug("Terminating service")
if not self.listener:
logger.warning("Service already stopped.")
return
self.listener.stop(nowait)
try:
if not nowait:
self._post_log_batch()
except Exception:
if self.error_handler:
self.error_handler(sys.exc_info())
else:
raise
finally:
self.queue = None
self.listener = None
|
Finalize and stop service
Args:
nowait: set to True to terminate immediately and skip processing
messages still in the queue
|
28,111 |
def _require(*names):
from IPython.parallel.error import UnmetDependency
user_ns = globals()
for name in names:
if name in user_ns:
continue
try:
exec 'import %s' % name in user_ns
except ImportError:
raise UnmetDependency(name)
return True
|
Helper for @require decorator.
|
28,112 |
def Froude_densimetric(V, L, rho1, rho2, heavy=True, g=g):
if heavy:
rho3 = rho1
else:
rho3 = rho2
return V/((g*L)**0.5)*(rho3/(rho1 - rho2))**0.5
|
r'''Calculates the densimetric Froude number :math:`Fr_{den}` for velocity
`V` geometric length `L`, heavier fluid density `rho1`, and lighter fluid
density `rho2`. If desired, gravity can be specified as well. Depending on
the application, this dimensionless number may be defined with the heavy
phase or the light phase density in the numerator of the square root.
For some applications, both need to be calculated. The default is to
calculate with the heavy liquid density on top; set `heavy` to False
to reverse this.
.. math::
Fr = \frac{V}{\sqrt{gL}} \sqrt{\frac{\rho_\text{(1 or 2)}}
{\rho_1 - \rho_2}}
Parameters
----------
V : float
Velocity of the specified phase, [m/s]
L : float
Characteristic length, no typical definition [m]
rho1 : float
Density of the heavier phase, [kg/m^3]
rho2 : float
Density of the lighter phase, [kg/m^3]
heavy : bool, optional
Whether or not the density used in the numerator is the heavy phase or
the light phase, [-]
g : float, optional
Acceleration due to gravity, [m/s^2]
Returns
-------
Fr_den : float
Densimetric Froude number, [-]
Notes
-----
Many alternate definitions including density ratios have been used.
.. math::
Fr = \frac{\text{Inertial Force}}{\text{Gravity Force}}
Where the gravity force is reduced by the relative densities of one fluid
in another.
Note that an exception will be raised if rho1 < rho2, as the square root
becomes negative.
Examples
--------
>>> Froude_densimetric(1.83, L=2., rho1=800, rho2=1.2, g=9.81)
0.4134543386272418
>>> Froude_densimetric(1.83, L=2., rho1=800, rho2=1.2, g=9.81, heavy=False)
0.016013017679205096
References
----------
.. [1] Hall, A, G Stobie, and R Steven. "Further Evaluation of the
Performance of Horizontally Installed Orifice Plate and Cone
Differential Pressure Meters with Wet Gas Flows." In International
SouthEast Asia Hydrocarbon Flow Measurement Workshop, KualaLumpur,
Malaysia, 2008.
|
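As a quick arithmetic check of the formula in the docstring, the first doctest value can be reproduced by hand with plain math (this is only a verification sketch, not part of the library):

import math

V, L, rho1, rho2, g = 1.83, 2.0, 800.0, 1.2, 9.81
# Fr_den = V / sqrt(g*L) * sqrt(rho1 / (rho1 - rho2)), heavy phase on top
fr = V / math.sqrt(g * L) * math.sqrt(rho1 / (rho1 - rho2))
print(fr)  # ~0.413454, matching the first doctest above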
28,113 |
def points_possible(self, include_hidden=False):
return sum([test_case.points for testable in self.testables
for test_case in testable.test_cases
if include_hidden or not testable.is_hidden])
|
Return the total points possible for this project.
|
28,114 |
def fix_contig_names(assembly_path):
fixed_assembly = "fixed_assembly.fa"
with open(assembly_path) as in_hf, open(fixed_assembly, "w") as ou_fh:
for line in in_hf:
if line.startswith(">"):
fixed_line = line.replace(" ", "_")
ou_fh.write(fixed_line)
else:
ou_fh.write(line)
return fixed_assembly
|
Removes whitespace from the assembly contig names
Parameters
----------
assembly_path : path to assembly file
Returns
-------
str:
Path to new assembly file with fixed contig names
|
28,115 |
def run(self, ):
ra = SceneReleaseActions()
mayawin = maya_main_window()
self.rw = ReleaseWin(FILETYPES["mayamainscene"], parent=mayawin)
self.rw.set_release_actions(ra)
pm = MayaPluginManager.get()
genesis = pm.get_plugin("MayaGenesis")
c = genesis.get_config()
try:
f = models.TaskFile.objects.get(pk=c[])
except models.TaskFile.DoesNotExist:
pass
else:
if f.releasetype == :
self.rw.browser.set_selection(f)
self.rw.show()
|
Start the config editor
:returns: None
:rtype: None
:raises: None
|
28,116 |
def aggregate_hazard_summary(impact, aggregate_hazard):
source_fields = impact.keywords[]
target_fields = aggregate_hazard.keywords[]
target_compulsory_fields = [
aggregation_id_field,
aggregation_name_field,
hazard_id_field,
hazard_class_field
]
check_inputs(target_compulsory_fields, target_fields)
source_compulsory_fields = [
exposure_id_field,
exposure_class_field,
aggregation_id_field,
aggregation_name_field,
hazard_id_field,
hazard_class_field
]
check_inputs(source_compulsory_fields, source_fields)
aggregation_id = target_fields[aggregation_id_field[]]
hazard_id = target_fields[hazard_id_field[]]
hazard_class = target_fields[hazard_class_field[]]
exposure_class = source_fields[exposure_class_field[]]
exposure_class_index = impact.fields().lookupField(exposure_class)
unique_exposure = list(impact.uniqueValues(exposure_class_index))
fields = [, ]
absolute_values = create_absolute_values_structure(impact, fields)
field_index = report_on_field(impact)
aggregate_hazard.startEditing()
shift = aggregate_hazard.fields().count()
dynamic_structure = [
[exposure_count_field, unique_exposure],
]
add_fields(
aggregate_hazard,
absolute_values,
[affected_field, total_field],
dynamic_structure,
)
flat_table = FlatTable('aggregation_id', 'hazard_id', 'exposure_class')
request = QgsFeatureRequest()
request.setFlags(QgsFeatureRequest.NoGeometry)
LOGGER.debug()
for feature in impact.getFeatures(request):
if field_index is not None:
value = feature[field_index]
else:
value = 1
aggregation_value = feature[aggregation_id]
hazard_value = feature[hazard_id]
if (hazard_value is None
or hazard_value ==
or (hasattr(hazard_value, )
and hazard_value.isNull())):
hazard_value = not_exposed_class[]
exposure_value = feature[exposure_class]
if (exposure_value is None
or exposure_value ==
or (hasattr(exposure_value, )
and exposure_value.isNull())):
exposure_value =
flat_table.add_value(
value,
aggregation_id=aggregation_value,
hazard_id=hazard_value,
exposure_class=exposure_value
)
for field, field_definition in list(absolute_values.items()):
value = feature[field]
if (value ==
or value is None
or (hasattr(value, )
and value.isNull())):
value = 0
field_definition[0].add_value(
value,
aggregation_id=aggregation_value,
hazard_id=hazard_value
)
hazard_keywords = aggregate_hazard.keywords[]
hazard = hazard_keywords[]
classification = hazard_keywords[]
exposure_keywords = impact.keywords[]
exposure = exposure_keywords[]
for area in aggregate_hazard.getFeatures(request):
aggregation_value = area[aggregation_id]
feature_hazard_id = area[hazard_id]
if (feature_hazard_id ==
or feature_hazard_id is None
or (hasattr(feature_hazard_id, )
and feature_hazard_id.isNull())):
feature_hazard_id = not_exposed_class[]
feature_hazard_value = area[hazard_class]
total = 0
for i, val in enumerate(unique_exposure):
sum = flat_table.get_value(
aggregation_id=aggregation_value,
hazard_id=feature_hazard_id,
exposure_class=val
)
total += sum
aggregate_hazard.changeAttributeValue(area.id(), shift + i, sum)
affected = post_processor_affected_function(
exposure=exposure,
hazard=hazard,
classification=classification,
hazard_class=feature_hazard_value)
affected = tr(str(affected))
aggregate_hazard.changeAttributeValue(
area.id(), shift + len(unique_exposure), affected)
aggregate_hazard.changeAttributeValue(
area.id(), shift + len(unique_exposure) + 1, total)
for i, field in enumerate(absolute_values.values()):
value = field[0].get_value(
aggregation_id=aggregation_value,
hazard_id=feature_hazard_id
)
aggregate_hazard.changeAttributeValue(
area.id(), shift + len(unique_exposure) + 2 + i, value)
aggregate_hazard.commitChanges()
aggregate_hazard.keywords[] = (
layer_purpose_aggregate_hazard_impacted[])
if qgis_version() >= 21800:
aggregate_hazard.setName(aggregate_hazard.keywords[])
else:
aggregate_hazard.setLayerName(aggregate_hazard.keywords[])
aggregate_hazard.keywords[] = (
layer_purpose_aggregate_hazard_impacted[])
aggregate_hazard.keywords[] = impact.keywords.copy()
check_layer(aggregate_hazard)
return aggregate_hazard
|
Compute the summary from the source layer to the aggregate_hazard layer.
Source layer :
|exp_id|exp_class|haz_id|haz_class|aggr_id|aggr_name|affected|extra*|
Target layer :
| aggr_id | aggr_name | haz_id | haz_class | extra* |
Output layer :
|aggr_id| aggr_name|haz_id|haz_class|affected|extra*|count per exposure*|
:param impact: The layer to aggregate vector layer.
:type impact: QgsVectorLayer
:param aggregate_hazard: The aggregate_hazard vector layer where to write
statistics.
:type aggregate_hazard: QgsVectorLayer
:return: The new aggregate_hazard layer with summary.
:rtype: QgsVectorLayer
.. versionadded:: 4.0
|
28,117 |
def get_or_create(cls, issue, header, text=None):
for comment in get_comments(issue):
try:
if comment.body.splitlines()[0] == header:
obj = cls(comment, header)
break
except IndexError:
pass
else:
comment = create_comment(issue, header)
obj = cls(comment, header)
if text:
obj.edit(text)
return obj
|
Get or create the dashboard comment in this issue.
|
28,118 |
def summaries(self, sc, limit=None):
clauses = copy(self.clauses)
schema = self.schema
if self.prefix:
schema = [] + schema
clauses[] = lambda x: True
with futures.ThreadPoolExecutor(self.max_concurrency) as executor:
scanned = self._scan(schema, [self.prefix], clauses, executor)
keys = sc.parallelize(scanned).flatMap(self.store.list_keys)
return keys.take(limit) if limit else keys.collect()
|
Summary of the files contained in the current dataset
Every item in the summary is a dict containing a key name and the corresponding size of
the key item in bytes, e.g.::
{'key': 'full/path/to/my/key', 'size': 200}
:param limit: Max number of objects to retrieve
:return: An iterable of summaries
|
28,119 |
def must_open(filename, mode="r", checkexists=False, skipcheck=False, \
oappend=False):
if isinstance(filename, list):
assert "r" in mode
if filename[0].endswith((".gz", ".bz2")):
filename = " ".join(filename)
else:
import fileinput
return fileinput.input(filename)
if filename.startswith("s3://"):
from jcvi.utils.aws import pull_from_s3
filename = pull_from_s3(filename)
if filename in ("-", "stdin"):
assert "r" in mode
fp = sys.stdin
elif filename == "stdout":
assert "w" in mode
fp = sys.stdout
elif filename == "stderr":
assert "w" in mode
fp = sys.stderr
elif filename == "tmp" and mode == "w":
from tempfile import NamedTemporaryFile
fp = NamedTemporaryFile(delete=False)
elif filename.endswith(".gz"):
if "r" in mode:
cmd = "gunzip -c {0}".format(filename)
fp = popen(cmd, debug=False)
elif "w" in mode:
import gzip
fp = gzip.open(filename, mode)
elif filename.endswith(".bz2"):
if "r" in mode:
cmd = "bzcat {0}".format(filename)
fp = popen(cmd, debug=False)
elif "w" in mode:
import bz2
fp = bz2.BZ2File(filename, mode)
else:
if checkexists:
assert mode == "w"
overwrite = (not op.exists(filename)) if skipcheck \
else check_exists(filename, oappend)
if overwrite:
if oappend:
fp = open(filename, "a")
else:
fp = open(filename, "w")
else:
logging.debug("File `{0}` already exists. Skipped."\
.format(filename))
return None
else:
fp = open(filename, mode)
return fp
|
Accepts filename and returns filehandle.
Checks on multiple files, stdin/stdout/stderr, .gz or .bz2 file.
|
28,120 |
def clean_data(freqs, data, chunk, avg_bin):
if avg_bin >= chunk:
raise ValueError(
)
if chunk >= data.duration:
raise ValueError(
)
steps = numpy.arange(0, int(data.duration/chunk)-0.5, 0.5)
seglen = chunk * data.sample_rate
tref = float(data.start_time)
for freq in freqs:
for step in steps:
start, end = int(step*seglen), int((step+1)*seglen)
chunk_line = matching_line(freq, data[start:end],
tref, bin_size=avg_bin)
hann_window = numpy.hanning(len(chunk_line))
apply_hann = TimeSeries(numpy.ones(len(chunk_line)),
delta_t=chunk_line.delta_t,
epoch=chunk_line.start_time)
if step == 0:
apply_hann.data[len(hann_window)//2:] *= \
hann_window[len(hann_window)//2:]
elif step == steps[-1]:
apply_hann.data[:len(hann_window)//2] *= \
hann_window[:len(hann_window)//2]
else:
apply_hann.data *= hann_window
chunk_line.data *= apply_hann.data
data.data[start:end] -= chunk_line.data.real
return data
|
Extract time-varying (wandering) lines from strain data.
Parameters
----------
freqs: list
List containing the frequencies of the wandering lines.
data: pycbc.types.TimeSeries
Strain data to extract the wandering lines from.
chunk: float
Duration of the chunks the data will be divided into to account
for the time variation of the wandering lines. Should be smaller
than data.duration, and allow for at least a few chunks.
avg_bin: float
Duration of the bins each chunk will be divided into for averaging
the inner product when measuring the parameters of the line. Should
be smaller than chunk.
Returns
-------
data: pycbc.types.TimeSeries
The strain data with the wandering lines removed.
|
28,121 |
def export_file(self, record, field, event=None, return_format=):
self._check_file_field(field)
pl = self.__basepl(content=, format=return_format)
content_map = dict(kv)
else:
content_map = {}
return content, content_map
|
Export the contents of a file stored for a particular record
Notes
-----
Unlike other export methods, this works on a single record.
Parameters
----------
record : str
record ID
field : str
field name containing the file to be exported.
event: str
for longitudinal projects, specify the unique event here
return_format: ('json'), 'csv', 'xml'
format of error message
Returns
-------
content : bytes
content of the file
content_map : dict
content-type dictionary
|
28,122 |
def set_length_and_maybe_checksums(self, record, payload_offset=None):
if self._params.digests:
record.compute_checksum(payload_offset)
else:
record.set_content_length()
|
Set the content length and possibly the checksums.
|
28,123 |
def batch_remove_absolute_retrain__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
return __run_batch_abs_metric(measures.batch_remove_retrain, X, y, model_generator, method_name, sklearn.metrics.roc_auc_score, num_fcounts)
|
Batch Remove Absolute (retrain)
xlabel = "Fraction of features removed"
ylabel = "1 - ROC AUC"
transform = "one_minus"
sort_order = 13
|
28,124 |
def get_metadata_from_xml_tree(tree, get_issns_from_nlm=False,
get_abstracts=False, prepend_title=False,
mesh_annotations=False):
results = {}
pm_articles = tree.findall()
for art_ix, pm_article in enumerate(pm_articles):
medline_citation = pm_article.find()
article_info = _get_article_info(medline_citation,
pm_article.find())
journal_info = _get_journal_info(medline_citation, get_issns_from_nlm)
context_info = _get_annotations(medline_citation)
result = {}
result.update(article_info)
result.update(journal_info)
result.update(context_info)
if get_abstracts:
abstract = _abstract_from_article_element(
medline_citation.find(),
prepend_title=prepend_title
)
result['abstract'] = abstract
results[article_info['pmid']] = result
return results
|
Get metadata for an XML tree containing PubmedArticle elements.
Documentation on the XML structure can be found at:
- https://www.nlm.nih.gov/bsd/licensee/elements_descriptions.html
- https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html
Parameters
----------
tree : xml.etree.ElementTree
ElementTree containing one or more PubmedArticle elements.
get_issns_from_nlm : boolean
Look up the full list of ISSN number for the journal associated with
the article, which helps to match articles to CrossRef search results.
Defaults to False, since it slows down performance.
get_abstracts : boolean
Indicates whether to include the Pubmed abstract in the results.
prepend_title : boolean
If get_abstracts is True, specifies whether the article title should
be prepended to the abstract text.
mesh_annotations : boolean
If True, extract mesh annotations from the pubmed entries and include
in the returned data. If false, don't.
Returns
-------
dict of dicts
Dictionary indexed by PMID. Each value is a dict containing the
following fields: 'doi', 'title', 'authors', 'journal_title',
'journal_abbrev', 'journal_nlm_id', 'issn_list', 'page'.
|
28,125 |
def pickledump(theobject, fname):
with open(fname, "wb") as fhandle:
pickle.dump(theobject, fhandle)
|
Same as pickle.dump(theobject, fhandle); takes a filename as parameter.
|
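A round-trip sketch of the same idea with the file handle closed explicitly (the helper names and filename are illustrative):

import pickle

def pickle_dump(obj, fname):
    # pickle requires a binary-mode handle
    with open(fname, "wb") as fh:
        pickle.dump(obj, fh)

def pickle_load(fname):
    with open(fname, "rb") as fh:
        return pickle.load(fh)

pickle_dump({"a": 1}, "example.pkl")
print(pickle_load("example.pkl"))  # {'a': 1}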
28,126 |
def check_lazy_load_gemeente(f):
def wrapper(*args):
gemeente = args[0]
if (
gemeente._centroid is None or gemeente._bounding_box is None
or gemeente._taal_id is None or gemeente._metadata is None
):
log.debug(, gemeente.id)
gemeente.check_gateway()
g = gemeente.gateway.get_gemeente_by_id(gemeente.id)
gemeente._taal_id = g._taal_id
gemeente._centroid = g._centroid
gemeente._bounding_box = g._bounding_box
gemeente._metadata = g._metadata
return f(*args)
return wrapper
|
Decorator function to lazy load a :class:`Gemeente`.
|
28,127 |
def point(self, t):
return (1 - t)**2*self.start + 2*(1 - t)*t*self.control + t**2*self.end
|
returns the coordinates of the Bezier curve evaluated at t.
|
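A self-contained check of the quadratic Bezier formula above, using complex numbers as 2-D points (a common convention in SVG path libraries; the sample points are made up):

def quad_bezier_point(start, control, end, t):
    # B(t) = (1-t)^2 * P0 + 2*(1-t)*t * P1 + t^2 * P2
    return (1 - t) ** 2 * start + 2 * (1 - t) * t * control + t ** 2 * end

p0, p1, p2 = 0 + 0j, 1 + 2j, 2 + 0j
print(quad_bezier_point(p0, p1, p2, 0.0))  # (0+0j), starts at p0
print(quad_bezier_point(p0, p1, p2, 0.5))  # (1+1j), midpoint of the curve
print(quad_bezier_point(p0, p1, p2, 1.0))  # (2+0j), ends at p2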
28,128 |
def put_group_policy(self, group_name, policy_name, policy_json):
params = {'GroupName': group_name,
'PolicyName': policy_name,
'PolicyDocument': policy_json}
return self.get_response('PutGroupPolicy', params, verb='POST')
|
Adds or updates the specified policy document for the specified group.
:type group_name: string
:param group_name: The name of the group the policy is associated with.
:type policy_name: string
:param policy_name: The name of the policy document.
:type policy_json: string
:param policy_json: The policy document.
|
28,129 |
def draw_panel(self, data, panel_params, coord, ax, **params):
for _, gdata in data.groupby('group'):
gdata.reset_index(inplace=True, drop=True)
self.draw_group(gdata, panel_params, coord, ax, **params)
|
Plot all groups
For efficiency, geoms that do not need to partition
different groups before plotting should override this
method and avoid the groupby.
Parameters
----------
data : dataframe
Data to be plotted by this geom. This is the
dataframe created in the plot_build pipeline.
panel_params : dict
The scale information as may be required by the
axes. At this point, that information is about
ranges, ticks and labels. Keys of interest to
the geom are::
'x_range' # tuple
'y_range' # tuple
coord : coord
Coordinate (e.g. coord_cartesian) system of the
geom.
ax : axes
Axes on which to plot.
params : dict
Combined parameters for the geom and stat. Also
includes the 'zorder'.
|
28,130 |
def send_Linux_notify(title, content, img_path):
command = [
,
, ,
, ,
,
]
if img_path is not None:
command.extend([, img_path])
subprocess.call(command + [title, content])
|
Send a Linux desktop notification.
|
28,131 |
def read_json(self):
line = self.stdin.readline()
if line == '':
raise EOFError()
return json.loads(line)
|
Read a single line and decode it as JSON.
Can raise an EOFError() when the input source was closed.
|
28,132 |
def _parse_unit(measure_or_unit_abbreviation):
try:
float(measure_or_unit_abbreviation[0])
factor, unit_abbreviation = measure_or_unit_abbreviation.split(' ', 1)
return unit_abbreviation, float(factor)
except ValueError:
return measure_or_unit_abbreviation, 1.0
|
Helper function that extracts constant factors from unit specifications.
This allows units to be specified like this: 10^6 m^3.
Return a couple (unit, factor)
|
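A standalone sketch of the factor/unit split described in the docstring, splitting on the first whitespace (the helper name and example strings are assumptions):

def parse_unit(spec):
    # "1e6 m^3" -> ("m^3", 1000000.0); "m^3" -> ("m^3", 1.0)
    try:
        float(spec[0])                     # does it start with a digit?
        factor, unit = spec.split(" ", 1)  # split the constant factor off
        return unit, float(factor)
    except ValueError:
        return spec, 1.0

print(parse_unit("1e6 m^3"))  # ('m^3', 1000000.0)
print(parse_unit("m^3"))      # ('m^3', 1.0)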
28,133 |
def _get_argspec(f):
if sys.version_info[0] < 3:
argspec = inspect.getargspec(f)
else:
argspec = inspect.getfullargspec(f)
return argspec
|
Get argspec of a function. Supports both Python 2 and Python 3.
|
28,134 |
def save_ewif_file(self, path: str, password: str) -> None:
version = 1
salt = libnacl.crypto_hash_sha256(
libnacl.crypto_hash_sha256(
Base58Encoder.decode(self.pubkey)))[0:4]
password_bytes = password.encode("utf-8")
scrypt_seed = scrypt(password_bytes, salt, 16384, 8, 8, 64)
derivedhalf1 = scrypt_seed[0:32]
derivedhalf2 = scrypt_seed[32:64]
seed1_xor_derivedhalf1_1 = bytes(xor_bytes(self.seed[0:16], derivedhalf1[0:16]))
seed2_xor_derivedhalf1_2 = bytes(xor_bytes(self.seed[16:32], derivedhalf1[16:32]))
aes = pyaes.AESModeOfOperationECB(derivedhalf2)
encryptedhalf1 = aes.encrypt(seed1_xor_derivedhalf1_1)
encryptedhalf2 = aes.encrypt(seed2_xor_derivedhalf1_2)
seed_bytes = b + salt + encryptedhalf1 + encryptedhalf2
sha256_v1 = libnacl.crypto_hash_sha256(seed_bytes)
sha256_v2 = libnacl.crypto_hash_sha256(sha256_v1)
checksum = sha256_v2[0:2]
ewif_key = Base58Encoder.encode(seed_bytes + checksum)
with open(path, ) as fh:
fh.write(
.format(version=version, data=ewif_key)
)
|
Save an Encrypted Wallet Import Format file (WIF v2)
:param path: Path to file
:param password:
|
28,135 |
def _track(self, class_name):
if self._test_cases.get(class_name) is None:
if self.streaming and self.header:
self._write_test_case_header(class_name, self.stream)
self._test_cases[class_name] = []
if self.combined:
self.combined_test_cases_seen.append(class_name)
|
Keep track of which test cases have executed.
|
28,136 |
def ipv6_ipv6route_route_dest(self, **kwargs):
config = ET.Element("config")
ipv6 = ET.SubElement(config, "ipv6", xmlns="urn:brocade.com:mgmt:brocade-common-def")
ipv6route = ET.SubElement(ipv6, "ipv6route", xmlns="urn:brocade.com:mgmt:brocade-ip-forward")
route = ET.SubElement(ipv6route, "route")
dest = ET.SubElement(route, "dest")
dest.text = kwargs.pop('dest')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
28,137 |
def op_paths(self, path_base=None):
if path_base:
path_base += self.path_prefix
else:
path_base = self.path_prefix or UrlPath()
for container in self.containers:
for op_path in container.op_paths(path_base):
yield op_path
|
Return all operations stored in containers.
|
28,138 |
def generate_filename(self, mark, **kwargs):
kwargs = kwargs.copy()
kwargs[] = int(kwargs[] * 100)
kwargs[] = kwargs[].st_mtime
kwargs[] = kwargs[].st_size
params = [
,
,
,
,
,
,
,
,
,
]
scale = kwargs.get(, None)
if scale and scale != mark.size:
params.append( % (float(kwargs[][0]) / mark.size[0] * 100))
if kwargs.get(, None):
params.append()
filename = % (.join(params), kwargs[])
return filename % kwargs
|
Comes up with a good filename for the watermarked image
|
28,139 |
def parse_eep(self, rorg_func=None, rorg_type=None, direction=None, command=None):
if rorg_func is not None and rorg_type is not None:
self.select_eep(rorg_func, rorg_type, direction, command)
provides, values = self.eep.get_values(self._profile, self._bit_data, self._bit_status)
self.parsed.update(values)
return list(provides)
|
Parse EEP based on FUNC and TYPE
|
28,140 |
def threaded(system, func, *args, **kwargs):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
if system.raven_client:
system.raven_client.captureException()
logger.exception(, e)
return False
return lambda: wrapper(*args, **kwargs)
|
uses thread_init as a decorator-style
|
28,141 |
def retry_shipper_tasks(self, project_name, logstore_name, shipper_name, task_list):
headers = {}
params = {}
body = six.b(json.dumps(task_list))
headers[] =
headers[] = str(len(body))
resource = "/logstores/" + logstore_name + "/shipper/" + shipper_name + "/tasks"
(resp, header) = self._send("PUT", project_name, body, resource, params, headers)
return RetryShipperTasksResponse(header, resp)
|
Retry failed tasks; only failed tasks can be retried.
An unsuccessful operation will raise a LogException.
:type project_name: string
:param project_name: the Project name
:type logstore_name: string
:param logstore_name: the logstore name
:type shipper_name: string
:param shipper_name: the shipper name
:type task_list: string array
:param task_list: the failed task_id list, e.g. ['failed_task_id_1', 'failed_task_id_2', ...]; currently at most 10 tasks can be retried at a time
:return: RetryShipperTasksResponse
:raise: LogException
|
28,142 |
def parse_date_created(dct):
date = dct[]
if date:
return (int(date[]), int(date[]), int(date[]))
else:
return (None, None, None)
|
Helper function to parse date-created from profile.
|
28,143 |
def pivoted_cholesky(matrix, max_rank, diag_rtol=1e-3, name=None):
with tf.compat.v2.name_scope(name or 'pivoted_cholesky'):
dtype = dtype_util.common_dtype([matrix, diag_rtol],
preferred_dtype=tf.float32)
matrix = tf.convert_to_tensor(value=matrix, name='matrix', dtype=dtype)
if tensorshape_util.rank(matrix.shape) is None:
raise NotImplementedError()
max_rank = tf.convert_to_tensor(
value=max_rank, name='max_rank', dtype=tf.int64)
max_rank = tf.minimum(max_rank,
prefer_static.shape(matrix, out_type=tf.int64)[-1])
diag_rtol = tf.convert_to_tensor(
value=diag_rtol, dtype=dtype, name='diag_rtol')
matrix_diag = tf.linalg.diag_part(matrix)
permuted_diag = batch_gather(matrix_diag, perm[..., m:])
maxi = tf.argmax(
input=permuted_diag, axis=-1, output_type=tf.int64)[..., tf.newaxis]
maxval = batch_gather(permuted_diag, maxi)
maxi = maxi + m
maxval = maxval[..., 0]
perm = _swap_m_with_i(perm, m, maxi)
row = batch_gather(matrix, perm[..., m:m + 1], axis=-2)
row = batch_gather(row, perm[..., m + 1:])
prev_rows = pchol[..., :m, :]
prev_rows_perm_m_onward = batch_gather(prev_rows, perm[..., m + 1:])
prev_rows_pivot_col = batch_gather(prev_rows, perm[..., m:m + 1])
row -= tf.reduce_sum(
input_tensor=prev_rows_perm_m_onward * prev_rows_pivot_col,
axis=-2)[..., tf.newaxis, :]
pivot = tf.sqrt(maxval)[..., tf.newaxis, tf.newaxis]
row = tf.concat([pivot, row / pivot], axis=-1)
paddings = tf.concat([
tf.zeros([prefer_static.rank(pchol) - 1, 2], dtype=tf.int32),
[[tf.cast(m, tf.int32), 0]]], axis=0)
diag_update = tf.pad(tensor=row**2, paddings=paddings)[..., 0, :]
reverse_perm = _invert_permutation(perm)
matrix_diag -= batch_gather(diag_update, reverse_perm)
row = tf.pad(tensor=row, paddings=paddings)
row = batch_gather(row, reverse_perm)
pchol_shape = pchol.shape
pchol = tf.concat([pchol[..., :m, :], row, pchol[..., m + 1:, :]],
axis=-2)
tensorshape_util.set_shape(pchol, pchol_shape)
return m + 1, pchol, perm, matrix_diag
m = np.int64(0)
pchol = tf.zeros_like(matrix[..., :max_rank, :])
matrix_shape = prefer_static.shape(matrix, out_type=tf.int64)
perm = tf.broadcast_to(
prefer_static.range(matrix_shape[-1]), matrix_shape[:-1])
_, pchol, _, _ = tf.while_loop(
cond=cond, body=body, loop_vars=(m, pchol, perm, matrix_diag))
pchol = tf.linalg.matrix_transpose(pchol)
tensorshape_util.set_shape(
pchol, tensorshape_util.concatenate(matrix_diag.shape, [None]))
return pchol
|
Computes the (partial) pivoted cholesky decomposition of `matrix`.
The pivoted Cholesky is a low rank approximation of the Cholesky decomposition
of `matrix`, i.e. as described in [(Harbrecht et al., 2012)][1]. The
currently-worst-approximated diagonal element is selected as the pivot at each
iteration. This yields from a `[B1...Bn, N, N]` shaped `matrix` a `[B1...Bn,
N, K]` shaped rank-`K` approximation `lr` such that `lr @ lr.T ~= matrix`.
Note that, unlike the Cholesky decomposition, `lr` is not triangular even in
a rectangular-matrix sense. However, under a permutation it could be made
triangular (it has one more zero in each column as you move to the right).
Such a matrix can be useful as a preconditioner for conjugate gradient
optimization, i.e. as in [(Wang et al. 2019)][2], as matmuls and solves can be
cheaply done via the Woodbury matrix identity, as implemented by
`tf.linalg.LinearOperatorLowRankUpdate`.
Args:
matrix: Floating point `Tensor` batch of symmetric, positive definite
matrices.
max_rank: Scalar `int` `Tensor`, the rank at which to truncate the
approximation.
diag_rtol: Scalar floating point `Tensor` (same dtype as `matrix`). If the
errors of all diagonal elements of `lr @ lr.T` are each lower than
`element * diag_rtol`, iteration is permitted to terminate early.
name: Optional name for the op.
Returns:
lr: Low rank pivoted Cholesky approximation of `matrix`.
#### References
[1]: H Harbrecht, M Peters, R Schneider. On the low-rank approximation by the
pivoted Cholesky decomposition. _Applied numerical mathematics_,
62(4):428-440, 2012.
[2]: K. A. Wang et al. Exact Gaussian Processes on a Million Data Points.
_arXiv preprint arXiv:1903.08114_, 2019. https://arxiv.org/abs/1903.08114
|
28,144 |
def get_or_create_user(self, provider, access, info):
"Create a shell auth.User."
digest = hashlib.sha1(smart_bytes(access)).digest()
username = force_text(base64.urlsafe_b64encode(digest)).replace(, )
User = get_user_model()
kwargs = {
User.USERNAME_FIELD: username,
: ,
: None
}
return User.objects.create_user(**kwargs)
|
Create a shell auth.User.
|
28,145 |
def ones(shape, ctx=None, dtype=None, **kwargs):
if ctx is None:
ctx = current_context()
dtype = mx_real_t if dtype is None else dtype
return _internal._ones(shape=shape, ctx=ctx, dtype=dtype, **kwargs)
|
Returns a new array filled with all ones, with the given shape and type.
Parameters
----------
shape : int or tuple of int or list of int
The shape of the empty array.
ctx : Context, optional
An optional device context.
Defaults to the current default context (``mxnet.context.current_context()``).
dtype : str or numpy.dtype, optional
An optional value type (default is `float32`).
out : NDArray, optional
The output NDArray (default is `None`).
Returns
-------
NDArray
A new array of the specified shape filled with all ones.
Examples
--------
>>> mx.nd.ones(1).asnumpy()
array([ 1.], dtype=float32)
>>> mx.nd.ones((1,2), mx.gpu(0))
<NDArray 1x2 @gpu(0)>
>>> mx.nd.ones((1,2), dtype='float16').asnumpy()
array([[ 1., 1.]], dtype=float16)
|
28,146 |
def users_me_merge(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/users
api_path = "/api/v2/users/me/merge.json"
return self.call(api_path, method="PUT", data=data, **kwargs)
|
https://developer.zendesk.com/rest_api/docs/core/users#merge-self-with-another-user
|
28,147 |
def multi_trace_plot(traces, corr=True, stack='linstack', size=(7, 12),
**kwargs):
import matplotlib.pyplot as plt
from eqcorrscan.core.match_filter import normxcorr2
n_axes = len(traces)
if stack in ['linstack', 'PWS']:
n_axes += 1
fig, axes = plt.subplots(n_axes, 1, sharex=True, figsize=size)
if len(traces) > 1:
axes = axes.ravel()
traces = [(trace, trace.stats.starttime.datetime) for trace in traces]
traces.sort(key=lambda tup: tup[1])
traces = [trace[0] for trace in traces]
for i, tr in enumerate(traces):
y = tr.data
x = np.arange(len(y))
x = x / tr.stats.sampling_rate
if not stack:
ind = i
else:
ind = i + 1
axes[ind].plot(x, y, , linewidth=1.1)
axes[ind].yaxis.set_ticks([])
traces = [Stream(trace) for trace in traces]
if stack == 'PWS':
stacked = PWS_stack(traces)
elif stack == 'linstack':
stacked = linstack(traces)
if stack in ['linstack', 'PWS']:
tr = stacked[0]
y = tr.data
x = np.arange(len(y))
x = x / tr.stats.sampling_rate
axes[0].plot(x, y, , linewidth=2.0)
axes[0].set_ylabel(, rotation=0)
axes[0].yaxis.set_ticks([])
for i, slave in enumerate(traces):
if corr:
cc = normxcorr2(tr.data, slave[0].data)
if not stack:
ind = i
else:
ind = i + 1
if corr:
axes[ind].set_ylabel( + str(round(np.max(cc), 2)), rotation=0)
axes[ind].text(0.9, 0.15, str(round(np.max(slave[0].data))),
bbox=dict(facecolor=, alpha=0.95),
transform=axes[ind].transAxes)
axes[ind].text(0.7, 0.85, slave[0].stats.starttime.datetime.
strftime(),
bbox=dict(facecolor=, alpha=0.95),
transform=axes[ind].transAxes)
axes[-1].set_xlabel()
fig = _finalise_figure(fig=fig, **kwargs)
return fig
|
Plot multiple traces (usually from the same station) on the same plot.
Differs somewhat from obspy's stream.plot in that only relative time within
traces matters; it will not merge traces together.
:type traces: list
:param traces: List of obspy.core.Trace
:type corr: bool
:param corr:
To calculate the correlation or not, if True, will add this to the
axes
:type stack: str
:param stack:
To plot the stack as the first trace or not, select type of
stack: 'linstack' or 'PWS', or None.
:type size: tuple
:param size: Size of figure.
|
28,148 |
def _check_uninferable_call(self, node):
if not isinstance(node.func, astroid.Attribute):
return
expr = node.func.expr
klass = safe_infer(expr)
if (
klass is None
or klass is astroid.Uninferable
or not isinstance(klass, astroid.Instance)
):
return
try:
attrs = klass._proxied.getattr(node.func.attrname)
except exceptions.NotFoundError:
return
for attr in attrs:
if attr is astroid.Uninferable:
continue
if not isinstance(attr, astroid.FunctionDef):
continue
if decorated_with_property(attr):
try:
all_returns_are_callable = all(
return_node.callable() or return_node is astroid.Uninferable
for return_node in attr.infer_call_result(node)
)
except astroid.InferenceError:
continue
if not all_returns_are_callable:
self.add_message(
"not-callable", node=node, args=node.func.as_string()
)
break
|
Check that the given uninferable Call node does not
call an actual function.
|
28,149 |
def for_print(self):
s = "\033[34m" + self.get_object_info() + "\033[0m"
s += "\n"
s += self.as_string()
return s
|
Return a printable representation: the object info (wrapped in ANSI blue) followed by the object's string form.
|
28,150 |
def values(self):
"Returns a list of ConfigMap values."
return (list(self._pb.IntMap.values()) + list(self._pb.StringMap.values()) +
list(self._pb.FloatMap.values()) + list(self._pb.BoolMap.values()))
|
Returns a list of ConfigMap values.
|
28,151 |
def volume_disk_temp_avg(self, volume):
volume = self._get_volume(volume)
if volume is not None:
vol_disks = volume["disks"]
if vol_disks is not None:
total_temp = 0
total_disks = 0
for vol_disk in vol_disks:
disk_temp = self.disk_temp(vol_disk)
if disk_temp is not None:
total_disks += 1
total_temp += disk_temp
if total_temp > 0 and total_disks > 0:
return round(total_temp / total_disks, 0)
|
Average temperature of all disks making up the volume
|
28,152 |
def get_soft_bounds(self):
if self.bounds is None:
hl,hu=(None,None)
else:
hl,hu=self.bounds
if self._softbounds is None:
sl,su=(None,None)
else:
sl,su=self._softbounds
if sl is None: l = hl
else: l = sl
if su is None: u = hu
else: u = su
return (l,u)
|
For each soft bound (upper and lower), if there is a defined bound (not equal to None)
then it is returned, otherwise it defaults to the hard bound. The hard bound could still be None.
|
28,153 |
def stop_experiment(args):
experiment_id_list = parse_ids(args)
if experiment_id_list:
experiment_config = Experiments()
experiment_dict = experiment_config.get_all_experiments()
for experiment_id in experiment_id_list:
print_normal( % experiment_id)
nni_config = Config(experiment_dict[experiment_id][])
rest_port = nni_config.get_config()
rest_pid = nni_config.get_config()
if rest_pid:
kill_command(rest_pid)
tensorboard_pid_list = nni_config.get_config()
if tensorboard_pid_list:
for tensorboard_pid in tensorboard_pid_list:
try:
kill_command(tensorboard_pid)
except Exception as exception:
print_error(exception)
nni_config.set_config(, [])
print_normal()
experiment_config.update_experiment(experiment_id, , )
time_now = time.strftime(,time.localtime(time.time()))
experiment_config.update_experiment(experiment_id, , str(time_now))
|
Stop the experiment which is running
|
28,154 |
def load_fd(self, key, noexpire=False):
s responsibility
to close the file descriptor.%s has expired%s found in cacherb%s not found in cache', key)
raise KeyError(key)
|
Look up an item in the cache and return an open file
descriptor for the object. It is the caller's responsibility
to close the file descriptor.
|
28,155 |
def batchDF(symbols, fields=None, range_=, last=10, token=, version=):
x = batch(symbols, fields, range_, last, token, version)
ret = {}
if isinstance(symbols, str):
for field in x.keys():
ret[field] = _MAPPING[field](x[field])
else:
for symbol in x.keys():
for field in x[symbol].keys():
if field not in ret:
ret[field] = pd.DataFrame()
dat = x[symbol][field]
dat = _MAPPING[field](dat)
dat[] = symbol
ret[field] = pd.concat([ret[field], dat], sort=True)
return ret
|
Batch several data requests into one invocation
https://iexcloud.io/docs/api/#batch-requests
Args:
symbols (list); List of tickers to request
fields (list); List of fields to request
range_ (string); Date range for chart
last (int);
token (string); Access token
version (string); API version
Returns:
DataFrame: results
|
28,156 |
def get_extra_info(self, name, default=None):
if name == :
return self.__inner_protocol.transport.get_extra_info()
elif name == :
return self.__relayed_address
return default
|
Return optional transport information.
- `'related_address'`: the related address
- `'sockname'`: the relayed address
|
28,157 |
def get_broker_list(cluster_config):
with ZK(cluster_config) as zk:
brokers = sorted(list(zk.get_brokers().items()), key=itemgetter(0))
return [(id, data['host']) for id, data in brokers]
|
Returns a list of brokers in the form [(id: host)]
:param cluster_config: the configuration of the cluster
:type cluster_config: map
|
28,158 |
def find_module(self, module_name, path=None):
module_path = os.path.join(*module_name.split(MODULE_PATH_SEP))
for search_root in self.paths:
target_path = os.path.join(search_root, module_path)
is_pkg = False
if os.path.isdir(target_path):
target_file = os.path.join(target_path, '__init__.py')
is_pkg = True
else:
target_file = '{0}.py'.format(target_path)
if os.path.exists(target_file):
return ModuleLoader(
target_path, module_name, target_file, is_pkg)
return None
|
Searches the paths for the required module.
:param module_name: the full name of the module to find
:param path: set to None when the module in being searched for is a
top-level module - otherwise this is set to
package.__path__ for submodules and subpackages (unused)
|
28,159 |
def mchirp_compression(m1, m2, fmin, fmax, min_seglen=0.02, df_multiple=None):
sample_points = []
f = fmin
while f < fmax:
if df_multiple is not None:
f = int(f/df_multiple)*df_multiple
sample_points.append(f)
f += 1.0 / rough_time_estimate(m1, m2, f, fudge_min=min_seglen)
if sample_points[-1] < fmax:
sample_points.append(fmax)
return numpy.array(sample_points)
|
Return the frequencies needed to compress a waveform with the given
chirp mass. This is based on the estimate in rough_time_estimate.
Parameters
----------
m1: float
mass of first component object in solar masses
m2: float
mass of second component object in solar masses
fmin : float
The starting frequency of the compressed waveform.
fmax : float
The ending frequency of the compressed waveform.
min_seglen : float
The inverse of this gives the maximum frequency step that is used.
df_multiple : {None, float}
Make the compressed sampling frequencies a multiple of the given value.
If None provided, the returned sample points can have any floating
point value.
Returns
-------
array
The frequencies at which to evaluate the compressed waveform.
|
28,160 |
def send(self, channel, payload):
with track( + channel):
with track():
Event.objects.create(
group=self,
channel=channel,
value=payload)
ChannelGroup(str(self.pk)).send(
{: json.dumps({
: channel,
: payload
})})
|
Send a message with the given payload on the given channel.
Messages are broadcast to all players in the group.
|
28,161 |
def proto_0201(theABF):
abf=ABF(theABF)
abf.log.info("analyzing as a membrane test")
plot=ABFplot(abf)
plot.figure_height,plot.figure_width=SQUARESIZE/2,SQUARESIZE/2
plot.figure_sweeps()
plt.tight_layout()
frameAndSave(abf,"membrane test")
plt.close()
|
protocol: membrane test.
|
28,162 |
def _states(self):
self.current_table = 0
self._last_selected_cell = 0, 0, 0
self._view_frozen = False
self.timer_running = False
|
Sets grid states
|
28,163 |
def calculate_lux(r, g, b):
illuminance = (-0.32466 * r) + (1.57837 * g) + (-0.73191 * b)
return int(illuminance)
|
Converts the raw R/G/B values to luminosity in lux.
|
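A worked instance of the coefficient formula above (the raw channel readings are made up):

r, g, b = 100, 200, 50
illuminance = (-0.32466 * r) + (1.57837 * g) + (-0.73191 * b)
# -32.466 + 315.674 - 36.5955 = 246.6125
print(int(illuminance))  # 246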
28,164 |
def print_banner(filename: str, template: str = DEFAULT_BANNER_TEMPLATE) -> None:
if not os.path.isfile(filename):
logger.warning("Can't find logo banner at %s", filename)
return
with open(filename, "r") as f:
banner = f.read()
formatted_banner = template.format(banner)
print(formatted_banner)
|
Print text file to output.
:param filename: Which file to print.
:param template: Format string which specifies the banner arrangement.
:return: Does not return anything
|
28,165 |
def split_url(url):
if url is None:
return (None, None)
array = url.split("
if len(array) == 1:
array.append(None)
return tuple(array[0:2])
|
Split the given URL ``base#anchor`` into ``(base, anchor)``,
or ``(base, None)`` if no anchor is present.
In case there are two or more ``#`` characters,
return only the first two tokens: ``a#b#c => (a, b)``.
:param string url: the url
:rtype: tuple of str
|
28,166 |
def casefold_with_i_dots(text):
text = unicodedata.normalize('NFC', text).replace('İ', 'i').replace('I', 'ı')
return text.casefold()
|
Convert capital I's and capital dotted İ's to lowercase in the way
that's appropriate for Turkish and related languages, then case-fold
the rest of the letters.
|
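A self-contained sketch of the Turkish-aware fold described in the docstring, assuming the stripped literals are the NFC normalization form and the İ→i / I→ı replacements:

import unicodedata

def turkish_casefold(text):
    # Map dotted capital İ to 'i' and dotless capital I to 'ı'
    # before the generic case-fold.
    text = unicodedata.normalize("NFC", text).replace("İ", "i").replace("I", "ı")
    return text.casefold()

print(turkish_casefold("İstanbul"))  # 'istanbul'
print(turkish_casefold("DIŞ"))       # 'dış'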
28,167 |
def _parse_options(opts, delim):
options = {}
for opt in opts.split(delim):
key, val = opt.split("=")
if key.lower() == 'readpreferencetags':
options.setdefault('readpreferencetags', []).append(val)
else:
if str(key) in options:
warnings.warn("Duplicate URI option %s" % (str(key),))
options[str(key)] = unquote_plus(val)
if "wtimeout" in options:
if "wtimeoutMS" in options:
options.pop("wtimeout")
warnings.warn("Option wtimeout is deprecated, use "
" instead")
return options
|
Helper method for split_options which creates the options dict.
Also handles the creation of a list for the URI tag_sets/
readpreferencetags portion.
|
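A reduced, standalone sketch of the key=value option parsing described above, ignoring the MongoDB-specific tag-set and wtimeout handling:

from urllib.parse import unquote_plus

def parse_options(opts, delim="&"):
    # "w=1&journal=true" -> {'w': '1', 'journal': 'true'}
    options = {}
    for opt in opts.split(delim):
        key, val = opt.split("=")
        options[key] = unquote_plus(val)
    return options

print(parse_options("w=1&journal=true"))  # {'w': '1', 'journal': 'true'}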
28,168 |
def qtePrepareToRun(self):
msgObj = QtmacsMessage((self.qteMacroName(), self.qteWidget), None)
msgObj.setSignalName()
self.qteMain.qtesigMacroStart.emit(msgObj)
try:
self.qteRun()
self.qteMain.qtesigMacroFinished.emit(msgObj)
except Exception as err:
if self.qteApplet is None:
appID = appSig = None
else:
appID = self.qteApplet.qteAppletID()
appSig = self.qteApplet.qteAppletSignature()
msg = (
)
msg = msg.format(self.qteMacroName(), appSig, appID)
if isinstance(err, QtmacsArgumentError):
msg += + str(err)
self.qteMain.qteEnableMacroProcessing()
self.qteMain.qtesigMacroError.emit(msgObj)
self.qteLogger.exception(msg, exc_info=True, stack_info=True)
|
This method is called by Qtmacs to prepare the macro for
execution.
It is probably a bad idea to overload this method as it only
administrates the macro execution and calls the ``qteRun``
method (which *should* be overloaded by the macro programmer
in order for the macro to do something).
|Args|
* **None**
|Returns|
* **None**
|Raises|
* **None**
|
28,169 |
def chmod_r(root: str, permission: int) -> None:
os.chmod(root, permission)
for dirpath, dirnames, filenames in os.walk(root):
for d in dirnames:
os.chmod(os.path.join(dirpath, d), permission)
for f in filenames:
os.chmod(os.path.join(dirpath, f), permission)
|
Recursive ``chmod``.
Args:
root: directory to walk down
permission: e.g. ``stat.S_IWUSR``
|
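A usage sketch of the same recursive walk on a throwaway directory tree (the paths and the chosen permission are illustrative):

import os
import stat
import tempfile

# Build a tiny tree, then restrict it recursively to owner-only access.
root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "sub"))
open(os.path.join(root, "sub", "file.txt"), "w").close()

permission = stat.S_IRWXU
os.chmod(root, permission)
for dirpath, dirnames, filenames in os.walk(root):
    for name in dirnames + filenames:
        os.chmod(os.path.join(dirpath, name), permission)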
28,170 |
def version(core_name=None):
ret = _get_return_dict()
if _get_none_or_value(core_name) is None and _check_for_cores():
success = True
for name in __opts__[]:
resp = _get_admin_info(, core_name=name)
if resp[]:
lucene = resp[][]
data = {name: {: lucene[]}}
else:
success = False
data = {name: {: None}}
ret = _update_return_dict(ret, success, data,
resp[], resp[])
return ret
else:
resp = _get_admin_info(, core_name=core_name)
if resp[]:
version_num = resp[][][]
return _get_return_dict(True, {: version_num},
resp[], resp[])
else:
return resp
|
Gets the solr version for the core specified. You should specify a core
here as all the cores will run under the same servlet container and so will
all have the same version.
core_name : str (None)
The name of the solr core if using cores. Leave this blank if you are
not using cores or if you want to check all cores.
Return : dict<str,obj>::
{'success':boolean, 'data':dict, 'errors':list, 'warnings':list}
CLI Example:
.. code-block:: bash
salt '*' solr.version
|
28,171 |
def setAllColor(self, color):
header = bytearray()
header.append(LightProtocolCommand.SetAllColor)
light = bytearray()
light.extend(color)
buff = header + light
return self.send(buff)
|
Command: 0x06
sets all colors in the array
Data:
[Command][r][g][b]
|
28,172 |
def parse_input(s):
if isinstance(s, six.integer_types):
s = str(s)
elif not isinstance(s, six.string_types):
raise ValueError(s)
original = s
if s[-1:] == 'L':
s = s[:-1]
sign = {: -1, : 0, : 1}.get(s[0], None)
if sign is not None:
s = s[1:]
ts = 0
for unit in _SORTED_UNITS:
pos = s.find(unit[0])
if pos == 0:
raise ValueError(original)
elif pos > 0:
if sign is None:
sign = 1
ts += int(s[:pos]) * __timedelta_millis(unit[1])
s = s[min(len(s), pos + 1):]
if s:
ts += int(s)
return date_from_utc_ts(ts) if not sign else \
utc() + sign * delta(milliseconds=ts)
|
Parse the given input and intelligently transform it into an absolute,
non-naive, timezone-aware datetime object for the UTC timezone.
The input can be specified as a millisecond-precision UTC timestamp (or
delta against Epoch), with or without a terminating 'L'. Alternatively, the
input can be specified as a human-readable delta string with unit-separated
segments, like '24d6h4m500' (24 days, 6 hours, 4 minutes and 500ms), as
long as the segments are in descending unit span order.
|
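A simplified, self-contained sketch of the human-readable delta parsing described in the docstring; only the 'd', 'h' and 'm' units plus a trailing millisecond segment are handled, and the unit table is an assumption rather than the library's exact rules:

from datetime import timedelta

_UNIT_MS = (("d", 86400000), ("h", 3600000), ("m", 60000))

def parse_delta(s):
    # "24d6h4m500" -> 24 days, 6 hours, 4 minutes and 500 ms
    ms = 0
    for suffix, unit_ms in _UNIT_MS:
        pos = s.find(suffix)
        if pos > 0:
            ms += int(s[:pos]) * unit_ms
            s = s[pos + 1:]
    if s:  # leftover digits are milliseconds
        ms += int(s)
    return timedelta(milliseconds=ms)

print(parse_delta("24d6h4m500"))  # 24 days, 6:04:00.500000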
28,173 |
def add_command_option(command, name, doc, is_bool=False):
dist = get_dummy_distribution()
cmdcls = dist.get_command_class(command)
if (hasattr(cmdcls, ) and
name in cmdcls._astropy_helpers_options):
return
attr = name.replace('-', '_')
if hasattr(cmdcls, attr):
raise RuntimeError(
.format(cmdcls, attr, name))
for idx, cmd in enumerate(cmdcls.user_options):
if cmd[0] == name:
log.warn(
.format(command, name))
del cmdcls.user_options[idx]
if name in cmdcls.boolean_options:
cmdcls.boolean_options.remove(name)
break
cmdcls.user_options.append((name, None, doc))
if is_bool:
cmdcls.boolean_options.append(name)
cmdcls._astropy_helpers_options = set([name])
else:
cmdcls._astropy_helpers_options.add(name)
|
Add a custom option to a setup command.
Issues a warning if the option already exists on that command.
Parameters
----------
command : str
The name of the command as given on the command line
name : str
The name of the build option
doc : str
A short description of the option, for the `--help` message
is_bool : bool, optional
When `True`, the option is a boolean option and doesn't
require an associated value.
|
28,174 |
def effect_ratio(self, mechanism, purview):
return self._ratio(Direction.EFFECT, mechanism, purview)
|
The effect ratio of the ``purview`` given ``mechanism``.
|
28,175 |
def strip_harakat(text):
if not text:
return text
elif is_vocalized(text):
for char in HARAKAT:
text = text.replace(char, '')
return text
|
Strip Harakat from arabic word except Shadda.
The stripped marks are:
- FATHA, DAMMA, KASRA
- SUKUN
- FATHATAN, DAMMATAN, KASRATAN,
@param text: arabic text.
@type text: unicode.
@return: return a striped text.
@rtype: unicode.
|
28,176 |
def _map_player_request_to_func(self, player_request_type):
view_func = self._intent_view_funcs.get(player_request_type, lambda: None)
argspec = inspect.getargspec(view_func)
arg_names = argspec.args
arg_values = self._map_params_to_view_args(player_request_type, arg_names)
return partial(view_func, *arg_values)
|
Provides appropriate parameters to the on_playback functions.
|
28,177 |
def set_handler(self, handler):
if self._handler:
raise Exception()
if handler:
self._handler = async_task(handler, loop=self._loop)
|
Connect with a coroutine, which is scheduled when connection is made.
This function will create a task, and when connection is closed,
the task will be canceled.
:param handler:
:return: None
|
28,178 |
def _get_log_lines(self, n=300):
with open(self.log_file) as fh:
last_lines = fh.readlines()[-n:]
return last_lines
|
Returns a list with the last ``n`` lines of the nextflow log file
Parameters
----------
n : int
Number of last lines from the log file
Returns
-------
list
List of strings with the nextflow log
|
28,179 |
def json(self):
nodes = []
links = []
for link in self.link_set.all():
if self.is_layer2:
source = link.interface_a.mac
destination = link.interface_b.mac
else:
source = str(link.interface_a.ip_set.first().address)
destination = str(link.interface_b.ip_set.first().address)
nodes.append({
'id': source
})
nodes.append({
'id': destination
})
links.append(OrderedDict((
('source', source),
('target', destination),
('cost', link.metric_value)
)))
return OrderedDict((
('type', 'NetworkGraph'),
('protocol', self.parser.protocol),
('version', self.parser.version),
('metric', self.parser.metric),
('nodes', nodes),
('links', links)
))
|
returns a dict that represents a NetJSON NetworkGraph object
|
28,180 |
def post_processor_affected_function(
exposure=None, hazard=None, classification=None, hazard_class=None):
if exposure == exposure_population[]:
affected = is_affected(
hazard, classification, hazard_class)
else:
classes = None
for hazard in hazard_classes_all:
if hazard[] == classification:
classes = hazard[]
break
for the_class in classes:
if the_class[] == hazard_class:
affected = the_class[]
break
else:
affected = not_exposed_class[]
return affected
|
Private function used in the affected postprocessor.
It returns a boolean if it's affected or not, or not exposed.
:param exposure: The exposure to use.
:type exposure: str
:param hazard: The hazard to use.
:type hazard: str
:param classification: The hazard classification to use.
:type classification: str
:param hazard_class: The hazard class of the feature.
:type hazard_class: str
:return: If this hazard class is affected or not. It can be `not exposed`.
The not exposed value returned is the key defined in
`hazard_classification.py` at the top of the file.
:rtype: bool,'not exposed'
|
28,181 |
def format_title(self):
def asciify(_title):
_title = unicodedata.normalize(, unicode(_title))
ascii = True
out = []
ok = u"1234567890qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM- \r\n\t-,-]+", , "".join(out)) )
(ascii, _title) = asciify(self.meta.title)
if not ascii and self.meta.alternative_title:
(ascii, _title2) = asciify(self.meta.alternative_title)
if ascii:
_title = _title2
title_length = 99 - len(str(self.book_id)) - 1
if len(_title) > title_length:
repo_title = "{0}__{1}".format(_title[:title_length], self.book_id)
else:
repo_title = "{0}_{1}".format(_title[:title_length], self.book_id)
logger.debug("%s %s" % (len(repo_title), repo_title))
self.meta.metadata[] = repo_title
return repo_title
|
Takes a string and sanitizes it for Github's url name format
|
28,182 |
def signalbus(self):
try:
signalbus = self.__signalbus
except AttributeError:
signalbus = self.__signalbus = SignalBus(self, init_app=False)
return signalbus
|
The associated `SignalBus` object.
|
28,183 |
def get_dependencies_from_index(dep, sources=None, pip_options=None, wheel_cache=None):
finder = get_finder(sources=sources, pip_options=pip_options)
if not wheel_cache:
wheel_cache = WHEEL_CACHE
dep.is_direct = True
reqset = pip_shims.shims.RequirementSet()
reqset.add_requirement(dep)
requirements = None
setup_requires = {}
with temp_environ(), start_resolver(finder=finder, wheel_cache=wheel_cache) as resolver:
os.environ[] =
dist = None
if dep.editable and not dep.prepared and not dep.req:
with cd(dep.setup_py_dir):
from setuptools.dist import distutils
try:
dist = distutils.core.run_setup(dep.setup_py)
except (ImportError, TypeError, AttributeError):
dist = None
else:
setup_requires[dist.get_name()] = dist.setup_requires
if not dist:
try:
dist = dep.get_dist()
except (TypeError, ValueError, AttributeError):
pass
else:
setup_requires[dist.get_name()] = dist.setup_requires
resolver.require_hashes = False
try:
results = resolver._resolve_one(reqset, dep)
except Exception:
results = []
finally:
try:
wheel_cache.cleanup()
except AttributeError:
pass
resolver_requires_python = getattr(resolver, "requires_python", None)
requires_python = getattr(reqset, "requires_python", resolver_requires_python)
if requires_python:
add_marker = fix_requires_python_marker(requires_python)
reqset.remove(dep)
if dep.req.marker:
dep.req.marker._markers.extend([,].extend(add_marker._markers))
else:
dep.req.marker = add_marker
reqset.add(dep)
requirements = set()
for r in results:
if requires_python:
if r.req.marker:
r.req.marker._markers.extend([,].extend(add_marker._markers))
else:
r.req.marker = add_marker
requirements.add(format_requirement(r))
for section in setup_requires:
python_version = section
not_python = not is_python(section)
for value in setup_requires[section]:
if is_python(section):
python_version = value[1:-1]
else:
not_python = True
if not in value and not_python:
try:
requirement_str = "{0}{1}".format(value, python_version).replace(":", ";")
requirements.add(format_requirement(make_install_requirement(requirement_str).ireq))
except Exception:
pass
if not dep.editable and is_pinned_requirement(dep) and requirements is not None:
DEPENDENCY_CACHE[dep] = list(requirements)
return requirements
|
Retrieves dependencies for the given install requirement from the pip resolver.
:param dep: A single InstallRequirement
:type dep: :class:`~pip._internal.req.req_install.InstallRequirement`
:param sources: Pipfile-formatted sources, defaults to None
:type sources: list[dict], optional
:return: A set of dependency lines for generating new InstallRequirements.
:rtype: set(str) or None
|
28,184 |
def copy(self):
pen = Pen()
pen.__dict__ = self.__dict__.copy()
return pen
|
Create a copy of this pen.
|
28,185 |
def _get_html(self, url):
self.log.info(u"/GET {}".format(url))
r = requests.get(url)
if hasattr(r, ):
if r.from_cache:
self.log.info("(from cache)")
if r.status_code != 200:
throw_request_err(r)
return r.content
|
Get html from url
|
28,186 |
def clean_password(self):
value = self.cleaned_data.get('password')
if value not in self.valid_passwords:
raise forms.ValidationError()
return value
|
Check that the password is valid.
|
28,187 |
def portfolio_from_orders(orders, funds=1e5, price_type=):
portfolio = orders.copy()
prices = price_dataframe(orders.columns, start=orders.index[0], end=orders.index[-1],
price_type=price_type, cleaner=clean_dataframe)
portfolio["$CASH"] = funds - (orders * prices).sum(axis=1).cumsum()
portfolio["total_value"] = portfolio["$CASH"] + (orders.cumsum() * prices).sum(axis=1)
return portfolio
|
Create a DataFrame of portfolio holdings (#'s' of shares for the symbols and dates)
Appends the "$CASH" symbol to the porfolio and initializes it to `funds` indicated.
Appends the symbol "total_value" to store the total value of cash + stocks at each timestamp.
The symbol holdings are found by multiplying each element of the orders matrix by the
price matrix for those symbols and then computing a cumulative sum of those purchases.
portfolio["$CASH"] = funds - (orders * prices).sum(axis=1).cumsum()
portfolio["total_value"] = portfolio["$CASH"] + (orders.cumsum() * prices).sum(axis=1)
|
28,188 |
def get_reversed_aliases(self):
return dict((v, k) for k, v in six.iteritems(self.aliases))
|
Return the reversed aliases dict. Instead of being in the form
{'alias': mapping}, the dict is in the form {mapping: 'alias'}.
|
28,189 |
def load_formatter_fn(formatter):
obj = util.load_object(formatter)
if not hasattr(obj, 'ispartial'):
obj.ispartial = util.ispartial
return obj
|
>>> load_formatter_fn('logagg.formatters.basescript') #doctest: +ELLIPSIS
<function basescript at 0x...>
|
28,190 |
def contains_duplicates(values: Iterable[Any]) -> bool:
for v in Counter(values).values():
if v > 1:
return True
return False
|
Does the iterable contain any duplicate values?
|
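Usage is straightforward; an equivalent one-liner sketch with Counter and any() for reference:

from collections import Counter
from typing import Any, Iterable

def contains_duplicates(values: Iterable[Any]) -> bool:
    # True as soon as any value occurs more than once.
    return any(count > 1 for count in Counter(values).values())

print(contains_duplicates([1, 2, 3]))      # False
print(contains_duplicates("abracadabra"))  # True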
28,191 |
def close(self):
if not self._response.raw.closed:
sock_fp = self._response.raw._fp.fp
if hasattr(sock_fp, 'raw'):
sock_raw = sock_fp.raw
if hasattr(sock_raw, 'sock'):
sock = sock_raw.sock
elif hasattr(sock_raw, '_sock'):
sock = sock_raw._sock
elif hasattr(sock_fp, ):
raise DockerException(
)
else:
sock = sock_fp._sock
if hasattr(urllib3.contrib, 'pyopenssl') and isinstance(
sock, urllib3.contrib.pyopenssl.WrappedSocket):
sock = sock.socket
sock.shutdown(socket.SHUT_RDWR)
sock.close()
|
Closes the event streaming.
|
28,192 |
def login(self, username: str, password: str, course: int) -> requests.Response:
try:
payload = {
: username,
: password,
: course
}
return self.__session.post(
self.__url + , data=payload, timeout=0.5, verify=False)
except requests.exceptions.Timeout:
return None
|
Log in to the course.
|
28,193 |
def run(self, row, **kwargs):
self.source = row
kwargs[] = self.__graph__()
super(CSVRowProcessor, self).run(**kwargs)
return kwargs[]
|
Methods takes a row and depending if a dict or list,
runs RML rules.
Args:
-----
row(Dict, List): Row from CSV Reader
|
28,194 |
def _set_intrinsics(self):
strm = self._profile.get_stream(rs.stream.color)
obj = strm.as_video_stream_profile().get_intrinsics()
self._intrinsics[0, 0] = obj.fx
self._intrinsics[1, 1] = obj.fy
self._intrinsics[0, 2] = obj.ppx
self._intrinsics[1, 2] = obj.ppy
|
Read the intrinsics matrix from the stream.
|
28,195 |
def _parse_access_vlan(self, config):
value = re.search(r, config)
return dict(access_vlan=value.group(1))
|
Scans the specified config and parse the access-vlan value
Args:
config (str): The interface configuration block to scan
Returns:
dict: A Python dict object with the value of switchport access
value. The dict returned is intended to be merged into the
resource dict
|
28,196 |
def AjustarLiquidacionUnificadoPapel(self):
"Ajustar Liquidación realizada en un formulario F1116 B / C (papel)"
if not self.ajuste[][]:
del self.ajuste[][]
for k1 in (, ):
for k2 in (, ):
if not self.ajuste[k1][k2]:
del self.ajuste[k1][k2]
ret = self.client.liquidacionAjustarUnificadoPapel(
auth={
: self.Token, : self.Sign,
: self.Cuit, },
**self.ajuste
)
ret = ret[]
self.__analizar_errores(ret)
if in ret:
aut = ret[]
self.AnalizarAjuste(aut)
return True
|
Adjust a settlement made on a paper F1116 B / C form.
|
28,197 |
def delete_project(id=None, name=None):
content = delete_project_raw(id, name)
if content:
return utils.format_json(content)
|
Delete a Project by ID or name.
|
28,198 |
def switch(self, first, second):
allowed_states = [, ]
if first in self.queue and second in self.queue \
and self.queue[first][] in allowed_states\
and self.queue[second][] in allowed_states:
tmp = self.queue[second].copy()
self.queue[second] = self.queue[first].copy()
self.queue[first] = tmp
self.write()
return True
return False
|
Switch two entries in the queue. Return False if an entry doesn't exist.
|
28,199 |
def get_offload(self, name, **kwargs):
result = self._request("GET", "offload/{0}".format(name), kwargs)
if isinstance(result, list):
headers = result.headers
result = ResponseDict(result[0])
result.headers = headers
return result
|
Return a dictionary describing the connected offload target.
:param offload: Name of offload target to get information about.
:type offload: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET offload/::offload**
:type \*\*kwargs: optional
:returns: A dictionary describing the offload connection.
:rtype: ResponseDict
|