Unnamed: 0 (int64, 0–389k) | code (string, 26–79.6k chars) | docstring (string, 1–46.9k chars) |
---|---|---|
380,600 | def insertBefore(self, child: Node, ref_node: Node) -> Node:
if self.connected:
self._insert_before_web(child, ref_node)
return self._insert_before(child, ref_node) | Insert new child node before the reference child node.
If the reference node is not a child of this node, raise ValueError. If
this instance is connected to a node in the browser, the child node is
also added there. |
380,601 | def buglist(self, from_date=DEFAULT_DATETIME):
if not self.version:
self.version = self.__fetch_version()
if self.version in self.OLD_STYLE_VERSIONS:
order =
else:
order =
date = from_date.strftime("%Y-%m-%d %H:%M:%S")
params = {
self.PCHFIELD_FROM: date,
self.PCTYPE: self.CTYPE_CSV,
self.PLIMIT: self.max_bugs_csv,
self.PORDER: order
}
response = self.call(self.CGI_BUGLIST, params)
return response | Get a summary of bugs in CSV format.
:param from_date: retrieve bugs that were updated since that date |
380,602 | def get_ports(self, id_or_uri, start=0, count=-1):
uri = self._client.build_subresource_uri(resource_id_or_uri=id_or_uri, subresource_path="ports")
return self._client.get_all(start, count, uri=uri) | Gets all interconnect ports.
Args:
id_or_uri: Can be either the interconnect id or the interconnect uri.
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
Returns:
list: All interconnect ports. |
380,603 | def _update_console(self, value=None):
if self._total == 0:
frac = 1.0
else:
frac = float(value) / float(self._total)
file = self._file
write = file.write
if frac > 1:
bar_fill = int(self._bar_length)
else:
bar_fill = int(float(self._bar_length) * frac)
write()
color_print( * bar_fill, , file=file, end=)
if bar_fill < self._bar_length:
color_print(, , file=file, end=)
write( * (self._bar_length - bar_fill - 1))
write()
if value >= self._total:
t = time.time() - self._start_time
prefix =
elif value <= 0:
t = None
prefix =
else:
t = ((time.time() - self._start_time) * (1.0 - frac)) / frac
prefix =
write(.format(
human_file_size(value),
self._human_total))
write(.format(.format(frac * 100.0)))
write(prefix)
if t is not None:
write(human_time(t))
self._file.flush() | Update the progress bar to the given value (out of the total
given to the constructor). |
380,604 | def contains_non_repeat_actions(self):
for action in self.actions:
if not isinstance(action, (int, dynamic.RepeatCommand)):
return True
return False | Return True if this block contains any non-repeat actions, because repeating repeat actions can get ugly fast |
380,605 | def udf(f=None, returnType=StringType()):
if f is None or isinstance(f, (str, DataType)):
return_type = f or returnType
return functools.partial(_create_udf, returnType=return_type,
evalType=PythonEvalType.SQL_BATCHED_UDF)
else:
return _create_udf(f=f, returnType=returnType,
evalType=PythonEvalType.SQL_BATCHED_UDF) | Creates a user defined function (UDF).
.. note:: The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> from pyspark.sql.types import IntegerType
>>> import random
>>> random_udf = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic()
.. note:: The user-defined functions do not support conditional expressions or short-circuiting
in boolean expressions; all parts end up being evaluated internally. If the functions
can fail on special rows, the workaround is to incorporate the condition into the functions.
.. note:: The user-defined functions do not take keyword arguments on the calling side.
:param f: python function if used as a standalone function
:param returnType: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
>>> from pyspark.sql.types import IntegerType
>>> slen = udf(lambda s: len(s), IntegerType())
>>> @udf
... def to_upper(s):
... if s is not None:
... return s.upper()
...
>>> @udf(returnType=IntegerType())
... def add_one(x):
... if x is not None:
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age"))
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show()
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+ |
380,606 | def _cmp(self, other):
if self is other:
return 0
try:
assert isinstance(other, CIMParameter)
except AssertionError:
raise TypeError(
_format("other must be CIMParameter, but is: {0}",
type(other)))
return (cmpname(self.name, other.name) or
cmpitem(self.type, other.type) or
cmpname(self.reference_class, other.reference_class) or
cmpitem(self.is_array, other.is_array) or
cmpitem(self.array_size, other.array_size) or
cmpdict(self.qualifiers, other.qualifiers) or
cmpitem(self.value, other.value) or
cmpitem(self.embedded_object, other.embedded_object)) | Comparator function for two :class:`~pywbem.CIMParameter` objects.
The comparison is based on their public attributes, in descending
precedence:
* `name`
* `type`
* `reference_class`
* `is_array`
* `array_size`
* `qualifiers`
* `value`
* `embedded_object`
The comparison takes into account any case insensitivities described
for these attributes.
Raises `TypeError` if the `other` object is not a
:class:`~pywbem.CIMParameter` object. |
380,607 | def zip(self, *items):
return self.__class__(list(zip(self.items, *items))) | Zip the collection together with one or more arrays.
:param items: The items to zip
:type items: list
:rtype: Collection |
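A quick usage sketch of the `zip` method above, assuming the surrounding Collection class simply wraps a plain list in its `items` attribute (the constructor shape is inferred from `self.__class__(...)` in the row, not documented there):

```python
# Hypothetical usage; Collection is assumed to accept a plain list of items.
c = Collection([1, 2, 3])
zipped = c.zip(['a', 'b', 'c'])
# zipped.items == [(1, 'a'), (2, 'b'), (3, 'c')]

# With more than one array, every extra iterable is zipped in as well:
c.zip(['a', 'b', 'c'], [True, False, True])
# -> items like (1, 'a', True), (2, 'b', False), (3, 'c', True)
```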
380,608 | def funnel_rebuild(psg_trm_spec):
param_score_gen, top_result_model, specification = psg_trm_spec
params, score, gen = param_score_gen
model = specification(*params)
rmsd = top_result_model.rmsd(model)
return rmsd, score, gen | Rebuilds a model and compares it to a reference model.
Parameters
----------
psg_trm_spec: (([float], float, int), AMPAL, specification)
A tuple containing the (parameters, score, generation) for a
model, a model built from the best scoring parameters, and the
specification used to rebuild models.
Returns
-------
rmsd_score_gen: (float, float, int)
A triple containing the RMSD to the top model, the BUFF score
and the generation of a model generated during the minimisation. |
380,609 | def most_recent_common_ancestor(self, *ts):
if len(ts) > 200:
res = self._large_mrca(ts)
else:
res = self._small_mrca(ts)
if res:
(res,), = res
else:
raise NoAncestor()
return res | Find the MRCA of some tax_ids.
Returns the MRCA of the specified tax_ids, or raises ``NoAncestor`` if
no ancestor of the specified tax_ids could be found. |
380,610 | def build_sdk_span(self, span):
custom_data = CustomData(tags=span.tags,
logs=self.collect_logs(span))
sdk_data = SDKData(name=span.operation_name,
custom=custom_data,
Type=self.get_span_kind_as_string(span))
if "arguments" in span.tags:
sdk_data.arguments = span.tags["arguments"]
if "return" in span.tags:
sdk_data.Return = span.tags["return"]
data = Data(service=instana.singletons.agent.sensor.options.service_name, sdk=sdk_data)
entity_from = {: instana.singletons.agent.from_.pid,
: instana.singletons.agent.from_.agentUuid}
json_span = JsonSpan(
t=span.context.trace_id,
p=span.parent_id,
s=span.context.span_id,
ts=int(round(span.start_time * 1000)),
d=int(round(span.duration * 1000)),
k=self.get_span_kind_as_int(span),
n="sdk",
f=entity_from,
data=data)
error = span.tags.pop("error", False)
ec = span.tags.pop("ec", None)
if error and ec:
json_span.error = error
json_span.ec = ec
return json_span | Takes a BasicSpan and converts it into an SDK-type JsonSpan |
380,611 | def _load_github_repo():
if in os.environ:
raise RuntimeError(
)
try:
with open(os.path.join(config_dir, ), ) as f:
return f.read()
except (OSError, IOError):
raise RuntimeError(
) | Loads the GitHub repository from the user's config. |
380,612 | def get(self):
open_tracking = {}
if self.enable is not None:
open_tracking["enable"] = self.enable
if self.substitution_tag is not None:
open_tracking["substitution_tag"] = self.substitution_tag.get()
return open_tracking | Get a JSON-ready representation of this OpenTracking.
:returns: This OpenTracking, ready for use in a request body.
:rtype: dict |
380,613 | def handle(client, request):
formaters = request.get(, None)
if not formaters:
formaters = [{: }]
logging.debug( + json.dumps(formaters, indent=4))
data = request.get(, None)
if not isinstance(data, str):
return send(client, , None)
max_line_length = None
for formater in formaters:
max_line_length = formater.get(, {}).get()
if max_line_length:
break
for formater in formaters:
name = formater.get(, None)
config = formater.get(, {})
if name not in FORMATERS:
return send(client, .format(name), None)
formater = FORMATERS[name]
if formater is None:
return send(client, .format(name), None)
if name == and max_line_length:
config.setdefault(, max_line_length)
data = formater(data, **config)
return send(client, None, data) | Handle format request
request struct:
{
'data': 'data_need_format',
'formaters': [
{
'name': 'formater_name',
'config': {} # None or dict
},
... # formaters
]
}
if no formaters are given, use the autopep8 formater with its default config |
380,614 | def has_perm(self, user_obj, perm, obj=None):
if not is_authenticated(user_obj):
return False
change_permission = self.get_full_permission_string()
delete_permission = self.get_full_permission_string()
if obj is None:
if self.any_permission:
return True
if self.change_permission and perm == change_permission:
return True
if self.delete_permission and perm == delete_permission:
return True
return False
elif user_obj.is_active:
if obj == user_obj:
if self.any_permission:
return True
if (self.change_permission and
perm == change_permission):
return True
if (self.delete_permission and
perm == delete_permission):
return True
return False | Check if the user has permission on themselves
If the user_obj is not authenticated, it returns ``False``.
If no object is specified, it returns ``True`` when the corresponding
permission was specified as ``True`` (changed from v0.7.0).
This behavior is based on the django system.
https://code.djangoproject.com/wiki/RowLevelPermissions
If an object is specified, it will return ``True`` if the object is the
user.
So users can change or delete themselves (you can change this behavior
by setting the ``any_permission``, ``change_permission`` or
``delete_permission`` attributes of this instance).
Parameters
----------
user_obj : django user model instance
A django user model instance which be checked
perm : string
`app_label.codename` formatted permission string
obj : None or django model instance
None or django model instance for object permission
Returns
-------
boolean
Whether the specified user has the specified permission (on the specified
object). |
380,615 | def get(zpool, prop=None, show_source=False, parsable=True):
*
ret = OrderedDict()
value_properties = [, , , ]
res = __salt__[](
__utils__[](
command=,
flags=[],
property_name=prop if prop else ,
target=zpool,
),
python_shell=False,
)
if res[] != 0:
return __utils__[](res)
for line in res[].splitlines():
prop_data = OrderedDict(list(zip(
value_properties,
[x for x in line.strip().split() if x not in []],
)))
del prop_data[]
if parsable:
prop_data[] = __utils__[](prop_data[], prop_data[])
else:
prop_data[] = __utils__[](prop_data[], prop_data[])
if show_source:
ret[prop_data[]] = prop_data
del ret[prop_data[]][]
else:
ret[prop_data[]] = prop_data[]
return ret | .. versionadded:: 2016.3.0
Retrieves the given list of properties
zpool : string
Name of storage pool
prop : string
Optional name of property to retrieve
show_source : boolean
Show source of property
parsable : boolean
Display numbers in parsable (exact) values
.. versionadded:: 2018.3.0
CLI Example:
.. code-block:: bash
salt '*' zpool.get myzpool |
380,616 | def cluster_application_attempts(self, application_id):
path = .format(
appid=application_id)
return self.request(path) | With the application attempts API, you can obtain a collection of
resources that represent an application attempt.
:param str application_id: The application id
:returns: API response object with JSON data
:rtype: :py:class:`yarn_api_client.base.Response` |
380,617 | def page(self, course, task, submission):
submission = self.submission_manager.get_input_from_submission(submission)
submission = self.submission_manager.get_feedback_from_submission(
submission,
show_everything=True,
translation=self.app._translations.get(self.user_manager.session_language(), gettext.NullTranslations())
)
to_display = {
problem.get_id(): {
"id": problem.get_id(),
"name": problem.get_name(self.user_manager.session_language()),
"defined": True
} for problem in task.get_problems()
}
to_display.update({
pid: {
"id": pid,
"name": pid,
"defined": False
} for pid in (set(submission["input"]) - set(to_display))
})
return self.template_helper.get_renderer().course_admin.submission(course, task, submission, to_display.values()) | Get all data and display the page |
380,618 | def graph_loads(graph_json):
layers = []
for layer in graph_json[]:
layer_info = Layer(layer[], layer[], layer[], layer[])
layer_info.is_delete = layer[]
layers.append(layer_info)
graph = Graph(graph_json[], [], [], [])
graph.layers = layers
return graph | Load graph |
380,619 | def _decompose_vectorized_indexer(indexer, shape, indexing_support):
assert isinstance(indexer, VectorizedIndexer)
if indexing_support is IndexingSupport.VECTORIZED:
return indexer, BasicIndexer(())
backend_indexer = []
np_indexer = []
indexer = [np.where(k < 0, k + s, k) if isinstance(k, np.ndarray) else k
for k, s in zip(indexer.tuple, shape)]
for k, s in zip(indexer, shape):
if isinstance(k, slice):
bk_slice, np_slice = _decompose_slice(k, s)
backend_indexer.append(bk_slice)
np_indexer.append(np_slice)
else:
oind, vind = np.unique(k, return_inverse=True)
backend_indexer.append(oind)
np_indexer.append(vind.reshape(*k.shape))
backend_indexer = OuterIndexer(tuple(backend_indexer))
np_indexer = VectorizedIndexer(tuple(np_indexer))
if indexing_support is IndexingSupport.OUTER:
return backend_indexer, np_indexer
backend_indexer, np_indexer1 = _decompose_outer_indexer(
backend_indexer, shape, indexing_support)
np_indexer = _combine_indexers(np_indexer1, shape, np_indexer)
return backend_indexer, np_indexer | Decompose vectorized indexer to the successive two indexers, where the
first indexer will be used to index backend arrays, while the second one
is used to index loaded on-memory np.ndarray.
Parameters
----------
indexer: VectorizedIndexer
indexing_support: one of IndexerSupport entries
Returns
-------
backend_indexer: OuterIndexer or BasicIndexer
np_indexers: an ExplicitIndexer (VectorizedIndexer / BasicIndexer)
Notes
-----
This function is used to realize the vectorized indexing for the backend
arrays that only support basic or outer indexing.
As an example, let us consider indexing a few elements from a backend array
with a vectorized indexer ([0, 3, 1], [2, 3, 2]).
Even if the backend array only supports outer indexing, it is more
efficient to load a subslice of the array than to load the entire array:
>>> backend_indexer = OuterIndexer([0, 1, 3], [2, 3])
>>> array = array[backend_indexer] # load subslice of the array
>>> np_indexer = VectorizedIndexer([0, 2, 1], [0, 1, 0])
>>> array[np_indexer] # vectorized indexing for on-memory np.ndarray. |
380,620 | def log(self, text, level=logging.INFO):
self._fileStore.logToMaster(text, level) | convenience wrapper for :func:`fileStore.logToMaster` |
380,621 | def desired_destination(self, network, edge):
n = len(network.out_edges[edge[1]])
if n <= 1:
return network.out_edges[edge[1]][0]
u = uniform()
pr = network._route_probs[edge[1]]
k = _choice(pr, u, n)
return network.out_edges[edge[1]][k] | Returns the agent's next destination given their current
location on the network.
An ``Agent`` chooses one of the out edges at random. The
probability that the ``Agent`` will travel along a specific
edge is specified in the :class:`QueueNetwork's<.QueueNetwork>`
transition matrix.
Parameters
----------
network : :class:`.QueueNetwork`
The :class:`.QueueNetwork` where the Agent resides.
edge : tuple
A 4-tuple indicating which edge this agent is located at.
The first two slots indicate the current edge's source and
target vertices, while the third slot indicates this edge's
``edge_index``. The last slot indicates the edge type of
that edge.
Returns
-------
out : int
Returns the edge index corresponding to the agent's next
edge to visit in the network.
See Also
--------
:meth:`.transitions` : :class:`QueueNetwork's<.QueueNetwork>`
method that returns the transition probabilities for each
edge in the graph. |
380,622 | def com_google_fonts_check_family_equal_glyph_names(ttFonts):
fonts = list(ttFonts)
all_glyphnames = set()
for ttFont in fonts:
all_glyphnames |= set(ttFont["glyf"].glyphs.keys())
missing = {}
available = {}
for glyphname in all_glyphnames:
missing[glyphname] = []
available[glyphname] = []
failed = False
for ttFont in fonts:
fontname = ttFont.reader.file.name
these_ones = set(ttFont["glyf"].glyphs.keys())
for glyphname in all_glyphnames:
if glyphname not in these_ones:
failed = True
missing[glyphname].append(fontname)
else:
available[glyphname].append(fontname)
for gn in missing.keys():
if missing[gn]:
available_styles = [style(k) for k in available[gn]]
missing_styles = [style(k) for k in missing[gn]]
if None not in available_styles + missing_styles:
avail = .join(available_styles)
miss = .join(missing_styles)
else:
avail = .join(available[gn])
miss = .join(missing[gn])
yield FAIL, (f"Glyphname is defined on {avail}"
f" but is missing on {miss}.")
if not failed:
yield PASS, "All font files have identical glyph names." | Fonts have equal glyph names? |
380,623 | def write_data(self, variable_id, value):
i = 0
j = 0
while i < 10:
try:
self.inst.query()
i = 12
j = 1
except:
self.connect()
time.sleep(1)
i += 1
logger.error("AFG1022 connect error i : %s" %i)
if j == 0:
logger.error("AFG1022-Instrument not connected")
return None
if variable_id == :
try:
N = int(RecordedData.objects.last_element(variable_id=Variable.objects.get(name=).id).value())
except:
N = 0
logger.error()
if N == 0:
cwt = DeviceWriteTask(variable_id=Variable.objects.get(name=).id, value=1, start=time.time())
cwt.save()
cwt = DeviceWriteTask(variable_id=Variable.objects.get(name=).id, value=1, start=time.time())
cwt.save()
return None
elif N == 1:
self.inst.read_termination =
Vepp = RecordedData.objects.last_element(variable_id=Variable.objects.get(name=).id).value()
Fmin = RecordedData.objects.last_element(variable_id=Variable.objects.get(name=).id).value()
cwt = DeviceWriteTask(variable_id=Variable.objects.get(name=).id, value=Vepp, start=time.time())
cwt.save()
cwt = DeviceWriteTask(variable_id=Variable.objects.get(name=).id, value=1, start=time.time())
cwt.save()
CMD = str(+str(Vepp)+)
self.inst.write(CMD)
cwt = DeviceWriteTask(variable_id=Variable.objects.get(name=).id, value=Fmin, start=time.time())
cwt.save()
self.write_data("set_freq", Fmin)
return True
else:
cwt = DeviceWriteTask(variable_id=Variable.objects.get(name=).id, value=1, start=time.time())
cwt.save()
logger.info("Init GBF - N : %s" %N)
return False
return None
elif variable_id == :
self.inst.write(+str(value))
cwt = DeviceWriteTask(variable_id=Variable.objects.get(name=).id, value=1, start=time.time())
cwt.save()
return self.parse_value(value)
elif variable_id == :
self.inst.write(+str(value)+)
F = RecordedData.objects.last_element(variable_id=Variable.objects.get(name=).id).value()
cwt = DeviceWriteTask(variable_id=Variable.objects.get(name=).id, value=F, start=time.time())
cwt.save()
cwt = DeviceWriteTask(variable_id=Variable.objects.get(name=).id, value=F, start=time.time())
cwt.save()
return self.parse_value(value)
elif variable_id == :
return self.parse_value(value)
elif variable_id == :
import os
os.system()
return 1
else:
return self.parse_value(self.inst.query(str(variable_id)++str(value)))
return None | write values to the device |
380,624 | def purge_db(self):
with self.engine.begin() as db:
purge_user(db, self.user_id) | Clear all records matching our user_id. |
380,625 | def get_projects(session, query):
response = make_get_request(session, , params_data=query)
json_data = response.json()
if response.status_code == 200:
return json_data[]
else:
raise ProjectsNotFoundException(
message=json_data[],
error_code=json_data[],
request_id=json_data[]) | Get one or more projects |
380,626 | def get_area_def(self, dsid):
geocoding = self.root.find()
epsg = geocoding.find().text
rows = int(geocoding.find( + str(dsid.resolution) + ).text)
cols = int(geocoding.find( + str(dsid.resolution) + ).text)
geoposition = geocoding.find( + str(dsid.resolution) + )
ulx = float(geoposition.find().text)
uly = float(geoposition.find().text)
xdim = float(geoposition.find().text)
ydim = float(geoposition.find().text)
area_extent = (ulx, uly + rows * ydim, ulx + cols * xdim, uly)
area = geometry.AreaDefinition(
self.tile,
"On-the-fly area",
self.tile,
{: epsg},
cols,
rows,
area_extent)
return area | Get the area definition of the dataset. |
380,627 | def extant_item(arg, arg_type):
if arg_type == "file":
if not os.path.isfile(arg):
raise argparse.ArgumentError(
None,
"The file {arg} does not exist.".format(arg=arg))
else:
return arg
elif arg_type == "directory":
if not os.path.isdir(arg):
raise argparse.ArgumentError(
None,
"The directory {arg} does not exist.".format(arg=arg))
else:
return arg | Determine if parser argument is an existing file or directory.
This technique comes from http://stackoverflow.com/a/11541450/95592
and from http://stackoverflow.com/a/11541495/95592
Args:
arg: parser argument containing filename to be checked
arg_type: string of either "file" or "directory"
Returns:
If the file exists, return the filename or directory.
Raises:
If the file does not exist, raise a parser error. |
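The docstring above describes `extant_item` as an argparse validator; here is a minimal sketch of how it is typically wired into a parser (flag names are illustrative, and `functools.partial` is used to bind `arg_type`):

```python
import argparse
import functools

parser = argparse.ArgumentParser()
# Validate that --config points at an existing file while parsing.
parser.add_argument(
    "--config",
    type=functools.partial(extant_item, arg_type="file"),
    help="path to an existing configuration file",
)
# Validate that --out-dir points at an existing directory.
parser.add_argument(
    "--out-dir",
    type=functools.partial(extant_item, arg_type="directory"),
    help="existing directory to write results into",
)
args = parser.parse_args()
```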
380,628 | def validate_slice_increment(dicoms):
first_image_position = numpy.array(dicoms[0].ImagePositionPatient)
previous_image_position = numpy.array(dicoms[1].ImagePositionPatient)
increment = first_image_position - previous_image_position
for dicom_ in dicoms[2:]:
current_image_position = numpy.array(dicom_.ImagePositionPatient)
current_increment = previous_image_position - current_image_position
if not numpy.allclose(increment, current_increment, rtol=0.05, atol=0.1):
logger.warning()
logger.warning()
logger.warning( % (previous_image_position, increment))
logger.warning( % (current_image_position, current_increment))
if in dicom_:
logger.warning( % dicom_.InstanceNumber)
logger.warning()
raise ConversionValidationError()
previous_image_position = current_image_position | Validate that the distance between all slices is equal (or very close to equal)
:param dicoms: list of dicoms |
380,629 | def hot(self, limit=None):
return self._reddit.hot(self.display_name, limit=limit) | GETs hot links from this subreddit. Calls :meth:`narwal.Reddit.hot`.
:param limit: max number of links to return |
380,630 | def copy(self):
return self.__class__(
amount=self["amount"],
asset=self["asset"].copy(),
blockchain_instance=self.blockchain,
) | Copy the instance and make sure not to use a reference |
380,631 | def score_group(group_name=None):
warnings.warn()
def _inner(func):
def _dec(s, ds):
ret_val = func(s, ds)
if not isinstance(ret_val, list):
ret_val = [ret_val]
def dogroup(r):
cur_grouping = r.name
if isinstance(cur_grouping, tuple):
cur_grouping = list(cur_grouping)
elif not isinstance(cur_grouping, list):
cur_grouping = [cur_grouping]
cur_grouping.insert(0, group_name)
return Result(r.weight, r.value, tuple(cur_grouping), r.msgs)
ret_val = [fix_return_value(x, func.__name__, func, s) for x in
ret_val]
ret_val = list(map(dogroup, ret_val))
return ret_val
return wraps(func)(_dec)
return _inner | Warning: this is deprecated as of Compliance Checker v3.2!
Please do not use scoring groups, and update your plugins
if necessary |
380,632 | def inst_matches(self, start, end, instr, target=None, include_beyond_target=False):
try:
None in instr
except:
instr = [instr]
first = self.offset2inst_index[start]
result = []
for inst in self.insts[first:]:
if inst.opcode in instr:
if target is None:
result.append(inst.offset)
else:
t = self.get_target(inst.offset)
if include_beyond_target and t >= target:
result.append(inst.offset)
elif t == target:
result.append(inst.offset)
pass
pass
pass
if inst.offset >= end:
break
pass
return result | Find all `instr` in the block from start to end.
`instr` is a Python opcode or a list of opcodes
If `instr` is an opcode with a target (like a jump), a target
destination can be specified which must match precisely.
Return a list of offsets to them, or [] if none are found. |
380,633 | def fit_general(xy, uv):
gxy = uv.astype(ndfloat128)
guv = xy.astype(ndfloat128)
Sx = gxy[:,0].sum()
Sy = gxy[:,1].sum()
Su = guv[:,0].sum()
Sv = guv[:,1].sum()
Sux = np.dot(guv[:,0], gxy[:,0])
Svx = np.dot(guv[:,1], gxy[:,0])
Suy = np.dot(guv[:,0], gxy[:,1])
Svy = np.dot(guv[:,1], gxy[:,1])
Sxx = np.dot(gxy[:,0], gxy[:,0])
Syy = np.dot(gxy[:,1], gxy[:,1])
Sxy = np.dot(gxy[:,0], gxy[:,1])
n = len(xy[:,0])
M = np.array([[Sx, Sy, n], [Sxx, Sxy, Sx], [Sxy, Syy, Sy]])
U = np.array([Su, Sux, Suy])
V = np.array([Sv, Svx, Svy])
try:
invM = np.linalg.inv(M.astype(np.float64))
except np.linalg.LinAlgError:
raise SingularMatrixError(
"Singular matrix: suspected colinear points."
)
P = np.dot(invM, U).astype(np.float64)
Q = np.dot(invM, V).astype(np.float64)
if not (np.all(np.isfinite(P)) and np.all(np.isfinite(Q))):
raise ArithmeticError()
result = build_fit(P, Q, )
resids = xy - np.dot(uv, result[]) - result[]
result[] = resids.std(axis=0)
result[] = resids
result[] = float(np.sqrt(np.mean(2 * resids**2)))
result[] = float(np.mean(np.linalg.norm(resids, axis=1)))
return result | Performs a general linear fit between
matched lists of positions 'xy' and 'uv'.
Output: (same as for fit_arrays)
=================================
DEVELOPMENT NOTE:
Checks need to be put in place to verify that
enough objects are available for a fit.
================================= |
380,634 | def even_even(self):
return self.select(lambda Z, N: not(Z % 2) and not(N % 2), name=self.name) | Selects even-even nuclei from the table |
380,635 | def copy_path(self):
path = cairo.cairo_copy_path(self._pointer)
result = list(_iter_path(path))
cairo.cairo_path_destroy(path)
return result | Return a copy of the current path.
:returns:
A list of ``(path_operation, coordinates)`` tuples
of a :ref:`PATH_OPERATION` string
and a tuple of floats coordinates
whose content depends on the operation type:
* :obj:`MOVE_TO <PATH_MOVE_TO>`: 1 point ``(x, y)``
* :obj:`LINE_TO <PATH_LINE_TO>`: 1 point ``(x, y)``
* :obj:`CURVE_TO <PATH_CURVE_TO>`: 3 points
``(x1, y1, x2, y2, x3, y3)``
* :obj:`CLOSE_PATH <PATH_CLOSE_PATH>` 0 points ``()`` (empty tuple) |
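A short sketch of iterating the returned path, assuming the cairocffi bindings this `Context.copy_path` appears to come from; the surface dimensions and drawing calls are illustrative:

```python
import cairocffi as cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
ctx = cairo.Context(surface)
ctx.move_to(10, 10)
ctx.line_to(90, 90)
ctx.close_path()

for operation, points in ctx.copy_path():
    # e.g. ('MOVE_TO', (10.0, 10.0)), ('LINE_TO', (90.0, 90.0)), ('CLOSE_PATH', ())
    print(operation, points)
```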
380,636 | def plot_latent(self, labels=None, which_indices=None,
resolution=60, legend=True,
plot_limits=None,
updates=False,
kern=None, marker=,
num_samples=1000, projection=,
predict_kwargs={},
scatter_kwargs=None, **imshow_kwargs):
from ..plotting.gpy_plot.latent_plots import plot_latent
if "Yindex" not in predict_kwargs:
predict_kwargs[] = 0
Yindex = predict_kwargs[]
self.kern = self.bgplvms[Yindex].kern
self.likelihood = self.bgplvms[Yindex].likelihood
return plot_latent(self, labels, which_indices, resolution, legend, plot_limits, updates, kern, marker, num_samples, projection, scatter_kwargs) | see plotting.matplot_dep.dim_reduction_plots.plot_latent
If predict_kwargs is None, the latent space of the 0th dataset (and kernel) is plotted; otherwise give
predict_kwargs=dict(Yindex='index') to plot only the latent space of the dataset with 'index'. |
380,637 | def order_by(self, *field_names):
if not self._search_ordered:
self._search_ordered = len(self._search_terms) > 0
return super(SearchableQuerySet, self).order_by(*field_names) | Mark the filter as being ordered if search has occurred. |
380,638 | def _process_response(response, save_to):
status_code = response.status_code
if status_code == 200 and save_to:
if save_to.startswith("~"): save_to = os.path.expanduser(save_to)
if os.path.isdir(save_to) or save_to.endswith(os.path.sep):
dirname = os.path.abspath(save_to)
filename = H2OConnection._find_file_name(response)
else:
dirname, filename = os.path.split(os.path.abspath(save_to))
fullname = os.path.join(dirname, filename)
try:
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(fullname, "wb") as f:
for chunk in response.iter_content(chunk_size=65536):
if chunk:
f.write(chunk)
except OSError as e:
raise H2OValueError("Cannot write to file %s: %s" % (fullname, e))
return fullname
content_type = response.headers.get("Content-Type", "")
if ";" in content_type:
content_type = content_type[:content_type.index(";")]
if content_type == "application/json":
try:
data = response.json(object_pairs_hook=H2OResponse)
except (JSONDecodeError, requests.exceptions.ContentDecodingError) as e:
raise H2OServerError("Malformed JSON from server (%s):\n%s" % (str(e), response.text))
else:
data = response.text
if status_code in {200, 201, 202, 204}:
return data
if status_code in {400, 404, 412} and isinstance(data, (H2OErrorV3, H2OModelBuilderErrorV3)):
raise H2OResponseError(data)
raise H2OServerError("HTTP %d %s:\n%r" % (status_code, response.reason, data)) | Given a response object, prepare it to be handed over to the external caller.
Preparation steps include:
* detect if the response has error status, and convert it to an appropriate exception;
* detect Content-Type, and based on that either parse the response as JSON or return as plain text. |
380,639 | def save(self, *objs, condition=None, atomic=False):
objs = set(objs)
validate_not_abstract(*objs)
for obj in objs:
self.session.save_item({
"TableName": self._compute_table_name(obj.__class__),
"Key": dump_key(self, obj),
**render(self, obj=obj, atomic=atomic, condition=condition, update=True)
})
object_saved.send(self, engine=self, obj=obj)
logger.info("successfully saved {} objects".format(len(objs))) | Save one or more objects.
:param objs: objects to save.
:param condition: only perform each save if this condition holds.
:param bool atomic: only perform each save if the local and DynamoDB versions of the object match.
:raises bloop.exceptions.ConstraintViolation: if the condition (or atomic) is not met. |
380,640 | def _read_mode_tsopt(self, size, kind):
temp = struct.unpack(, self._read_fileng(size))
data = dict(
kind=kind,
length=size,
val=temp[0],
ecr=temp[1],
)
return data | Read Timestamps option.
Positional arguments:
* size - int, length of option
* kind - int, 8 (Timestamps)
Returns:
* dict -- extracted Timestamps (TS) option
Structure of TCP TSopt [RFC 7323]:
+-------+-------+---------------------+---------------------+
|Kind=8 | 10 | TS Value (TSval) |TS Echo Reply (TSecr)|
+-------+-------+---------------------+---------------------+
1 1 4 4
Octets Bits Name Description
0 0 tcp.ts.kind Kind (8)
1 8 tcp.ts.length Length (10)
2 16 tcp.ts.val Timestamp Value
6 48 tcp.ts.ecr Timestamps Echo Reply |
380,641 | def entry_detail(request, slug, template=):
entry = get_object_or_404(Entry.public, slug=slug)
context = {
: entry,
}
return render_to_response(
template,
context,
context_instance=RequestContext(request),
) | Returns a response of an individual entry, for the given slug. |
380,642 | def multi_send(self, template, emails, _vars=None, evars=None, schedule_time=None, options=None):
_vars = _vars or {}
evars = evars or {}
options = options or {}
data = {: template,
: .join(emails) if isinstance(emails, list) else emails,
: _vars.copy(),
: evars.copy(),
: options.copy()}
if schedule_time is not None:
data[] = schedule_time
return self.api_post(, data) | Remotely send an email template to multiple email addresses.
http://docs.sailthru.com/api/send
@param template: template string
@param emails: List with email values or comma separated email string
@param _vars: a key/value hash of the replacement vars to use in the send. Each var may be referenced as {varname} within the template itself
@param options: optional dictionary to include replyto and/or test keys
@param schedule_time: do not send the email immediately, but at some point in the future. Any date recognized by PHP's strtotime function is valid, but be sure to specify timezone or use a UTC time to avoid confusion |
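A hedged usage sketch for the send call above, assuming a configured `SailthruClient`; the template name, addresses and variables are placeholders:

```python
client = SailthruClient(api_key, api_secret)  # credentials assumed to be defined elsewhere
response = client.multi_send(
    template="welcome-email",
    emails=["a@example.com", "b@example.com"],
    _vars={"site_name": "Example"},                  # shared replacement vars
    evars={"a@example.com": {"first_name": "Ada"}},  # per-recipient vars
    options={"test": 1},                             # send in test mode
)
# Inspect `response` as described in the Sailthru API docs.
```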
380,643 | def draft_context(cls):
previous_state = g.get()
try:
g.draft = True
yield
finally:
g.draft = previous_state | Set the context to draft |
380,644 | def checkedItems( self ):
if not self.isCheckable():
return []
return [nativestring(self.itemText(i)) for i in self.checkedIndexes()] | Returns the checked items for this combobox.
:return [<str>, ..] |
380,645 | def load_var_files(opt, p_obj=None):
obj = {}
if p_obj:
obj = p_obj
for var_file in opt.extra_vars_file:
LOG.debug("loading vars from %s", var_file)
obj = merge_dicts(obj.copy(), load_var_file(var_file, obj))
return obj | Load variable files, merge, return contents |
380,646 | def interpolate(values, color_map=None, dtype=np.uint8):
if color_map is None:
cmap = linear_color_map
else:
from matplotlib.pyplot import get_cmap
cmap = get_cmap(color_map)
values = np.asanyarray(values, dtype=np.float64).ravel()
colors = cmap((values - values.min()) / values.ptp())
rgba = to_rgba(colors, dtype=dtype)
return rgba | Given a 1D list of values, return interpolated colors
for the range.
Parameters
---------------
values : (n, ) float
Values to be interpolated over
color_map : None, or str
Key to a colormap contained in:
matplotlib.pyplot.colormaps()
e.g: 'viridis'
Returns
-------------
interpolated : (n, 4) dtype
Interpolated RGBA colors |
380,647 | def received_message(self, msg):
logger.debug("Received message: %s", msg)
if msg.is_binary:
raise ValueError("Binary messages not supported")
resps = json.loads(msg.data)
cmd_group = _get_cmds_id(*resps)
if cmd_group:
(cmds, promise) = self._cmd_groups[cmd_group]
promise.fulfill((cmds, resps))
else:
try:
self.received_unsolicited(resps)
except:
logger.exception("Error in unsolicited msg handler")
raise | Handle receiving a message by checking whether it is in response
to a command or unsolicited, and dispatching it to the appropriate
object method. |
380,648 | def ge(self, other):
self._raise_if_null(other)
return self.begin >= getattr(other, , other) | Greater than or overlaps. Returns True if no part of this Interval
extends lower than other.
:raises ValueError: if either self or other is a null Interval
:param other: Interval or point
:return: True or False
:rtype: bool |
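A small sketch of the comparison semantics, assuming the intervaltree-style `Interval(begin, end)` constructor:

```python
a = Interval(5, 10)
b = Interval(1, 4)

a.ge(b)   # True:  a.begin (5) >= b.begin (1), so no part of a extends below b
a.ge(3)   # True:  comparing against a plain point uses the point itself
a.ge(7)   # False: a.begin (5) is lower than the point 7
```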
380,649 | def shard_data(self, region):
url, query = LolStatusApiV3Urls.shard_data(region=region)
return self._raw_request(self.shard_data.__name__, region, url, query) | Get League of Legends status for the given shard.
Requests to this API are not counted against the application Rate Limits.
:param string region: the region to execute this request on
:returns: ShardStatus |
380,650 | def remove_tiers(self, tiers):
for a in tiers:
self.remove_tier(a, clean=False)
self.clean_time_slots() | Remove multiple tiers; note that this is a lot faster than removing
them individually because of the delayed cleaning of timeslots.
:param list tiers: Names of the tiers to remove.
:raises KeyError: If a tier is non-existent. |
380,651 | def close(self):
keys = set(self._conns.keys())
for key in keys:
self.stop_socket(key)
self._conns = {} | Close all connections |
380,652 | def _do_packet_out(self, datapath, data, in_port, actions):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
out = parser.OFPPacketOut(
datapath=datapath, buffer_id=ofproto.OFP_NO_BUFFER,
data=data, in_port=in_port, actions=actions)
datapath.send_msg(out) | send a packet. |
380,653 | def t384(args):
p = OptionParser(t384.__doc__)
opts, args = p.parse_args(args)
plate, splate = get_plate()
fw = sys.stdout
for i in plate:
for j, p in enumerate(i):
if j != 0:
fw.write()
fw.write(p)
fw.write() | %prog t384
Print out a table converting between 96-well and 384-well plates |
380,654 | def read_adjacency_matrix(file_path, separator):
file_row_generator = get_file_row_generator(file_path, separator)
row = list()
col = list()
append_row = row.append
append_col = col.append
for file_row in file_row_generator:
source_node = np.int64(file_row[0])
target_node = np.int64(file_row[1])
append_row(source_node)
append_col(target_node)
append_row(target_node)
append_col(source_node)
row = np.array(row, dtype=np.int64)
col = np.array(col, dtype=np.int64)
data = np.ones_like(row, dtype=np.float64)
number_of_nodes = np.max(row)
row -= 1
col -= 1
adjacency_matrix = sparse.coo_matrix((data, (row, col)), shape=(number_of_nodes, number_of_nodes))
return adjacency_matrix | Reads an edge list in csv format and returns the adjacency matrix in SciPy Sparse COOrdinate format.
Inputs: - file_path: The path where the adjacency matrix is stored.
- separator: The delimiter among values (e.g. ",", "\t", " ")
Outputs: - adjacency_matrix: The adjacency matrix in SciPy Sparse COOrdinate format. |
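A usage sketch, assuming a small 1-indexed undirected edge list on disk and that the helper `get_file_row_generator` yields the separated fields of each line:

```python
# edges.csv (node ids are 1-based, one undirected edge per line):
#   1,2
#   2,3
#   1,3
adjacency_matrix = read_adjacency_matrix("edges.csv", ",")

print(adjacency_matrix.shape)      # (3, 3) -- ids are shifted to 0-based internally
print(adjacency_matrix.todense())  # symmetric matrix with 1.0 for every edge in both directions
```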
380,655 | def reload_list(self):
self.leetcode.load()
if self.leetcode.quizzes and len(self.leetcode.quizzes) > 0:
self.home_view = self.make_listview(self.leetcode.quizzes)
self.view_stack = []
self.goto_view(self.home_view) | Press R in home view to retrieve quiz list |
380,656 | def _salt_send_event(opaque, conn, data):
tag_prefix = opaque['prefix']
object_type = opaque['object']
event_type = opaque['event']
uri = urlparse(conn.getURI())
uri_tag = [uri.scheme]
if uri.netloc:
uri_tag.append(uri.netloc)
path = uri.path.strip()
if path:
uri_tag.append(path)
uri_str = "/".join(uri_tag)
all_data = {
: conn.getURI()
}
all_data.update(data)
tag = .join((tag_prefix, uri_str, object_type, event_type))
if __opts__.get() == :
salt.utils.event.get_master_event(
__opts__,
__opts__[]).fire_event(all_data, tag)
else:
__salt__[](tag, all_data) | Convenience function adding common data to the event and sending it
on the salt event bus.
:param opaque: the opaque data that is passed to the callback.
This is a dict with 'prefix', 'object' and 'event' keys.
:param conn: libvirt connection
:param data: additional event data dict to send |
380,657 | def set_context_suffix(self, name, suffix):
data = self._context(name)
data["suffix"] = suffix
self._flush_tools() | Set a context's suffix.
This will be applied to all wrappers for the tools in this context. For
example, a tool called 'foo' would appear as 'foo<suffix>' in the
suite's bin path.
Args:
name (str): Name of the context to suffix.
suffix (str): Suffix to apply to tools. |
380,658 | def _get_course_content(course_id, course_url, sailthru_client, site_code, config):
cache_key = "{}:{}".format(site_code, course_url)
response = cache.get(cache_key)
if not response:
try:
sailthru_response = sailthru_client.api_get("content", {"id": course_url})
if not sailthru_response.is_ok():
response = {}
else:
response = sailthru_response.json
cache.set(cache_key, response, config.get())
except SailthruClientError:
response = {}
if not response:
logger.error(
)
response = _get_course_content_from_ecommerce(course_id, site_code=site_code)
if response:
cache.set(cache_key, response, config.get())
return response | Get course information using the Sailthru content api or from cache.
If there is an error, just return with an empty response.
Arguments:
course_id (str): course key of the course
course_url (str): LMS url for course info page.
sailthru_client (object): SailthruClient
site_code (str): site code
config (dict): config options
Returns:
course information from Sailthru |
380,659 | def convert_epoch_to_timestamp(cls, timestamp, tsformat):
return time.strftime(tsformat, time.gmtime(timestamp)) | Converts the given float representing UNIX-epochs into an actual timestamp.
:param float timestamp: Timestamp as UNIX-epochs.
:param string tsformat: Format of the given timestamp. This is used to convert the
timestamp from UNIX epochs. For valid format examples take a look at the
:py:func:`time.strftime` documentation.
:return: Returns the timestamp as defined in format.
:rtype: string |
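The owning class is not shown in the row above, so here is a standalone check that mirrors the classmethod body (note it uses `time.gmtime`, so the result is in UTC):

```python
import time

def epoch_to_timestamp(timestamp, tsformat):
    # Same body as the classmethod above, lifted out for a quick check.
    return time.strftime(tsformat, time.gmtime(timestamp))

assert epoch_to_timestamp(0.0, "%Y-%m-%d %H:%M:%S") == "1970-01-01 00:00:00"
assert epoch_to_timestamp(86400.0, "%Y-%m-%d") == "1970-01-02"
```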
380,660 | def DbGetHostServersInfo(self, argin):
self._log.debug("In DbGetHostServersInfo()")
argin = replace_wildcard(argin)
return self.db.get_host_servers_info(argin) | Get info about all servers running on specified host, name, mode and level
:param argin: Host name
:type: tango.DevString
:return: Server info for all servers running on specified host
:rtype: tango.DevVarStringArray |
380,661 | def rename_file(db, user_id, old_api_path, new_api_path):
if file_exists(db, user_id, new_api_path):
raise FileExists(new_api_path)
old_dir, old_name = split_api_filepath(old_api_path)
new_dir, new_name = split_api_filepath(new_api_path)
if old_dir != new_dir:
raise ValueError(
dedent(
.format(
old_api_path=old_api_path,
new_api_path=new_api_path
)
)
)
db.execute(
files.update().where(
_file_where(user_id, old_api_path),
).values(
name=new_name,
created_at=func.now(),
)
) | Rename a file. |
380,662 | def generate_hash(data, algorithm='chd_ph', hash_fns=(), chd_keys_per_bin=1,
chd_load_factor=None, fch_bits_per_key=None,
num_graph_vertices=None, brz_memory_size=8,
brz_temp_dir=None, brz_max_keys_per_bucket=128,
bdz_precomputed_rank=7, chd_avg_keys_per_bucket=4):
cfg = _cfg(algorithm, hash_fns, chd_keys_per_bin, chd_load_factor,
fch_bits_per_key, num_graph_vertices, brz_memory_size,
brz_temp_dir, brz_max_keys_per_bucket, bdz_precomputed_rank,
chd_avg_keys_per_bucket)
with create_adapter(_cmph, ffi, data) as source:
with _create_config(source, cfg) as config:
_mph = _cmph.cmph_new(config)
if not _mph:
raise RuntimeError("MPH generation failed")
return MPH(_mph) | Generates a new Minimal Perfect Hash (MPH)
Parameters
----------
data : list, array-like, file-like
The input that is used to generate the minimal perfect hash.
Be aware, in most cases the input is expected to be distinct, and
many of the algorithms benefit from the input being sorted.
algorithm : string, optional
{chd_ph (default), chd, bmz, bmz8, chm, brz, fch, bdz, bdz_ph}
The algorithm to use in generating MPH's, choice of:
chd / chd_ph - Compress Hash and Displace (default)
(http://cmph.sourceforge.net/chd.html)
- It is the fastest algorithm to build PHFs and MPHFs in linear
time.
- It generates the most compact PHFs and MPHFs we know of.
- It can generate PHFs with a load factor up to 99 %.
- It can be used to generate t-perfect hash functions. A
t-perfect hash function allows at most t collisions in a given
bin. It is a well-known fact that modern memories are
organized as blocks which constitute transfer unit. Example of
such blocks are cache lines for internal memory or sectors for
hard disks. Thus, it can be very useful for devices that
carry out I/O operations in blocks.
- It is a two level scheme. It uses a first level hash function
to split the key set in buckets of average size determined by
a parameter b in the range [1,32]. In the second level it uses
displacement values to resolve the collisions that have given
rise to the buckets.
- It can generate MPHFs that can be stored in approximately 2.07
bits per key.
- For a load factor equal to the maximum one that is achieved by
the BDZ algorithm (81 %), the resulting PHFs are stored in
approximately 1.40 bits per key.
bdz - BDZ / BPZ algorithm
(http://cmph.sourceforge.net/bdz.html)
- It is very simple and efficient. It outperforms all others
except CHD.
- It constructs both PHFs and MPHFs in linear time.
- The maximum load factor one can achieve for a PHF is 1/1.23.
- It is based on acyclic random 3-graphs. A 3-graph is a
generalization of a graph where each edge connects 3 vertices
instead of only 2.
- The resulting MPHFs are not order preserving.
- The resulting MPHFs can be stored in only (2 + x)cn bits,
where c should be larger than or equal to 1.23 and x is a
constant larger than 0 (actually, x = 1/b and b is a parameter
that should be larger than 2). For c = 1.23 and b = 8, the
resulting functions are stored in approximately 2.6 bits per
key.
- For its maximum load factor (81 %), the resulting PHFs are
stored in approximately 1.95 bits per key.
bmz - Botelho, Menoti and Ziviani algorithm:
(http://cmph.sourceforge.net/bdz.html)
- Constructs MPHFs in linear time.
- It is based on cyclic random graphs. This makes it faster than
the CHM algorithm.
- The resulting MPHFs are not order preserving.
- The resulting MPHFs are more compact than the ones generated by
the CHM algorithm and can be stored in 4cn bytes, where c is in
the range [0.93,1.15].
brz - BRZ algorithm:
(http://cmph.sourceforge.net/brz.html)
- A very fast external memory based algorithm for constructing
minimal perfect hash functions for sets in the order of
billions of keys.
- It works in linear time.
- The resulting MPHFs are not order preserving.
- The resulting MPHFs can be stored using less than 8.0 bits per
key.
chm - Czech, Havas and Majewski algorithm:
(http://cmph.sourceforge.net/chm.html)
- Construct minimal MPHFs in linear time.
- It is based on acyclic random graphs
- The resulting MPHFs are order preserving.
- The resulting MPHFs are stored in 4cn bytes, where c is greater
than 2.
fch - Fox, Chen and Heath algorithm:
(http://cmph.sourceforge.net/chm.html)
- Construct minimal perfect hash functions that require less than
4 bits per key to be stored.
- The resulting MPHFs are very compact and very efficient at
evaluation time
- The algorithm is only efficient for small sets.
- It is used as internal algorithm in the BRZ algorithm to
efficiently solve larger problems and even so to generate MPHFs
that require approximately 4.1 bits per key to be stored. For
that, you just need to set the parameters -a to brz and -c to a
value larger than or equal to 2.6.
hash_fns : list {jenkins (default), count} optional
Internal hash functions to use inside MPH generation functions,
can be multiple fns as a list.
chd_keys_per_bin : int [1 to 128], optional
Set the number of keys per bin for a t-perfect hashing function. A
t-perfect hash function allows at most t collisions in a given bin.
This parameter applies only to the `chd` and `chd_ph` algorithms.
Its value should be an integer in the range [1, 128].
Default is 1
chd_load_factor : float, optional
The load factor used in the `chd_ph` algorithm
fch_bits_per_key : int, optional
The number of bits per key required in the FCH algorithm
num_graph_vertices : int, optional
The number of vertices in the graph for the algorithms BMZ and CHM
brz_memory_size : int (default 8), optional
Main memory availability (in MB) used in BRZ algorithm
Default is 8Mb
brz_temp_dir : string, optional
Temporary directory used in BRZ algorithm
brz_max_keys_per_bucket : int [64 to 175] (default 128), optional
Used to make the maximal number of keys in a bucket lower than 256.
In this case its value should be an integer in the range [64,175].
Default is 128.
bdz_precomputed_rank : int [3 to 10] (default 7), optional
For BDZ it is used to determine the size of some precomputed rank
information and its value should be an integer in the range [3,10].
Default is 7.
The larger is this value, the more compact are the resulting
functions and the slower are them at evaluation time.
chd_avg_keys_per_bucket : int [1 to 32] (default 4), optional
For CHD and CHD_PH it is used to set the average number of keys per
bucket and its value should be an integer in the range [1,32].
Default is 4.
The larger is this value, the slower is the construction of the
functions.
Returns
-------
MPH
A wrapper object that represents a minimal perfect hash in memory
Raises
------
ValueError
If arguments presented are incomplete, or incompatable
RuntimeError
If the MPH generation fails |
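A hedged usage sketch based on the parameter documentation above; the key sets and tuning values are illustrative, and only construction is shown because the returned MPH object's methods do not appear in this row:

```python
# In-memory key set: distinct and sorted, as the docstring recommends.
keys = sorted({"apple", "banana", "cherry", "date"})
mph = generate_hash(keys, algorithm='chd_ph', chd_avg_keys_per_bucket=4)

# Large external key set: the BRZ algorithm works from a file-like object
# with a bounded main-memory budget (in MB).
with open("keys.txt") as f:  # assumed file with one key per line
    big_mph = generate_hash(f, algorithm='brz', brz_memory_size=64)
```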
380,663 | def save(self, dolist=0):
quoted = not dolist
array_size = 1
for d in self.shape:
array_size = d*array_size
ndim = len(self.shape)
fields = (7+2*ndim+len(self.value))*[""]
fields[0] = self.name
fields[1] = self.type
fields[2] = self.mode
fields[3] = str(ndim)
next = 4
for d in self.shape:
fields[next] = str(d); next += 1
fields[next] = ; next += 1
nvstart = 7+2*ndim
if self.choice is not None:
schoice = list(map(self.toString, self.choice))
schoice.insert(0,)
schoice.append()
fields[nvstart-3] = repr(.join(schoice))
elif self.min not in [None,INDEF]:
fields[nvstart-3] = self.toString(self.min,quoted=quoted)
if quoted:
fields[nvstart-3] = + fields[nvstart-3]
if self.max not in [None,INDEF]:
fields[nvstart-2] = self.toString(self.max,quoted=quoted)
if self.prompt:
if quoted:
sprompt = repr(self.prompt)
else:
sprompt = self.prompt
sprompt = sprompt.replace(r, )
sprompt = sprompt.replace(r, )
fields[nvstart-1] = sprompt
for i in range(len(self.value)):
fields[nvstart+i] = self.toString(self.value[i],quoted=quoted)
if dolist:
return fields
else:
fields[nvstart] = + fields[nvstart]
return .join(fields) | Return .par format string for this parameter
If dolist is set, returns fields as a list of strings. Default
is to return a single string appropriate for writing to a file. |
380,664 | def vectorize_inhibit(audio: np.ndarray) -> np.ndarray:
def samp(x):
return int(pr.sample_rate * x)
inputs = []
for offset in range(samp(inhibit_t), samp(inhibit_dist_t), samp(inhibit_hop_t)):
if len(audio) - offset < samp(pr.buffer_t / 2.):
break
inputs.append(vectorize(audio[:-offset]))
return np.array(inputs) if inputs else np.empty((0, pr.n_features, pr.feature_size)) | Returns an array of inputs generated from the
wake word audio that shouldn't cause an activation |
380,665 | def get_definition(self, stmt: Statement,
sctx: SchemaContext) -> Tuple[Statement, SchemaContext]:
if stmt.keyword == "uses":
kw = "grouping"
elif stmt.keyword == "type":
kw = "typedef"
else:
raise ValueError("not a 'uses' or 'type' statement")
loc, did = self.resolve_pname(stmt.argument, sctx.text_mid)
if did == sctx.text_mid:
dstmt = stmt.get_definition(loc, kw)
if dstmt:
return (dstmt, sctx)
else:
dstmt = self.modules[did].statement.find1(kw, loc)
if dstmt:
return (dstmt, SchemaContext(sctx.schema_data, sctx.default_ns, did))
for sid in self.modules[did].submodules:
dstmt = self.modules[sid].statement.find1(kw, loc)
if dstmt:
return (
dstmt, SchemaContext(sctx.schema_data, sctx.default_ns, sid))
raise DefinitionNotFound(kw, stmt.argument) | Find the statement defining a grouping or derived type.
Args:
stmt: YANG "uses" or "type" statement.
sctx: Schema context where the definition is used.
Returns:
A tuple consisting of the definition statement ('grouping' or
'typedef') and schema context of the definition.
Raises:
ValueError: If `stmt` is neither "uses" nor "type" statement.
ModuleNotRegistered: If `mid` is not registered in the data model.
UnknownPrefix: If the prefix specified in the argument of `stmt`
is not declared.
DefinitionNotFound: If the corresponding definition is not found. |
380,666 | def clear_file(self):
if (self.get_file_metadata().is_read_only() or
self.get_file_metadata().is_required()):
raise NoAccess()
if in self.my_osid_object_form._my_map[]:
rm = self.my_osid_object_form._get_provider_manager()
catalog_id_str =
if in self.my_osid_object_form._my_map:
catalog_id_str = self.my_osid_object_form._my_map[][0]
elif in self.my_osid_object_form._my_map:
catalog_id_str = self.my_osid_object_form._my_map[][0]
try:
try:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str),
self.my_osid_object_form._proxy)
except NullArgument:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str))
except AttributeError:
try:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str),
self.my_osid_object_form._proxy)
except NullArgument:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str))
aas.delete_asset(Id(self.my_osid_object_form._my_map[][]))
self.my_osid_object_form._my_map[] = \
dict(self.get_file_metadata().get_default_object_values()[0]) | stub |
380,667 | def availabledirs(self) -> Folder2Path:
directories = Folder2Path()
for directory in os.listdir(self.basepath):
if not directory.startswith():
path = os.path.join(self.basepath, directory)
if os.path.isdir(path):
directories.add(directory, path)
elif directory.endswith():
directories.add(directory[:-4], path)
return directories | Names and paths of the available working directories.
Available working directories are those being stored in the
base directory of the respective |FileManager| subclass.
Folders with names starting with an underscore are ignored
(use this for directories handling additional data files,
if you like). Zipped directories, which can be unpacked
on the fly, do also count as available directories:
>>> from hydpy.core.filetools import FileManager
>>> filemanager = FileManager()
>>> filemanager.BASEDIR = 'basename'
>>> filemanager.projectdir = 'projectname'
>>> import os
>>> from hydpy import repr_, TestIO
>>> TestIO.clear()
>>> with TestIO():
... os.makedirs('projectname/basename/folder1')
... os.makedirs('projectname/basename/folder2')
... open('projectname/basename/folder3.zip', 'w').close()
... os.makedirs('projectname/basename/_folder4')
... open('projectname/basename/folder5.tar', 'w').close()
... filemanager.availabledirs # doctest: +ELLIPSIS
Folder2Path(folder1=.../projectname/basename/folder1,
folder2=.../projectname/basename/folder2,
folder3=.../projectname/basename/folder3.zip) |
380,668 | def _extract_secrets_from_file(self, f, filename):
try:
log.info("Checking file: %s", filename)
for results, plugin in self._results_accumulator(filename):
results.update(plugin.analyze(f, filename))
f.seek(0)
except UnicodeDecodeError:
log.warning("%s failed to load.", filename) | Extract secrets from a given file object.
:type f: File object
:type filename: string |
380,669 | def add_parent(self,node):
if not isinstance(node, (CondorDAGNode,CondorDAGManNode) ):
raise CondorDAGNodeError("Parent must be a CondorDAGNode or a CondorDAGManNode")
self.__parents.append(node) | Add a parent to this node. This node will not be executed until the
parent node has run successfully.
@param node: CondorDAGNode to add as a parent. |
380,670 | def get_all_children(self):
all_children = set()
for parent in self.children:
all_children.add(parent.item_id)
all_children |= parent.get_all_children()
return all_children | Return all children GO IDs. |
380,671 | def is_valid_catalog(self, catalog=None):
catalog = catalog or self
return validation.is_valid_catalog(catalog, validator=self.validator) | Validate that a `data.json` file complies with the defined schema.
Checks that the data.json has all required fields and that both the
required and the optional fields follow the structure defined in the
schema.
Args:
catalog (str or dict): Catalog (dict, JSON or XLSX) to be validated.
If not passed, this catalog is validated.
Returns:
bool: True if the data.json complies with the schema, otherwise False. |
380,672 | def projects(self):
result = set()
for todo in self._todos:
projects = todo.projects()
result = result.union(projects)
return result | Returns a set of all projects in this list. |
380,673 | def get_instance_status(self):
status_url = self._get_url()
res = self.rest_client.session.get(status_url)
_handle_http_errors(res)
return res.json() | Get the status of the instance for this Streaming Analytics service.
Returns:
dict: JSON response for the instance status operation. |
380,674 | def _retry(self, context, backoff):
if not hasattr(context, ):
context.count = 0
if self._should_retry(context):
backoff_interval = backoff(context)
context.count += 1
if self.retry_to_secondary:
self._set_next_host_location(context)
if hasattr(context.request.body, ):
if context.body_position is None:
return None
else:
try:
context.request.body.seek(context.body_position, SEEK_SET)
except UnsupportedOperation:
return None
return backoff_interval
return None | A function which determines whether and how to retry.
:param ~azure.storage.models.RetryContext context:
The retry context. This contains the request, response, and other data
which can be used to determine whether or not to retry.
:param function() backoff:
A function which returns the backoff time if a retry is to be performed.
:return:
An integer indicating how long to wait before retrying the request,
or None to indicate no retry should be performed.
:rtype: int or None |
380,675 | def recent(self, with_catalog=True, with_date=True):
kwd = {
: ,
: ,
: with_catalog,
: with_date,
}
self.render(,
kwd=kwd,
view=MPost.query_recent(num=20),
postrecs=MPost.query_recent(num=2),
format_date=tools.format_date,
userinfo=self.userinfo,
cfg=CMS_CFG, ) | List posts that recent edited. |
380,676 | def create(self, throw_on_exists=False):
if not throw_on_exists and self.exists():
return self
resp = self.r_session.put(self.database_url, params={
'partitioned': TYPE_CONVERTERS.get(bool)(self._partitioned)
})
if resp.status_code == 201 or resp.status_code == 202:
return self
raise CloudantDatabaseException(
resp.status_code, self.database_url, resp.text
) | Creates a database defined by the current database object, if it
does not already exist and raises a CloudantException if the operation
fails. If the database already exists then this method call is a no-op.
:param bool throw_on_exists: Boolean flag dictating whether or
not to throw a CloudantDatabaseException when attempting to
create a database that already exists.
:returns: The database object |
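A hedged usage sketch with the python-cloudant client; the credentials and database name below are placeholders:
from cloudant.client import Cloudant
from cloudant.database import CloudantDatabase

client = Cloudant("user", "password", url="https://account.cloudant.com", connect=True)
db = CloudantDatabase(client, "example-db")
db.create(throw_on_exists=False)  # no-op if the database already exists
client.disconnect()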
380,677 | def before(method_name):
def decorator(function):
@wraps(function)
def wrapper(self, *args, **kwargs):
returns = getattr(self, method_name)(*args, **kwargs)
if returns is None:
return function(self, *args, **kwargs)
else:
if isinstance(returns, HttpResponse):
return returns
else:
return function(self, *returns)
return wrapper
return decorator | Run the given method prior to the decorated view.
If you return anything besides ``None`` from the given method,
its return values will replace the arguments of the decorated
view.
If you return an instance of ``HttpResponse`` from the given method,
Respite will return it immediately without delegating the request to the
decorated view.
Example usage::
class ArticleViews(Views):
@before('_load')
def show(self, request, article):
return self._render(
request = request,
template = 'show',
context = {
'article': article
}
)
def _load(self, request, id):
try:
return request, Article.objects.get(id=id)
except Article.DoesNotExist:
return self._error(request, 404, message='The article could not be found.')
:param method: A string describing a class method. |
380,678 | def to_report_json(self):
return self.reporter.json(self.n_lines, self.n_assocs, self.skipped) | Generate a summary in json format |
380,679 | def execute(self, fn, *args, **kwargs):
if self.in_executor_context():
corofn = asyncio.coroutine(lambda: fn(*args, **kwargs))
return corofn()
future = self.submit(fn, *args, **kwargs)
return future.result() | Execute an operation and return the result. |
380,680 | def _gen_exclusion_paths():
yield
yield
yield
if not hasattr(imp, 'get_tag'):
return
base = os.path.join(, + imp.get_tag())
yield base +
yield base +
yield base +
yield base + | Generate file paths to be excluded for namespace packages (bytecode
cache files). |
380,681 | def autoencoder_residual_text():
hparams = autoencoder_residual()
hparams.bottleneck_bits = 32
hparams.batch_size = 1024
hparams.hidden_size = 64
hparams.max_hidden_size = 512
hparams.bottleneck_noise = 0.0
hparams.bottom = {
"inputs": modalities.identity_bottom,
"targets": modalities.identity_bottom,
}
hparams.top = {
"targets": modalities.identity_top,
}
hparams.autoregressive_mode = "none"
hparams.sample_width = 1
return hparams | Residual autoencoder model for text. |
380,682 | def setDesigns(self, F, A):
F = to_list(F)
A = to_list(A)
assert len(A) == len(F),
n_terms = len(F)
n_covs = 0
k = 0
l = 0
for ti in range(n_terms):
assert F[ti].shape[0] == self._N,
assert A[ti].shape[1] == self._P,
n_covs += F[ti].shape[1] * A[ti].shape[0]
k += F[ti].shape[1]
l += A[ti].shape[0]
self._n_terms = n_terms
self._n_covs = n_covs
self._k = k
self._l = l
self._F = F
self._A = A
self._b = sp.zeros((n_covs, 1))
self.clear_cache(, , )
self._notify()
self._notify() | set fixed effect designs |
380,683 | def create_mod_site(self, mc):
site_name = get_mod_site_name(mc)
(unmod_site_state, mod_site_state) = states[mc.mod_type]
self.create_site(site_name, (unmod_site_state, mod_site_state))
site_anns = [Annotation((site_name, mod_site_state), mc.mod_type,
)]
if mc.residue:
site_anns.append(Annotation(site_name, mc.residue, ))
if mc.position:
site_anns.append(Annotation(site_name, mc.position, ))
self.site_annotations += site_anns | Create modification site for the BaseAgent from a ModCondition. |
380,684 | def shortname(inputid):
parsed_id = urllib.parse.urlparse(inputid)
if parsed_id.fragment:
return parsed_id.fragment.split(u"/")[-1]
return parsed_id.path.split(u"/")[-1] | Returns the last segment of the provided fragment or path. |
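Doctest-style illustration of the fragment-versus-path behaviour (the IDs are made up):
>>> shortname("file:///tmp/workdir/inputs.json#step/output_file")
'output_file'
>>> shortname("file:///tmp/workdir/inputs.json")
'inputs.json'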
380,685 | def maps_get_rules_output_rules_value(self, **kwargs):
config = ET.Element("config")
maps_get_rules = ET.Element("maps_get_rules")
config = maps_get_rules
output = ET.SubElement(maps_get_rules, "output")
rules = ET.SubElement(output, "rules")
value = ET.SubElement(rules, "value")
value.text = kwargs.pop('value')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
380,686 | def cross_validation(scheme_class, num_examples, num_folds, strict=True,
**kwargs):
if strict and num_examples % num_folds != 0:
raise ValueError(("{} examples are not divisible in {} evenly-sized " +
"folds. To allow this, have a look at the " +
"`strict` argument.").format(num_examples,
num_folds))
for i in xrange(num_folds):
begin = num_examples * i // num_folds
end = num_examples * (i+1) // num_folds
train = scheme_class(list(chain(xrange(0, begin),
xrange(end, num_examples))),
**kwargs)
valid = scheme_class(xrange(begin, end), **kwargs)
if strict:
yield (train, valid)
else:
yield (train, valid, end - begin) | Return pairs of schemes to be used for cross-validation.
Parameters
----------
scheme_class : subclass of :class:`IndexScheme` or :class:`BatchScheme`
The type of the returned schemes. The constructor is called with an
iterator and `**kwargs` as arguments.
num_examples : int
The number of examples in the datastream.
num_folds : int
The number of folds to return.
strict : bool, optional
If `True`, enforce that `num_examples` is divisible by `num_folds`
and so, that all validation sets have the same size. If `False`,
the size of the validation set is returned along the iteration
schemes. Defaults to `True`.
Yields
------
fold : tuple
The generator returns `num_folds` tuples. The first two elements of
the tuple are the training and validation iteration schemes. If
`strict` is set to `False`, the tuple has a third element
corresponding to the size of the validation set. |
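A hedged usage sketch assuming Fuel's `ShuffledScheme`; any `BatchScheme` subclass whose constructor accepts an iterable of indices plus keyword arguments works the same way:
from fuel.schemes import ShuffledScheme

for train_scheme, valid_scheme in cross_validation(
        ShuffledScheme, num_examples=100, num_folds=5, batch_size=10):
    # each fold holds 80 training and 20 validation examples
    train_batches = list(train_scheme.get_request_iterator())
    valid_batches = list(valid_scheme.get_request_iterator())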
380,687 | def request_response(self):
a, b, e = self.pmm[3] & 7, self.pmm[3] >> 3 & 7, self.pmm[3] >> 6
timeout = 302E-6 * (b + 1 + a + 1) * 4**e
data = self.send_cmd_recv_rsp(0x04, b'', timeout, check_status=False)  # the command carries no payload
if len(data) != 1:
log.debug("insufficient data received from tag")
raise tt3.Type3TagCommandError(tt3.DATA_SIZE_ERROR)
return data[0] | Verify that a card is still present and get its operating mode.
The Request Response command returns the current operating
state of the card. The operating state changes with the
authentication process, a card is in Mode 0 after power-up or
a Polling command, transitions to Mode 1 with Authentication1,
to Mode 2 with Authentication2, and Mode 3 with any of the
card issuance commands. The :meth:`request_response` method
returns the mode as an integer.
Command execution errors raise
:exc:`~nfc.tag.TagCommandError`. |
380,688 | def compute_batch(self, duplicate_manager=None, context_manager=None):
from ...acquisitions import AcquisitionLP
assert isinstance(self.acquisition, AcquisitionLP)
self.acquisition.update_batches(None,None,None)
X_batch = self.acquisition.optimize()[0]
k=1
if self.batch_size >1:
L = estimate_L(self.acquisition.model.model,self.acquisition.space.get_bounds())
Min = self.acquisition.model.model.Y.min()
while k<self.batch_size:
self.acquisition.update_batches(X_batch,L,Min)
new_sample = self.acquisition.optimize()[0]
X_batch = np.vstack((X_batch,new_sample))
k +=1
self.acquisition.update_batches(None,None,None)
return X_batch | Computes the elements of the batch sequentially by penalizing the acquisition. |
380,689 | def filter_incomplete_spectra(self, flimit=1000, percAccept=85):
assert percAccept > 0 and percAccept < 100
def _retain_only_complete_spectra(item, fmax, acceptN):
frequencies = item['frequency'].loc[item['frequency'] < fmax]
fN = frequencies.size
if fN >= acceptN:
return True
return False
group_abmn = self.data.groupby(['a', 'b', 'm', 'n'])
frequencies = np.array(
list(sorted(self.data.groupby('frequency').groups.keys()))
)
assert flimit >= frequencies.min() and flimit <= frequencies.max()
Nlimit = len(np.where(frequencies <= flimit)[0])
Naccept = np.ceil(Nlimit * percAccept / 100.0)
self.data = group_abmn.filter(
_retain_only_complete_spectra, fmax=flimit, acceptN=Naccept
).copy() | Remove all data points that belong to spectra that did not retain at
least **percAccept** percent of the number of data points.
.. warning::
This function does not honor additional dimensions (e.g.,
timesteps) yet! |
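The heavy lifting is pandas' `groupby(...).filter(...)`; a self-contained sketch of the same pattern on made-up data (the column names here are illustrative only):
import pandas as pd

df = pd.DataFrame({
    "abmn": [1, 1, 1, 2],                # one group per electrode configuration
    "frequency": [0.1, 1.0, 10.0, 0.1],  # spectrum support per group
})
# keep only configurations with at least 3 frequencies below 100 Hz
kept = df.groupby("abmn").filter(
    lambda item: (item["frequency"] < 100).sum() >= 3
)
print(kept)  # the single-frequency group 2 is dropped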
380,690 | def ExportClientsByKeywords(keywords, filename, token=None):
index = client_index.CreateClientIndex(token=token)
client_list = index.LookupClients(keywords)
logging.info("found %d clients", len(client_list))
if not client_list:
return
writer = csv.DictWriter([
u"client_id",
u"hostname",
u"last_seen",
u"os",
u"os_release",
u"os_version",
u"users",
u"ips",
u"macs",
])
writer.WriteHeader()
for client in aff4.FACTORY.MultiOpen(client_list, token=token):
s = client.Schema
writer.WriteRow({
u"client_id": client.urn.Basename(),
u"hostname": client.Get(s.HOSTNAME),
u"os": client.Get(s.SYSTEM),
u"os_release": client.Get(s.OS_RELEASE),
u"os_version": client.Get(s.OS_VERSION),
u"ips": client.Get(s.HOST_IPS),
u"macs": client.Get(s.MAC_ADDRESS),
u"users": "\n".join(client.Get(s.USERNAMES, [])),
u"last_seen": client.Get(s.PING),
})
with io.open(filename, "w") as csv_out:
csv_out.write(writer.Content()) | A script to export client summaries selected by a keyword search.
This script does a client search for machines matching all of the keywords and
writes a .csv summary of the results to filename. Multi-value fields are '\n'
separated.
Args:
keywords: a list of keywords to search for
filename: the name of the file to write to, will be replaced if already
present
token: datastore token. |
380,691 | def on_exception(wait_gen,
exception,
max_tries=None,
jitter=full_jitter,
giveup=lambda e: False,
on_success=None,
on_backoff=None,
on_giveup=None,
**wait_gen_kwargs):
success_hdlrs = _handlers(on_success)
backoff_hdlrs = _handlers(on_backoff, _log_backoff)
giveup_hdlrs = _handlers(on_giveup, _log_giveup)
def decorate(target):
@functools.wraps(target)
def retry(*args, **kwargs):
return ret
return retry
return decorate | Returns decorator for backoff and retry triggered by exception.
Args:
wait_gen: A generator yielding successive wait times in
seconds.
exception: An exception type (or tuple of types) which triggers
backoff.
max_tries: The maximum number of attempts to make before giving
up. Once exhausted, the exception will be allowed to escape.
The default value of None means there is no limit to the
number of tries. If a callable is passed, it will be
evaluated at runtime and its return value used.
jitter: A function of the value yielded by wait_gen returning
the actual time to wait. This distributes wait times
stochastically in order to avoid timing collisions across
concurrent clients. Wait times are jittered by default
using the full_jitter function. Jittering may be disabled
altogether by passing jitter=None.
giveup: Function accepting an exception instance and
returning whether or not to give up. Optional. The default
is to always continue.
on_success: Callable (or iterable of callables) with a unary
signature to be called in the event of success. The
parameter is a dict containing details about the invocation.
on_backoff: Callable (or iterable of callables) with a unary
signature to be called in the event of a backoff. The
parameter is a dict containing details about the invocation.
on_giveup: Callable (or iterable of callables) with a unary
signature to be called in the event that max_tries
is exceeded. The parameter is a dict containing details
about the invocation.
**wait_gen_kwargs: Any additional keyword args specified will be
passed to wait_gen when it is initialized. Any callable
args will first be evaluated and their return values passed.
This is useful for runtime configuration. |
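This decorator's signature matches the public `backoff` package; its canonical usage looks like this (the `requests` call is only an illustration):
import backoff
import requests

@backoff.on_exception(backoff.expo,
                      requests.exceptions.RequestException,
                      max_tries=8)
def get_url(url):
    return requests.get(url)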
380,692 | def randbetween(lower: int, upper: int) -> int:
if not isinstance(lower, int) or not isinstance(upper, int):
raise TypeError()
if lower < 0 or upper <= 0:
raise ValueError()
return randbelow(upper - lower + 1) + lower | Return a random int in the range [lower, upper].
Raises ValueError if lower is negative or upper is not positive, and
TypeError if either bound is not an integer. |
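A quick sanity check, assuming `randbelow` behaves like `secrets.randbelow` (uniform over [0, n)):
>>> roll = randbetween(1, 6)  # simulate a fair die roll
>>> 1 <= roll <= 6
True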
380,693 | def read_frames(self, nframes, dtype=np.float64):
return self._sndfile.read_frames(nframes, dtype) | Read nframes frames of the file.
:Parameters:
nframes : int
number of frames to read.
dtype : numpy dtype
dtype of the returned array containing read data (see note).
Notes
-----
- read_frames updates the read pointer.
- One column is one channel (one row per channel after 0.9)
- if floats are requested when the file contains integer data, you will
get normalized data (that is, the maximum possible integer maps to 1.0,
and the minimum possible value to -1.0).
- if integers are requested when the file contains floating point data,
the results may be wrong because of an ambiguity: if the floating point
data are normalized, you can end up with an array of only zeros!
Getting integer data from files encoded in normalized floating point
is not supported yet (libsndfile itself supports it).
380,694 | def restore_catalog_to_ckan(catalog, origin_portal_url, destination_portal_url,
apikey, download_strategy=None,
generate_new_access_url=None):
catalog[] = catalog.get() or origin_portal_url
res = {}
origin_portal = RemoteCKAN(origin_portal_url)
try:
org_list = origin_portal.action.organization_list()
except CKANAPIError as e:
logger.exception(
.format(origin_portal_url, str(e)))
print(e)
return res
for org in org_list:
print("Restaurando organizacion {}".format(org))
response = origin_portal.action.organization_show(
id=org, include_datasets=True)
datasets = [package[] for package in response[]]
pushed_datasets = restore_organization_to_ckan(
catalog, org, destination_portal_url, apikey,
dataset_list=datasets, download_strategy=download_strategy,
generate_new_access_url=generate_new_access_url
)
res[org] = pushed_datasets
return res | Restaura los datasets de un catálogo original al portal pasado
por parámetro. Si hay temas presentes en el DataJson que no están en
el portal de CKAN, los genera.
Args:
catalog (DataJson): El catálogo de origen que se restaura.
origin_portal_url (str): La URL del portal CKAN de origen.
destination_portal_url (str): La URL del portal CKAN de
destino.
apikey (str): La apikey de un usuario con los permisos que le
permitan crear o actualizar el dataset.
download_strategy(callable): Una función
(catálogo, distribución)-> bool. Sobre las distribuciones
que evalúa True, descarga el recurso en el downloadURL y lo
sube al portal de destino. Por default no sube ninguna
distribución.
generate_new_access_url(list): Se pasan los ids de las
distribuciones cuyo accessURL se regenerar en el portal de
destino. Para el resto, el portal debe mantiene el valor
pasado en el DataJson.
Returns:
dict: Diccionario con key organización y value la lista de ids
de datasets subidos a esa organización |
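A hedged sketch of a typical call; the portal URLs and API key are placeholders:
from pydatajson import DataJson

catalog = DataJson("origin-data.json")
result = restore_catalog_to_ckan(
    catalog,
    origin_portal_url="http://origin.portal.example",
    destination_portal_url="http://destination.portal.example",
    apikey="my-ckan-api-key",
)
# result maps each organization id to the list of restored dataset ids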
380,695 | def set_uint_info(self, field, data):
_check_call(_LIB.XGDMatrixSetUIntInfo(self.handle,
c_str(field),
c_array(ctypes.c_uint, data),
len(data))) | Set uint type property into the DMatrix.
Parameters
----------
field: str
The field name of the information
data: numpy array
The array of data to be set |
380,696 | def resizeEvent(self, event):
LOGGER.debug("> Application resize event accepted!")
self.size_changed.emit(event)
event.accept() | Reimplements the :meth:`QWidget.resizeEvent` method.
:param event: QEvent.
:type event: QEvent |
380,697 | def build(self, js_path):
super(Script, self).build()
self.source = js_path | :param js_path: Javascript source code. |
380,698 | def PublishMultipleEvents(cls, events, token=None):
event_name_map = registry.EventRegistry.EVENT_NAME_MAP
for event_name, messages in iteritems(events):
if not isinstance(event_name, string_types):
raise ValueError(
"Event names should be string, got: %s" % type(event_name))
for msg in messages:
if not isinstance(msg, rdfvalue.RDFValue):
raise ValueError("Can only publish RDFValue instances.")
for event_cls in event_name_map.get(event_name, []):
event_cls().ProcessMessages(messages, token=token) | Publishes multiple messages at once.
Args:
events: A dict with keys being event names and values being lists of
messages.
token: ACL token.
Raises:
ValueError: If the message is invalid. The message must be a Semantic
Value (instance of RDFValue) or a full GrrMessage. |
380,699 | def isinstance(self, instance, class_name):
if isinstance(instance, BaseNode):
klass = self.dynamic_node_classes.get(class_name, None)
if klass:
return isinstance(instance, klass)
return False
else:
raise TypeError("This function can only be used for BaseNode objects") | Check if a BaseNode is an instance of a registered dynamic class |