Unnamed: 0 (int64, 0-389k) | code (string, 26-79.6k chars) | docstring (string, 1-46.9k chars) |
---|---|---|
387,200 | def get_reports(self):
url = .format(self.url)
return Report._get_list_from_url(url, append_base_url=False) | Retrieve all reports submitted for this Sample.
:return: A list of :class:`.Report` |
387,201 | def norm(self, valu):
func = self._type_norms.get(type(valu))
if func is None:
raise s_exc.NoSuchFunc(name=self.name, mesg= % (type(valu),))
return func(valu) | Normalize the value for a given type.
Args:
valu (obj): The value to normalize.
Returns:
((obj,dict)): The normalized valu, info tuple.
Notes:
The info dictionary uses the following key conventions:
subs (dict): The normalized sub-fields as name: valu entries. |
387,202 | def getFloat(self, name, default=0.0, parent_search=False, multikeys_search=False):
try:
value = self.get(name, default, parent_search, multikeys_search)
return float(value)
except:
return default | Retrieve an element as a float. |
387,203 | def circleconvert(amount, currentformat, newformat):
if currentformat.lower() == newformat.lower():
return amount
if currentformat.lower() == "radius":
if newformat.lower() == "diameter":
return amount * 2
elif newformat.lower() == "circumference":
return amount * 2 * math.pi
raise ValueError("Invalid new format provided.")
elif currentformat.lower() == "diameter":
if newformat.lower() == "radius":
return amount / 2
elif newformat.lower() == "circumference":
return amount * math.pi
raise ValueError("Invalid new format provided.")
elif currentformat.lower() == "circumference":
if newformat.lower() == "radius":
return amount / math.pi / 2
elif newformat.lower() == "diameter":
return amount / math.pi | Convert a circle measurement.
:type amount: number
:param amount: The number to convert.
:type currentformat: string
:param currentformat: The format of the provided value.
:type newformat: string
:param newformat: The intended format of the value.
>>> circleconvert(45, "radius", "diameter")
90 |
387,204 | def _ftp_pwd(self):
try:
return self.ftp.pwd()
except UnicodeEncodeError:
if compat.PY2 or self.ftp.encoding != "utf-8":
raise
prev_encoding = self.ftp.encoding
try:
write("ftp.pwd() failed with utf-8: trying Cp1252...", warning=True)
self.ftp.encoding = "cp1252"
return self.ftp.pwd()
finally:
self.ftp.encoding = prev_encoding | Variant of `self.ftp.pwd()` that supports encoding-fallback.
Returns:
Current working directory as native string. |
387,205 | def name(self):
if not self._name:
self._name = self._alternatives[self._choice][0]
return self._name | :return:
A unicode string of the field name of the chosen alternative |
387,206 | def dir2fn(ofn, ifn, suffix) -> Union[None, Path]:
if not ofn:
return None
ofn = Path(ofn).expanduser()
ifn = Path(ifn).expanduser()
assert ifn.is_file()
if ofn.suffix == suffix:
pass
else:
assert ofn.is_dir(), f
ofn = ofn / ifn.with_suffix(suffix).name
try:
assert not ofn.samefile(ifn), f
except FileNotFoundError:
pass
return ofn | ofn = filename or output directory, to create filename based on ifn
ifn = input filename (don't overwrite!)
suffix = desired file extension e.g. .h5 |
387,207 | def get_static(root=None):
ret = set()
out = __salt__['cmd.run'](
_systemctl_cmd('--full --no-legend --no-pager list-unit-files',
root=root),
python_shell=False,
ignore_retcode=True)
for line in salt.utils.itertools.split(out, '\n'):
try:
fullname, unit_state = line.strip().split(None, 1)
except ValueError:
continue
else:
if unit_state != 'static':
continue
try:
unit_name, unit_type = fullname.rsplit('.', 1)
except ValueError:
continue
if unit_type in VALID_UNIT_TYPES:
ret.add(unit_name if unit_type == 'service' else fullname)
return sorted(ret) | .. versionadded:: 2015.8.5
Return a list of all static services
root
Enable/disable/mask unit files in the specified root directory
CLI Example:
.. code-block:: bash
salt '*' service.get_static |
387,208 | def write_vtu(Verts, Cells, pdata=None, pvdata=None, cdata=None, cvdata=None,
fname=):
vtk_cell_info = [-1, 1, None, 2, None, 3, None, None, 4, 4, 4, 8, 8, 6, 5]
if isinstance(fname, str):
try:
fname = open(fname, 'w')
except IOError as e:
print(".vtu error (%s): %s" % (e.errno, e.strerror))
else:
raise ValueError()
for key in Cells:
if ((not isinstance(key, int)) or (key not in list(range(1, 15)))):
raise ValueError()
if (vtk_cell_info[key] is None) and (Cells[key] is not None):
raise NotImplementedError()
if Cells[key] is None:
raise ValueError( % (key))
if np.ndim(Cells[key]) != 2:
Cells[key] = Cells[key].reshape((Cells[key].size, 1))
if vtk_cell_info[key] != Cells[key].shape[1]:
raise ValueError( %
(Cells[key].shape[1], vtk_cell_info[key]))
n_pdata = 0
if pdata is not None:
if np.ndim(pdata) > 1:
n_pdata = pdata.shape[1]
else:
n_pdata = 1
pdata = pdata.reshape((pdata.size, 1))
if pdata.shape[0] != Ndof:
raise ValueError( %
(Ndof, pdata.shape[0]))
n_pvdata = 0
if pvdata is not None:
if np.ndim(pvdata) > 1:
n_pvdata = pvdata.shape[1]
else:
n_pvdata = 1
pvdata = pvdata.reshape((pvdata.size, 1))
if pvdata.shape[0] != 3*Ndof:
raise ValueError( % (Ndof*3, pvdata.shape[0]))
n_cdata = 0
if cdata is not None:
for key in Cells:
if np.ndim(cdata[key]) > 1:
if n_cdata == 0:
n_cdata = cdata[key].shape[1]
elif n_cdata != cdata[key].shape[1]:
raise ValueError()
else:
n_cdata = 1
cdata[key] = cdata[key].reshape((cdata[key].size, 1))
if cdata[key].shape[0] != Cells[key].shape[0]:
raise ValueError( %
(cdata[key].shape[0], Cells[key].shape[0]))
if cdata[key] is None:
raise ValueError( %
(key))
n_cvdata = 0
if cvdata is not None:
for key in Cells:
if np.ndim(cvdata[key]) > 1:
if n_cvdata == 0:
n_cvdata = cvdata[key].shape[1]
elif n_cvdata != cvdata[key].shape[1]:
raise ValueError()
else:
n_cvdata = 1
cvdata[key] = cvdata[key].reshape((cvdata[key].size, 1))
if cvdata[key].shape[0] != 3*Cells[key].shape[0]:
raise ValueError()
if cvdata[key] is None:
raise ValueError( %
(key))
Ncells = 0
cell_ind = []
cell_offset = []
cell_type = []
cdata_all = None
cvdata_all = None
for key in Cells:
sz = Cells[key].shape[0]
offset = Cells[key].shape[1]
Ncells += sz
uu = np.ones((sz,), dtype=)
cell_ind = np.hstack((cell_ind, Cells[key].ravel()))
cell_offset = np.hstack((cell_offset, offset*uu))
cell_type = np.hstack((cell_type, key*uu))
if cdata is not None:
if cdata_all is None:
cdata_all = cdata[key]
else:
cdata_all = np.vstack((cdata_all, cdata[key]))
if cvdata is not None:
if cvdata_all is None:
cvdata_all = cvdata[key]
else:
cvdata_all = np.vstack((cvdata_all, cvdata[key]))
doc = xml.dom.minidom.Document()
root = doc.createElementNS(, )
d = {: , : ,
: }
set_attributes(d, root)
grid = doc.createElementNS(, )
piece = doc.createElementNS(, )
d = {: str(Ndof), : str(Ncells)}
set_attributes(d, piece)
points = doc.createElementNS(, )
points_data = doc.createElementNS(, )
d = {: , : , : ,
: }
set_attributes(d, points_data)
points_data_str = doc.createTextNode(a2s(Verts))
cells = doc.createElementNS(, )
cells_data = doc.createElementNS(, )
d = {: , : , : }
set_attributes(d, cells_data)
cells_data_str = doc.createTextNode(a2s(cell_ind))
cells_offset_data = doc.createElementNS(, )
d = {: , : , : }
set_attributes(d, cells_offset_data)
cells_offset_data_str = doc.createTextNode(a2s(cell_offset.cumsum()))
cells_type_data = doc.createElementNS(, )
d = {: , : , : }
set_attributes(d, cells_type_data)
cells_type_data_str = doc.createTextNode(a2s(cell_type))
pointdata = doc.createElementNS(, )
pdata_obj = []
pdata_str = []
for i in range(0, n_pdata):
pdata_obj.append(doc.createElementNS(, ))
d = {: , : % (i),
: , : }
set_attributes(d, pdata_obj[i])
pdata_str.append(doc.createTextNode(a2s(pdata[:, i])))
pvdata_obj = []
pvdata_str = []
for i in range(0, n_pvdata):
pvdata_obj.append(doc.createElementNS(, ))
d = {: , : % (i),
: , : }
set_attributes(d, pvdata_obj[i])
pvdata_str.append(doc.createTextNode(a2s(pvdata[:, i])))
celldata = doc.createElementNS(, )
cdata_obj = []
cdata_str = []
for i in range(0, n_cdata):
cdata_obj.append(doc.createElementNS(, ))
d = {: , : % (i),
: , : }
set_attributes(d, cdata_obj[i])
cdata_str.append(doc.createTextNode(a2s(cdata_all[:, i])))
cvdata_obj = []
cvdata_str = []
for i in range(0, n_cvdata):
cvdata_obj.append(doc.createElementNS(, ))
d = {: , : % (i),
: , : }
set_attributes(d, cvdata_obj[i])
cvdata_str.append(doc.createTextNode(a2s(cvdata_all[:, i])))
doc.appendChild(root)
root.appendChild(grid)
grid.appendChild(piece)
piece.appendChild(points)
points.appendChild(points_data)
points_data.appendChild(points_data_str)
piece.appendChild(cells)
cells.appendChild(cells_data)
cells.appendChild(cells_offset_data)
cells.appendChild(cells_type_data)
cells_data.appendChild(cells_data_str)
cells_offset_data.appendChild(cells_offset_data_str)
cells_type_data.appendChild(cells_type_data_str)
piece.appendChild(pointdata)
for i in range(0, n_pdata):
pointdata.appendChild(pdata_obj[i])
pdata_obj[i].appendChild(pdata_str[i])
for i in range(0, n_pvdata):
pointdata.appendChild(pvdata_obj[i])
pvdata_obj[i].appendChild(pvdata_str[i])
piece.appendChild(celldata)
for i in range(0, n_cdata):
celldata.appendChild(cdata_obj[i])
cdata_obj[i].appendChild(cdata_str[i])
for i in range(0, n_cvdata):
celldata.appendChild(cvdata_obj[i])
cvdata_obj[i].appendChild(cvdata_str[i])
doc.writexml(fname, newl=)
fname.close() | Write a .vtu file in xml format.
Parameters
----------
fname : {string}
file to be written, e.g. 'mymesh.vtu'
Verts : {array}
Ndof x 3 (if 2, then expanded by 0)
list of (x,y,z) point coordinates
Cells : {dictionary}
Dictionary of with the keys
pdata : {array}
Ndof x Nfields array of scalar values for the vertices
pvdata : {array}
Nfields*3 x Ndof array of vector values for the vertices
cdata : {dictionary}
scalar valued cell data
cvdata : {dictionary}
vector valued cell data
Returns
-------
writes a .vtu file for use in Paraview
Notes
-----
- Poly data not supported
- Non-Poly data is stored in Numpy array: Ncell x vtk_cell_info
- Each I1 must be >=3
- pdata = Ndof x Nfields
- pvdata = 3*Ndof x Nfields
- cdata,cvdata = list of dictionaries in the form of Cells
===== =================== ============= ===
keys type n points dim
===== =================== ============= ===
1 VTK_VERTEX: 1 point 2d
2 VTK_POLY_VERTEX: n points 2d
3 VTK_LINE: 2 points 2d
4 VTK_POLY_LINE: n+1 points 2d
5 VTK_TRIANGLE: 3 points 2d
6 VTK_TRIANGLE_STRIP: n+2 points 2d
7 VTK_POLYGON: n points 2d
8 VTK_PIXEL: 4 points 2d
9 VTK_QUAD: 4 points 2d
10 VTK_TETRA: 4 points 3d
11 VTK_VOXEL: 8 points 3d
12 VTK_HEXAHEDRON: 8 points 3d
13 VTK_WEDGE: 6 points 3d
14 VTK_PYRAMID: 5 points 3d
===== =================== ============= ===
Examples
--------
>>> from pyamg.vis import write_vtu
>>> import numpy as np
>>> Verts = np.array([[0.0,0.0],
... [1.0,0.0],
... [2.0,0.0],
... [0.0,1.0],
... [1.0,1.0],
... [2.0,1.0],
... [0.0,2.0],
... [1.0,2.0],
... [2.0,2.0],
... [0.0,3.0],
... [1.0,3.0],
... [2.0,3.0]])
>>> E2V = np.array([[0,4,3],
... [0,1,4],
... [1,5,4],
... [1,2,5],
... [3,7,6],
... [3,4,7],
... [4,8,7],
... [4,5,8],
... [6,10,9],
... [6,7,10],
... [7,11,10],
... [7,8,11]])
>>> E2edge = np.array([[0,1]])
>>> E2point = np.array([2,3,4,5])
>>> Cells = {5:E2V,3:E2edge,1:E2point}
>>> pdata=np.ones((12,2))
>>> pvdata=np.ones((12*3,2))
>>> cdata={5:np.ones((12,2)),3:np.ones((1,2)),1:np.ones((4,2))}
>>> cvdata={5:np.ones((3*12,2)),3:np.ones((3*1,2)),
1:np.ones((3*4,2))}
>>> write_vtu(Verts=Verts, Cells=Cells, fname='test.vtu')
See Also
--------
write_mesh |
387,209 | def get_object(self, url, month_format='%b', day_format='%d'):
params = self.get_params(url)
try:
year = params[self._meta.year_part]
month = params[self._meta.month_part]
day = params[self._meta.day_part]
except KeyError:
try:
year, month, day = params['year'], params['month'], params['day']
except KeyError:
raise OEmbedException()
try:
tt = time.strptime( % (year, month, day),
% (, month_format, day_format))
date = datetime.date(*tt[:3])
except ValueError:
raise OEmbedException( % url)
if isinstance(self._meta.model._meta.get_field(self._meta.date_field), DateTimeField):
min_date = datetime.datetime.combine(date, datetime.time.min)
max_date = datetime.datetime.combine(date, datetime.time.max)
query = {'%s__range' % self._meta.date_field: (min_date, max_date)}
else:
query = {self._meta.date_field: date}
for key, value in self._meta.fields_to_match.iteritems():
try:
query[value] = params[key]
except KeyError:
raise OEmbedException( % (key, .join(params.keys())))
try:
obj = self.get_queryset().get(**query)
except self._meta.model.DoesNotExist:
raise OEmbedException()
return obj | Parses the date from a url and uses it in the query. For objects which
are unique for date. |
387,210 | def server_inspect_exception(self, req_event, rep_event, task_ctx, exc_info):
if self._hide_zerorpc_frames:
traceback = exc_info[2]
while traceback:
zerorpc_frame = traceback.tb_frame
zerorpc_frame.f_locals["__traceback_hide__"] = True
frame_info = inspect.getframeinfo(zerorpc_frame)
if frame_info.function == "__call__" or frame_info.function == "_receiver":
break
traceback = traceback.tb_next
self._elasticapm_client.capture_exception(exc_info, extra=task_ctx, handled=False) | Called when an exception has been raised in the code run by ZeroRPC |
387,211 | def close(self):
if self.delegate:
return self._close()
future = self._framework.get_future(self.get_io_loop())
future.set_result(None)
return future | Close this change stream.
Stops any "async for" loops using this change stream. |
387,212 | def get_location(dom, location):
node = dom.documentElement
for i in location:
node = get_child(node, i)
if not node:
raise ValueError( % location)
return node | Get the node at the specified location in the dom.
Location is a sequence of child indices, starting at the children of the
root element. If there is no node at this location, raise a ValueError. |
387,213 | def allele_reads_from_locus_reads(locus_reads, n_ref):
for locus_read in locus_reads:
allele_read = AlleleRead.from_locus_read(locus_read, n_ref)
if allele_read is None:
continue
else:
yield allele_read | Given a collection of LocusRead objects, returns a
list of AlleleRead objects
(which are split into prefix/allele/suffix nucleotide strings).
Parameters
----------
locus_reads : sequence of LocusRead records
n_ref : int
Number of reference nucleotides affected by variant.
Generates AlleleRead objects. |
387,214 | def _compensate_pressure(self, adc_p):
var_1 = (self._temp_fine / 2.0) - 64000.0
var_2 = ((var_1 / 4.0) * (var_1 / 4.0)) / 2048
var_2 *= self._calibration_p[5]
var_2 += ((var_1 * self._calibration_p[4]) * 2.0)
var_2 = (var_2 / 4.0) + (self._calibration_p[3] * 65536.0)
var_1 = (((self._calibration_p[2]
* (((var_1 / 4.0) * (var_1 / 4.0)) / 8192)) / 8)
+ ((self._calibration_p[1] * var_1) / 2.0))
var_1 /= 262144
var_1 = ((32768 + var_1) * self._calibration_p[0]) / 32768
if var_1 == 0:
return 0
pressure = ((1048576 - adc_p) - (var_2 / 4096)) * 3125
if pressure < 0x80000000:
pressure = (pressure * 2.0) / var_1
else:
pressure = (pressure / var_1) * 2
var_1 = (self._calibration_p[8]
* (((pressure / 8.0) * (pressure / 8.0)) / 8192.0)) / 4096
var_2 = ((pressure / 4.0) * self._calibration_p[7]) / 8192.0
pressure += ((var_1 + var_2 + self._calibration_p[6]) / 16.0)
return pressure / 100 | Compensate pressure.
Formula from datasheet Bosch BME280 Environmental sensor.
8.1 Compensation formulas in double precision floating point
Edition BST-BME280-DS001-10 | Revision 1.1 | May 2015. |
387,215 | def split_sentences(tokens):
begin = 0
for i, token in enumerate(tokens):
if is_end_of_sentence(tokens, i):
yield tokens[begin:i+1]
begin = i+1
if begin <= len(tokens)-1:
yield tokens[begin:] | Split sentences (based on tokenised data), returns sentences as a list of lists of tokens, each sentence is a list of tokens |
387,216 | def editors(self):
lay = self.layout()
return [lay.itemAt(i).widget() for i in range(lay.count())] | Returns the editors that are associated with this edit.
:return [<XLineEdit>, ..] |
387,217 | def action_spatial(self, action):
if self.surf.surf_type & SurfType.FEATURE:
return action.action_feature_layer
elif self.surf.surf_type & SurfType.RGB:
return action.action_render
else:
assert self.surf.surf_type & (SurfType.RGB | SurfType.FEATURE) | Given an Action, return the right spatial action. |
387,218 | def connect(self):
if not self.connected():
self._ws = create_connection(self.WS_URI)
message = {
:self.WS_TYPE,
:self.WS_PRODUCT_ID
}
self._ws.send(dumps(message))
with self._lock:
if not self._thread:
thread = Thread(target=self._keep_alive_thread, args=[])
thread.start() | Connects and subscribes to the WebSocket Feed. |
387,219 | def add_value(self, value, index_point):
if index_point not in self.index:
self.values.append(value)
self.index.append(index_point) | Adds a new value at the provided index point. If the index point does not already exist, the value and the index point are appended. |
387,220 | def ensure(assertion, message=None):
message = message or assertion
if not assertion:
raise AssertionError(message)
return True | Checks an assertion argument for truth-ness. Will return ``True`` or
explicitly raise ``AssertionError``. This is to deal with environments
using ``python -O`` or ``PYTHONOPTIMIZE=``.
:param assertion: some value to evaluate for truth-ness
:param message: optional message used for raising AssertionError |
387,221 | def bulk_overwrite(self, entities_and_kinds):
EntityGroupMembership.objects.filter(entity_group=self).delete()
return self.bulk_add_entities(entities_and_kinds) | Update the group to the given entities and sub-entity groups.
After this operation, the only members of this EntityGroup
will be the given entities, and sub-entity groups.
:type entities_and_kinds: List of (Entity, EntityKind) pairs.
:param entities_and_kinds: A list of entity, entity-kind pairs
to set to the EntityGroup. In the pairs the entity-kind
can be ``None``, to add a single entity, or some entity
kind to add all sub-entities of that kind. |
387,222 | def setCodecPreferences(self, codecs):
if not codecs:
self._preferred_codecs = []
capabilities = get_capabilities(self.kind).codecs
unique = []
for codec in reversed(codecs):
if codec not in capabilities:
raise ValueError()
if codec not in unique:
unique.insert(0, codec)
self._preferred_codecs = unique | Override the default codec preferences.
See :meth:`RTCRtpSender.getCapabilities` and :meth:`RTCRtpReceiver.getCapabilities`
for the supported codecs.
:param: codecs: A list of :class:`RTCRtpCodecCapability`, in decreasing order
of preference. If empty, restores the default preferences. |
387,223 | def compute_acf(cls, filename, start_index=None, end_index=None,
per_walker=False, walkers=None, parameters=None,
temps=None):
acfs = {}
with cls._io(filename, 'r') as fp:
if parameters is None:
parameters = fp.variable_params
if isinstance(parameters, str) or isinstance(parameters, unicode):
parameters = [parameters]
if isinstance(temps, int):
temps = [temps]
elif temps == 'all':
temps = numpy.arange(fp.ntemps)
elif temps is None:
temps = [0]
for param in parameters:
subacfs = []
for tk in temps:
if per_walker:
if walkers is None:
walkers = numpy.arange(fp.nwalkers)
arrays = [cls.compute_acfs(filename,
start_index=start_index,
end_index=end_index,
per_walker=False,
walkers=ii,
parameters=param,
temps=tk)[param][0, :]
for ii in walkers]
subacfs.append(numpy.vstack(arrays))
else:
samples = fp.read_raw_samples(
param, thin_start=start_index,
thin_interval=1, thin_end=end_index,
walkers=walkers, temps=tk, flatten=False)[param]
samples = samples.mean(axis=1)[0, :]
thisacf = autocorrelation.calculate_acf(
samples).numpy()
subacfs.append(thisacf)
acfs[param] = numpy.stack(subacfs)
return acfs | Computes the autocorrelation function of the model params in the
given file.
By default, parameter values are averaged over all walkers at each
iteration. The ACF is then calculated over the averaged chain for each
temperature. An ACF per-walker will be returned instead if
``per_walker=True``.
Parameters
-----------
filename : str
Name of a samples file to compute ACFs for.
start_index : {None, int}
The start index to compute the acl from. If None, will try to use
the number of burn-in iterations in the file; otherwise, will start
at the first sample.
end_index : {None, int}
The end index to compute the acl to. If None, will go to the end
of the current iteration.
per_walker : optional, bool
Return the ACF for each walker separately. Default is False.
walkers : optional, int or array
Calculate the ACF using only the given walkers. If None (the
default) all walkers will be used.
parameters : optional, str or array
Calculate the ACF for only the given parameters. If None (the
default) will calculate the ACF for all of the model params.
temps : optional, (list of) int or 'all'
The temperature index (or list of indices) to retrieve. If None
(the default), the ACF will only be computed for the coldest (= 0)
temperature chain. To compute an ACF for all temperatures pass 'all',
or a list of all of the temperatures.
Returns
-------
dict :
Dictionary of arrays giving the ACFs for each parameter. If
``per-walker`` is True, the arrays will have shape
``ntemps x nwalkers x niterations``. Otherwise, the returned array
will have shape ``ntemps x niterations``. |
387,224 | def _perturbation(self):
if self.P>1:
scales = []
for term_i in range(self.n_randEffs):
_scales = sp.randn(self.diag[term_i].shape[0])
if self.jitter[term_i]>0:
_scales = sp.concatenate((_scales,sp.zeros(1)))
scales.append(_scales)
scales = sp.concatenate(scales)
else:
scales = sp.randn(self.vd.getNumberScales())
return scales | Internal function for parameter initialization
Returns Gaussian perturbation |
387,225 | def _get_webapi_requests(self):
headers = {
:
,
:
,
:
,
:
,
:
,
:
,
:
}
NCloudBot.req.headers.update(headers)
return NCloudBot.req | Update headers of webapi for Requests. |
387,226 | def sim(
self,
src,
tar,
qval=1,
mode='winkler',
long_strings=False,
boost_threshold=0.7,
scaling_factor=0.1,
):
if mode == 'winkler':
if boost_threshold > 1 or boost_threshold < 0:
raise ValueError(
'Unsupported boost_threshold assignment; '
+ 'boost_threshold must be between 0 and 1.'
)
if scaling_factor > 0.25 or scaling_factor < 0:
raise ValueError(
'Unsupported scaling_factor assignment; '
+ 'scaling_factor must be between 0 and 0.25.'
)
if src == tar:
return 1.0
src = QGrams(src.strip(), qval)._ordered_list
tar = QGrams(tar.strip(), qval)._ordered_list
lens = len(src)
lent = len(tar)
if lens == 0 or lent == 0:
return 0.0
if lens > lent:
search_range = lens
minv = lent
else:
search_range = lent
minv = lens
src_flag = [0] * search_range
tar_flag = [0] * search_range
search_range = max(0, search_range // 2 - 1)
num_com = 0
yl1 = lent - 1
for i in range(lens):
low_lim = (i - search_range) if (i >= search_range) else 0
hi_lim = (i + search_range) if ((i + search_range) <= yl1) else yl1
for j in range(low_lim, hi_lim + 1):
if (tar_flag[j] == 0) and (tar[j] == src[i]):
tar_flag[j] = 1
src_flag[i] = 1
num_com += 1
break
if num_com == 0:
return 0.0
k = n_trans = 0
for i in range(lens):
if src_flag[i] != 0:
j = 0
for j in range(k, lent):
if tar_flag[j] != 0:
k = j + 1
break
if src[i] != tar[j]:
n_trans += 1
n_trans //= 2
weight = (
num_com / lens + num_com / lent + (num_com - n_trans) / num_com
)
weight /= 3.0
if mode == 'winkler' and weight > boost_threshold:
j = 4 if (minv >= 4) else minv
i = 0
while (i < j) and (src[i] == tar[i]):
i += 1
weight += i * scaling_factor * (1.0 - weight)
if (
long_strings
and (minv > 4)
and (num_com > i + 1)
and (2 * num_com >= minv + i)
):
weight += (1.0 - weight) * (
(num_com - i - 1) / (lens + lent - i * 2 + 2)
)
return weight | Return the Jaro or Jaro-Winkler similarity of two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
qval : int
The length of each q-gram (defaults to 1: character-wise matching)
mode : str
Indicates which variant of this distance metric to compute:
- ``winkler`` -- computes the Jaro-Winkler distance (default)
which increases the score for matches near the start of the
word
- ``jaro`` -- computes the Jaro distance
long_strings : bool
Set to True to "Increase the probability of a match when the number
of matched characters is large. This option allows for a little
more tolerance when the strings are large. It is not an appropriate
test when comparing fixed length fields such as phone and social
security numbers." (Used in 'winkler' mode only.)
boost_threshold : float
A value between 0 and 1, below which the Winkler boost is not
applied (defaults to 0.7). (Used in 'winkler' mode only.)
scaling_factor : float
A value between 0 and 0.25, indicating by how much to boost scores
for matching prefixes (defaults to 0.1). (Used in 'winkler' mode
only.)
Returns
-------
float
Jaro or Jaro-Winkler similarity
Raises
------
ValueError
Unsupported boost_threshold assignment; boost_threshold must be
between 0 and 1.
ValueError
Unsupported scaling_factor assignment; scaling_factor must be
between 0 and 0.25.'
Examples
--------
>>> round(sim_jaro_winkler('cat', 'hat'), 12)
0.777777777778
>>> round(sim_jaro_winkler('Niall', 'Neil'), 12)
0.805
>>> round(sim_jaro_winkler('aluminum', 'Catalan'), 12)
0.60119047619
>>> round(sim_jaro_winkler('ATCG', 'TAGC'), 12)
0.833333333333
>>> round(sim_jaro_winkler('cat', 'hat', mode='jaro'), 12)
0.777777777778
>>> round(sim_jaro_winkler('Niall', 'Neil', mode='jaro'), 12)
0.783333333333
>>> round(sim_jaro_winkler('aluminum', 'Catalan', mode='jaro'), 12)
0.60119047619
>>> round(sim_jaro_winkler('ATCG', 'TAGC', mode='jaro'), 12)
0.833333333333 |
387,227 | def make_filter(self, fieldname, query_func, expct_value):
s property based
on query_func eqneltltegtgtestartswithendswith{} {} {}val', query_func, expct_value)
return actual_filter | makes a filter that will be applied to an object's property based
on query_func |
387,228 | def translate(s, table, deletions=""):
if deletions or table is None:
return s.translate(table, deletions)
else:
return s.translate(table + s[:0]) | translate(s,table [,deletions]) -> string
Return a copy of the string s, where all characters occurring
in the optional argument deletions are removed, and the
remaining characters have been mapped through the given
translation table, which must be a string of length 256. The
deletions argument is not allowed for Unicode strings. |
387,229 | def get(aadb: str):
if (aadb):
cfg = Config()
value = cfg.get(ConfigKeys.asset_allocation_database_path)
click.echo(value)
if not aadb:
click.echo("Use --help for more information.") | Retrieves a value from config |
387,230 | def tpu_estimator_model_fn(model_type,
transformer_model,
model_dir,
use_tpu,
mesh_shape,
layout_rules,
batch_size,
sequence_length,
autostack,
metric_names):
def my_model_fn(features, labels, mode, params=None, config=None):
del labels, config
global_step = tf.train.get_global_step()
if use_tpu:
ctx = params["context"]
num_hosts = ctx.num_hosts
host_placement_fn = ctx.tpu_host_placement_function
device_list = [host_placement_fn(host_id=t) for t in range(num_hosts)]
replica_cache_size = 300 * 1000000
worker0_mem = replica_cache_size * ctx.num_replicas
devices_memeory_usage = [worker0_mem] + [0] * (num_hosts - 1)
var_placer = mtf.utils.BalancedVariablePlacer(device_list,
devices_memeory_usage)
mesh_devices = [""] * mesh_shape.size
mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl(
mesh_shape, layout_rules, mesh_devices, ctx.device_assignment)
else:
var_placer = None
mesh_devices = [""] * mesh_shape.size
mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl(
mesh_shape, layout_rules, mesh_devices)
graph = mtf.Graph()
mesh = mtf.Mesh(graph, "my_mesh", var_placer)
def _import_feature(key, allow_missing=False):
batch_dim = mtf.Dimension("batch", batch_size)
length_dim = mtf.Dimension("length", sequence_length)
mtf_shape = mtf.Shape([batch_dim, length_dim])
if key not in features:
if allow_missing:
return None
else:
raise ValueError(
"feature not found %s - features %s = " % (key, features))
tf.logging.info("Import feature %s: %s" % (key, features[key]))
x = tf.to_int32(features[key])
if not use_tpu:
x = tf.Print(
x, [x], "import feature %s" % key, summarize=1000, first_n=1)
return mtf.import_fully_replicated(mesh, x, mtf_shape, name=key)
if mode == tf.estimator.ModeKeys.PREDICT:
inputs = _import_feature("inputs")
if isinstance(transformer_model, transformer.Unitransformer):
mtf_samples = transformer_model.sample_autoregressive(
inputs, variable_dtype=get_variable_dtype())
elif isinstance(transformer_model, transformer.Bitransformer):
mtf_samples = transformer_model.decode(
inputs, variable_dtype=get_variable_dtype())
else:
raise ValueError("unrecognized class")
mtf_samples = mtf.anonymize(mtf_samples)
lowering = mtf.Lowering(graph, {mesh: mesh_impl}, autostack=autostack)
outputs = lowering.export_to_tf_tensor(mtf_samples)
predictions = {"outputs": outputs}
return tpu_estimator.TPUEstimatorSpec(
mode=tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
prediction_hooks=[mtf.MtfRestoreHook(lowering)])
targets = _import_feature("targets")
anon_targets = mtf.anonymize(targets)
if model_type == "lm":
_, length_dim = targets.shape
inputs = mtf.shift(targets, offset=1, dim=length_dim, wrap=False)
else:
inputs = _import_feature("inputs")
if mode == tf.estimator.ModeKeys.EVAL:
if isinstance(transformer_model, transformer.Unitransformer):
mtf_samples = transformer_model.sample_autoregressive(
inputs, variable_dtype=get_variable_dtype())
elif isinstance(transformer_model, transformer.Bitransformer):
mtf_samples = transformer_model.decode(
inputs, variable_dtype=get_variable_dtype())
else:
raise ValueError("unrecognized class")
mtf_samples = mtf.anonymize(mtf_samples)
lowering = mtf.Lowering(graph, {mesh: mesh_impl}, autostack=autostack)
outputs = lowering.export_to_tf_tensor(mtf_samples)
labels = lowering.export_to_tf_tensor(anon_targets)
restore_hook = mtf.MtfRestoreHook(lowering)
# NOTE: the construction of `eval_metrics` from `metric_names` was lost
# in extraction; the EVAL branch returns a TPUEstimatorSpec here.
return tpu_estimator.TPUEstimatorSpec(
tf.estimator.ModeKeys.EVAL,
loss=tf.constant(0.),
evaluation_hooks=[restore_hook],
eval_metrics=eval_metrics)
if isinstance(transformer_model, transformer.Unitransformer):
position_kwargs = dict(
sequence_id=_import_feature("targets_segmentation", True),
position=_import_feature("targets_position", True),
)
elif isinstance(transformer_model, transformer.Bitransformer):
position_kwargs = dict(
encoder_sequence_id=_import_feature("inputs_segmentation", True),
decoder_sequence_id=_import_feature("targets_segmentation", True),
encoder_position=_import_feature("inputs_position", True),
decoder_position=_import_feature("targets_position", True),
)
else:
raise ValueError("unrecognized class")
logits, loss = transformer_model.call_simple(
inputs=inputs,
targets=targets,
compute_loss=True,
mode=mode,
variable_dtype=get_variable_dtype(),
**position_kwargs)
if use_tpu and logits is not None:
logits = mtf.anonymize(logits)
if mode == tf.estimator.ModeKeys.TRAIN:
var_grads = mtf.gradients(
[loss], [v.outputs[0] for v in graph.trainable_variables])
optimizer = mtf.optimize.AdafactorOptimizer()
update_ops = optimizer.apply_grads(var_grads, graph.trainable_variables)
lowering = mtf.Lowering(graph, {mesh: mesh_impl}, autostack=autostack)
tf_loss = lowering.export_to_tf_tensor(loss)
tf_loss = tf.to_float(tf_loss)
if not use_tpu:
tf_loss = tf.Print(tf_loss, [tf_loss, tf.train.get_global_step()],
"step, tf_loss")
if mode == tf.estimator.ModeKeys.TRAIN:
tf_update_ops = [lowering.lowered_operation(op) for op in update_ops]
tf_update_ops.append(tf.assign_add(global_step, 1))
train_op = tf.group(tf_update_ops)
with mtf.utils.outside_all_rewrites():
restore_hook = mtf.MtfRestoreHook(lowering)
saver = tf.train.Saver(
tf.global_variables(),
sharded=True,
max_to_keep=10,
keep_checkpoint_every_n_hours=2,
defer_build=False,
save_relative_paths=True)
tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
saver_listener = mtf.MtfCheckpointSaverListener(lowering)
saver_hook = tf.train.CheckpointSaverHook(
model_dir, save_steps=1000, saver=saver, listeners=[saver_listener])
gin_config_saver_hook = gin.tf.GinConfigSaverHook(
model_dir, summarize_config=True)
if mode == tf.estimator.ModeKeys.TRAIN:
if use_tpu:
return tpu_estimator.TPUEstimatorSpec(
mode=tf.estimator.ModeKeys.TRAIN,
loss=tf_loss,
train_op=train_op,
training_hooks=[
restore_hook,
saver_hook,
gin_config_saver_hook,
])
else:
return tf.estimator.EstimatorSpec(
tf.estimator.ModeKeys.TRAIN,
loss=tf_loss,
train_op=train_op,
training_chief_hooks=[
restore_hook,
saver_hook,
gin_config_saver_hook,
])
return my_model_fn | Create a TPUEstimator model function.
Args:
model_type: a string
transformer_model: a transformer.Unitransformer or transformer.Bitransformer
model_dir: a string
use_tpu: a boolean
mesh_shape: a mtf.Shape
layout_rules: a mtf.LayoutRules
batch_size: an integer
sequence_length: an integer
autostack: a boolean
metric_names: list of strings giving the metric names. If None, then
computes padded_neg_log_perplexity
Returns:
a function to be passed to TPUEstimator |
387,231 | def _get_logical_raid_levels(self):
logical_drive_details = self._get_logical_drive_resource()
raid_level = {}
if logical_drive_details:
for item in logical_drive_details:
if 'raid_level' in item:
raid_level_var = "logical_raid_level_" + item['raid_level']
raid_level.update({raid_level_var: 'true'})
return raid_level if len(raid_level.keys()) > 0 else None | Gets the different raid levels configured on a server.
:returns a dictionary of logical_raid_levels set to true.
Example if raid level 1+0 and 6 are configured, it returns
{'logical_raid_level_10': 'true',
'logical_raid_level_6': 'true'} |
387,232 | def clickmap(parser, token):
bits = token.split_contents()
if len(bits) > 1:
raise TemplateSyntaxError("'%s' takes no arguments" % bits[0])
return ClickmapNode() | Clickmap tracker template tag.
Renders Javascript code to track page visits. You must supply
your clickmap tracker ID (as a string) in the ``CLICKMAP_TRACKER_ID``
setting. |
387,233 | def _send(self):
if not statsd:
return
for metric in self.metrics:
self.connection.send()
self.metrics = [] | Send data to statsd. Fire and forget. Cross fingers and it'll arrive. |
387,234 | def jit_load(self):
try:
model = importlib.import_module( + self.model, )
device = getattr(model, self.device)
self.system.__dict__[self.name] = device(self.system, self.name)
g = self.system.__dict__[self.name]._group
self.system.group_add(g)
self.system.__dict__[g].register_model(self.name)
self.system.devman.register_device(self.name)
self.loaded = 1
logger.debug(.format(
self.model, self.device))
except ImportError:
logger.error(
.format(self.model, self.device))
except AttributeError:
logger.error(
.format(self.model, self.device)) | Import and instantiate this JIT object
Returns
------- |
387,235 | def get_attrs(self):
return FrozenOrderedDict((a, getattr(self.ds, a)) for a in self.ds.ncattrs()) | Get the global attributes from underlying data set. |
387,236 | def delete_all(config=None):
storm_ = get_storm_instance(config)
try:
storm_.delete_all_entries()
print(get_formatted_message(, ))
except Exception as error:
print(get_formatted_message(str(error), ), file=sys.stderr)
sys.exit(1) | Deletes all hosts from ssh config. |
387,237 | def open_db(db, zipped=None, encoding=None, fieldnames_lower=True, case_sensitive=True):
kwargs = dict(
encoding=encoding,
fieldnames_lower=fieldnames_lower,
case_sensitive=case_sensitive,
)
if zipped:
with Dbf.open_zip(db, zipped, **kwargs) as dbf:
yield dbf
else:
with Dbf.open(db, **kwargs) as dbf:
yield dbf | Context manager. Allows reading DBF file (maybe even from zip).
:param str|unicode|file db: .dbf file name or a file-like object.
:param str|unicode zipped: .zip file path or a file-like object.
:param str|unicode encoding: Encoding used by DB.
This will be used if there's no encoding information in the DB itself.
:param bool fieldnames_lower: Lowercase field names.
:param bool case_sensitive: Whether DB filename is case sensitive.
:rtype: Dbf |
387,238 | def distributions(self, _args):
ctx = self.ctx
dists = Distribution.get_distributions(ctx)
if dists:
print(
.format(Style=Out_Style, Fore=Out_Fore))
pretty_log_dists(dists, print)
else:
print(
.format(Style=Out_Style)) | Lists all distributions currently available (i.e. that have already
been built). |
387,239 | def append_manage_data_op(self, data_name, data_value, source=None):
op = operation.ManageData(data_name, data_value, source)
return self.append_op(op) | Append a :class:`ManageData <stellar_base.operation.ManageData>`
operation to the list of operations.
:param str data_name: String up to 64 bytes long. If this is a new Name
it will add the given name/value pair to the account. If this Name
is already present then the associated value will be modified.
:param data_value: If not present then the existing
Name will be deleted. If present then this value will be set in the
DataEntry. Up to 64 bytes long.
:type data_value: str, bytes, None
:param str source: The source account on which data is being managed.
operation.
:return: This builder instance. |
387,240 | def dumps(self, script):
"Return a compressed representation of script as a binary string."
string = BytesIO()
self._dump(script, string, self._protocol, self._version)
return string.getvalue() | Return a compressed representation of script as a binary string. |
387,241 | def set_entries(self, entries: List[Tuple[str, str]], titles, resources):
self.entries = []
for flag, pagename in entries:
title = titles[pagename].children[0]
resource = resources.get(pagename, None)
if resource and hasattr(resource,
'is_published') and not \
resource.is_published:
continue
self.entries.append(dict(
title=title, href=pagename, resource=resource
))
self.result_count = len(self.entries) | Provide the template the data for the toc entries |
387,242 | def get_support_variables(polynomial):
support = []
if is_number_type(polynomial):
return support
for monomial in polynomial.expand().as_coefficients_dict():
mon, _ = __separate_scalar_factor(monomial)
symbolic_support = flatten(split_commutative_parts(mon))
for s in symbolic_support:
if isinstance(s, Pow):
base = s.base
if is_adjoint(base):
base = base.adjoint()
support.append(base)
elif is_adjoint(s):
support.append(s.adjoint())
elif isinstance(s, Operator):
support.append(s)
return support | Gets the support of a polynomial. |
387,243 | def create_ver_browser(self, layout):
brws = ComboBoxBrowser(1, headers=[])
layout.insertWidget(1, brws)
return brws | Create a version browser and insert it into the given layout
:param layout: the layout to insert the browser into
:type layout: QLayout
:returns: the created browser
:rtype: :class:`jukeboxcore.gui.widgets.browser.ComboBoxBrowser`
:raises: None |
387,244 | def integers(start, count):
if count < 0:
raise ValueError("integers() count cannot be negative")
return query(irange(start, start + count)) | Generates in sequence the integral numbers within a range.
Note: This method uses deferred execution.
Args:
start: The first integer in the sequence.
count: The number of sequential integers to generate.
Returns:
A Queryable over the specified range of integers.
Raises:
ValueError: If count is negative. |
387,245 | def get_published(self, layer_id, expand=[]):
target_url = self.client.get_url(, , , {: layer_id})
return self._get(target_url, expand=expand) | Get the latest published version of this layer.
:raises NotFound: if there is no published version. |
387,246 | def get_extra_functions(self) -> Dict[str, Callable]:
if self.channel_type == ChannelType.Master:
raise NameError("get_extra_function is not available on master channels.")
methods = {}
for mName in dir(self):
m = getattr(self, mName)
if callable(m) and getattr(m, "extra_fn", False):
methods[mName] = m
return methods | Get a list of additional features
Returns:
Dict[str, Callable]: A dict of methods marked as additional features.
Method can be called with ``get_extra_functions()["methodName"]()``. |
387,247 | def is_de_listed(self):
env = Environment.get_instance()
instrument = env.get_instrument(self._order_book_id)
current_date = env.trading_dt
if instrument.de_listed_date is not None:
if instrument.de_listed_date.date() > env.config.base.end_date:
return False
if current_date >= env.data_proxy.get_previous_trading_date(instrument.de_listed_date):
return True
return False | Determine whether the contract has expired (been de-listed). |
387,248 | def _write_cache(self, lines, append=False):
mode = 'a' if append else 'w'
with open(self.filepath, mode, encoding='utf-8') as fh:
fh.writelines(line + '\n' for line in lines) | Write virtualenv metadata to cache. |
387,249 | def fit(self, train_x, train_y):
if self.first_fitted:
self.incremental_fit(train_x, train_y)
else:
self.first_fit(train_x, train_y) | Fit the regressor with more data.
Args:
train_x: A list of NetworkDescriptor.
train_y: A list of metric values. |
387,250 | def _process_results(self, raw_results, *args, **kwargs):
if 'aggregations' in raw_results:
for agg_fieldname, agg_info in raw_results['aggregations'].items():
agg_info['_type'] = 'terms'
for bucket_item in agg_info['buckets']:
if 'key' in bucket_item:
bucket_item['term'] = bucket_item['key']
bucket_item['count'] = bucket_item['doc_count']
agg_info['terms'] = agg_info['buckets']
raw_results['facets'] = raw_results['aggregations']
return super(ICEkitConfigurableElasticBackend, self) \
._process_results(raw_results, *args, **kwargs) | Naively translate between the 'aggregations' search result data
structure returned by ElasticSearch 2+ in response to 'aggs' queries
into a structure with 'facets'-like content that Haystack (2.6.1) can
understand and process, then pass it on to Haystack's default result
processing code.
WARNING: Only 'terms' facet types are currently supported.
An example result:
{
'hits': <BLAH>
'aggregations': {
'type_exact': {
'doc_count_error_upper_bound': 0,
'sum_other_doc_count': 0,
'buckets': [
{'key': 'artwork', 'doc_count': 14145},
{'key': 'artist', 'doc_count': 3360},
{'key': 'event', 'doc_count': 2606},
{'key': 'exhibition', 'doc_count': 416},
{'key': 'essay', 'doc_count': 20},
{'key': 'publication', 'doc_count': 1}
]
}
}
}
Will be translated to look like:
{
'hits': <BLAH>
'facets': {
'type_exact': {
'_type': 'terms',
'terms': [
{'term': 'artwork', 'count': 14145},
{'term': 'artist', 'count': 3360},
{'term': 'event', 'count': 2606},
{'term': 'exhibition', 'count': 416},
{'term': 'essay', 'count': 20},
{'term': 'publication', 'count': 1}
]
}
}
}
NOTE: We don't bother cleaning up the data quite this much really, we
just translate and duplicate item names and leave the old ones in place
for a time when Haystack may support the real returned results. |
387,251 | def read_raw(data_path):
with open(data_path, 'rb') as f:
data = pickle.load(f)
return data | Parameters
----------
data_path : str |
387,252 | def get_trees(self, data, showerrors = False):
if showerrors:
raise NotImplementedError("This parser doesn't implement errors")
self.data = data
self.index = 0
try:
return [self.__aux_parser(self._productionset.initialsymbol)]
except (IndexError, ParseError):
return [] | returns a list of trees with valid guesses |
387,253 | def direct_messages(self, delegate, params={}, extra_args=None):
return self.__get(, delegate, params,
txml.Direct, extra_args=extra_args) | Get direct messages for the authenticating user.
Search results are returned one message at a time a DirectMessage
objects |
387,254 | def _on_group_stream_changed(self, data):
self._groups.get(data.get()).update_stream(data) | Handle group stream change. |
387,255 | def _run_apt_command(cmd, fatal=False):
cmd_env = {
'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')}
if fatal:
_run_with_retries(
cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,),
retry_message="Couldn't acquire DPKG lock")
else:
env = os.environ.copy()
env.update(cmd_env)
subprocess.call(cmd, env=env) | Run an apt command with optional retries.
:param: cmd: str: The apt command to run.
:param: fatal: bool: Whether the command's output should be checked and
retried. |
387,256 | async def _on_trace_notification(self, trace_event):
conn_string = trace_event.get()
payload = trace_event.get()
await self.notify_event(conn_string, , payload) | Callback function called when a trace chunk is received.
Args:
trace_chunk (dict): The received trace chunk information |
387,257 | def on_quote_changed(self, tiny_quote):
data = tiny_quote
str_log = "on_quote_changed symbol=%s open=%s high=%s close=%s low=%s" % (data.symbol, data.openPrice, data.highPrice, data.lastPrice, data.lowPrice)
self.log(str_log) | Callback triggered when real-time quote or order book data changes. |
387,258 | def create(self, data):
if data is None:
return None
prototype = {}
errors = {}
for field_name, field_spec in self.spec.fields.items():
try:
value = self._create_value(data, field_name, self.spec)
except ValidationError, e:
if field_name not in self.default_create_values:
if hasattr(e, 'message_dict'):
errors.update(dict(zip(
[field_name + + key for key in e.message_dict.keys()],
e.message_dict.values())))
else:
errors[field_name] = e.messages
else:
key_name = self.property_name_map[field_name]
prototype[key_name] = value
if self.prevent_extra_fields:
extras = set(data.keys()) - set(self.property_name_map.keys())
if extras:
errors[.join(extras)] = []
if errors:
raise ValidationError(errors)
_data = deepcopy(self.default_create_values)
_data.update(prototype)
if self.klass:
instance = self.klass()
instance.__dict__.update(prototype)
return instance
else:
return prototype | Create object from the given data.
The given data may or may not have been validated prior to calling
this function. This function will try its best in creating the object.
If the resulting object cannot be produced, raises ``ValidationError``.
The spec can affect how individual fields will be created by
implementing ``clean()`` for the fields needing customization.
:param data: the data as a dictionary.
:return: instance of ``klass`` or dictionary.
:raises: ``ValidationError`` if factory is unable to create object. |
387,259 | def calculated_intervals(self, value):
if not value:
self._calculated_intervals = TimeIntervals()
return
if isinstance(value, TimeInterval):
value = TimeIntervals([value])
elif isinstance(value, TimeIntervals):
pass
elif isinstance(value, list):
value = TimeIntervals(value)
else:
raise TypeError("Expected list/TimeInterval/TimeIntervals, got {}".format(type(value)))
for interval in value:
if interval.end > utcnow():
raise ValueError("Calculated intervals should not be in the future")
self._calculated_intervals = value | Set the calculated intervals
This will be written to the stream_status collection if it's in the database channel
:param value: The calculated intervals
:type value: TimeIntervals, TimeInterval, list[TimeInterval] |
387,260 | def addExpectedFailure(self, test: unittest.case.TestCase, err: tuple) -> None:
self.add_result(TestState.expected_failure, test, err) | Transforms the test in a serializable version of it and sends it to a queue for further analysis
:param test: the test to save
:param err: tuple of the form (Exception class, Exception instance, traceback) |
387,261 | def poll_event(self):
curses.flushinp()
ch = self.screen.getch()
if ch == 27:
return EVENT_ESC
elif ch == -1 or ch == curses.KEY_RESIZE:
return EVENT_RESIZE
elif ch == 10 or ch == curses.KEY_ENTER:
return EVENT_ENTER
elif ch == 127 or ch == curses.KEY_BACKSPACE:
return EVENT_BACKSPACE
elif ch == curses.KEY_UP:
return EVENT_UP
elif ch == curses.KEY_DOWN:
return EVENT_DOWN
elif ch == curses.KEY_LEFT:
return EVENT_LEFT
elif ch == curses.KEY_RIGHT:
return EVENT_RIGHT
elif ch == 3:
return EVENT_CTRL_C
elif 0 <= ch < 256:
return chr(ch)
else:
return EVENT_UNHANDLED | Waits for an event to happen and returns a string related to the event.
If the event is a normal (letter) key press, the letter is returned (case sensitive)
:return: Event type |
387,262 | def eigenvectors_nrev(T, right=True):
r
if right:
val, R = eig(T, left=False, right=True)
perm = np.argsort(np.abs(val))[::-1]
eigvec = R[:, perm]
else:
val, L = eig(T, left=True, right=False)
perm = np.argsort(np.abs(val))[::-1]
eigvec = L[:, perm]
return eigvec | r"""Compute eigenvectors of transition matrix.
Parameters
----------
T : (d, d) ndarray
Transition matrix (stochastic matrix)
k : int or tuple of ints, optional
Compute the first k eigenvalues of T
right : bool, optional
If right=True compute right eigenvectors, left eigenvectors
otherwise
Returns
-------
eigvec : (d, d) ndarray
The eigenvectors of T ordered with decreasing absolute value
of the corresponding eigenvalue |
387,263 | def consume_network_packet_messages_from_redis():
sub = KombuSubscriber(
name,
FORWARD_BROKER_URL,
FORWARD_SSL_OPTIONS)
seconds_to_consume = 10.0
heartbeat = 60
serializer = "application/json"
queue = FORWARD_QUEUE
sub.consume(
callback=recv_msg,
queue=queue,
exchange=None,
routing_key=None,
serializer=serializer,
heartbeat=heartbeat,
time_to_wait=seconds_to_consume)
log.info("end - {}".format(name)) | consume_network_packet_messages_from_redis
Setup a ``celery_connectors.KombuSubscriber`` to consume meessages
from the ``FORWARD_BROKER_URL`` broker in the ``FORWARD_QUEUE``
queue. |
387,264 | def get_imports(filename):
with open(filename, "rb") as f:
src = f.read()
finder = ImportFinder()
finder.visit(ast.parse(src, filename=filename))
imports = []
for i in finder.imports:
name, _, is_from, is_star = i
imports.append(i + (resolve_import(name, is_from, is_star),))
return imports | Get all the imports in a file.
Each import is a tuple of:
(name, alias, is_from, is_star, source_file) |
387,265 | def get(self, file_id: str) -> [typing.BinaryIO, str, datetime.datetime]:
raise NotImplementedError("Downloading files for downloading files in FileStore has not been implemented yet.") | Return the file identified by a file_id string, its file name and upload date. |
387,266 | def fit(self, X, y=None, groups=None, **fit_params):
return self._fit(X, y=y, groups=groups, **fit_params) | Run fit on the estimator with parameters chosen sequentially by SigOpt.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning. |
387,267 | def entrypoint(cls):
if not isinstance(cls, type) or not issubclass(cls, Command):
raise TypeError(f"inappropriate entrypoint instance of type {cls.__class__}")
cls._argcmdr_entrypoint_ = True
return cls | Mark the decorated command as the intended entrypoint of the
command module. |
387,268 | def url_(client_id: str, redirect_uri: str, *, scope: str = None, state: str = None, secure: bool = True) -> str:
attrs = {
'client_id': client_id,
'redirect_uri': quote(redirect_uri)
}
if scope is not None:
attrs['scope'] = quote(scope)
if state is not None:
attrs['state'] = state
parameters = '&'.join('{}={}'.format(*item) for item in attrs.items())
return OAuth2._BASE.format(parameters=parameters) | Construct a OAuth2 URL instead of an OAuth2 object. |
387,269 | def crosslisting_feature(catalog, soup):
listing = {}
for elem in soup.coursedb.findAll():
seats = int(elem[])
crns = [safeInt(crn.string) for crn in elem.findAll()]
cl = CrossListing(crns, seats)
for crn in crns:
listing[crn] = cl
catalog.crosslistings = FrozenDict(listing)
logger.info( % len(catalog.crosslistings)) | Parses all the crosslistings. These refer to the similar CRNs,
such as a grad & undergrad level course. |
387,270 | def _get_value(data_structure, key):
if len(key) == 0:
raise KeyError()
value = data_structure[key[0]]
if len(key) > 1:
return _get_value(value, key[1:])
return value | Return the value of a data_structure given a path.
:param data_structure: Dictionary, list or subscriptable object.
:param key: Array with the defined path ordered. |
387,271 | def getSparseTensor(numNonzeros, inputSize, outputSize,
onlyPositive=False,
fixedRange=1.0/24):
w = torch.Tensor(outputSize, inputSize, )
if onlyPositive:
w.data.uniform_(0, fixedRange)
else:
w.data.uniform_(-fixedRange, fixedRange)
if numNonzeros < inputSize:
numZeros = inputSize - numNonzeros
outputIndices = np.arange(outputSize)
inputIndices = np.array([np.random.permutation(inputSize)[:numZeros]
for _ in outputIndices], dtype=np.long)
zeroIndices = np.empty((outputSize, numZeros, 2), dtype=np.long)
zeroIndices[:, :, 0] = outputIndices[:, None]
zeroIndices[:, :, 1] = inputIndices
zeroIndices = torch.LongTensor(zeroIndices.reshape(-1, 2))
zeroWts = (zeroIndices[:, 0], zeroIndices[:, 1])
w.data[zeroWts] = 0.0
return w | Return a random tensor that is initialized like a weight matrix
Size is outputSize X inputSize, where weightSparsity% of each row is non-zero |
387,272 | def step( self, local_inv=None, peer_table=None, peer_queue=None, con=None, path=None ):
if path is None:
path = self.atlasdb_path
if self.max_neighbors is None:
self.max_neighbors = atlas_max_neighbors()
log.debug("%s: max neighbors is %s" % (self.my_hostport, self.max_neighbors))
current_peers = self.get_current_peers( peer_table=peer_table )
num_added = self.update_new_peers( 10, current_peers, peer_queue=peer_queue, peer_table=peer_table, con=con, path=path )
if self.my_hostport in self.current_peer_neighbors:
self.current_peer_neighbors.remove(self.my_hostport)
log.debug("%s: neighbors of %s are (%s): %s" % (self.my_hostport, self.current_peer, len(self.current_peer_neighbors), ",".join(self.current_peer_neighbors)))
self.new_peers = list(set( self.new_peers + peer_neighbors ))
if self.current_peer is not None:
next_peer, next_peer_neighbors = self.random_walk_graph( self.prev_peer, self.prev_peer_degree, self.current_peer, self.current_peer_neighbors, con=con, path=path, peer_table=peer_table )
if next_peer is not None and next_peer_neighbors is not None:
self.prev_peer = self.current_peer
self.prev_peer_degree = len(self.current_peer_neighbors)
self.current_peer = next_peer
self.current_peer_neighbors = next_peer_neighbors
self.new_peers = list(set(self.new_peers + self.current_peer_neighbors))
else:
log.error("%s: failed to walk from %s" % (self.my_hostport, self.current_peer))
self.random_walk_reset()
num_removed = self.update_existing_peers( 10, con=con, path=path, peer_table=peer_table )
return num_added, num_removed | Execute one round of the peer discovery algorithm:
* Add at most 10 new peers from the pending peer queue
(but ping them first, and drop hosts if the pending queue
gets to be too long).
* Execute one step of the MHRWDA algorithm. Add any new
peers from the neighbor sets discovered.
* Remove at most 10 old, unresponsive peers from the peer DB. |
387,273 | def namespace(self):
if self.m_name == -1 or (self.m_event != const.START_TAG and self.m_event != const.END_TAG):
return u''
if self.m_namespaceUri == 0xFFFFFFFF:
return u''
return self.sb[self.m_namespaceUri] | Return the Namespace URI (if any) as a String for the current tag |
387,274 | def get_repo(self, auth, username, repo_name):
path = "/repos/{u}/{r}".format(u=username, r=repo_name)
response = self.get(path, auth=auth)
return GogsRepo.from_json(response.json()) | Returns a the repository with name ``repo_name`` owned by
the user with username ``username``.
:param auth.Authentication auth: authentication object
:param str username: username of owner of repository
:param str repo_name: name of repository
:return: a representation of the retrieved repository
:rtype: GogsRepo
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced |
387,275 | def eval(self, construct):
data = clips.data.DataObject(self._env)
if lib.EnvEval(self._env, construct.encode(), data.byref) != 1:
raise CLIPSError(self._env)
return data.value | Evaluate an expression returning its value.
The Python equivalent of the CLIPS eval command. |
387,276 | def config_merge_text(source='running',
merge_config=None,
merge_path=None,
saltenv='base'):
config_txt = __salt__[](source=source)[][source]
return __salt__[](initial_config=config_txt,
merge_config=merge_config,
merge_path=merge_path,
saltenv=saltenv) | .. versionadded:: 2019.2.0
Return the merge result of the configuration from ``source`` with the
merge configuration, as plain text (without loading the config on the
device).
source: ``running``
The configuration type to retrieve from the network device. Default:
``running``. Available options: ``running``, ``startup``, ``candidate``.
merge_config
The config to be merged into the initial config, sent as text. This
argument is ignored when ``merge_path`` is set.
merge_path
Absolute or remote path from where to load the merge configuration
text. This argument allows any URI supported by
:py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``,
``https://``, ``s3://``, ``ftp:/``, etc.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file.
Ignored if ``merge_path`` is not a ``salt://`` URL.
CLI Example:
.. code-block:: bash
salt '*' napalm.config_merge_text merge_path=salt://path/to/merge.cfg |
387,277 | def delete_file(self, path, prefixed_path, source_storage):
if self.faster:
return True
else:
return super(Command, self).delete_file(path, prefixed_path, source_storage) | We don't need all the file_exists stuff because we have to override all files anyways. |
387,278 | def map(source = 'density', z = 0, x = 0, y = 0, format = '@1x.png',
srs='EPSG:4326', bin=None, hexPerTile=None, style='classic.point',
taxonKey=None, country=None, publishingCountry=None, publisher=None,
datasetKey=None, year=None, basisOfRecord=None, **kwargs):
t work)
:param publishingCountry: [str] The 2-letter country code (as per
ISO-3166-1) of the country in which the occurrence was recorded.
:return: An object of class GbifMap
For mvt format, see https://github.com/tilezen/mapbox-vector-tile to
decode, and example below
Usage::
from pygbif import maps
out = maps.map(taxonKey = 2435098)
out.response
out.path
out.img
out.plot()
out = maps.map(taxonKey = 2480498, year = range(2008, 2011+1))
out.response
out.path
out.img
out.plot()
maps.map(taxonKey = 2480498, year = 2010, srs = "EPSG:3857")
maps.map(taxonKey = 212, year = 1998, bin = "hex",
hexPerTile = 30, style = "classic-noborder.poly")
maps.map(taxonKey = 2480498, style = "purpleYellow.point").plot()
maps.map(taxonKey = 2480498, year = 2010,
basisOfRecord = "HUMAN_OBSERVATION", bin = "hex",
hexPerTile = 500).plot()
maps.map(taxonKey = 2480498, year = 2010,
basisOfRecord = ["HUMAN_OBSERVATION", "LIVING_SPECIMEN"],
hexPerTile = 500, bin = "hex").plot()
from pygbif import maps
x = maps.map(taxonKey = 2480498, year = 2010,
format = ".mvt")
x.response
x.path
x.img
import mapbox_vector_tile
mapbox_vector_tile.decode(x.response.content)
[email protected]@[email protected]@[email protected]:3857EPSG:4326EPSG:3575EPSG:3031srssquarehexbinstylehttps://api.gbif.org/v2/map/occurrence/%s/%s/%s/%s%ssrsbinhexPerTilestyletaxonKeycountrypublishingCountrypublisherdatasetKeyyearbasisOfRecord_.image/pngapplication/x-protobuf'
out = gbif_GET_map(url, args, ctype, **kwargs)
return GbifMap(out) | GBIF maps API
:param source: [str] Either ``density`` for fast, precalculated tiles,
or ``adhoc`` for any search
:param z: [str] zoom level
:param x: [str] longitude
:param y: [str] latitude
:param format: [str] format of returned data. One of:
- ``.mvt`` - vector tile
- ``@Hx.png`` - 256px raster tile (for legacy clients)
- ``@1x.png`` - 512px raster tile
- ``@2x.png`` - 1024px raster tile
- ``@3x.png`` - 2048px raster tile
- ``@4x.png`` - 4096px raster tile
:param srs: [str] Spatial reference system. One of:
- ``EPSG:3857`` (Web Mercator)
- ``EPSG:4326`` (WGS84 plate caree)
- ``EPSG:3575`` (Arctic LAEA)
- ``EPSG:3031`` (Antarctic stereographic)
:param bin: [str] square or hex to aggregate occurrence counts into
squares or hexagons. Points by default.
:param hexPerTile: [str] sets the size of the hexagons (the number
horizontally across a tile)
:param squareSize: [str] sets the size of the squares. Choose a factor
of 4096 so they tessellate correctly: probably from 8, 16, 32, 64,
128, 256, 512.
:param style: [str] for raster tiles, choose from the available styles.
Defaults to classic.point.
:param taxonKey: [int] A GBIF taxon key (a species/taxon identifier, e.g. ``2435098``)
:param datasetKey: [str] The occurrence dataset key (a uuid)
:param country: [str] The 2-letter country code (as per ISO-3166-1) of
the country in which the occurrence was recorded. See here
http://en.wikipedia.org/wiki/ISO_3166-1_alpha-2
:param basisOfRecord: [str] Basis of record, as defined in the BasisOfRecord enum
http://gbif.github.io/gbif-api/apidocs/org/gbif/api/vocabulary/BasisOfRecord.html
Acceptable values are
- ``FOSSIL_SPECIMEN`` An occurrence record describing a fossilized specimen.
- ``HUMAN_OBSERVATION`` An occurrence record describing an observation made by one or more people.
- ``LITERATURE`` An occurrence record based on literature alone.
- ``LIVING_SPECIMEN`` An occurrence record describing a living specimen, e.g.
- ``MACHINE_OBSERVATION`` An occurrence record describing an observation made by a machine.
- ``OBSERVATION`` An occurrence record describing an observation.
- ``PRESERVED_SPECIMEN`` An occurrence record describing a preserved specimen.
- ``UNKNOWN`` Unknown basis for the record.
:param year: [int] The 4 digit year. A year of 98 will be interpreted as
AD 98. Supports range queries, smaller,larger (e.g., ``1990,1991``,
whereas ``1991,1990`` wouldn't work)
:param publishingCountry: [str] The 2-letter country code (as per
ISO-3166-1) of the country in which the occurrence was recorded.
:return: An object of class GbifMap
For mvt format, see https://github.com/tilezen/mapbox-vector-tile to
decode, and example below
Usage::
from pygbif import maps
out = maps.map(taxonKey = 2435098)
out.response
out.path
out.img
out.plot()
out = maps.map(taxonKey = 2480498, year = range(2008, 2011+1))
out.response
out.path
out.img
out.plot()
# srs
maps.map(taxonKey = 2480498, year = 2010, srs = "EPSG:3857")
# bin
maps.map(taxonKey = 212, year = 1998, bin = "hex",
hexPerTile = 30, style = "classic-noborder.poly")
# style
maps.map(taxonKey = 2480498, style = "purpleYellow.point").plot()
# basisOfRecord
maps.map(taxonKey = 2480498, year = 2010,
basisOfRecord = "HUMAN_OBSERVATION", bin = "hex",
hexPerTile = 500).plot()
maps.map(taxonKey = 2480498, year = 2010,
basisOfRecord = ["HUMAN_OBSERVATION", "LIVING_SPECIMEN"],
hexPerTile = 500, bin = "hex").plot()
# map vector tiles, gives back raw bytes
from pygbif import maps
x = maps.map(taxonKey = 2480498, year = 2010,
format = ".mvt")
x.response
x.path
x.img # None
import mapbox_vector_tile
mapbox_vector_tile.decode(x.response.content) |
387,279 | def cli(*args, **kwargs):
log.debug(.format(args, kwargs))
env.update(kwargs) | General-purpose automation tool.
For details, see `GitHub <https://github.com/littlemo/mohand>`_ |
387,280 | def database(self, database_id, ddl_statements=(), pool=None):
return Database(database_id, self, ddl_statements=ddl_statements, pool=pool) | Factory to create a database within this instance.
:type database_id: str
:param database_id: The ID of the database.
:type ddl_statements: list of string
:param ddl_statements: (Optional) DDL statements, excluding the
'CREATE DATABASE' statement.
:type pool: concrete subclass of
:class:`~google.cloud.spanner_v1.pool.AbstractSessionPool`.
:param pool: (Optional) session pool to be used by database.
:rtype: :class:`~google.cloud.spanner_v1.database.Database`
:returns: a database owned by this instance. |
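A hedged sketch of how this factory is normally reached from a Spanner client; project, instance and database IDs are placeholders:
from google.cloud import spanner

client = spanner.Client()                      # picks up default credentials/project
instance = client.instance("my-instance")      # placeholder instance ID
database = instance.database(
    "my-database",                             # placeholder database ID
    ddl_statements=[
        "CREATE TABLE users (id INT64 NOT NULL, name STRING(100)) PRIMARY KEY (id)",
    ],
)
database.create()                              # issues the CreateDatabase RPC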
387,281 | def numeric_function_clean_dataframe(self, axis):
result = None
query_compiler = self
if not axis and len(self.index) == 0:
result = pandas.Series(dtype=np.int64)
nonnumeric = [
col
for col, dtype in zip(self.columns, self.dtypes)
if not is_numeric_dtype(dtype)
]
if len(nonnumeric) == len(self.columns):
if axis:
result = pandas.Series([np.nan for _ in self.index])
else:
result = pandas.Series([0 for _ in self.index])
else:
query_compiler = self.drop(columns=nonnumeric)
return result, query_compiler | Preprocesses numeric functions to clean dataframe and pick numeric indices.
Args:
axis: '0' if columns and '1' if rows.
Returns:
Tuple with return value(if any), indices to apply func to & cleaned Manager. |
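The helper above mirrors what plain pandas does when a numeric reduction must ignore non-numeric columns; a rough standalone illustration (not Modin's internal API):
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"], "c": [0.5, 1.5]})
# Keep only numeric-dtype columns before applying a column-wise reduction.
numeric = df.select_dtypes(include=[np.number])
print(numeric.sum())   # a: 3.0, c: 2.0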
387,282 | def javascript_escape(s, quote_double_quotes=True):
ustring_re = re.compile(u"([\u0080-\uffff])")
def fix(match):
return r"\u%04x" % ord(match.group(1))
if type(s) == str:
s = s.decode()
elif type(s) != six.text_type:
raise TypeError(s)
s = s.replace(, )
s = s.replace(, )
s = s.replace(, )
s = s.replace(, )
s = s.replace("")
if quote_double_quotes:
s = s.replace(, )
return str(ustring_re.sub(fix, s)) | Escape characters for javascript strings. |
387,283 | def is_eligible(self, timestamp, status, notif_number, in_notif_time, interval, escal_period):
short_states = {
u: , u: , u: ,
u: , u: , u: ,
u: , u: , u: , u:
}
if not self.time_based:
if notif_number < self.first_notification:
return False
if self.last_notification and notif_number > self.last_notification:
return False
else:
if in_notif_time < self.first_notification_time * interval:
return False
if self.last_notification_time and \
in_notif_time > self.last_notification_time * interval:
return False
if status in short_states and short_states[status] not in self.escalation_options:
return False
if escal_period is not None and not escal_period.is_time_valid(timestamp):
return False
return True | Check if the escalation is eligible (notification is escalated or not)
Escalation is NOT eligible if ONE of the following conditions is fulfilled::
* escalation is not time based and notification number not in range
[first_notification;last_notification] (if last_notif == 0, it's infinity)
* escalation is time based and notification time not in range
[first_notification_time;last_notification_time] (if last_notif_time == 0, it's infinity)
* status does not matches escalation_options ('WARNING' <=> 'w' ...)
* escalation_period is not legit for this time (now usually)
:param timestamp: timestamp to check if timeperiod is valid
:type timestamp: int
:param status: item status (one of the small_states key)
:type status: str
:param notif_number: current notification number
:type notif_number: int
:param in_notif_time: current notification time
:type in_notif_time: int
:param interval: time interval length
:type interval: int
:return: True if no condition has been fulfilled, otherwise False
:rtype: bool |
387,284 | def virtualenv_no_global():
site_mod_dir = os.path.dirname(os.path.abspath(site.__file__))
no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt')
if running_under_virtualenv() and os.path.isfile(no_global_file):
return True | Return True if in a venv and no system site packages. |
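For comparison, a rough sketch of the equivalent check for the stdlib venv module, which records the setting in pyvenv.cfg rather than a marker file (an illustration under that assumption, not pip's implementation):
import sys
from pathlib import Path

def venv_no_global_site_packages() -> bool:
    # Inside a venv, sys.prefix differs from sys.base_prefix.
    if sys.prefix == getattr(sys, "base_prefix", sys.prefix):
        return False
    cfg = Path(sys.prefix) / "pyvenv.cfg"
    if not cfg.is_file():
        return False
    for line in cfg.read_text().splitlines():
        key, _, value = line.partition("=")
        if key.strip() == "include-system-site-packages":
            return value.strip().lower() == "false"
    return True   # venv writes the key by default; treat a missing key as isolated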
387,285 | def set_passport_data_errors(self, user_id, errors):
from pytgbot.api_types.sendable.passport import PassportElementError
assert_type_or_raise(user_id, int, parameter_name="user_id")
assert_type_or_raise(errors, list, parameter_name="errors")
result = self.do("setPassportDataErrors", user_id=user_id, errors=errors)
if self.return_python_objects:
logger.debug("Trying to parse {data}".format(data=repr(result)))
try:
return from_array_list(bool, result, list_level=0, is_builtin=True)
except TgApiParseException:
logger.debug("Failed parsing as primitive bool", exc_info=True)
raise TgApiParseException("Could not parse result.")
return result | Informs a user that some of the Telegram Passport elements they provided contains errors. The user will not be able to re-submit their Passport to you until the errors are fixed (the contents of the field for which you returned the error must change). Returns True on success.
Use this if the data submitted by the user doesn't satisfy the standards your service requires for any reason. For example, if a birthday date seems invalid, a submitted document is blurry, a scan shows evidence of tampering, etc. Supply some details in the error message to make sure the user knows how to correct the issues.
https://core.telegram.org/bots/api#setpassportdataerrors
Parameters:
:param user_id: User identifier
:type user_id: int
:param errors: A JSON-serialized array describing the errors
:type errors: list of pytgbot.api_types.sendable.passport.PassportElementError
Returns:
:return: Returns True on success
:rtype: bool |
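A heavily hedged call sketch; the PassportElementErrorDataField keyword arguments below simply mirror the Telegram Bot API field names and are assumed to map onto pytgbot's constructor:
from pytgbot import Bot
from pytgbot.api_types.sendable.passport import PassportElementErrorDataField

bot = Bot("123456:API-TOKEN-PLACEHOLDER")
error = PassportElementErrorDataField(
    type="passport",                 # passport section containing the error
    field_name="birth_date",         # field that failed validation
    data_hash="BASE64-HASH",         # hash submitted with the original data
    message="Birth date looks invalid, please re-submit.",
)
ok = bot.set_passport_data_errors(123456789, [error])   # user_id, errors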
387,286 | def add_neighbor(self, edge: "Edge") -> None:
if edge is None or (edge.source != self and edge.target != self):
return
if edge.source == self:
other: Node = edge.target
elif edge.target == self:
other: Node = edge.source
else:
raise ValueError("Tried to add a neighbor with an invalid edge.")
edge_key: Tuple[int, int] = edge.key
if self._neighbors.get(edge_key) or self._neighbors.get((edge_key[1], edge_key[0])):
return
self._neighbors[edge_key] = edge
self.dispatch_event(NeighborAddedEvent(other)) | Adds a new neighbor to the node.
Arguments:
edge (Edge): The edge that would connect this node with its neighbor. |
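A compact plain-Python sketch of the same idea, keying neighbors by an order-insensitive edge key so the same edge is never stored twice (not the library's Node/Edge classes):
class SimpleNode:
    def __init__(self, key: int):
        self.key = key
        self._neighbors = {}          # (low_key, high_key) -> neighboring node

    def add_neighbor(self, other: "SimpleNode") -> None:
        edge_key = tuple(sorted((self.key, other.key)))
        if edge_key in self._neighbors:
            return                    # duplicate edge, ignore
        self._neighbors[edge_key] = other

a, b = SimpleNode(1), SimpleNode(2)
a.add_neighbor(b)
a.add_neighbor(b)                     # no-op: same normalized edge key
print(len(a._neighbors))              # 1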
387,287 | def header(self, array):
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self | Specify the header of the table |
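Presumably this is the texttable package; a minimal usage sketch for the header call:
from texttable import Texttable

table = Texttable()
table.header(["Name", "Age"])   # the method documented above
table.add_row(["Alice", 30])
table.add_row(["Bob", 25])
print(table.draw())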
387,288 | def relations_used(self):
g = self.get_graph()
types = set()
for (x,y,d) in g.edges(data=True):
types.add(d[])
return list(types) | Return list of all relations used to connect edges |
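The edge-attribute key is elided in the snippet above; a rough networkx illustration of collecting the distinct values of an edge attribute, here assumed to be called 'pred':
import networkx as nx

g = nx.MultiDiGraph()
g.add_edge("nucleus", "cell", pred="part_of")
g.add_edge("neuron", "cell", pred="is_a")
g.add_edge("axon", "neuron", pred="part_of")

relations = {d["pred"] for _, _, d in g.edges(data=True)}
print(sorted(relations))   # ['is_a', 'part_of']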
387,289 | def _build_environ(self) -> Dict[str, Optional[str]]:
d: Dict[str, Optional[str]] = {}
if self.__config__.case_insensitive:
env_vars = {k.lower(): v for k, v in os.environ.items()}
else:
env_vars = cast(Dict[str, str], os.environ)
for field in self.__fields__.values():
if field.has_alias:
env_name = field.alias
else:
env_name = self.__config__.env_prefix + field.name.upper()
env_name_ = env_name.lower() if self.__config__.case_insensitive else env_name
env_val = env_vars.get(env_name_, None)
if env_val:
if field.is_complex():
try:
env_val = json.loads(env_val)
except ValueError as e:
raise SettingsError(f) from e
d[field.alias] = env_val
return d | Build environment variables suitable for passing to the Model. |
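From the user's side this private helper is the machinery behind pydantic's (v1-era) BaseSettings; a small sketch in which the environment variable names follow env_prefix plus the upper-cased field name:
import os
from pydantic import BaseSettings   # pydantic v1; moved to pydantic-settings in v2

class AppSettings(BaseSettings):
    debug: bool = False
    database_url: str = "sqlite:///local.db"

    class Config:
        env_prefix = "MYAPP_"        # fields map to MYAPP_DEBUG, MYAPP_DATABASE_URL

os.environ["MYAPP_DEBUG"] = "true"
settings = AppSettings()
print(settings.debug)                # True, parsed from the environment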
387,290 | def transfer_size(self):
ts = self.attributes[]
if isinstance(ts, six.string_types):
ts = shlex.split(ts)
ts = [str(e) for e in ts]
return ts | Size of transfer in bytes (e.g.: 8, 4k, 2m, 1g) |
387,291 | def ask(question):
while True:
ans = input(question)
al = ans.lower()
if match(, al):
return True
elif match(, al):
return False
elif match(, al):
stdout.write(CYAN)
print("\nGoodbye.\n")
stdout.write(RESET)
quit()
else:
stdout.write(RED)
print("%s is invalid. Enter (y)es, (n)o or (q)uit." % ans)
stdout.write(RESET) | Infinite loop to get yes or no answer or quit the script. |
387,292 | def find_all_files(glob):
for finder in finders.get_finders():
for path, storage in finder.list([]):
if fnmatch.fnmatchcase(os.path.join(getattr(storage, , )
or , path),
glob):
yield path, storage | Finds all files in the django finders for a given glob,
returns the file path, if available, and the django storage object.
storage objects must implement the File storage API:
https://docs.djangoproject.com/en/dev/ref/files/storage/ |
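A short call-site sketch for the generator above; it assumes Django settings are already configured so the staticfiles finders can run:
for path, storage in find_all_files("*.css"):
    with storage.open(path) as handle:            # File storage API: open by relative path
        print(path, len(handle.read()), "bytes")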
387,293 | def get(self, path, content=True, type=None, format=None, load_alternative_format=True):
path = path.strip()
ext = os.path.splitext(path)[1]
if not self.exists(path) or (type != if type else ext not in self.all_nb_extensions()):
return super(TextFileContentsManager, self).get(path, content, type, format)
fmt = preferred_format(ext, self.preferred_jupytext_formats_read)
if ext == :
model = self._notebook_model(path, content=content)
else:
self.set_default_format_options(fmt, read=True)
with mock.patch(, _jupytext_reads(fmt)):
model = self._notebook_model(path, content=content)
if not load_alternative_format:
return model
if not content:
if path not in self.paired_notebooks:
return model
fmt, formats = self.paired_notebooks.get(path)
for alt_path, _ in paired_paths(path, fmt, formats):
if alt_path != path and self.exists(alt_path):
alt_model = self._notebook_model(alt_path, content=False)
if alt_model[] > model[]:
model[] = alt_model[]
return model
nbk = model[]
jupytext_formats = nbk.metadata.get(, {}).get() or self.default_formats(path)
jupytext_formats = long_form_multiple_formats(jupytext_formats)
alt_paths = [(path, fmt)]
if jupytext_formats:
try:
_, fmt = find_base_path_and_format(path, jupytext_formats)
alt_paths = paired_paths(path, fmt, jupytext_formats)
self.update_paired_notebooks(path, fmt, jupytext_formats)
except InconsistentPath as err:
self.log.info("Unable to read paired notebook: %s", str(err))
else:
if path in self.paired_notebooks:
fmt, formats = self.paired_notebooks.get(path)
alt_paths = paired_paths(path, fmt, formats)
if len(alt_paths) > 1 and ext == :
jupytext_metadata = model[][].get(, {})
self.set_default_format_options(jupytext_metadata, read=True)
if jupytext_metadata:
model[][][] = jupytext_metadata
org_model = model
fmt_inputs = fmt
path_inputs = path_outputs = path
model_outputs = None
if path.endswith():
for alt_path, alt_fmt in alt_paths:
if not alt_path.endswith() and self.exists(alt_path):
self.log.info(u.format(alt_path))
path_inputs = alt_path
fmt_inputs = alt_fmt
model_outputs = model
model = self.get(alt_path, content=content, type=type, format=format,
load_alternative_format=False)
break
else:
for alt_path, _ in alt_paths:
if alt_path.endswith() and self.exists(alt_path):
self.log.info(u.format(alt_path))
path_outputs = alt_path
model_outputs = self.get(alt_path, content=content, type=type, format=format,
load_alternative_format=False)
break
try:
check_file_version(model[], path_inputs, path_outputs)
except Exception as err:
raise HTTPError(400, str(err))
seems more recent than {src} (last modified {src_last})
Please either:
- open {src} in a text editor, make sure it is up to date, and save it,
- or delete {src} if not up to date,
- or increase check margin by adding, say,
c.ContentsManager.outdated_text_notebook_margin = 5
to your .jupyter/jupyter_notebook_config.py file
last_modifiedlast_modifiedcontentcontent.ipynbcontentjupytextmain_languagepythonkernelspecpythonkernelspeccontentcodetrustedtrustedpathpathnamename']
return model | Takes a path for an entity and returns its model |
387,294 | def write_file(content, *path):
with open(os.path.join(*path), "w") as file:
return file.write(content) | Simply write some content to a file, overriding the file if necessary. |
387,295 | def clean_whitespace(statement):
import re
statement.text = statement.text.replace(, ).replace(, ).replace(, )
statement.text = statement.text.strip()
statement.text = re.sub(, , statement.text)
return statement | Remove any consecutive whitespace characters from the statement text. |
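The regex lost its pattern during extraction; collapsing runs of whitespace is conventionally written as in this rough sketch (the exact upstream pattern is unknown):
import re

text = "hello \t\t   world\n"
collapsed = re.sub(r"\s+", " ", text).strip()
print(repr(collapsed))   # 'hello world'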
387,296 | def post(self, res_path, data=None, files=None, timeout=10.):
resp = requests.post(
self.__res_uri(res_path),
data=data,
files=files,
headers=self.__headers(),
verify=False,
auth=self.__auth(),
timeout=timeout
)
return (
resp.status_code,
json.loads(resp.text)
) | Post operation.
:param str res_path:
Resource path.
:param list data:
Request parameters for data.
:param list files:
Request parameters for files.
:param float timeout:
Timeout in seconds.
:rtype:
tuple
:return:
Tuple with status code and response body. |
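A hedged call-site sketch for the wrapper above; 'client' is an instance of the class defining post(), and the resource path and payload are placeholders:
status, body = client.post(
    "devices/42/commands",                  # resource path appended to the base URI
    data={"action": "reboot"},              # form fields
    files={"log": open("boot.log", "rb")},  # optional multipart upload
    timeout=5.0,
)
if status == 200:
    print(body)                             # parsed JSON response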
387,297 | def handle_get_token(self, req):
try:
pathsegs = split_path(req.path_info, minsegs=1, maxsegs=3,
rest_with_last=True)
except ValueError:
return HTTPNotFound(request=req)
if pathsegs[0] == and pathsegs[2] == :
account = pathsegs[1]
user = req.headers.get()
if not user:
user = unquote(req.headers.get(, ))
if not user or not in user:
return HTTPUnauthorized(request=req)
account2, user = user.split(, 1)
if account != account2:
return HTTPUnauthorized(request=req)
key = req.headers.get()
if not key:
key = unquote(req.headers.get(, ))
elif pathsegs[0] in (, ):
user = unquote(req.headers.get(, ))
if not user:
user = req.headers.get()
if not user or not in user:
return HTTPUnauthorized(request=req)
account, user = user.split(, 1)
key = unquote(req.headers.get(, ))
if not key:
key = req.headers.get()
else:
return HTTPBadRequest(request=req)
if not all((account, user, key)):
return HTTPUnauthorized(request=req)
if user == and self.super_admin_key and \
key == self.super_admin_key:
token = self.get_itoken(req.environ)
url = % (self.dsc_url, self.reseller_prefix)
return Response(
request=req,
content_type=CONTENT_TYPE_JSON,
body=json.dumps({: {: ,
: url}}),
headers={: token,
: token,
: url})
path = quote( % (self.auth_account, account, user))
resp = self.make_pre_authed_request(
req.environ, , path).get_response(self.app)
if resp.status_int == 404:
return HTTPUnauthorized(request=req)
if resp.status_int // 100 != 2:
raise Exception( %
(path, resp.status))
user_detail = json.loads(resp.body)
if not self.credentials_match(user_detail, key):
return HTTPUnauthorized(request=req)
if not token:
| Handles the various `request for token and service end point(s)` calls.
There are various formats to support the various auth servers in the
past. Examples::
GET <auth-prefix>/v1/<act>/auth
X-Auth-User: <act>:<usr> or X-Storage-User: <usr>
X-Auth-Key: <key> or X-Storage-Pass: <key>
GET <auth-prefix>/auth
X-Auth-User: <act>:<usr> or X-Storage-User: <act>:<usr>
X-Auth-Key: <key> or X-Storage-Pass: <key>
GET <auth-prefix>/v1.0
X-Auth-User: <act>:<usr> or X-Storage-User: <act>:<usr>
X-Auth-Key: <key> or X-Storage-Pass: <key>
Values should be url encoded, "act%3Ausr" instead of "act:usr" for
example; however, for backwards compatibility the colon may be included
unencoded.
On successful authentication, the response will have X-Auth-Token and
X-Storage-Token set to the token to use with Swift and X-Storage-URL
set to the URL to the default Swift cluster to use.
The response body will be set to the account's services JSON object as
described here::
{"storage": { # Represents the Swift storage service end points
"default": "cluster1", # Indicates which cluster is the default
"cluster1": "<URL to use with Swift>",
# A Swift cluster that can be used with this account,
# "cluster1" is the name of the cluster which is usually a
# location indicator (like "dfw" for a datacenter region).
"cluster2": "<URL to use with Swift>"
# Another Swift cluster that can be used with this account,
# there will always be at least one Swift cluster to use or
# this whole "storage" dict won't be included at all.
},
"servers": { # Represents the Nova server service end points
# Expected to be similar to the "storage" dict, but not
# implemented yet.
},
# Possibly other service dicts, not implemented yet.
}
One can also include an "X-Auth-New-Token: true" header to
force issuing a new token and revoking any old token, even if
it hasn't expired yet.
:param req: The swob.Request to process.
:returns: swob.Response, 2xx on success with data set as explained
above. |
387,298 | def browse(self, ml_item=None, start=0, max_items=100,
full_album_art_uri=False, search_term=None, subcategories=None):
if ml_item is None:
search =
else:
search = ml_item.item_id
if subcategories is not None:
for category in subcategories:
search += + url_escape_path(really_unicode(category))
if search_term is not None:
search += + url_escape_path(really_unicode(search_term))
try:
response, metadata = \
self._music_lib_search(search, start, max_items)
except SoCoUPnPException as exception:
if exception.error_code == '701':
return SearchResult([], , 0, 0, None)
else:
raise exception
metadata[] =
containers = from_didl_string(response[])
item_list = []
for container in containers:
if full_album_art_uri:
self._update_album_art_to_full_uri(container)
item_list.append(container)
return SearchResult(item_list, **metadata) | Browse (get sub-elements from) a music library item.
Args:
ml_item (`DidlItem`): the item to browse, if left out or
`None`, items at the root level will be searched.
start (int): the starting index of the results.
max_items (int): the maximum number of items to return.
full_album_art_uri (bool): whether the album art URI should be
fully qualified with the relevant IP address.
search_term (str): A string that will be used to perform a fuzzy
search among the search results. If used in combination with
subcategories, the fuzzy search will be performed on the
subcategory. Note: Searching will not work if ``ml_item`` is
`None`.
subcategories (list): A list of strings that indicate one or more
subcategories to descend into. Note: Providing sub categories
will not work if ``ml_item`` is `None`.
Returns:
A `SearchResult` instance.
Raises:
AttributeError: if ``ml_item`` has no ``item_id`` attribute.
SoCoUPnPException: with ``error_code='701'`` if the item cannot be
browsed. |
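A minimal sketch of browsing a Sonos music library with SoCo; depending on the SoCo version, browse() lives on the device itself or on device.music_library, and the IP address is a placeholder:
from soco import SoCo

device = SoCo("192.168.1.42")               # placeholder speaker IP
library = device.music_library              # newer SoCo exposes browse() here
result = library.browse(max_items=10)       # root-level containers
for item in result:
    print(item.title)
print("total matches:", result.total_matches)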
387,299 | def _run_paired(paired):
from bcbio.structural import titancna
work_dir = _sv_workdir(paired.tumor_data)
seg_files = model_segments(tz.get_in(["depth", "bins", "normalized"], paired.tumor_data),
work_dir, paired)
call_file = call_copy_numbers(seg_files["seg"], work_dir, paired.tumor_data)
out = []
if paired.normal_data:
out.append(paired.normal_data)
if "sv" not in paired.tumor_data:
paired.tumor_data["sv"] = []
paired.tumor_data["sv"].append({"variantcaller": "gatk-cnv",
"call_file": call_file,
"vrn_file": titancna.to_vcf(call_file, "GATK4-CNV", _get_seg_header,
_seg_to_vcf, paired.tumor_data),
"seg": seg_files["seg"],
"plot": plot_model_segments(seg_files, work_dir, paired.tumor_data)})
out.append(paired.tumor_data)
return out | Run somatic variant calling pipeline. |