Unnamed: 0 (int64, 0–389k) | code (stringlengths 26–79.6k) | docstring (stringlengths 1–46.9k) |
---|---|---|
24,300 |
def trigger_event(name,
                  event,
                  value1=None,
                  value2=None,
                  value3=None
                  ):
    ret = {'name': name,
           'changes': {},
           'result': False,
           'comment': ''}
    if __opts__['test']:
        ret['comment'] = 'The event {0} would have been triggered.'.format(event)
        ret['result'] = None
        return ret
    ret['result'] = __salt__['ifttt.trigger_event'](
        event=event,
        value1=value1,
        value2=value2,
        value3=value3
    )
    if ret and ret['result']:
        ret['result'] = True
        ret['comment'] = 'Triggered Event: {0}'.format(name)
    else:
        ret['comment'] = 'Failed to trigger event: {0}'.format(name)
    return ret
|
Trigger an event in IFTTT
.. code-block:: yaml
ifttt-event:
ifttt.trigger_event:
- event: TestEvent
- value1: 'A value that we want to send.'
- value2: 'A second value that we want to send.'
- value3: 'A third value that we want to send.'
The following parameters are required:
name
The unique name for this event.
event
The name of the event to trigger in IFTTT.
The following parameters are optional:
value1
    One of the values that we can send to IFTTT.
value2
    One of the values that we can send to IFTTT.
value3
    One of the values that we can send to IFTTT.
|
24,301 |
def gini(data):
flattened = np.sort(np.ravel(data))
N = np.size(flattened)
normalization = 1. / (np.abs(np.mean(flattened)) * N * (N - 1))
kernel = (2 * np.arange(1, N + 1) - N - 1) * np.abs(flattened)
G = normalization * np.sum(kernel)
return G
|
Calculate the `Gini coefficient
<https://en.wikipedia.org/wiki/Gini_coefficient>`_ of a 2D array.
The Gini coefficient is calculated using the prescription from `Lotz
et al. 2004 <http://adsabs.harvard.edu/abs/2004AJ....128..163L>`_
as:
.. math::
G = \\frac{1}{\\left | \\bar{x} \\right | n (n - 1)}
\\sum^{n}_{i} (2i - n - 1) \\left | x_i \\right |
where :math:`\\bar{x}` is the mean over all pixel values
:math:`x_i`.
The Gini coefficient is a way of measuring the inequality in a given
set of values. In the context of galaxy morphology, it measures how
the light of a galaxy image is distributed among its pixels. A
``G`` value of 0 corresponds to a galaxy image with the light evenly
distributed over all pixels while a ``G`` value of 1 represents a
galaxy image with all its light concentrated in just one pixel.
Usually Gini's measurement needs some sort of preprocessing for
defining the galaxy region in the image based on the quality of the
input data. As there is not a general standard for doing this, this
is left for the user.
Parameters
----------
data : array-like
The 2D data array or object that can be converted to an array.
Returns
-------
gini : `float`
The Gini coefficient of the input 2D array.
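A quick sanity check of the two limiting cases described above, assuming ``numpy`` and the ``gini`` function as defined here:
.. code-block:: python

    import numpy as np

    # Evenly distributed light: every pixel is equal, so G should be 0.
    uniform = np.ones((10, 10))
    print(round(gini(uniform), 6))   # 0.0

    # All light in a single pixel: G should be exactly 1.
    point = np.zeros((10, 10))
    point[5, 5] = 1.0
    print(round(gini(point), 6))     # 1.0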
|
24,302 |
def single_device(cl_device_type='GPU', platform=None, fallback_to_any_device_type=False):
if isinstance(cl_device_type, str):
cl_device_type = device_type_from_string(cl_device_type)
device = None
if platform is None:
platforms = cl.get_platforms()
else:
platforms = [platform]
for platform in platforms:
devices = platform.get_devices(device_type=cl_device_type)
for dev in devices:
if device_supports_double(dev):
try:
env = CLEnvironment(platform, dev)
return [env]
except cl.RuntimeError:
pass
if not device:
if fallback_to_any_device_type:
return cl.get_platforms()[0].get_devices()
else:
                raise ValueError('No devices of type {} found.'.format(
cl.device_type.to_string(cl_device_type)))
raise ValueError()
|
Get a list containing a single device environment, for a device of the given type on the given platform.
This will only fetch devices that support double (possibly only double with a pragma
defined, but still, it should support double).
Args:
    cl_device_type (cl.device_type.* or string): The type of the device we want;
        can be an OpenCL device type or a string matching 'GPU', 'CPU' or 'ALL'.
    platform (opencl platform): The opencl platform to select the devices from
    fallback_to_any_device_type (boolean): If True, try to fall back to any possible device in the system.
Returns:
list of CLEnvironment: List with one element, the CL runtime environment requested.
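A short usage sketch (an assumption-laden example, not part of the original docstring; it presumes a double-precision-capable GPU is present):
.. code-block:: python

    # Ask for one GPU-backed environment; fall back to any device type
    # if no GPU in the system supports double precision.
    envs = single_device('GPU', fallback_to_any_device_type=True)
    env = envs[0]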
|
24,303 |
def get_device(self, id=None):
if id is None:
if not self.devices:
raise ValueError( % self.hardware_id)
id, (device, version) = sorted(self.devices.items())[0]
elif id in self.devices:
device, version = self.devices[id]
else:
error = % id
log.error(error)
raise ValueError(error)
log.info("Using COM Port: %s, Device ID: %s, Device Ver: %s",
device, id, version)
return id, device, version
|
Returns details of either the first or specified device
:param int id: Identifier of desired device. If not given, first device
found will be returned
:returns tuple: Device ID, Device Address, Firmware Version
|
24,304 |
def tpictr(sample, lenout=_default_len_out, lenerr=_default_len_out):
sample = stypes.stringToCharP(sample)
pictur = stypes.stringToCharP(lenout)
errmsg = stypes.stringToCharP(lenerr)
lenout = ctypes.c_int(lenout)
lenerr = ctypes.c_int(lenerr)
ok = ctypes.c_int()
libspice.tpictr_c(sample, lenout, lenerr, pictur, ctypes.byref(ok), errmsg)
return stypes.toPythonString(pictur), ok.value, stypes.toPythonString(
errmsg)
|
Given a sample time string, create a time format picture
suitable for use by the routine timout.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tpictr_c.html
:param sample: A sample time string.
:type sample: str
:param lenout: The length for the output picture string.
:type lenout: int
:param lenerr: The length for the output error string.
:type lenerr: int
:return:
A format picture that describes sample,
Flag indicating whether sample parsed successfully,
Diagnostic returned if sample cannot be parsed
:rtype: tuple
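A usage sketch based on the sample string from the linked CSPICE documentation (assuming the wrapper is exposed as ``spiceypy.tpictr``; the exact picture string is whatever the toolkit returns):
.. code-block:: python

    import spiceypy

    pictur, ok, errmsg = spiceypy.tpictr("Sun Dec 15 14:19:57 PDT 1991")
    if ok:
        # ``pictur`` can now be passed to timout() to format other epochs
        # in the same style as the sample string.
        print(pictur)
    else:
        print(errmsg)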
|
24,305 |
def search(self, **kwargs):
match = False
r_query = {}
myself = self.__class__.__name__
for k, value in six.iteritems(kwargs):
k_pop = query_pop(k, myself)
if k_pop:
r_query[k_pop] = value
if not r_query:
return False
for key in r_query:
if hasattr(self, key):
match |= match_query(getattr(self, key), r_query[key])
if not match:
for attr in dir(self):
obj = getattr(self, attr)
if isinstance(obj, JObject):
match |= obj.search(**r_query)
return match
|
Query this object (and its descendants).
Parameters
----------
kwargs
Each `(key, value)` pair encodes a search field in `key`
and a target value in `value`.
`key` must be a string, and should correspond to a property in
the JAMS object hierarchy, e.g., `Annotation.namespace` or `email`
`value` must be either an object (tested for equality), a
string describing a search pattern (regular expression), or a
lambda function which evaluates to `True` if the candidate
object matches the search criteria and `False` otherwise.
Returns
-------
match : bool
`True` if any of the search keys match the specified value,
`False` otherwise, or if the search keys do not exist
within the object.
Examples
--------
>>> J = jams.JObject(foo=5, needle='quick brown fox')
>>> J.search(needle='.*brown.*')
True
>>> J.search(needle='.*orange.*')
False
>>> J.search(badger='.*brown.*')
False
>>> J.search(foo=5)
True
>>> J.search(foo=10)
False
>>> J.search(foo=lambda x: x < 10)
True
>>> J.search(foo=lambda x: x > 10)
False
|
24,306 |
def _load_root_directory(self):
kwargs = self._req_directory(0)
self._root_directory = Directory(api=self, **kwargs)
|
Load root directory, which has a cid of 0
|
24,307 |
def forbild(space, resolution=False, ear=True, value_type='density',
            scale='auto'):
    def transposeravel(arr):
        return arr.T.ravel()
    if not isinstance(space, DiscreteLp):
        raise TypeError('`space` must be a `DiscreteLp` instance')
    if space.ndim != 2:
        raise TypeError('`space` must be two-dimensional')
    scale, scale_in = str(scale).lower(), scale
    value_type, value_type_in = str(value_type).lower(), value_type
    phantomE, phantomC = _analytical_forbild_phantom(resolution, ear)
    xcoord, ycoord = space.points().T
    if scale == 'auto':
        xcoord = ((xcoord - space.min_pt[0]) /
                  (space.max_pt[0] - space.min_pt[0]))
        xcoord = 25.8 * xcoord - 12.8
        ycoord = ((ycoord - space.min_pt[1]) /
                  (space.max_pt[1] - space.min_pt[1]))
        ycoord = 25.8 * ycoord - 12.8
    elif scale == 'cm':
        pass
    elif scale == 'm':
        xcoord *= 100.0
        ycoord *= 100.0
    elif scale == 'mm':
        xcoord /= 10.0
        ycoord /= 10.0
    else:
        raise ValueError('unknown `scale` {!r}'.format(scale_in))
image = np.zeros(space.size)
nclipinfo = 0
for k in range(phantomE.shape[0]):
Vx0 = np.array([transposeravel(xcoord) - phantomE[k, 0],
transposeravel(ycoord) - phantomE[k, 1]])
D = np.array([[1 / phantomE[k, 2], 0],
[0, 1 / phantomE[k, 3]]])
phi = np.deg2rad(phantomE[k, 4])
Q = np.array([[np.cos(phi), np.sin(phi)],
[-np.sin(phi), np.cos(phi)]])
f = phantomE[k, 5]
nclip = int(phantomE[k, 6])
equation1 = np.sum(D.dot(Q).dot(Vx0) ** 2, axis=0)
i = (equation1 <= 1.0)
for _ in range(nclip):
d = phantomC[0, nclipinfo]
psi = np.deg2rad(phantomC[1, nclipinfo])
equation2 = np.array([np.cos(psi), np.sin(psi)]).dot(Vx0)
i &= (equation2 < d)
nclipinfo += 1
image[i] += f
    if value_type == 'materials':
materials = np.zeros(space.size, dtype=space.dtype)
materials[(image > 1.043) & (image <= 1.047)] = 1
materials[(image > 1.047) & (image <= 1.048)] = 2
materials[(image > 1.048) & (image <= 1.052)] = 3
materials[(image > 1.052) & (image <= 1.053)] = 4
materials[(image > 1.053) & (image <= 1.058)] = 5
materials[(image > 1.058) & (image <= 1.062)] = 6
materials[image > 1.75] = 7
return space.element(materials.reshape(space.shape))
    elif value_type == 'density':
return space.element(image.reshape(space.shape))
else:
        raise ValueError("unknown `value_type` {!r}".format(value_type_in))
|
Standard FORBILD phantom in 2 dimensions.
The FORBILD phantom is intended for testing CT algorithms and is intended
to be similar to a human head.
The phantom is defined using the following materials:
========================= ===== ================
Material Index Density (g/cm^3)
========================= ===== ================
Air 0 0.0000
Cerebrospinal fluid (CSF) 1 1.0450
Small less dense sphere 2 1.0475
Brain 3 1.0500
Small more dense sphere 4 1.0525
Blood 5 1.0550
Eyes 6 1.0600
Bone 7 1.8000
========================= ===== ================
Parameters
----------
space : `DiscreteLp`
    The space in which the phantom should be created. Needs to be two-
    dimensional.
resolution : bool, optional
If ``True``, insert a small resolution test pattern to the left.
ear : bool, optional
If ``True``, insert an ear-like structure to the right.
value_type : {'density', 'materials'}, optional
The format the phantom should be given in.
'density' returns floats in the range [0, 1.8] (g/cm^3)
'materials' returns indices in the range [0, 7].
scale : {'auto', 'cm', 'm', 'mm'}, optional
Controls how ``space`` should be rescaled to fit the definition of
the forbild phantom, which is defined on the square
[-12.8, 12.8] x [-12.8, 12.8] cm.
* ``'auto'`` means that space is rescaled to fit exactly. The space is
also centered at [0, 0].
* ``'cm'`` means the dimensions of the space should be used as-is.
* ``'m'`` means all dimensions of the space are multiplied by 100.
* ``'mm'`` means all dimensions of the space are divided by 10.
Returns
-------
forbild : ``space``-element
FORBILD phantom discretized by ``space``.
See Also
--------
shepp_logan : A simpler phantom for similar purposes, also working in 3d.
References
----------
.. _FORBILD phantom: www.imp.uni-erlangen.de/phantoms/head/head.html
.. _algorithm: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3426508/
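A usage sketch, assuming ODL is installed and ``forbild`` is importable (for instance from ``odl.phantom``); the essential point is passing a two-dimensional ``DiscreteLp``:
.. code-block:: python

    import odl

    # Space covering the phantom's native [-12.8, 12.8] x [-12.8, 12.8] cm square.
    space = odl.uniform_discr([-12.8, -12.8], [12.8, 12.8], shape=[512, 512])
    phantom = forbild(space, value_type='density', scale='cm')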
|
24,308 |
def sector(self, start_ray, end_ray, start_distance=None, end_distance=None, units='b'):
if self.dataset is None:
raise ValueError()
ray_max, distance_max = self.dataset.shape
if start_ray > ray_max:
raise ValueError()
if start_ray < 0:
raise ValueError()
if start_distance is None:
start_distance_index = 0
else:
            if units == 'b':
                start_distance_index = start_distance
            elif units == 'm':
try:
rscale = next(self.attr_gen()).value
except:
raise MissingMetadataError
start_distance_index = int(start_distance / rscale)
if end_distance is None:
end_distance_index = self.dataset.shape[1]
else:
            if units == 'b':
                end_distance_index = end_distance
            elif units == 'm':
end_distance_index = int(end_distance / rscale)
if end_ray is None:
sector = self.dataset[start_ray, start_distance_index:end_distance_index]
else:
if start_ray <= end_ray:
sector = self.dataset[start_ray:end_ray+1, start_distance_index:end_distance_index]
else:
sector1 = self.dataset[start_ray:, start_distance_index:end_distance_index]
sector2 = self.dataset[:end_ray+1, start_distance_index:end_distance_index]
sector = np.concatenate((sector1, sector2), axis=0)
return sector
|
Slices a sector from the selected dataset.
Slice contains the start and end rays. If start and end rays are equal
one ray is returned. If the start_ray is greater than the end_ray
slicing continues over the 359-0 border.
Parameters
----------
start_ray : int
    Starting ray of the slice, the first ray is 0
end_ray : int
    End ray of the slice, the last ray is 359
Keywords
--------
start_distance : int
    Starting distance of the slice; if not defined, the sector starts
    from zero
end_distance : int
    Ending distance of the slice; if not defined, the sector continues to
    the end of the dataset
units : str
Units used in distance slicing. Option 'b' means that bin number
is used as index. Option 'm' means that meters are used and the
slicing index is calculated using bin width.
Returns
-------
sector : ndarray
Numpy array containing the sector values
Examples
--------
Get one ray from the selected dataset
>>> pvol = odimPVOL('pvol.h5')
>>> pvol.select_dataset('A', 'DBZH')
>>> ray = pvol.sector(10, 10)
Get sector from selected dataset, rays from 100 to 200
at distances from 5 km to 10 km.
>>> pvol = odimPVOL('pvol.h5')
>>> pvol.select_dataset('A', 'DBZH')
>>> sector = pvol.sector(100, 200, 5000, 10000)
|
24,309 |
def load_model(self, name=None):
if self.clobber:
return False
if name is None:
name = self.name
file = os.path.join(self.dir, '%s.npz' % name)
if os.path.exists(file):
if not self.is_parent:
log.info("Loading ..." % name)
try:
data = np.load(file)
for key in data.keys():
try:
setattr(self, key, data[key][()])
except NotImplementedError:
pass
pl.close()
return True
except:
log.warn("Error loading ." % name)
exctype, value, tb = sys.exc_info()
for line in traceback.format_exception_only(exctype, value):
ln = line.replace('\n', '')
log.warn(ln)
os.rename(file, file + '.bad')
if self.is_parent:
raise Exception(
    'Unable to load saved model %s (ID %s).' % (self.name, self.ID))
return False
|
Loads a saved version of the model.
|
24,310 |
def open(filename, mode="rb",
format=None, check=-1, preset=None, filters=None,
encoding=None, errors=None, newline=None):
if "t" in mode:
if "b" in mode:
raise ValueError("Invalid mode: %r" % (mode,))
else:
if encoding is not None:
raise ValueError("Argument not supported in binary mode")
if errors is not None:
raise ValueError("Argument not supported in binary mode")
if newline is not None:
raise ValueError("Argument not supported in binary mode")
lz_mode = mode.replace("t", "")
binary_file = LZMAFile(filename, lz_mode, format=format, check=check,
preset=preset, filters=filters)
if "t" in mode:
return io.TextIOWrapper(binary_file, encoding, errors, newline)
else:
return binary_file
|
Open an LZMA-compressed file in binary or text mode.
filename can be either an actual file name (given as a str or bytes object),
in which case the named file is opened, or it can be an existing file object
to read from or write to.
The mode argument can be "r", "rb" (default), "w", "wb", "a", or "ab" for
binary mode, or "rt", "wt" or "at" for text mode.
The format, check, preset and filters arguments specify the compression
settings, as for LZMACompressor, LZMADecompressor and LZMAFile.
For binary mode, this function is equivalent to the LZMAFile constructor:
LZMAFile(filename, mode, ...). In this case, the encoding, errors and
newline arguments must not be provided.
For text mode, a LZMAFile object is created, and wrapped in an
io.TextIOWrapper instance with the specified encoding, error handling
behavior, and line ending(s).
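A short round-trip sketch using this module's ``open`` (not the builtin); the file name is arbitrary:
.. code-block:: python

    # Text mode: the LZMAFile is wrapped in an io.TextIOWrapper.
    with open("example.xz", "wt", encoding="utf-8") as f:
        f.write("hello\n")
    with open("example.xz", "rt", encoding="utf-8") as f:
        print(f.read())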
|
24,311 |
def generate_tree_path(fileDigest, depth):
if(depth < 0):
raise Exception("depth level can not be negative")
if(os.path.split(fileDigest)[1] != fileDigest):
raise Exception("fileDigest cannot contain path separator")
min = (2**(depth + 1)) - 1
if(len(fileDigest) < min):
raise Exception("fileDigest too short for the given depth")
path = ""
index = 0
for p in range(1, depth + 1):
jump = 2**p
path = os.path.join(path, fileDigest[index:index + jump])
index += jump
path = os.path.join(path, fileDigest[index:])
return path
|
Generate a relative path from the given fileDigest
The relative path has a number of directory levels according to @depth.
Args:
    fileDigest -- digest for which the relative path will be generated
    depth -- number of levels to use in relative path generation
Returns:
relative path for the given digest
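A worked example (the digest below is made up; separators shown as on POSIX): with ``depth=2`` the first level takes 2 characters, the second 4, and the remainder becomes the final component.
>>> generate_tree_path("0a1b2c3d4e", 2)
'0a/1b2c/3d4e'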
|
24,312 |
def _get_memory_banks_listed_in_dir(path):
return [int(entry[4:]) for entry in os.listdir(path) if entry.startswith()]
|
Get all memory banks the kernel lists in a given directory.
Such a directory can be /sys/devices/system/node/ (contains all memory banks)
or /sys/devices/system/cpu/cpu*/ (contains all memory banks on the same NUMA node as that core).
|
24,313 |
def read(self, source_path):
with pelican_open(source_path) as text:
text_lines = list(text.splitlines())
header, content = self._separate_header_and_content(text_lines)
metadatas = self._parse_metadatas(header)
metadatas_processed = {
key
: self.process_metadata(key, value)
for key, value in metadatas.items()
}
content_html = convert_html("\n".join(content),
highlight=self.code_highlight)
return content_html, metadatas_processed
|
Parse content and metadata of Org files
Keyword Arguments:
source_path -- Path to the Org file to parse
|
24,314 |
def save_token(token, domain=, token_file=None):
token_file = token_file or get_token_filename()
if os.path.exists(token_file):
saved_tokens = json.load(open(token_file))
else:
saved_tokens = {}
saved_tokens[domain] = token
directory, filename = os.path.split(token_file)
if directory and not os.path.exists(directory):
os.makedirs(directory)
with open(token_file, 'w') as f:
json.dump(saved_tokens, f)
|
Take a long-lived API token and store it to a local file. Long-lived
tokens can be retrieved through the UI. Optional arguments are the
domain for which the token is valid and the file in which to store the
token.
|
24,315 |
def convert_timedelta(duration):
days, seconds = duration.days, duration.seconds
hours = seconds // 3600
minutes = (seconds % 3600) // 60
seconds = (seconds % 60)
return days, hours, minutes, seconds
|
Summary:
Convert duration into component time units
Args:
:duration (datetime.timedelta): time duration to convert
Returns:
days, hours, minutes, seconds | TYPE: tuple (integers)
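A quick doctest-style example:
>>> from datetime import timedelta
>>> convert_timedelta(timedelta(days=2, hours=3, minutes=4, seconds=5))
(2, 3, 4, 5)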
|
24,316 |
def create_dataset(parent, path, overwrite=False, **kwargs):
if path in parent and overwrite:
del parent[path]
try:
return parent.create_dataset(path, **kwargs)
except RuntimeError as exc:
if str(exc) == :
exc.args = (
.format(str(exc), path),)
raise
|
Create a new dataset inside the parent HDF5 object
Parameters
----------
parent : `h5py.Group`, `h5py.File`
the object in which to create a new dataset
path : `str`
the path at which to create the new dataset
overwrite : `bool`
if `True`, delete any existing dataset at the desired path,
default: `False`
**kwargs
other arguments are passed directly to
:meth:`h5py.Group.create_dataset`
Returns
-------
dataset : `h5py.Dataset`
the newly created dataset
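A usage sketch (assumes ``h5py`` and ``numpy``; the file and dataset names are arbitrary):
.. code-block:: python

    import h5py
    import numpy

    with h5py.File("example.h5", "w") as h5file:
        dset = create_dataset(h5file, "group/data",
                              data=numpy.arange(10), overwrite=True)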
|
24,317 |
def chess960_pos(self) -> Optional[int]:
if self.occupied_co[WHITE] != BB_RANK_1 | BB_RANK_2:
return None
if self.occupied_co[BLACK] != BB_RANK_7 | BB_RANK_8:
return None
if self.pawns != BB_RANK_2 | BB_RANK_7:
return None
if self.promoted:
return None
brnqk = [self.bishops, self.rooks, self.knights, self.queens, self.kings]
if [popcount(pieces) for pieces in brnqk] != [4, 4, 4, 2, 2]:
return None
if any((BB_RANK_1 & pieces) << 56 != BB_RANK_8 & pieces for pieces in brnqk):
return None
x = self.bishops & (2 + 8 + 32 + 128)
if not x:
return None
bs1 = (lsb(x) - 1) // 2
cc_pos = bs1
x = self.bishops & (1 + 4 + 16 + 64)
if not x:
return None
bs2 = lsb(x) * 2
cc_pos += bs2
q = 0
qf = False
n0 = 0
n1 = 0
n0f = False
n1f = False
rf = 0
n0s = [0, 4, 7, 9]
for square in range(A1, H1 + 1):
bb = BB_SQUARES[square]
if bb & self.queens:
qf = True
elif bb & self.rooks or bb & self.kings:
if bb & self.kings:
if rf != 1:
return None
else:
rf += 1
if not qf:
q += 1
if not n0f:
n0 += 1
elif not n1f:
n1 += 1
elif bb & self.knights:
if not qf:
q += 1
if not n0f:
n0f = True
elif not n1f:
n1f = True
if n0 < 4 and n1f and qf:
cc_pos += q * 16
krn = n0s[n0] + n1
cc_pos += krn * 96
return cc_pos
else:
return None
|
Gets the Chess960 starting position index between 0 and 959
or ``None``.
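A round-trip sketch with the python-chess API this method is part of; the standard starting position corresponds to index 518:
.. code-block:: python

    import chess

    board = chess.Board()            # standard starting position
    print(board.chess960_pos())      # 518

    # Rebuild a board from an index and read the index back.
    print(chess.Board.from_chess960_pos(0).chess960_pos())   # 0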
|
24,318 |
def set_properties(self, properties):
if not isinstance(properties, dict):
raise TypeError("Waiting for dictionary")
for forbidden_key in OBJECTCLASS, SERVICE_ID:
try:
del properties[forbidden_key]
except KeyError:
pass
to_delete = []
for key, value in properties.items():
if self.__properties.get(key) == value:
to_delete.append(key)
for key in to_delete:
del properties[key]
if not properties:
return
try:
properties[SERVICE_RANKING] = int(properties[SERVICE_RANKING])
except (ValueError, TypeError):
del properties[SERVICE_RANKING]
except KeyError:
pass
with self.__reference._props_lock:
previous = self.__properties.copy()
self.__properties.update(properties)
if self.__reference.needs_sort_update():
self.__update_callback(self.__reference)
event = ServiceEvent(
ServiceEvent.MODIFIED, self.__reference, previous
)
self.__framework._dispatcher.fire_service_event(event)
|
Updates the service properties
:param properties: The new properties
:raise TypeError: The argument is not a dictionary
|
24,319 |
def get_bin_query_session(self, proxy):
if not self.supports_bin_query():
raise errors.Unimplemented()
return sessions.BinQuerySession(proxy=proxy, runtime=self._runtime)
|
Gets the bin query session.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.resource.BinQuerySession) - a ``BinQuerySession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_bin_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_bin_query()`` is ``true``.*
|
24,320 |
def get_file_hexdigest(filename, blocksize=1024*1024*10):
if hashlib.__name__ == 'hashlib':
m = hashlib.md5()
else:
m = hashlib.new()
fd = open(filename, 'rb')
while True:
data = fd.read(blocksize)
if len(data) == 0:
break
m.update(data)
fd.close()
return m.hexdigest()
|
Get a hex digest of a file.
|
24,321 |
def initreadtxt(self, idftxt):
iddfhandle = StringIO(iddcurrent.iddtxt)
if self.getiddname() == None:
self.setiddname(iddfhandle)
idfhandle = StringIO(idftxt)
self.idfname = idfhandle
self.read()
|
Use the current IDD and read an IDF from text data. If the IDD has not
yet been initialised then this is done first.
Parameters
----------
idftxt : str
Text representing an IDF file.
|
24,322 |
def dot(vec1, vec2):
if isinstance(vec1, Vector2) \
and isinstance(vec2, Vector2):
return ((vec1.X * vec2.X) + (vec1.Y * vec2.Y))
else:
raise TypeError("vec1 and vec2 must be Vector2's")
|
Calculate the dot product between two Vectors
|
24,323 |
def to_ped(self):
ped_header = [
,
,
,
,
,
,
]
extra_headers = [
,
,
,
]
for individual_id in self.individuals:
individual = self.individuals[individual_id]
for info in individual.extra_info:
if info in extra_headers:
if info not in ped_header:
ped_header.append(info)
self.logger.debug("Ped headers found: {0}".format(
.join(ped_header)
))
yield .join(ped_header)
for family_id in self.families:
for individual_id in self.families[family_id].individuals:
individual = self.families[family_id].individuals[individual_id].to_json()
ped_info = []
ped_info.append(individual[])
ped_info.append(individual[])
ped_info.append(individual[])
ped_info.append(individual[])
ped_info.append(individual[])
ped_info.append(individual[])
if len(ped_header) > 6:
for header in ped_header[6:]:
ped_info.append(individual[].get(header, ))
yield .join(ped_info)
|
Return a generator with the info in ped format.
Yields:
An iterator with the family info in ped format
|
24,324 |
def merge_into_group(self, group):
super(VerboseMixin, self).merge_into_group(group)
group.verbose_name = self.verbose_name
group.help_text = self.help_text
|
Redefines :meth:`~candv.base.Constant.merge_into_group` and adds
``verbose_name`` and ``help_text`` attributes to the target group.
|
24,325 |
def main(sample_id, trace_file, workdir):
stats_suffix = ".stats.json"
stats_path = join(workdir, sample_id + stats_suffix)
trace_path = join(workdir, trace_file)
logger.info("Starting pipeline status routine")
logger.debug("Checking for previous pipeline status data")
stats_array = get_previous_stats(stats_path)
logger.info("Stats JSON object set to : {}".format(stats_array))
tag = " getStats"
logger.debug("Tag variable set to: {}".format(tag))
logger.info("Starting parsing of trace file: {}".format(trace_path))
with open(trace_path) as fh:
header = next(fh).strip().split()
logger.debug("Header set to: {}".format(header))
for line in fh:
fields = line.strip().split("\t")
if tag in fields[2] and fields[3] == "COMPLETED":
logger.debug(
"Parsing trace line with COMPLETED status: {}".format(
line))
current_json = get_json_info(fields, header)
stats_array[fields[0]] = current_json
else:
logger.debug(
"Ignoring trace line without COMPLETED status"
" or stats specific tag: {}".format(
line))
with open(join(stats_path), "w") as fh, open(".report.json", "w") as rfh:
fh.write(json.dumps(stats_array, separators=(",", ":")))
rfh.write(json.dumps(stats_array, separators=(",", ":")))
|
Parses a nextflow trace file, searches for processes with a specific tag
and sends a JSON report with the relevant information
The expected fields for the trace file are::
0. task_id
1. process
2. tag
3. status
4. exit code
5. start timestamp
6. container
7. cpus
8. duration
9. realtime
10. queue
11. cpu percentage
12. memory percentage
13. real memory size of the process
14. virtual memory size of the process
Parameters
----------
trace_file : str
Path to the nextflow trace file
|
24,326 |
def parse_image_response(self, response):
if 'xml' in response.headers.get('Content-Type'):
xml = xmltodict.parse(response.text)
self.analyze_reply_code(xml_response_dict=xml)
obj = self._response_object_from_header(
obj_head_dict=response.headers,
content=response.content)
return obj
|
Parse a single object from the RETS feed
:param response: The response from the RETS server
:return: Object
|
24,327 |
def value(self):
binary = UBInt8(self.sub_type).pack() + self.sub_value.pack()
return BinaryData(binary)
|
Return sub type and sub value as binary data.
Returns:
:class:`~pyof.foundation.basic_types.BinaryData`:
BinaryData calculated.
|
24,328 |
def create_annotation(xml_file, from_fasst):
xml_file = Path(xml_file)
try:
        mat = loadmat(str(from_fasst), variable_names='D', struct_as_record=False,
                      squeeze_me=True)
    except ValueError:
        raise UnrecognizedFormat(str(from_fasst) + ' does not look like a FASST .mat file')
    D = mat['D']
info = D.other.info
score = D.other.CRC.score
microsecond, second = modf(info.hour[2])
start_time = datetime(*info.date, int(info.hour[0]), int(info.hour[1]),
int(second), int(microsecond * 1e6))
first_sec = score[3, 0][0]
last_sec = score[0, 0].shape[0] * score[2, 0]
root = Element()
root.set(, VERSION)
info = SubElement(root, )
x = SubElement(info, )
x.text = D.other.info.fname
x = SubElement(info, )
x.text = D.other.info.fname
x = SubElement(info, )
x.text = start_time.isoformat()
x = SubElement(info, )
x.text = str(int(first_sec))
x = SubElement(info, )
x.text = str(int(last_sec))
xml = parseString(tostring(root))
with xml_file.open() as f:
f.write(xml.toxml())
annot = Annotations(xml_file)
n_raters = score.shape[1]
for i_rater in range(n_raters):
rater_name = score[1, i_rater]
epoch_length = int(score[2, i_rater])
annot.add_rater(rater_name, epoch_length=epoch_length)
for epoch_start, epoch in enumerate(score[0, i_rater]):
if isnan(epoch):
continue
annot.set_stage_for_epoch(epoch_start * epoch_length,
FASST_STAGE_KEY[int(epoch)], save=False)
annot.save()
return annot
|
Create annotations by importing from FASST sleep scoring file.
Parameters
----------
xml_file : path to xml file
annotation file that will be created
from_fasst : path to FASST file
.mat file containing the scores
Returns
-------
instance of Annotations
TODO
----
Merge create_annotation and create_empty_annotations
|
24,329 |
def _split_markdown(text_url, tld_pos):
    left_bracket_pos = text_url.find('[')
    if left_bracket_pos > tld_pos - 3:
        return text_url
    right_bracket_pos = text_url.find(')')
if right_bracket_pos < tld_pos:
return text_url
middle_pos = text_url.rfind("](")
if middle_pos > tld_pos:
return text_url[left_bracket_pos+1:middle_pos]
return text_url
|
Split markdown URL. There is an issue when a Markdown URL is found:
parsing of the URL does not stop at the right place, so the wrongly
found URL has to be split.
:param str text_url: URL that we want to extract from enclosure
:param int tld_pos: position of TLD
:return: URL that has removed enclosure
:rtype: str
|
24,330 |
def get(self, field):
if field in (, , ):
return self.data[field]
else:
return self.data.get(, {})[field]
|
Returns the value of a user field.
:param str field:
The name of the user field.
:returns: str -- the value
|
24,331 |
def assert_is_instance(obj, cls, msg_fmt="{msg}"):
if not isinstance(obj, cls):
msg = "{!r} is an instance of {!r}, expected {!r}".format(
obj, obj.__class__, cls
)
types = cls if isinstance(cls, tuple) else (cls,)
fail(msg_fmt.format(msg=msg, obj=obj, types=types))
|
Fail if an object is not an instance of a class or tuple of classes.
>>> assert_is_instance(5, int)
>>> assert_is_instance('foo', (str, bytes))
>>> assert_is_instance(5, str)
Traceback (most recent call last):
...
AssertionError: 5 is an instance of <class 'int'>, expected <class 'str'>
The following msg_fmt arguments are supported:
* msg - the default error message
* obj - object to test
* types - tuple of types tested against
|
24,332 |
def _check_constant_params(
a, has_const=False, use_const=True, rtol=1e-05, atol=1e-08
):
if all((has_const, use_const)):
if not _confirm_constant(a):
raise ValueError(
"Data does not contain a constant; specify" " has_const=False"
)
k = a.shape[-1] - 1
elif not any((has_const, use_const)):
if _confirm_constant(a):
raise ValueError(
"Data already contains a constant; specify" " has_const=True"
)
k = a.shape[-1]
elif not has_const and use_const:
c1 = np.allclose(a.mean(axis=0), b=0.0, rtol=rtol, atol=atol)
c2 = np.allclose(a.std(axis=0), b=1.0, rtol=rtol, atol=atol)
if c1 and c2:
raise ValueError(
"Data appears to be ~N(0,1). Specify" " use_constant=False."
)
try:
a = add_constant(a, has_constant="raise")
except ValueError as e:
raise ValueError(
"X data already contains a constant; please specify"
" has_const=True"
) from e
k = a.shape[-1] - 1
else:
raise ValueError("`use_const` == False implies has_const is False.")
return k, a
|
Helper func to interaction between has_const and use_const params.
has_const use_const outcome
--------- --------- -------
True True Confirm that a has constant; return a
False False Confirm that a doesn't have constant; return a
False True Confirm that a doesn't have constant; add constant
True False ValueError
|
24,333 |
def service_set_tag(path, service_name, tag):
compose_result, err = __load_docker_compose(path)
if err:
return err
    services = compose_result['compose_content']['services']
    if service_name not in services:
        return __standardize_result(False,
                                    'Service {0} is not defined in the compose file'.format(service_name),
                                    None, None)
    if 'image' not in services[service_name]:
        return __standardize_result(False,
                                    'Service {0} has no image key'.format(service_name),
                                    None, None)
    image = services[service_name]['image'].split(':')[0]
    services[service_name]['image'] = '{0}:{1}'.format(image, tag)
    return __dump_compose_file(path, compose_result,
                               'Changed image tag of service {0} to {1}'.format(service_name, tag),
                               already_existed=True)
|
Change the tag of a docker-compose service
This does not pull or up the service
This will re-write your yaml file. Comments will be lost. Indentation is set to 2 spaces
path
    Path where the docker-compose file is stored on the server
service_name
    Name of the service whose image tag should be changed
tag
Name of the tag (often used as version) that the service image should have
CLI Example:
.. code-block:: bash
salt myminion dockercompose.service_set_tag /path/where/docker-compose/stored service_name tag
|
24,334 |
def human_size(size, a_kilobyte_is_1024_bytes=False, precision=1, target=None):
if size < 0:
        raise ValueError('number must be non-negative')
multiple = 1024 if a_kilobyte_is_1024_bytes else 1000
for suffix in SUFFIXES[multiple]:
if target:
if suffix == target:
break
size /= float(multiple)
else:
if size >= multiple:
size /= float(multiple)
else:
break
    return '{0:.{1}f}{2}'.format(size, precision, suffix)
|
Convert a file size to human-readable form.
Keyword arguments:
size -- file size in bytes
a_kilobyte_is_1024_bytes -- if True, use multiples of 1024
                            if False (default), use multiples of 1000
Returns: string
Credit: <http://diveintopython3.org/your-first-python-program.html>
>>> print(human_size(1000000000000, True))
931.3GiB
>>> print(human_size(1000000000000))
1.0Tb
>>> print(human_size(300))
300.0
|
24,335 |
def export(self, filename, offset=0, length=None):
self.__validate_offset(filename=filename, offset=offset, length=length)
with open(filename, 'wb') as f:
if length is None:
length = len(self.data) - offset
if offset > 0:
output = self.data[offset:length]
else:
output = self.data[:length]
f.write(output)
|
Exports byte array to specified destination
Args:
filename (str): destination to output file
offset (int): byte offset (default: 0)
|
24,336 |
def permitted_query(self, query, group, operations):
session = query.session
models = session.router
user = group.user
if user.is_superuser:
return query
roles = group.roles.query()
roles = group.roles.query()
throgh_model = models.role.permissions.model
models[throgh_model].filter(role=roles,
permission__model_type=query.model,
permission__operations=operations)
permissions = router.permission.filter(model_type=query.model,
level=operations)
owner_query = query.filter(user=user)
roles = models.role.filter(model_type=query.model, level__ge=level)
groups = Role.groups.throughquery(
session).filter(role=roles).get_field()
if user.groups.filter(id=groups).count():
permitted = models.instancerole.filter(
role=roles).get_field()
return owner_query.union(model.objects.filter(id=permitted))
else:
return owner_query
|
Change the ``query`` so that only instances for which
``group`` has roles with permission on ``operations`` are returned.
|
24,337 |
def get_devices(self):
if self.__util is None:
import tango
db = tango.Database()
else:
db = self.__util.get_database()
server = self.server_instance
dev_list = db.get_device_class_list(server)
class_map, dev_map = {}, {}
for class_name, dev_name in zip(dev_list[1::2], dev_list[::2]):
dev_names = class_map.get(class_name)
if dev_names is None:
class_map[class_name] = dev_names = []
dev_name = dev_name.lower()
dev_names.append(dev_name)
dev_map[dev_name] = class_name
return class_map, dev_map
|
Helper that returns a dict of devices for this server.
:return:
Returns a tuple of two elements:
- dict<tango class name : list of device names>
- dict<device names : tango class name>
:rtype: tuple<dict, dict>
|
24,338 |
def Miqueu(T, Tc, Vc, omega):
Vc = Vc*1E6
t = 1.-T/Tc
sigma = k*Tc*(N_A/Vc)**(2/3.)*(4.35 + 4.14*omega)*t**1.26*(1+0.19*t**0.5 - 0.25*t)*10000
return sigma
|
r'''Calculates air-water surface tension using the methods of [1]_.
.. math::
\sigma = k T_c \left( \frac{N_a}{V_c}\right)^{2/3}
(4.35 + 4.14 \omega)t^{1.26}(1+0.19t^{0.5} - 0.487t)
Parameters
----------
T : float
Temperature of fluid [K]
Tc : float
Critical temperature of fluid [K]
Vc : float
Critical volume of fluid [m^3/mol]
omega : float
Acentric factor for fluid, [-]
Returns
-------
sigma : float
Liquid surface tension, N/m
Notes
-----
Uses Avogadro's constant and the Boltzmann constant.
Internal units of volume are mL/mol and mN/m. However, either a typo
is in the article or author's work, or my value of k is off by 10; this is
corrected nonetheless.
Created with 31 normal fluids, none polar or hydrogen bonded. Has an
AARD of 3.5%.
Examples
--------
Bromotrifluoromethane, 2.45 mN/m
>>> Miqueu(300., 340.1, 0.000199, 0.1687)
0.003474099603581931
References
----------
.. [1] Miqueu, C, D Broseta, J Satherley, B Mendiboure, J Lachaise, and
A Graciaa. "An Extended Scaled Equation for the Temperature Dependence
of the Surface Tension of Pure Compounds Inferred from an Analysis of
Experimental Data." Fluid Phase Equilibria 172, no. 2 (July 5, 2000):
169-82. doi:10.1016/S0378-3812(00)00384-8.
|
24,339 |
def upload_files(self, container, src_dst_map, content_type=None):
if not content_type:
content_type = "application/octet.stream"
url = self.make_url(container, None, None)
headers = self._base_headers
multi_files = []
try:
for src_path in src_dst_map:
dst_name = src_dst_map[src_path]
if not dst_name:
dst_name = os.path.basename(src_path)
multi_files.append(
(, (dst_name, open(src_path, ), content_type)))
rsp = requests.post(url, headers=headers, files=multi_files,
timeout=self._timeout)
except requests.exceptions.ConnectionError as e:
RestHttp._raise_conn_error(e)
finally:
for n, info in multi_files:
dst, f, ctype = info
f.close()
return self._handle_response(rsp)
|
Upload multiple files.
|
24,340 |
def delete(self):
del self.bg.widget
del self.bg
del self._pos
del self._size
self.actions = {}
for e_type,e_handlers in self.peng.eventHandlers.items():
if True or e_type in eh:
to_del = []
for e_handler in e_handlers:
if isinstance(e_handler,weakref.ref):
if super(weakref.WeakMethod,e_handler).__call__() is self:
to_del.append(e_handler)
elif e_handler is self:
to_del.append(e_handler)
for d in to_del:
try:
del e_handlers[e_handlers.index(d)]
except Exception:
import traceback;traceback.print_exc()
|
Deletes resources of this widget that require manual cleanup.
Currently removes all actions, event handlers and the background.
The background itself should automatically remove all vertex lists to avoid visual artifacts.
Note that this method is currently experimental, as it seems to have a memory leak.
|
24,341 |
def _parse_resolution(self, tokens):
return self.resolution_parser.parse(self.parse_keyword(Keyword.GROUP_BY, tokens))
|
Parse resolution from the GROUP BY statement.
E.g. GROUP BY time(10s) would mean a 10 second resolution
:param tokens:
:return:
|
24,342 |
def replace_s(self, c_bra, c_ket, s):
adjustment = len(s) - (c_ket - c_bra)
self.current = self.current[0:c_bra] + s + self.current[c_ket:]
self.limit += adjustment
if self.cursor >= c_ket:
self.cursor += adjustment
elif self.cursor > c_bra:
self.cursor = c_bra
return adjustment
|
to replace chars between c_bra and c_ket in self.current by the
chars in s.
@type c_bra int
@type c_ket int
@type s: string
|
24,343 |
def custom_role(self):
if self.api and self.custom_role_id:
return self.api._get_custom_role(self.custom_role_id)
|
| Comment: A custom role if the user is an agent on the Enterprise plan
|
24,344 |
def isRegionValid(self):
screens = PlatformManager.getScreenDetails()
for screen in screens:
s_x, s_y, s_w, s_h = screen["rect"]
if self.x+self.w >= s_x and s_x+s_w >= self.x and self.y+self.h >= s_y and s_y+s_h >= self.y:
return True
return False
|
Returns false if the whole region is not even partially inside any screen, otherwise true
|
24,345 |
def is_layer_compatible(self, layer, layer_purpose=None, keywords=None):
if not layer_purpose:
layer_purpose = self.get_parent_mode_constraints()[0][]
if layer_geometry != self.get_layer_geometry_key(layer):
return False
if not keywords or not in keywords:
return True
keyword_version = str(keywords[])
if not is_keyword_version_supported(keyword_version):
return True
if (layer_purpose in keywords
and keywords[layer_purpose] != subcategory):
return False
return True
|
Validate if a given layer is compatible for selected IF
as a given layer_purpose
:param layer: The layer to be validated
:type layer: QgsVectorLayer | QgsRasterLayer
:param layer_purpose: The layer_purpose the layer is validated for
:type layer_purpose: None, string
:param keywords: The layer keywords
:type keywords: None, dict
:returns: True if layer is appropriate for the selected role
:rtype: boolean
|
24,346 |
def mergeCatalogs(catalog_list):
for c in catalog_list:
if c.data.dtype.names != catalog_list[0].data.dtype.names:
msg = "Catalog data columns not the same."
raise Exception(msg)
data = np.concatenate([c.data for c in catalog_list])
config = catalog_list[0].config
return Catalog(config,data=data)
|
Merge a list of Catalogs.
Parameters:
-----------
catalog_list : List of Catalog objects.
Returns:
--------
catalog : Combined Catalog object
|
24,347 |
def binary_xloss(logits, labels, ignore=None):
logits, labels = flatten_binary_scores(logits, labels, ignore)
loss = StableBCELoss()(logits, Variable(labels.float()))
return loss
|
Binary Cross entropy loss
logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
ignore: void class id
|
24,348 |
def get_content_models(self):
models = []
for model in self.concrete_model.get_content_models():
try:
admin_url(model, "add")
except NoReverseMatch:
continue
else:
setattr(model, "meta_verbose_name", model._meta.verbose_name)
setattr(model, "add_url", admin_url(model, "add"))
models.append(model)
return models
|
Return all subclasses that are admin registered.
|
24,349 |
def to_time(self, phase, component=None, t0=, **kwargs):
    if kwargs.get('phshift', False):
raise ValueError("support for phshift was removed as of 2.1. Please pass t0 instead.")
ephem = self.get_ephemeris(component=component, t0=t0, **kwargs)
if isinstance(phase, list):
phase = np.array(phase)
    t0 = ephem.get('t0', 0.0)
    period = ephem.get('period', 1.0)
    dpdt = ephem.get('dpdt', 0.0)
if dpdt != 0:
time = t0 + 1./dpdt*(np.exp(dpdt*(phase))-period)
else:
time = t0 + (phase)*period
return time
|
Get the time(s) of a phase(s) for a given ephemeris
:parameter phase: phase to convert to times (should be in
same system as t0s)
:type phase: float, list, or array
:parameter str component: component for which to get the ephemeris.
If not given, component will default to the top-most level of the
current hierarchy
:parameter t0: qualifier of the parameter to be used for t0
:type t0: str
:parameter **kwargs: any value passed through kwargs will override the
ephemeris retrieved by component (ie period, t0, dpdt).
Note: be careful about units - input values will not be converted.
:return: time (float) or times (array)
|
24,350 |
def integrate_orbit(self, **time_spec):
pos = self.w0.xyz.decompose(self.units).value
vel = self.w0.v_xyz.decompose(self.units).value
w0 = np.ascontiguousarray(np.vstack((pos, vel)).T)
t = parse_time_specification(self.units, **time_spec)
ws = _direct_nbody_dop853(w0, t, self._ext_ham,
self.particle_potentials)
pos = np.rollaxis(np.array(ws[..., :3]), axis=2)
vel = np.rollaxis(np.array(ws[..., 3:]), axis=2)
    orbits = Orbit(
        pos=pos * self.units['length'],
        vel=vel * self.units['length'] / self.units['time'],
        t=t * self.units['time'])
return orbits
|
Integrate the initial conditions in the combined external potential
plus N-body forces.
This integration uses the `~gala.integrate.DOPRI853Integrator`.
Parameters
----------
**time_spec
Specification of how long to integrate. See documentation
for `~gala.integrate.parse_time_specification`.
Returns
-------
orbit : `~gala.dynamics.Orbit`
The orbits of the particles.
|
24,351 |
def get_connection_logging(self, loadbalancer):
uri = "/loadbalancers/%s/connectionlogging" % utils.get_id(loadbalancer)
resp, body = self.api.method_get(uri)
ret = body.get("connectionLogging", {}).get("enabled", False)
return ret
|
Returns the connection logging setting for the given load balancer.
|
24,352 |
def get_basic_profile(self, user_id, scope=):
profile = _get(
token=self.oauth.get_app_token(scope),
uri= + urllib.quote(user_id)
)
try:
return json.loads(profile)
except:
raise MxitAPIException()
|
Retrieve the Mxit user's basic profile
No user authentication required
|
24,353 |
def update_git_devstr(version, path=None):
try:
return version
|
Updates the git revision string if and only if the path is being imported
directly from a git working copy. This ensures that the revision number in
the version string is accurate.
|
24,354 |
def get_redshift(self, dist):
dist, input_is_array = ensurearray(dist)
try:
zs = self.nearby_d2z(dist)
except TypeError:
self.setup_interpolant()
zs = self.nearby_d2z(dist)
replacemask = numpy.isnan(zs)
if replacemask.any():
zs[replacemask] = self.faraway_d2z(dist[replacemask])
replacemask = numpy.isnan(zs)
if replacemask.any():
if not (dist > 0.).all() and numpy.isfinite(dist).all():
raise ValueError("distance must be finite and > 0")
zs[replacemask] = _redshift(dist[replacemask],
cosmology=self.cosmology)
return formatreturn(zs, input_is_array)
|
Returns the redshift for the given distance.
|
24,355 |
def get_games_by_season(self, season):
try:
season = int(season)
except ValueError:
raise FantasyDataError()
result = self._method_call("Games/{season}", "stats", season=season)
return result
|
Game schedule for a specified season.
|
24,356 |
def execute_deploy_clone_from_vm(self, si, logger, vcenter_data_model, reservation_id, deployment_params, cancellation_context, folder_manager):
self._prepare_deployed_apps_folder(deployment_params, si, logger, folder_manager, vcenter_data_model)
deploy_result = self.deployer.deploy_clone_from_vm(si, logger, deployment_params, vcenter_data_model,
reservation_id, cancellation_context)
return deploy_result
|
Calls the deployer to deploy vm from another vm
:param cancellation_context:
:param str reservation_id:
:param si:
:param logger:
:type deployment_params: DeployFromTemplateDetails
:param vcenter_data_model:
:return:
|
24,357 |
def type_names_mn(prefix, sizerangem, sizerangen):
lm = []
ln = []
namelist = []
for i in sizerangem: lm.append(i)
for i in sizerangen: ln.append(i)
validpairs = [tuple([m,n]) for m in lm for n in ln if m+n<=256]
for i in validpairs:
        namelist.append(prefix + str(i[0]) + 'x' + str(i[1]))
return tuple(namelist)
|
Helper for type name generation, like: fixed0x8 .. fixed0x256
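A small illustration (with the 'x' separator fixed in the code above):
>>> type_names_mn('fixed', [0], [8, 16, 256])
('fixed0x8', 'fixed0x16', 'fixed0x256')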
|
24,358 |
def get_shard_num_by_key_id(self, key_id):
m = hashlib.md5(str(key_id).encode()).hexdigest()
val = (int(m[0:2], 16) |
int(m[2:4], 16) << 8 |
int(m[4:6], 16) << 16 |
int(m[6:8], 16) << 24)
return val % self.num_shards()
|
get_shard_num_by_key_id returns the Redis shard number (zero-indexed)
given a key id.
Keyword arguments:
key_id -- the key id (e.g. '12345' or 'anythingcangohere')
This method is critical in how the Redis cluster sharding works. We
emulate twemproxy's md5 distribution algorithm.
|
24,359 |
def volume_infos(pool=None, volume=None, **kwargs):
result = {}
conn = __get_conn(**kwargs)
try:
backing_stores = _get_all_volumes_paths(conn)
domains = _get_domain(conn)
domains_list = domains if isinstance(domains, list) else [domains]
disks = {domain.name():
{node.get() for node
in ElementTree.fromstring(domain.XMLDesc(0)).findall()}
for domain in domains_list}
def _volume_extract_infos(vol):
types = [, , , , , ]
infos = vol.info()
used_by = []
if vol.path():
as_backing_store = {path for (path, all_paths) in backing_stores.items() if vol.path() in all_paths}
used_by = [vm_name for (vm_name, vm_disks) in disks.items()
if vm_disks & as_backing_store or vol.path() in vm_disks]
return {
: types[infos[0]] if infos[0] < len(types) else ,
: vol.key(),
: vol.path(),
: infos[1],
: infos[2],
: used_by,
}
pools = [obj for obj in conn.listAllStoragePools() if pool is None or obj.name() == pool]
vols = {pool_obj.name(): {vol.name(): _volume_extract_infos(vol)
for vol in pool_obj.listAllVolumes()
if (volume is None or vol.name() == volume) and _is_valid_volume(vol)}
for pool_obj in pools}
return {pool_name: volumes for (pool_name, volumes) in vols.items() if volumes}
except libvirt.libvirtError as err:
log.debug(, str(err))
finally:
conn.close()
return result
|
Provide details on a storage volume. If no volume name is provided, the infos
of all the volumes contained in the pool are provided. If no pool is provided,
the infos of the volumes of all pools are output.
:param pool: libvirt storage pool name (default: ``None``)
:param volume: name of the volume to get infos from (default: ``None``)
:param connection: libvirt connection URI, overriding defaults
:param username: username to connect with, overriding defaults
:param password: password to connect with, overriding defaults
.. versionadded:: Neon
CLI Example:
.. code-block:: bash
salt "*" virt.volume_infos <pool> <volume>
|
24,360 |
def _set_area_range(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("range_address",area_range.area_range, yang_name="area-range", rest_name="range", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: None, u: None, u: u, u: None, u: None, u: u}}), is_container=, yang_name="area-range", rest_name="range", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None, u: None, u: u, u: None, u: None, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "list",
: ,
})
self.__area_range = t
if hasattr(self, ):
self._set()
|
Setter method for area_range, mapped from YANG variable /rbridge_id/ipv6/router/ospf/area/area_range (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_area_range is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_area_range() directly.
YANG Description: If the ABR that connects the NSSA to other areas needs to summarize the routes in the NSSA before translating them into Type-5 LSAs and flooding them into the other areas, configure an address range. The ABR creates an aggregate value based on the address range. The aggregate value becomes the address that the ABR advertises instead of advertising the individual addresses represented by the aggregate.
|
24,361 |
def make_coins(self, collection, text, subreference="", lang=None):
if lang is None:
lang = self.__default_lang__
return "url_ver=Z39.88-2004"\
"&ctx_ver=Z39.88-2004"\
"&rft_val_fmt=info%3Aofi%2Ffmt%3Akev%3Amtx%3Abook"\
"&rft_id={cid}"\
"&rft.genre=bookitem"\
"&rft.btitle={title}"\
"&rft.edition={edition}"\
"&rft.au={author}"\
"&rft.atitle={pages}"\
"&rft.language={language}"\
"&rft.pages={pages}".format(
title=quote(str(text.get_title(lang))), author=quote(str(text.get_creator(lang))),
cid=url_for(".r_collection", objectId=collection.id, _external=True),
language=collection.lang, pages=quote(subreference), edition=quote(str(text.get_description(lang)))
)
|
Creates a CoINS Title string from information
:param collection: Collection to create coins from
:param text: Text/Passage object
:param subreference: Subreference
:param lang: Locale information
:return: Coins HTML title value
|
24,362 |
def get_tags(blog_id, username, password):
authenticate(username, password)
site = Site.objects.get_current()
return [tag_structure(tag, site)
for tag in Tag.objects.usage_for_queryset(
Entry.published.all(), counts=True)]
|
wp.getTags(blog_id, username, password)
=> tag structure[]
|
24,363 |
def post_dns_record(**kwargs):
if in kwargs:
f_kwargs = kwargs[]
del kwargs[]
kwargs.update(f_kwargs)
mandatory_kwargs = (, , , )
for i in mandatory_kwargs:
if kwargs[i]:
pass
else:
error = .format(i, kwargs[i], mandatory_kwargs)
raise SaltInvocationError(error)
domain = query(method=, droplet_id=kwargs[])
if domain:
result = query(
method=,
droplet_id=kwargs[],
command=,
args={: kwargs[], : kwargs[], : kwargs[]},
http_method=
)
return result
return False
|
Creates a DNS record for the given name if the domain is managed with DO.
|
24,364 |
def _win32_symlink2(path, link, allow_fallback=True, verbose=0):
if _win32_can_symlink():
return _win32_symlink(path, link, verbose)
else:
return _win32_junction(path, link, verbose)
|
Perform a real symbolic link if possible. However, on most versions of
windows you need special privileges to create a real symlink. Therefore, we
try to create a symlink, but if that fails we fall back to using a junction.
AFAIK, the main difference between symlinks and junctions is that symlinks
can reference relative or absolute paths, whereas junctions always
reference absolute paths. Not 100% on this though. Windows is weird.
Note that junctions will not register as links via `islink`, but I
believe real symlinks will.
|
24,365 |
def push(self):
print()
push_kwargs = {}
push_args = []
if self.settings[]:
push_kwargs[] = True
if self.settings[]:
push_kwargs[] = True
else:
if in self.remotes:
self.remotes.remove()
if not self.remotes:
return
push_args.append(self.remotes)
try:
self.git.push(*push_args, **push_kwargs)
self.pushed = True
except GitError as error:
error.message = "`git push` failed"
raise error
|
Push the changes back to the remote(s) after fetching
|
24,366 |
def _spec_to_globs(address_mapper, specs):
patterns = set()
for spec in specs:
patterns.update(spec.make_glob_patterns(address_mapper))
return PathGlobs(include=patterns, exclude=address_mapper.build_ignore_patterns)
|
Given a Specs object, return a PathGlobs object for the build files that it matches.
|
24,367 |
def _permute_aux_specs(self):
calc_aux_mapping = self._NAMES_SUITE_TO_CALC.copy()
calc_aux_mapping[_OBJ_LIB_STR] = None
[calc_aux_mapping.pop(core) for core in self._CORE_SPEC_NAMES]
specs = self._get_aux_specs()
for suite_name, calc_name in calc_aux_mapping.items():
specs[calc_name] = specs.pop(suite_name)
return _permuted_dicts_of_specs(specs)
|
Generate all permutations of the non-core specifications.
|
24,368 |
def send(self,
data: Object,
retries: int = Session.MAX_RETRIES,
timeout: float = Session.WAIT_TIMEOUT):
if not self.is_started:
raise ConnectionError("Client has not been started")
if self.no_updates:
data = functions.InvokeWithoutUpdates(query=data)
if self.takeout_id:
data = functions.InvokeWithTakeout(takeout_id=self.takeout_id, query=data)
r = self.session.send(data, retries, timeout)
self.fetch_peers(getattr(r, "users", []))
self.fetch_peers(getattr(r, "chats", []))
return r
|
Use this method to send Raw Function queries.
This method makes possible to manually call every single Telegram API method in a low-level manner.
Available functions are listed in the :obj:`functions <pyrogram.api.functions>` package and may accept compound
data types from :obj:`types <pyrogram.api.types>` as well as bare types such as ``int``, ``str``, etc...
Args:
data (``Object``):
The API Schema function filled with proper arguments.
retries (``int``):
Number of retries.
timeout (``float``):
Timeout in seconds.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
|
24,369 |
def delete_item(self, item):
for relation, dst in self.relations_of(item, True):
self.delete_relation(item, relation, dst)
for src, relation in self.relations_to(item, True):
self.delete_relation(src, relation, item)
h = self._item_hash(item)
if item in self:
self.nodes[h].clear()
del self.nodes[h]
|
removes an item from the db
|
24,370 |
def attributes_section(thing, doc, header_level):
if not inspect.isclass(thing):
return []
props, class_doc = _split_props(thing, doc["Attributes"])
tl = type_list(inspect.signature(thing), class_doc, "\n
if len(tl) == 0 and len(props) > 0:
tl.append("\n
for prop in props:
tl.append(f"- [`{prop}`](
return tl
|
Generate an attributes section for classes.
Prefers type annotations, if they are present.
Parameters
----------
thing : class
Class to document
doc : dict
Numpydoc output
header_level : int
Number of `#`s to use for header
Returns
-------
list of str
Markdown formatted attribute list
|
24,371 |
def _is_small_vcf(vcf_file):
count = 0
small_thresh = 250
with utils.open_gzipsafe(vcf_file) as in_handle:
for line in in_handle:
if not line.startswith("
count += 1
if count > small_thresh:
return False
return True
|
Check for small VCFs which we want to analyze quicker.
|
24,372 |
def sign_message(data_to_sign, digest_alg, sign_key,
use_signed_attributes=True):
if use_signed_attributes:
digest_func = hashlib.new(digest_alg)
digest_func.update(data_to_sign)
message_digest = digest_func.digest()
class SmimeCapability(core.Sequence):
_fields = [
(, core.Any, {: True}),
(, core.Any, {: True}),
(, core.Any, {: True}),
(, core.Any, {: True}),
(, core.Any, {: True})
]
class SmimeCapabilities(core.Sequence):
_fields = [
(, SmimeCapability),
(, SmimeCapability, {: True}),
(, SmimeCapability, {: True}),
(, SmimeCapability, {: True}),
(, SmimeCapability, {: True}),
(, SmimeCapability, {: True}),
]
smime_cap = OrderedDict([
(, OrderedDict([
(, core.ObjectIdentifier())])),
(, OrderedDict([
(, core.ObjectIdentifier()),
(, core.Integer(128))])),
(, OrderedDict([
(, core.ObjectIdentifier()),
(, core.Integer(128))])),
])
signed_attributes = cms.CMSAttributes([
cms.CMSAttribute({
: cms.CMSAttributeType(),
: cms.SetOfContentType([
cms.ContentType()
])
}),
cms.CMSAttribute({
: cms.CMSAttributeType(),
: cms.SetOfTime([
cms.Time({
: core.UTCTime(datetime.now())
})
])
}),
cms.CMSAttribute({
: cms.CMSAttributeType(),
: cms.SetOfOctetString([
core.OctetString(message_digest)
])
}),
cms.CMSAttribute({
: cms.CMSAttributeType(),
: cms.SetOfAny([
core.Any(SmimeCapabilities(smime_cap))
])
}),
])
signature = asymmetric.rsa_pkcs1v15_sign(
sign_key[0], signed_attributes.dump(), digest_alg)
else:
signed_attributes = None
signature = asymmetric.rsa_pkcs1v15_sign(
sign_key[0], data_to_sign, digest_alg)
return cms.ContentInfo({
: cms.ContentType(),
: cms.SignedData({
: cms.CMSVersion(),
: cms.DigestAlgorithms([
algos.DigestAlgorithm({
: algos.DigestAlgorithmId(digest_alg)
})
]),
: cms.ContentInfo({
: cms.ContentType()
}),
: cms.CertificateSet([
cms.CertificateChoices({
: sign_key[1].asn1
})
]),
: cms.SignerInfos([
cms.SignerInfo({
: cms.CMSVersion(),
: cms.SignerIdentifier({
: cms.IssuerAndSerialNumber({
: sign_key[1].asn1[
][],
: sign_key[1].asn1[
][]
})
}),
: algos.DigestAlgorithm({
: algos.DigestAlgorithmId(digest_alg)
}),
: signed_attributes,
: algos.SignedDigestAlgorithm({
:
algos.SignedDigestAlgorithmId()
}),
: core.OctetString(signature)
})
])
})
}).dump()
|
Signs the data and returns the generated CMS ASN.1 structure.
:param data_to_sign: A byte string of the data to be signed.
:param digest_alg:
The digest algorithm to be used for generating the signature.
:param sign_key: The key to be used for generating the signature.
:param use_signed_attributes: Optional flag to indicate whether the
CMS signature attributes should be included in the signature or not.
:return: A CMS ASN.1 byte string of the signed data.
|
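A hypothetical usage sketch; it assumes `sign_key` is a (private key, certificate) pair of oscrypto objects, which matches how `sign_key[0]` and `sign_key[1].asn1` are used above. The file paths are placeholders:
from oscrypto import asymmetric

key = asymmetric.load_private_key('private_key.pem')    # placeholder path
cert = asymmetric.load_certificate('certificate.pem')   # placeholder path
cms_der = sign_message(b'message body', 'sha256', (key, cert))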
24,373 |
def _parse_categories(element):
reference = {}
items = element.findall("./{%s}category" % WP_NAMESPACE)
for item in items:
term_id = item.find("./{%s}term_id" % WP_NAMESPACE).text
nicename = item.find("./{%s}category_nicename" % WP_NAMESPACE).text
name = item.find("./{%s}cat_name" % WP_NAMESPACE).text
parent = item.find("./{%s}category_parent" % WP_NAMESPACE).text
category = {
"term_id": term_id,
"nicename": nicename,
"name": name,
"parent": parent
}
reference[nicename] = category
return _build_category_tree(None, reference=reference)
|
Returns a list of categories with their parent/child relations.
|
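`_build_category_tree` is not shown here; a minimal sketch of what it is assumed to do, nesting each category under its parent starting from the root (parent is None for top-level categories):
def _build_category_tree(parent, reference):
    tree = []
    for nicename, category in reference.items():
        if category["parent"] == parent:
            # recurse so each category carries its own children
            category["children"] = _build_category_tree(nicename, reference)
            tree.append(category)
    return tree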
24,374 |
def df_random(num_numeric=3, num_categorical=3, num_rows=100):
df = pd.DataFrame()
column_names = string.ascii_lowercase
for name in column_names[:num_numeric]:
df[name] = df_numeric_column(num_rows=num_rows)
for name in column_names[num_numeric:num_numeric+num_categorical]:
df[name] = df_categorical_column([, , ], num_rows=num_rows)
return df
|
Generate a dataframe with random data. This is a general method
to easily generate a random dataframe; for more control over the
random 'distributions', use the column methods (df_numeric_column, df_categorical_column).
For other distributions you can use numpy methods directly (see the example at the bottom of this file).
Args:
num_numeric (int): The number of numeric columns (default = 3)
num_categorical (int): The number of categorical columns (default = 3)
num_rows (int): The number of rows to generate (default = 100)
|
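Illustrative doctest-style usage (values are random; only the shape and column names are deterministic):
>>> df = df_random(num_numeric=2, num_categorical=1, num_rows=10)
>>> list(df.columns)
['a', 'b', 'c']
>>> len(df)
10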
24,375 |
def rpc_get_account_record(self, address, token_type, **con_info):
if not check_account_address(address):
return {: , : 400}
if not check_token_type(token_type):
return {: , : 400}
if is_c32_address(address):
address = c32ToB58(address)
db = get_db_state(self.working_dir)
account = db.get_account(address, token_type)
db.close()
if account is None:
return {: , : 404}
state = self.export_account_state(account)
return self.success_response({: state})
|
Get the current state of an account
|
24,376 |
def __initialize_ui(self):
self.Lines_Columns_label.setAlignment(Qt.AlignRight)
self.Lines_Columns_label.setText(self.__Lines_Columns_label_default_text.format(1, 1))
self.Languages_comboBox.setModel(self.__container.languages_model)
self.Languages_comboBox.currentIndexChanged.connect(self.__Languages_comboBox__currentIndexChanged)
|
Initializes the widget UI.
|
24,377 |
def get_salt_interface(vm_, opts):
    # NOTE: the config keys below ('salt_interface', 'ssh_interface') are reconstructed
    # from salt-cloud conventions; the original string literals were stripped.
    salt_host = salt.config.get_cloud_config_value(
        'salt_interface', vm_, opts, default=False,
        search_global=False
    )
    if salt_host is False:
        salt_host = salt.config.get_cloud_config_value(
            'ssh_interface', vm_, opts, default='public_ips',
            search_global=False
        )
    return salt_host
|
Return the salt_interface type to connect to. Either 'public_ips' (default)
or 'private_ips'.
|
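A hypothetical sketch of how a profile would steer the result; the `ssh_interface` value only applies when `salt_interface` is unset, per the fallback above:
vm_ = {'name': 'web01', 'ssh_interface': 'private_ips'}   # placeholder profile data
get_salt_interface(vm_, opts)   # `opts` is the cloud config dict; returns 'private_ips' here,
                                # or 'public_ips' when neither key is set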
24,378 |
def store_object(self, obj_name, data, content_type=None, etag=None,
content_encoding=None, ttl=None, return_none=False,
headers=None, extra_info=None):
return self.create(obj_name=obj_name, data=data,
content_type=content_type, etag=etag,
content_encoding=content_encoding, ttl=ttl,
return_none=return_none, headers=headers)
|
Creates a new object in this container, and populates it with the given
data. A StorageObject reference to the uploaded file will be returned,
unless 'return_none' is set to True.
The 'extra_info' parameter is included for backwards compatibility. It
is no longer used at all, and will not be modified with swiftclient
info, since swiftclient is not used any more.
|
24,379 |
async def load(self, request, resource=None, **kwargs):
schema = self.get_schema(request, resource=resource, **kwargs)
data = await self.parse(request)
resource, errors = schema.load(
data, partial=resource is not None, many=isinstance(data, list))
if errors:
raise RESTBadRequest(reason=, json={: errors})
return resource
|
Load resource from given data.
|
24,380 |
def weekdays(self):
if not self.root.xpath():
return set(range(7))
return set(int(d) - 1 for d in self.root.xpath())
|
A set of integers representing the weekdays the schedule recurs on,
with Monday = 0 and Sunday = 6.
|
24,381 |
def check_lines(first, second):
if not (
first.__class__ is Linearization
and second.__class__ is Linearization
and first.error == 0.0
and second.error == 0.0
):
return False, None
s, t, success = segment_intersection(
first.start_node, first.end_node, second.start_node, second.end_node
)
if success:
if _helpers.in_interval(s, 0.0, 1.0) and _helpers.in_interval(
t, 0.0, 1.0
):
intersections = np.asfortranarray([[s], [t]])
result = intersections, False
else:
result = np.empty((2, 0), order="F"), False
else:
disjoint, params = parallel_lines_parameters(
first.start_node,
first.end_node,
second.start_node,
second.end_node,
)
if disjoint:
result = np.empty((2, 0), order="F"), False
else:
result = params, True
return True, result
|
Checks if two curves are lines and tries to intersect them.
.. note::
This is a helper for :func:`._all_intersections`.
If they are not lines / not linearized, immediately returns :data:`False`
with no "return value".
If they are lines, attempts to intersect them (even if they are parallel
and share a coincident segment).
Args:
first (Union[SubdividedCurve, Linearization]): First curve being
intersected.
second (Union[SubdividedCurve, Linearization]): Second curve being
intersected.
Returns:
Tuple[bool, Optional[Tuple[numpy.ndarray, bool]]]: A pair of
* Flag indicating if both candidates in the pair are lines.
* Optional "result" populated only if both candidates are lines.
When this result is populated, it will be a pair of
* array of parameters of intersection
* flag indicating if the two candidates share a coincident segment
|
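For reference, a standalone sketch (not the bezier implementation) of how the s and t parameters returned by segment_intersection can be computed with 2-D cross products:
import numpy as np

def segment_params(p0, p1, q0, q1):
    # Solve p0 + s*(p1 - p0) == q0 + t*(q1 - q0) for the line parameters s, t.
    d1, d2 = p1 - p0, q1 - q0
    denom = np.cross(d1, d2)
    if denom == 0.0:
        return None              # parallel (possibly coincident) lines
    s = np.cross(q0 - p0, d2) / denom
    t = np.cross(q0 - p0, d1) / denom
    return s, t

s, t = segment_params(np.array([0.0, 0.0]), np.array([2.0, 2.0]),
                      np.array([0.0, 2.0]), np.array([2.0, 0.0]))
# s == t == 0.5: the two diagonals cross at their midpoints, inside [0, 1] for both.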
24,382 |
def get_vprof_version(filename):
    with open(filename) as src_file:
        version_match = re.search(
            r"^__version__ = ['\"]([^'\"]*)['\"]", src_file.read(), re.M)
        if version_match:
            return version_match.group(1)
        raise RuntimeError('Unable to find version info.')
|
Returns the actual version specified in filename.
|
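Doctest-style usage, assuming the target file contains a line such as __version__ = '0.4':
>>> get_vprof_version('vprof/__init__.py')
'0.4'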
24,383 |
def cast(
source: Union[DataType, str], target: Union[DataType, str], **kwargs
) -> DataType:
source, result_target = dtype(source), dtype(target)
    if not castable(source, result_target, **kwargs):
        # NOTE: message text reconstructed; the original literal was stripped.
        raise com.IbisTypeError(
            'Datatype {} cannot be implicitly casted to {}'.format(source, result_target)
        )
return result_target
|
Attempts to implicitly cast from source dtype to target dtype
|
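A usage sketch, assuming ibis' usual implicit-cast rules (integer widening is allowed, narrowing a float to an integer is not):
wider = cast('int32', 'int64')   # OK: implicit widening, returns the int64 dtype
cast('double', 'int8')           # expected to raise com.IbisTypeError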
24,384 |
def t_bin_NUMBER(t):
    r'[01]+'
    t.value = int(t.value, 2)
    t.lexer.begin('INITIAL')    # NOTE: state name assumed; the original literal was stripped
    return t
|
r'[01]+'
|
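For context, a minimal self-contained PLY sketch in the same shape: a '0b' prefix switches the lexer into a binary state, and the state-scoped rule converts the digits with int(..., 2). The state and token names here are assumptions, not the original grammar's:
import ply.lex as lex

tokens = ('NUMBER', 'BIN')
states = (('bin', 'exclusive'),)

def t_BIN(t):
    r'0b'
    t.lexer.begin('bin')          # enter the binary-literal state

def t_bin_NUMBER(t):
    r'[01]+'
    t.value = int(t.value, 2)
    t.lexer.begin('INITIAL')      # back to the default state
    return t

t_ignore = ' \t'
t_bin_ignore = ' \t'

def t_ANY_error(t):
    t.lexer.skip(1)

lexer = lex.lex()
lexer.input('0b1011')
print(lexer.token())              # LexToken(NUMBER, 11, ...)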
24,385 |
def close(self, cancelled=False):
self._on_close(cancelled)
self._scene.remove_effect(self)
|
Close this temporary pop-up.
:param cancelled: Whether the pop-up was cancelled (e.g. by pressing Esc).
|
24,386 |
def estimate_cpd(self, node):
state_counts = self.state_counts(node)
state_counts.ix[:, (state_counts == 0).all()] = 1
parents = sorted(self.model.get_parents(node))
parents_cardinalities = [len(self.state_names[parent]) for parent in parents]
node_cardinality = len(self.state_names[node])
cpd = TabularCPD(node, node_cardinality, np.array(state_counts),
evidence=parents,
evidence_card=parents_cardinalities,
state_names=self.state_names)
cpd.normalize()
return cpd
|
Method to estimate the CPD for a given variable.
Parameters
----------
node: int, string (any hashable python object)
The name of the variable for which the CPD is to be estimated.
Returns
-------
CPD: TabularCPD
Examples
--------
>>> import pandas as pd
>>> from pgmpy.models import BayesianModel
>>> from pgmpy.estimators import MaximumLikelihoodEstimator
>>> data = pd.DataFrame(data={'A': [0, 0, 1], 'B': [0, 1, 0], 'C': [1, 1, 0]})
>>> model = BayesianModel([('A', 'C'), ('B', 'C')])
>>> cpd_A = MaximumLikelihoodEstimator(model, data).estimate_cpd('A')
>>> print(cpd_A)
ββββββββ€βββββββββββ
β A(0) β 0.666667 β
ββββββββΌβββββββββββ€
β A(1) β 0.333333 β
ββββββββ§βββββββββββ
>>> cpd_C = MaximumLikelihoodEstimator(model, data).estimate_cpd('C')
>>> print(cpd_C)
ββββββββ€βββββββ€βββββββ€βββββββ€βββββββ
β A β A(0) β A(0) β A(1) β A(1) β
ββββββββΌβββββββΌβββββββΌβββββββΌβββββββ€
β B β B(0) β B(1) β B(0) β B(1) β
ββββββββΌβββββββΌβββββββΌβββββββΌβββββββ€
β C(0) β 0.0 β 0.0 β 1.0 β 0.5 β
ββββββββΌβββββββΌβββββββΌβββββββΌβββββββ€
β C(1) β 1.0 β 1.0 β 0.0 β 0.5 β
ββββββββ§βββββββ§βββββββ§βββββββ§βββββββ
|
24,387 |
def ilsr_rankings(
n_items, data, alpha=0.0, initial_params=None, max_iter=100, tol=1e-8):
fun = functools.partial(
lsr_rankings, n_items=n_items, data=data, alpha=alpha)
return _ilsr(fun, initial_params, max_iter, tol)
|
Compute the ML estimate of model parameters using I-LSR.
This function computes the maximum-likelihood (ML) estimate of model
parameters given ranking data (see :ref:`data-rankings`), using the
iterative Luce Spectral Ranking algorithm [MG15]_.
The transition rates of the LSR Markov chain are initialized with
``alpha``. When ``alpha > 0``, this corresponds to a form of regularization
(see :ref:`regularization` for details).
Parameters
----------
n_items : int
Number of distinct items.
data : list of lists
Ranking data.
alpha : float, optional
Regularization parameter.
initial_params : array_like, optional
Parameters used to initialize the iterative procedure.
max_iter : int, optional
Maximum number of iterations allowed.
tol : float, optional
Maximum L1-norm of the difference between successive iterates to
declare convergence.
Returns
-------
params : numpy.ndarray
The ML estimate of model parameters.
|
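A brief usage sketch; the ranking format (each sequence lists items from most to least preferred) is an assumption consistent with the LSR literature:
data = [(0, 1, 2), (1, 0, 2)]            # two rankings over 3 items
params = ilsr_rankings(3, data, alpha=0.01)
# params[i] is the estimated (log-)strength of item i; higher means more preferred.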
24,388 |
def multi_replace(str_, search_list, repl_list):
r
if isinstance(repl_list, six.string_types):
repl_list_ = [repl_list] * len(search_list)
else:
repl_list_ = repl_list
newstr = str_
    assert len(search_list) == len(repl_list_), 'search_list and repl_list must have the same length'
for search, repl in zip(search_list, repl_list_):
newstr = newstr.replace(search, repl)
return newstr
|
r"""
Performs multiple replace functions foreach item in search_list and
repl_list.
Args:
str_ (str): string to search
search_list (list): list of search strings
repl_list (list or str): one or multiple replace strings
Returns:
str: str_
CommandLine:
python -m utool.util_str --exec-multi_replace
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_str import * # NOQA
>>> str_ = 'foo. bar: baz; spam-eggs --- eggs+spam'
>>> search_list = ['.', ':', '---']
>>> repl_list = '@'
>>> str_ = multi_replace(str_, search_list, repl_list)
>>> result = ('str_ = %s' % (str(str_),))
>>> print(result)
str_ = foo@ bar@ baz; spam-eggs @ eggs+spam
|
24,389 |
def setRecord( self, record ):
super(XBasicCardWidget, self).setRecord(record)
browser = self.browserWidget()
if ( not browser ):
return
factory = browser.factory()
if ( not factory ):
return
self._thumbnailButton.setIcon(factory.thumbnail(record))
self._titleLabel.setText(factory.thumbnailText(record))
|
Sets the record that is linked with this widget.
:param record | <orb.Table>
|
24,390 |
async def shutdown(self):
if self.log_output:
logging.info()
else:
print()
await self.send_reset()
try:
self.loop.stop()
except:
pass
try:
self.loop.close()
except:
pass
sys.exit(0)
|
This method attempts an orderly shutdown.
If any exceptions are thrown during shutdown, they are ignored.
:returns: No return value
|
24,391 |
def get_pane_index(self, pane):
" Return the index of the given pane. ValueError if not found. "
assert isinstance(pane, Pane)
return self.panes.index(pane)
|
Return the index of the given pane. ValueError if not found.
|
24,392 |
def _set_topology_group(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("topology_group_id",topology_group.topology_group, yang_name="topology-group", rest_name="topology-group", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: None, u: None, u: u, u: None, u: None, u: None, u: None, u: u, u: u}}), is_container=, yang_name="topology-group", rest_name="topology-group", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None, u: None, u: u, u: None, u: None, u: None, u: None, u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "list",
: ,
})
self.__topology_group = t
if hasattr(self, ):
self._set()
|
Setter method for topology_group, mapped from YANG variable /topology_group (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_topology_group is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_topology_group() directly.
|
24,393 |
def report(self, output_file=sys.stdout):
max_perf = self.results[]
if self._args and self._args.verbose >= 3:
print(.format(pformat(self.results)), file=output_file)
if self._args and self._args.verbose >= 1:
print(.format(pformat(self.results[])), file=output_file)
print(, file=output_file)
print(,
file=output_file)
print(,
file=output_file)
print(.format(
max_perf[self._args.unit]),
file=output_file)
for b in self.results[]:
print(
.format(
b[][self._args.unit], **b),
file=output_file)
print(, file=output_file)
if self.results[][] > max_perf[]:
print(.format(max_perf), file=output_file)
else:
print(, file=output_file)
bottleneck = self.results[][self.results[]]
print(.format(
bottleneck[][self._args.unit],
bottleneck[],
bottleneck[]),
file=output_file)
print(.format(bottleneck[]),
file=output_file)
|
Report analysis outcome in human readable form.
|
24,394 |
def read_asynchronously(library, session, count):
buffer = create_string_buffer(count)
job_id = ViJobId()
ret = library.viReadAsync(session, buffer, count, byref(job_id))
return buffer, job_id, ret
|
Reads data from device or interface asynchronously.
Corresponds to viReadAsync function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param count: Number of bytes to be read.
:return: result, jobid, return value of the library call.
:rtype: ctypes buffer, jobid, :class:`pyvisa.constants.StatusCode`
|
24,395 |
def table(columns, names, page_size=None, format_strings=None):
    if page_size is None:
        page = 'disable'      # NOTE: literals reconstructed; they toggle paging in the template
    else:
        page = 'enable'
    div_id = uuid.uuid4()
    column_descriptions = []
    for column, name in zip(columns, names):
        if column.dtype.kind == 'S':
            ctype = 'string'  # NOTE: column types assumed to follow the Google Charts convention
        else:
            ctype = 'number'
column_descriptions.append((ctype, name))
data = []
for item in zip(*columns):
data.append(list(item))
return google_table_template.render(div_id=div_id,
page_enable=page,
column_descriptions = column_descriptions,
page_size=page_size,
data=data,
format_strings=format_strings,
)
|
Return an html table of this data
Parameters
----------
columns : list of numpy arrays
names : list of strings
The list of columns names
page_size : {int, None}, optional
The number of items to show on each page of the table
format_strings : {lists of strings, None}, optional
The ICU format string for this column, None for no formatting. All
columns must have a format string if provided.
Returns
-------
html_table : str
A str containing the html code to display a table of this data
|
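A brief usage sketch; the rendered HTML depends on the module-level google_table_template, which is not shown here:
import numpy

snr = numpy.array([8.1, 9.7, 12.3])
ifo = numpy.array(['H1', 'L1', 'V1'])
html = table([snr, ifo], ['SNR', 'Detector'], page_size=2)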
24,396 |
def resize_file_to(self, in_path, out_path, keep_filename=False):
if keep_filename:
filename = path.join(out_path, path.basename(in_path))
else:
filename = path.join(out_path, self.get_thumbnail_name(in_path))
out_path = path.dirname(filename)
if not path.exists(out_path):
os.makedirs(out_path)
if not path.exists(filename):
try:
image = Image.open(in_path)
thumbnail = self.resize(image)
thumbnail.save(filename)
logger.info("Generated Thumbnail {0}".format(path.basename(filename)))
except IOError:
logger.info("Generating Thumbnail for {0} skipped".format(path.basename(filename)))
|
Given a filename, resize and save the image per the specification into out_path
:param in_path: path to the image file to resize. Must be supported by PIL
:param out_path: path to the directory root where the outputted thumbnails are stored
:param keep_filename: if True, keep the original file name instead of the generated thumbnail name
:return: None
|
24,397 |
def trusted_permission(f):
@functools.wraps(f)
def wrapper(request, *args, **kwargs):
trusted(request)
return f(request, *args, **kwargs)
return wrapper
|
Access only by D1 infrastructure.
|
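A hypothetical usage sketch: decorating a view so that only trusted D1 infrastructure callers get through (the view name and body are placeholders):
@trusted_permission
def update_node_status(request):
    return handle_update(request)   # placeholder view body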
24,398 |
def _py_expand_short(subsequence, sequence, max_l_dist):
subseq_len = len(subsequence)
if subseq_len == 0:
return (0, 0)
scores = list(range(1, subseq_len + 1))
min_score = subseq_len
min_score_idx = -1
for seq_index, char in enumerate(sequence):
a = seq_index
c = a + 1
for subseq_index in range(subseq_len):
b = scores[subseq_index]
c = scores[subseq_index] = min(
a + (char != subsequence[subseq_index]),
b + 1,
c + 1,
)
a = b
if c <= min_score:
min_score = c
min_score_idx = seq_index
elif min(scores) >= min_score:
break
return (min_score, min_score_idx + 1) if min_score <= max_l_dist else (None, None)
|
Straightforward implementation of partial match expansion.
|
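Hand-worked doctest-style illustrations of the return value, a (distance, end index) pair for the best prefix match, or (None, None) when nothing lies within max_l_dist:
>>> _py_expand_short('abc', 'abcd', max_l_dist=1)   # exact prefix match
(0, 3)
>>> _py_expand_short('ab', 'axb', max_l_dist=1)     # one edit, match spans 3 characters
(1, 3)
>>> _py_expand_short('ab', 'xyz', max_l_dist=1)     # nothing within distance 1
(None, None)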
24,399 |
def get_data_frame_transform_stats(self, transform_id=None, params=None):
return self.transport.perform_request(
"GET",
_make_path("_data_frame", "transforms", transform_id, "_stats"),
params=params,
)
|
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/get-data-frame-transform-stats.html>`_
:arg transform_id: The id of the transform for which to get stats.
'_all' or '*' implies all transforms
|