code | docstring
---|---|
def dict_to_table(dictionary, list_of_keys=None):
table = Table()
if len(dictionary) > 0:
table['name'] = dictionary.keys()
prototype = dictionary.values()[0]
column_names = prototype.keys()
if list_of_keys is not None:
column_names = filter(lambda key: key in list_of_keys, column_names)
for column_name in column_names:
table[column_name] = map(lambda x: x[column_name], dictionary.values())
return table
|
Return a table representing the dictionary.
:param dictionary: the dictionary to represent
:param list_of_keys: optionally, only the keys in this list will be inserted in the table
:return: a Table instance
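A hedged sketch of the expected input, assuming `Table` is an astropy-style table and each dictionary value is itself a dict of column values (the `.values()[0]` access above implies Python 2 dict semantics):
measurements = {'alpha': {'flux': 1.2, 'snr': 5.0},
                'beta': {'flux': 0.7, 'snr': 3.1}}
table = dict_to_table(measurements, list_of_keys=['flux'])
# -> a table with a 'name' column ('alpha', 'beta') and a 'flux' column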
|
def render(template, saltenv='base', sls='', tmplpath=None, **kws):
template = tmplpath
if not os.path.isfile(template):
raise SaltRenderError('Template {0} is not a file!'.format(template))
tmp_data = salt.utils.templates.py(
template,
True,
__salt__=__salt__,
salt=__salt__,
__grains__=__grains__,
grains=__grains__,
__opts__=__opts__,
opts=__opts__,
__pillar__=__pillar__,
pillar=__pillar__,
__env__=saltenv,
saltenv=saltenv,
__sls__=sls,
sls=sls,
**kws)
if not tmp_data.get('result', False):
raise SaltRenderError(tmp_data.get('data',
'Unknown render error in py renderer'))
return tmp_data['data']
|
Render the python module's components
:rtype: string
|
def import_functions(names, src, dst):
for name in names:
module = importlib.import_module('pygsp.' + src)
setattr(sys.modules['pygsp.' + dst], name, getattr(module, name))
|
Import functions in package from their implementation modules.
|
def _get_target_brokers(self, over_loaded_brokers, under_loaded_brokers, sibling_distance):
over_loaded_brokers = sorted(
over_loaded_brokers,
key=lambda b: len(b.partitions),
reverse=True,
)
under_loaded_brokers = sorted(
under_loaded_brokers,
key=lambda b: len(b.partitions),
)
target = (None, None, None)
min_distance = sys.maxsize
best_partition = None
for source in over_loaded_brokers:
for dest in under_loaded_brokers:
if (len(source.partitions) - len(dest.partitions) > 1 or
source.decommissioned):
best_partition = source.get_preferred_partition(
dest,
sibling_distance[dest][source],
)
if best_partition is None:
continue
distance = sibling_distance[dest][source][best_partition.topic]
if distance < min_distance:
min_distance = distance
target = (source, dest, best_partition)
else:
break
return target
|
Pick the most suitable source broker, destination broker, and partition to
balance partition count over brokers in the given replication group.
|
def limit_(self, r=5):
try:
return self._duplicate_(self.df[:r])
except Exception as e:
self.err(e, "Can not limit data")
|
Returns a DataSwim instance with limited selection
|
def __remove_duplicates(self, _other):
if not isinstance(_other, type(self)) \
and not isinstance(_other, type(list)) \
and not isinstance(_other, type([])):
other = [_other]
else:
other = list(_other)
newother = []
for i in range(0, len(other)):
item = other.pop(0)
if not list.__contains__(self, item):
newother.append(item)
other = []
if newother != []:
other.append(newother[0])
for i in range(1, len(newother)):
item = newother.pop()
if not other.__contains__(item):
other.append(item)
return other
|
Remove from `other` the items already in this list.
|
def set_line_style(self, width = None, dash = None, dash_offset = 0):
if width is not None:
self._add_instruction("set_line_width", width)
if dash is not None:
self._add_instruction("set_dash", dash, dash_offset)
|
Change the width and dash pattern of a line.
|
def iter_up(self, include_self=True):
if include_self: yield self
parent = self.parent
while parent is not None:
yield parent
try:
parent = parent.parent
except AttributeError:
return
|
Iterates up the tree to the root.
|
def Where_filter_gen(*data):
where = []
def Fwhere(field, pattern):
where.append("WHERE {0} LIKE '{1}'".format(field, pattern))
def Fstring(field, string):
Fwhere(field, "%{0}%".format(string if not isinstance(string, str)
else str(string)))
def Fdict(field, data):
for key, value in data.items():
if value == '*':
Fstring(field, key)
else:
Fstring(field, "{0}:%{1}".format(key, value if not
isinstance(value, str)
else str(value)))
def Flist(field, data):
for elem in data:
Fstring(field, elem if not isinstance(elem, str) else
str(elem))
for field, data in data:
if isinstance(data, str):
Fstring(field, data)
elif isinstance(data, dict):
Fdict(field, data)
elif isinstance(data, list):
Flist(field, data)
return ' AND '.join(where)
|
Generate an SQLite "LIKE" filter based on the given data.
This function's arguments should be an N-length series of (field, data)
tuples.
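A small worked example (note that, as written, each clause carries its own WHERE keyword):
Where_filter_gen(('name', 'smith'), ('city', 'berlin'))
# -> "WHERE name LIKE '%smith%' AND WHERE city LIKE '%berlin%'"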
|
def iter_dialogs(
self, limit=None, *, offset_date=None, offset_id=0,
offset_peer=types.InputPeerEmpty(), ignore_migrated=False
):
return _DialogsIter(
self,
limit,
offset_date=offset_date,
offset_id=offset_id,
offset_peer=offset_peer,
ignore_migrated=ignore_migrated
)
|
Returns an iterator over the dialogs, yielding 'limit' at most.
Dialogs are the open "chats" or conversations with other people,
groups you have joined, or channels you are subscribed to.
Args:
limit (`int` | `None`):
How many dialogs to be retrieved as maximum. Can be set to
``None`` to retrieve all dialogs. Note that this may take
whole minutes if you have hundreds of dialogs, as Telegram
will tell the library to slow down through a
``FloodWaitError``.
offset_date (`datetime`, optional):
The offset date to be used.
offset_id (`int`, optional):
The message ID to be used as an offset.
offset_peer (:tl:`InputPeer`, optional):
The peer to be used as an offset.
ignore_migrated (`bool`, optional):
Whether :tl:`Chat` that have ``migrated_to`` a :tl:`Channel`
should be included or not. By default all the chats in your
dialogs are returned, but setting this to ``True`` will hide
them in the same way official applications do.
Yields:
Instances of `telethon.tl.custom.dialog.Dialog`.
|
def connect(self, address, token=None):
if self.connected:
self.subscriber.on_connect_error(
'Already connected to "%s"' % self.address)
return False
self.address = address
self.server_token = token
self.ingame = False
self.ws.settimeout(1)
self.ws.connect('ws://%s' % self.address, origin='http://agar.io')
if not self.connected:
self.subscriber.on_connect_error(
'Failed to connect to "%s"' % self.address)
return False
self.subscriber.on_sock_open()
if not self.connected:
self.subscriber.on_connect_error(
'Disconnected before sending handshake')
return False
self.send_handshake()
if self.server_token:
self.send_token(self.server_token)
old_nick = self.player.nick
self.player.reset()
self.world.reset()
self.player.nick = old_nick
return True
|
Connect the underlying websocket to the address,
send a handshake and optionally a token packet.
Returns `True` if connected, `False` if the connection failed.
:param address: string, `IP:PORT`
:param token: unique token, required by official servers,
acquired through utils.find_server()
:return: True if connected, False if not
|
def set_section(self, section):
if not isinstance(section, Section):
raise Exception("You")
try:
self.remove_section(section.name)
except:
pass
self._sections[section.name] = copy.deepcopy(section)
|
Set a section. If section already exists, overwrite the old one.
|
def datetime_to_djd(time):
if time.tzinfo is None:
time_utc = pytz.utc.localize(time)
else:
time_utc = time.astimezone(pytz.utc)
djd_start = pytz.utc.localize(dt.datetime(1899, 12, 31, 12))
djd = (time_utc - djd_start).total_seconds() * 1.0/(60 * 60 * 24)
return djd
|
Converts a datetime to the Dublin Julian Day
Parameters
----------
time : datetime.datetime
time to convert
Returns
-------
float
fractional days since 12/31/1899+0000
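As a quick check of the epoch: one day after 1899-12-31 12:00 UTC maps to 1.0.
datetime_to_djd(dt.datetime(1900, 1, 1, 12))  # naive times are treated as UTC -> 1.0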
|
def _get_child_mock(mock, **kw):
attribute = "." + kw["name"] if "name" in kw else "()"
mock_name = _extract_mock_name(mock) + attribute
raise AttributeError(mock_name)
|
Intercepts calls to generate new mocks and raises instead
|
def maybe_obj(str_or_obj):
if not isinstance(str_or_obj, six.string_types):
return str_or_obj
parts = str_or_obj.split(".")
mod, modname = None, None
for p in parts:
modname = p if modname is None else "%s.%s" % (modname, p)
try:
mod = __import__(modname)
except ImportError:
if mod is None:
raise
break
obj = mod
for p in parts[1:]:
obj = getattr(obj, p)
return obj
|
If argument is not a string, return it.
Otherwise import the dotted name and return that.
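For example:
maybe_obj('os.path.join')   # imports the dotted name -> <function join>
maybe_obj(os.path.join)     # non-strings are returned unchanged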
|
def classify_intersection(intersection, edge_nodes1, edge_nodes2):
if intersection.s == 1.0 or intersection.t == 1.0:
raise ValueError(
"Intersection occurs at the end of an edge",
"s",
intersection.s,
"t",
intersection.t,
)
nodes1 = edge_nodes1[intersection.index_first]
tangent1 = _curve_helpers.evaluate_hodograph(intersection.s, nodes1)
nodes2 = edge_nodes2[intersection.index_second]
tangent2 = _curve_helpers.evaluate_hodograph(intersection.t, nodes2)
if ignored_corner(
intersection, tangent1, tangent2, edge_nodes1, edge_nodes2
):
return CLASSIFICATION_T.IGNORED_CORNER
cross_prod = _helpers.cross_product(
tangent1.ravel(order="F"), tangent2.ravel(order="F")
)
if cross_prod < -ALMOST_TANGENT:
return CLASSIFICATION_T.FIRST
elif cross_prod > ALMOST_TANGENT:
return CLASSIFICATION_T.SECOND
else:
return classify_tangent_intersection(
intersection, nodes1, tangent1, nodes2, tangent2
)
|
r"""Determine which curve is on the "inside of the intersection".
.. note::
This is a helper used only by :meth:`.Surface.intersect`.
This is intended to be a helper for forming a :class:`.CurvedPolygon`
from the edge intersections of two :class:`.Surface`-s. In order
to move from one intersection to another (or to the end of an edge),
the interior edge must be determined at the point of intersection.
The "typical" case is on the interior of both edges:
.. image:: ../images/classify_intersection1.png
:align: center
.. testsetup:: classify-intersection1, classify-intersection2,
classify-intersection3, classify-intersection4,
classify-intersection5, classify-intersection6,
classify-intersection7, classify-intersection8,
classify-intersection9
import numpy as np
import bezier
from bezier import _curve_helpers
from bezier._intersection_helpers import Intersection
from bezier._surface_helpers import classify_intersection
def hodograph(curve, s):
return _curve_helpers.evaluate_hodograph(
s, curve._nodes)
def curvature(curve, s):
nodes = curve._nodes
tangent = _curve_helpers.evaluate_hodograph(
s, nodes)
return _curve_helpers.get_curvature(
nodes, tangent, s)
.. doctest:: classify-intersection1
:options: +NORMALIZE_WHITESPACE
>>> nodes1 = np.asfortranarray([
... [1.0, 1.75, 2.0],
... [0.0, 0.25, 1.0],
... ])
>>> curve1 = bezier.Curve(nodes1, degree=2)
>>> nodes2 = np.asfortranarray([
... [0.0, 1.6875, 2.0],
... [0.0, 0.0625, 0.5],
... ])
>>> curve2 = bezier.Curve(nodes2, degree=2)
>>> s, t = 0.25, 0.5
>>> curve1.evaluate(s) == curve2.evaluate(t)
array([[ True],
[ True]])
>>> tangent1 = hodograph(curve1, s)
>>> tangent1
array([[1.25],
[0.75]])
>>> tangent2 = hodograph(curve2, t)
>>> tangent2
array([[2. ],
[0.5]])
>>> intersection = Intersection(0, s, 0, t)
>>> edge_nodes1 = (nodes1, None, None)
>>> edge_nodes2 = (nodes2, None, None)
>>> classify_intersection(intersection, edge_nodes1, edge_nodes2)
<IntersectionClassification.FIRST: 0>
.. testcleanup:: classify-intersection1
import make_images
make_images.classify_intersection1(
s, curve1, tangent1, curve2, tangent2)
We determine the interior (i.e. left) one by using the `right-hand rule`_:
by embedding the tangent vectors in :math:`\mathbf{R}^3`, we
compute
.. _right-hand rule: https://en.wikipedia.org/wiki/Right-hand_rule
.. math::
\left[\begin{array}{c}
x_1'(s) \\ y_1'(s) \\ 0 \end{array}\right] \times
\left[\begin{array}{c}
x_2'(t) \\ y_2'(t) \\ 0 \end{array}\right] =
\left[\begin{array}{c}
0 \\ 0 \\ x_1'(s) y_2'(t) - x_2'(t) y_1'(s) \end{array}\right].
If the cross product quantity
:math:`B_1'(s) \times B_2'(t) = x_1'(s) y_2'(t) - x_2'(t) y_1'(s)`
is positive, then the first curve is "outside" / "to the right", i.e.
the second curve is interior. If the cross product is negative, the
first curve is interior.
When :math:`B_1'(s) \times B_2'(t) = 0`, the tangent
vectors are parallel, i.e. the intersection is a point of tangency:
.. image:: ../images/classify_intersection2.png
:align: center
.. doctest:: classify-intersection2
:options: +NORMALIZE_WHITESPACE
>>> nodes1 = np.asfortranarray([
... [1.0, 1.5, 2.0],
... [0.0, 1.0, 0.0],
... ])
>>> curve1 = bezier.Curve(nodes1, degree=2)
>>> nodes2 = np.asfortranarray([
... [0.0, 1.5, 3.0],
... [0.0, 1.0, 0.0],
... ])
>>> curve2 = bezier.Curve(nodes2, degree=2)
>>> s, t = 0.5, 0.5
>>> curve1.evaluate(s) == curve2.evaluate(t)
array([[ True],
[ True]])
>>> intersection = Intersection(0, s, 0, t)
>>> edge_nodes1 = (nodes1, None, None)
>>> edge_nodes2 = (nodes2, None, None)
>>> classify_intersection(intersection, edge_nodes1, edge_nodes2)
<IntersectionClassification.TANGENT_SECOND: 4>
.. testcleanup:: classify-intersection2
import make_images
make_images.classify_intersection2(s, curve1, curve2)
Depending on the direction of the parameterizations, the interior
curve may change, but we can use the (signed) `curvature`_ of each
curve at that point to determine which is on the interior:
.. _curvature: https://en.wikipedia.org/wiki/Curvature
.. image:: ../images/classify_intersection3.png
:align: center
.. doctest:: classify-intersection3
:options: +NORMALIZE_WHITESPACE
>>> nodes1 = np.asfortranarray([
... [2.0, 1.5, 1.0],
... [0.0, 1.0, 0.0],
... ])
>>> curve1 = bezier.Curve(nodes1, degree=2)
>>> nodes2 = np.asfortranarray([
... [3.0, 1.5, 0.0],
... [0.0, 1.0, 0.0],
... ])
>>> curve2 = bezier.Curve(nodes2, degree=2)
>>> s, t = 0.5, 0.5
>>> curve1.evaluate(s) == curve2.evaluate(t)
array([[ True],
[ True]])
>>> intersection = Intersection(0, s, 0, t)
>>> edge_nodes1 = (nodes1, None, None)
>>> edge_nodes2 = (nodes2, None, None)
>>> classify_intersection(intersection, edge_nodes1, edge_nodes2)
<IntersectionClassification.TANGENT_FIRST: 3>
.. testcleanup:: classify-intersection3
import make_images
make_images.classify_intersection3(s, curve1, curve2)
When the curves are moving in opposite directions at a point
of tangency, there is no side to choose. Either the point of tangency
is not part of any :class:`.CurvedPolygon` intersection
.. image:: ../images/classify_intersection4.png
:align: center
.. doctest:: classify-intersection4
:options: +NORMALIZE_WHITESPACE
>>> nodes1 = np.asfortranarray([
... [2.0, 1.5, 1.0],
... [0.0, 1.0, 0.0],
... ])
>>> curve1 = bezier.Curve(nodes1, degree=2)
>>> nodes2 = np.asfortranarray([
... [0.0, 1.5, 3.0],
... [0.0, 1.0, 0.0],
... ])
>>> curve2 = bezier.Curve(nodes2, degree=2)
>>> s, t = 0.5, 0.5
>>> curve1.evaluate(s) == curve2.evaluate(t)
array([[ True],
[ True]])
>>> intersection = Intersection(0, s, 0, t)
>>> edge_nodes1 = (nodes1, None, None)
>>> edge_nodes2 = (nodes2, None, None)
>>> classify_intersection(intersection, edge_nodes1, edge_nodes2)
<IntersectionClassification.OPPOSED: 2>
.. testcleanup:: classify-intersection4
import make_images
make_images.classify_intersection4(s, curve1, curve2)
or the point of tangency is a "degenerate" part of two
:class:`.CurvedPolygon` intersections. It is "degenerate"
because from one direction, the point should be classified as
:attr:`~.IntersectionClassification.FIRST` and from another as
:attr:`~.IntersectionClassification.SECOND`.
.. image:: ../images/classify_intersection5.png
:align: center
.. doctest:: classify-intersection5
:options: +NORMALIZE_WHITESPACE
>>> nodes1 = np.asfortranarray([
... [1.0, 1.5, 2.0],
... [0.0, 1.0, 0.0],
... ])
>>> curve1 = bezier.Curve(nodes1, degree=2)
>>> nodes2 = np.asfortranarray([
... [3.0, 1.5, 0.0],
... [0.0, 1.0, 0.0],
... ])
>>> curve2 = bezier.Curve(nodes2, degree=2)
>>> s, t = 0.5, 0.5
>>> curve1.evaluate(s) == curve2.evaluate(t)
array([[ True],
[ True]])
>>> intersection = Intersection(0, s, 0, t)
>>> edge_nodes1 = (nodes1, None, None)
>>> edge_nodes2 = (nodes2, None, None)
>>> classify_intersection(intersection, edge_nodes1, edge_nodes2)
<IntersectionClassification.TANGENT_BOTH: 6>
.. testcleanup:: classify-intersection5
import make_images
make_images.classify_intersection5(s, curve1, curve2)
The :attr:`~.IntersectionClassification.TANGENT_BOTH` classification
can also occur if the curves are "kissing" but share a zero width
interior at the point of tangency:
.. image:: ../images/classify_intersection9.png
:align: center
.. doctest:: classify-intersection9
:options: +NORMALIZE_WHITESPACE
>>> nodes1 = np.asfortranarray([
... [0.0, 20.0, 40.0],
... [0.0, 40.0, 0.0],
... ])
>>> curve1 = bezier.Curve(nodes1, degree=2)
>>> nodes2 = np.asfortranarray([
... [40.0, 20.0, 0.0],
... [40.0, 0.0, 40.0],
... ])
>>> curve2 = bezier.Curve(nodes2, degree=2)
>>> s, t = 0.5, 0.5
>>> curve1.evaluate(s) == curve2.evaluate(t)
array([[ True],
[ True]])
>>> intersection = Intersection(0, s, 0, t)
>>> edge_nodes1 = (nodes1, None, None)
>>> edge_nodes2 = (nodes2, None, None)
>>> classify_intersection(intersection, edge_nodes1, edge_nodes2)
<IntersectionClassification.TANGENT_BOTH: 6>
.. testcleanup:: classify-intersection9
import make_images
make_images.classify_intersection9(s, curve1, curve2)
However, if the `curvature`_ of each curve is identical, we
don't try to distinguish further:
.. image:: ../images/classify_intersection6.png
:align: center
.. doctest:: classify-intersection6
:options: +NORMALIZE_WHITESPACE
>>> nodes1 = np.asfortranarray([
... [-0.125 , -0.125 , 0.375 ],
... [ 0.0625, -0.0625, 0.0625],
... ])
>>> curve1 = bezier.Curve(nodes1, degree=2)
>>> nodes2 = np.asfortranarray([
... [-0.25, -0.25, 0.75],
... [ 0.25, -0.25, 0.25],
... ])
>>> curve2 = bezier.Curve(nodes2, degree=2)
>>> s, t = 0.5, 0.5
>>> curve1.evaluate(s) == curve2.evaluate(t)
array([[ True],
[ True]])
>>> hodograph(curve1, s)
array([[0.5],
[0. ]])
>>> hodograph(curve2, t)
array([[1.],
[0.]])
>>> curvature(curve1, s)
2.0
>>> curvature(curve2, t)
2.0
>>> intersection = Intersection(0, s, 0, t)
>>> edge_nodes1 = (nodes1, None, None)
>>> edge_nodes2 = (nodes2, None, None)
>>> classify_intersection(intersection, edge_nodes1, edge_nodes2)
Traceback (most recent call last):
...
NotImplementedError: Tangent curves have same curvature.
.. testcleanup:: classify-intersection6
import make_images
make_images.classify_intersection6(s, curve1, curve2)
In addition to points of tangency, intersections that happen at
the end of an edge need special handling:
.. image:: ../images/classify_intersection7.png
:align: center
.. doctest:: classify-intersection7
:options: +NORMALIZE_WHITESPACE
>>> nodes1a = np.asfortranarray([
... [0.0, 4.5, 9.0 ],
... [0.0, 0.0, 2.25],
... ])
>>> curve1a = bezier.Curve(nodes1a, degree=2)
>>> nodes2 = np.asfortranarray([
... [11.25, 9.0, 2.75],
... [ 0.0 , 4.5, 1.0 ],
... ])
>>> curve2 = bezier.Curve(nodes2, degree=2)
>>> s, t = 1.0, 0.375
>>> curve1a.evaluate(s) == curve2.evaluate(t)
array([[ True],
[ True]])
>>> intersection = Intersection(0, s, 0, t)
>>> edge_nodes1 = (nodes1a, None, None)
>>> edge_nodes2 = (nodes2, None, None)
>>> classify_intersection(intersection, edge_nodes1, edge_nodes2)
Traceback (most recent call last):
...
ValueError: ('Intersection occurs at the end of an edge',
's', 1.0, 't', 0.375)
>>>
>>> nodes1b = np.asfortranarray([
... [9.0, 4.5, 0.0],
... [2.25, 2.375, 2.5],
... ])
>>> curve1b = bezier.Curve(nodes1b, degree=2)
>>> curve1b.evaluate(0.0) == curve2.evaluate(t)
array([[ True],
[ True]])
>>> intersection = Intersection(1, 0.0, 0, t)
>>> edge_nodes1 = (nodes1a, nodes1b, None)
>>> classify_intersection(intersection, edge_nodes1, edge_nodes2)
<IntersectionClassification.FIRST: 0>
.. testcleanup:: classify-intersection7
import make_images
make_images.classify_intersection7(s, curve1a, curve1b, curve2)
As above, some intersections at the end of an edge are part of
an actual intersection. However, some surfaces may just "kiss" at a
corner intersection:
.. image:: ../images/classify_intersection8.png
:align: center
.. doctest:: classify-intersection8
:options: +NORMALIZE_WHITESPACE
>>> nodes1 = np.asfortranarray([
... [0.25, 0.0, 0.0, 0.625, 0.5 , 1.0 ],
... [1.0 , 0.5, 0.0, 0.875, 0.375, 0.75],
... ])
>>> surface1 = bezier.Surface(nodes1, degree=2)
>>> nodes2 = np.asfortranarray([
... [0.0625, -0.25, -1.0, -0.5 , -1.0, -1.0],
... [0.5 , 1.0 , 1.0, 0.125, 0.5, 0.0],
... ])
>>> surface2 = bezier.Surface(nodes2, degree=2)
>>> curve1, _, _ = surface1.edges
>>> edge_nodes1 = [curve.nodes for curve in surface1.edges]
>>> curve2, _, _ = surface2.edges
>>> edge_nodes2 = [curve.nodes for curve in surface2.edges]
>>> s, t = 0.5, 0.0
>>> curve1.evaluate(s) == curve2.evaluate(t)
array([[ True],
[ True]])
>>> intersection = Intersection(0, s, 0, t)
>>> classify_intersection(intersection, edge_nodes1, edge_nodes2)
<IntersectionClassification.IGNORED_CORNER: 5>
.. testcleanup:: classify-intersection8
import make_images
make_images.classify_intersection8(
s, curve1, surface1, curve2, surface2)
.. note::
This assumes the intersection occurs in :math:`\mathbf{R}^2`
but doesn't check this.
.. note::
This function doesn't allow wiggle room / round-off when checking
endpoints, nor when checking if the cross product is near zero,
nor when curvatures are compared. However, the most "correct"
version of this function likely should allow for some round off.
Args:
intersection (.Intersection): An intersection object.
edge_nodes1 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The
nodes of the three edges of the first surface being intersected.
edge_nodes2 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The
nodes of the three edges of the second surface being intersected.
Returns:
IntersectionClassification: The "inside" curve type, based on
the classification enum.
Raises:
ValueError: If the intersection occurs at the end of either
curve involved. This is because we want to classify which
curve to **move forward** on, and we can't move past the
end of a segment.
|
def _Gunzip(gzipped_content):
f = tempfile.NamedTemporaryFile(suffix='gz', mode='w+b', delete=False)
try:
f.write(gzipped_content)
f.close()
with gzip.open(f.name, 'rb') as h:
decompressed_content = h.read()
return decompressed_content
finally:
os.unlink(f.name)
|
Returns gunzipped content from gzipped contents.
|
def overlaps_range(self, begin, end):
if self.is_empty():
return False
elif begin >= end:
return False
elif self.overlaps_point(begin):
return True
return any(
self.overlaps_point(bound)
for bound in self.boundary_table
if begin < bound < end
)
|
Returns whether some interval in the tree overlaps the given
range. Returns False if given a null interval over which to
test.
Completes in O(r*log n) time, where r is the range length and n
is the table size.
:rtype: bool
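A usage sketch, assuming an IntervalTree built with the library's from_tuples constructor (an assumption about the surrounding API):
tree = IntervalTree.from_tuples([(1, 5), (10, 15)])
tree.overlaps_range(4, 11)   # -> True (the point 4 lies inside (1, 5))
tree.overlaps_range(6, 9)    # -> False (no interval or boundary in (6, 9))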
|
def _compile(pattern, flags):
return re.compile(WcParse(pattern, flags & FLAG_MASK).parse())
|
Compile the pattern to regex.
|
def _learn(
permanences, rng,
activeCells, activeInput, growthCandidateInput,
sampleSize, initialPermanence, permanenceIncrement,
permanenceDecrement, connectedPermanence):
permanences.incrementNonZerosOnOuter(
activeCells, activeInput, permanenceIncrement)
permanences.incrementNonZerosOnRowsExcludingCols(
activeCells, activeInput, -permanenceDecrement)
permanences.clipRowsBelowAndAbove(
activeCells, 0.0, 1.0)
if sampleSize == -1:
permanences.setZerosOnOuter(
activeCells, activeInput, initialPermanence)
else:
existingSynapseCounts = permanences.nNonZerosPerRowOnCols(
activeCells, activeInput)
maxNewByCell = numpy.empty(len(activeCells), dtype="int32")
numpy.subtract(sampleSize, existingSynapseCounts, out=maxNewByCell)
permanences.setRandomZerosOnOuter(
activeCells, growthCandidateInput, maxNewByCell, initialPermanence, rng)
|
For each active cell, reinforce active synapses, punish inactive synapses,
and grow new synapses to a subset of the active input bits that the cell
isn't already connected to.
Parameters:
----------------------------
@param permanences (SparseMatrix)
Matrix of permanences, with cells as rows and inputs as columns
@param rng (Random)
Random number generator
@param activeCells (sorted sequence)
Sorted list of the cells that are learning
@param activeInput (sorted sequence)
Sorted list of active bits in the input
@param growthCandidateInput (sorted sequence)
Sorted list of active bits in the input that the activeCells may
grow new synapses to
For remaining parameters, see the __init__ docstring.
|
def _handle_units_placement(changeset, units, records):
for service_name, service in sorted(changeset.bundle['services'].items()):
num_units = service.get('num_units')
if num_units is None:
continue
placement_directives = service.get('to', [])
if not isinstance(placement_directives, (list, tuple)):
placement_directives = [placement_directives]
if placement_directives and not changeset.is_legacy_bundle():
placement_directives += (
placement_directives[-1:] *
(num_units - len(placement_directives)))
placed_in_services = {}
for i in range(num_units):
unit = units['{}/{}'.format(service_name, i)]
record = records[unit['record']]
if i < len(placement_directives):
record = _handle_unit_placement(
changeset, units, unit, record, placement_directives[i],
placed_in_services)
changeset.send(record)
|
Ensure that requires and placement directives are taken into account.
|
def verify_chunks(self, chunks):
err = []
for chunk in chunks:
err.extend(self.verify_data(chunk))
return err
|
Verify the chunks in a list of low data structures
|
def setup_config(cfg, config_filenames=None, env_var_name=None):
if env_var_name is None:
env_var_name = "BB_CONFIG_FILE"
config_path = os.getenv(env_var_name, None)
if not config_path:
config_path = find_config(defaults=config_filenames)
if config_path:
cfg.load(config_path)
cfg["config_file"] = os.path.abspath(config_path)
cfg.init_from_env()
|
This will initialize the given configuration object.
The following resources are applied in this order:
1) Default settings.
2) Config file.
3) Environment variables.
WARNING: Environment variables do _not_ take precedence over the config
file right now. (init_from_env will refuse to update the
value, if there is already one.)
Args:
config_filenames: list of possible config filenames
env_var_name: name of the environment variable holding the config path
|
def normalizeGlyphOrder(value):
if not isinstance(value, (tuple, list)):
raise TypeError("Glyph order must be a list, not %s."
% type(value).__name__)
for v in value:
normalizeGlyphName(v)
duplicates = sorted(v for v, count in Counter(value).items() if count > 1)
if len(duplicates) != 0:
raise ValueError("Duplicate glyph names are not allowed. Glyph "
"name(s) '%s' are duplicate." % ", ".join(duplicates))
return tuple([unicode(v) for v in value])
|
Normalizes glyph order.
* **value** must be a ``tuple`` or ``list``.
* **value** items must normalize as glyph names with
:func:`normalizeGlyphName`.
* **value** must not repeat glyph names.
* Returned value will be a ``tuple`` of unencoded ``unicode`` strings.
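For example (assuming the names pass :func:`normalizeGlyphName`):
normalizeGlyphOrder(["A", "B", "C"])   # -> ("A", "B", "C")
normalizeGlyphOrder(["A", "A"])        # raises ValueError (duplicate glyph names)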
|
def is_subnet_present(self, subnet_addr):
try:
subnet_list = self.neutronclient.list_subnets(body={})
subnet_dat = subnet_list.get('subnets')
for sub in subnet_dat:
if sub.get('cidr') == subnet_addr:
return True
return False
except Exception as exc:
LOG.error("Failed to list subnet %(sub)s, Exc %(exc)s",
{'sub': subnet_addr, 'exc': str(exc)})
return False
|
Return whether a subnet is present.
|
async def set_loop(self, loop_value):
if loop_value not in ['on', 'off', 'shuffle']:
self.statuslog.error("Loop value must be `off`, `on`, or `shuffle`")
return
self.loop_type = loop_value
if self.loop_type == 'on':
self.statuslog.info("Looping on")
elif self.loop_type == 'off':
self.statuslog.info("Looping off")
elif self.loop_type == 'shuffle':
self.statuslog.info("Looping on and shuffling")
|
Updates the loop value; can be 'off', 'on', or 'shuffle'.
|
def render(self, row, style=None, adopt=True):
group = self._proc_group(style, adopt=adopt)
if group == "override":
proc_keys = ["width", "override"]
else:
proc_keys = None
adjusted = self._set_widths(row, group)
proc_fields = [self.fields[c](row[c], keys=proc_keys)
for c in self.columns]
return self.style["separator_"].join(proc_fields) + "\n", adjusted
|
Render fields with values from `row`.
Parameters
----------
row : dict
A normalized row.
style : dict, optional
A style that follows the schema defined in pyout.elements. If
None, `self.style` is used.
adopt : bool, optional
Merge `self.style` and `style`, using the latter's keys
when there are conflicts. If False, treat `style` as a
standalone style.
Returns
-------
A tuple with the rendered value (str) and a flag that indicates whether
the field widths required adjustment (bool).
|
def dec2hms(x):
if not np.isfinite(x):
return 'XX:XX:XX.XX'
if x < 0:
x += 360
x /= 15.0
h = int(x)
x = (x - h) * 60
m = int(x)
s = (x - m) * 60
return '{0:02d}:{1:02d}:{2:05.2f}'.format(h, m, s)
|
Convert decimal degrees into a sexagesimal string in hours.
Parameters
----------
x : float
Angle in degrees
Returns
-------
hms : string
String of format HH:MM:SS.SS
or XX:XX:XX.XX if x is not finite.
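Worked examples:
dec2hms(180.0)   # -> '12:00:00.00'
dec2hms(-90.0)   # negative angles wrap to 270 degrees -> '18:00:00.00'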
|
def domains(db, domain=None, top=False):
doms = []
with db.connect() as session:
if domain:
dom = find_domain(session, domain)
if not dom:
if not top:
raise NotFoundError(entity=domain)
else:
add_dot = lambda d: '.' + d if not d.startswith('.') else d
d = add_dot(domain)
tops = session.query(Domain).\
filter(Domain.is_top_domain).order_by(Domain.domain).all()
doms = [t for t in tops
if d.endswith(add_dot(t.domain))]
if not doms:
raise NotFoundError(entity=domain)
else:
doms = [dom]
else:
query = session.query(Domain)
if top:
query = query.filter(Domain.is_top_domain)
doms = query.order_by(Domain.domain).all()
session.expunge_all()
return doms
|
List the domains available in the registry.
The function will return the list of domains. Setting the top flag,
it will look for those domains that are top domains. If the domain parameter
is set, it will only return the information about that domain.
When both parameters are set, it will first search for the given domain.
If it is not found, it will look for its top domains. If the domain neither
exists nor has top domains, a 'NotFoundError' exception
will be raised.
:param db: database manager
:param domain: name of the domain
:param top: filter by top domains
:returns: a list of domains
:raises NotFoundError: raised when the given domain is not found in the
registry
|
def update(self, stats):
if not self.export_enable:
return False
all_stats = stats.getAllExportsAsDict(plugin_list=self.plugins_to_export())
all_limits = stats.getAllLimitsAsDict(plugin_list=self.plugins_to_export())
for plugin in self.plugins_to_export():
if isinstance(all_stats[plugin], dict):
all_stats[plugin].update(all_limits[plugin])
elif isinstance(all_stats[plugin], list):
for i in all_stats[plugin]:
i.update(all_limits[plugin])
else:
continue
export_names, export_values = self.__build_export(all_stats[plugin])
self.export(plugin, export_names, export_values)
return True
|
Update stats to a server.
The method builds two lists: names and values
and calls the export method to export the stats.
Note: this method can be overwritten (for example in CSV and Graph).
|
def subtract(self):
if self.moc is None:
raise CommandError('No MOC information present for subtraction')
filename = self.params.pop()
self.moc -= MOC(filename=filename)
|
Subtract the given MOC from the running MOC.
This command takes the name of a MOC file to be subtracted from the
running MOC.
::
pymoctool a.fits --subtract b.fits --output difference.fits
|
def issue_tags(issue):
labels = issue.get('labels', [])
return [label['name'].replace('tag: ', '') for label in labels if label['name'].startswith('tag: ')]
|
Returns list of tags for this issue.
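A minimal sketch with a hypothetical issue dict:
issue = {'labels': [{'name': 'tag: docs'}, {'name': 'bug'}]}
issue_tags(issue)   # -> ['docs']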
|
def on_message(self, con, event):
msg_type = event.getType()
nick = event.getFrom().getResource()
from_jid = event.getFrom().getStripped()
body = event.getBody()
if msg_type == 'chat' and body is None:
return
logger.debug('msg_type[%s] from[%s] nick[%s] body[%s]' % (msg_type, from_jid, nick, body,))
sender = filter(lambda m: m['JID'] == from_jid, self.params['MEMBERS'])
should_process = msg_type in ['message', 'chat', None] and body is not None and len(sender) == 1
if not should_process: return
sender = sender[0]
try:
for p in self.command_patterns:
reg, cmd = p
m = reg.match(body)
if m:
logger.info('pattern matched for bot command \'%s\'' % (cmd,))
function = getattr(self, str(cmd), None)
if function:
return function(sender, body, m)
words = body.split(' ')
cmd, args = words[0], words[1:]
if cmd and cmd[0] == '/':
cmd = cmd[1:]
command_handler = getattr(self, 'do_'+cmd, None)
if command_handler:
return command_handler(sender, body, args)
broadcast_body = '[%s] %s' % (sender['NICK'], body,)
return self.broadcast(broadcast_body, exclude=(sender,))
except:
logger.exception('Error handling message [%s] from [%s]' % (body, sender['JID']))
|
Handles message stanzas
|
def show_link(name):
if __grains__['os_family'] == 'RedHat':
path = '/var/lib/'
elif __grains__['os_family'] == 'Suse':
path = '/var/lib/rpm/'
else:
path = '/var/lib/dpkg/'
path += 'alternatives/{0}'.format(name)
try:
with salt.utils.files.fopen(path, 'rb') as r_file:
contents = salt.utils.stringutils.to_unicode(r_file.read())
return contents.splitlines(True)[1].rstrip('\n')
except OSError:
log.error('alternatives: %s does not exist', name)
except (IOError, IndexError) as exc:
log.error(
'alternatives: unable to get master link for %s. '
'Exception: %s', name, exc
)
return False
|
Display master link for the alternative
.. versionadded:: 2015.8.13,2016.3.4,2016.11.0
CLI Example:
.. code-block:: bash
salt '*' alternatives.show_link editor
|
def prune_by_ngram_size(self, minimum=None, maximum=None):
self._logger.info('Pruning results by n-gram size')
if minimum:
self._matches = self._matches[
self._matches[constants.SIZE_FIELDNAME] >= minimum]
if maximum:
self._matches = self._matches[
self._matches[constants.SIZE_FIELDNAME] <= maximum]
|
Removes results rows whose n-gram size is outside the
range specified by `minimum` and `maximum`.
:param minimum: minimum n-gram size
:type minimum: `int`
:param maximum: maximum n-gram size
:type maximum: `int`
|
def sizeHint(self, option, index):
if self._widget is None:
return super(WidgetDelegate, self).sizeHint(option, index)
self.set_widget_index(index)
self._widget.resize(option.rect.size())
sh = self._widget.sizeHint()
return sh
|
Return the appropriate size for the widget
The widget will always be expanded to at least the size of the viewport.
:param option: the options for painting
:type option: :class:`QtGui.QStyleOptionViewItem`
:param index: the index to paint
:type index: :class:`QtCore.QModelIndex`
:returns: the size hint for the widget
:rtype: :class:`QtCore.QSize`
:raises: None
|
def get_books_for_schedule(self, schedule):
slns = self._get_slns(schedule)
books = {}
for sln in slns:
try:
section_books = self.get_books_by_quarter_sln(
schedule.term.quarter, sln
)
books[sln] = section_books
except DataFailureException:
pass
return books
|
Returns a dictionary of data. SLNs are the keys; arrays of Book
objects are the values.
|
def from_inline(cls: Type[RevocationType], version: int, currency: str, inline: str) -> RevocationType:
cert_data = Revocation.re_inline.match(inline)
if cert_data is None:
raise MalformedDocumentError("Revokation")
pubkey = cert_data.group(1)
signature = cert_data.group(2)
return cls(version, currency, pubkey, signature)
|
Return Revocation document instance from inline string
Only self.pubkey is populated.
You must populate self.identity with an Identity instance to use raw/sign/signed_raw methods
:param version: Version number
:param currency: Name of the currency
:param inline: Inline document
:return: the Revocation document instance
|
def legal_date(year, month, day):
try:
assert year >= 1
assert 0 < month <= 14
assert 0 < day <= 28
if month == 14:
if isleap(year + YEAR_EPOCH - 1):
assert day <= 2
else:
assert day == 1
except AssertionError:
raise ValueError("Invalid Positivist date: ({}, {}, {})".format(year, month, day))
return True
|
Checks if a given date is a legal positivist date
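Example calls (the 14th month only has a second day in leap years):
legal_date(130, 1, 1)    # -> True
legal_date(130, 15, 1)   # raises ValueError (month out of range)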
|
def on(self, event, handler):
event_hook = self.get_or_create(event)
event_hook.subscribe(handler)
return self
|
Attaches the handler to the specified event.
@param event: event to attach the handler to. Any object can be passed
as event, but string is preferable. If qcore.EnumBase
instance is passed, its name is used as event key.
@param handler: event handler.
@return: self, so calls like this can be chained together.
|
def _realPath(self, newPathName: str = None) -> str:
directory = self._directory()
assert directory
return os.path.join(directory.path,
newPathName if newPathName else self._pathName)
|
Private Real Path
Get path name.
@param newPathName: optional new path name to use instead of the current one.
@type newPathName: String
@return: Path Name as string.
|
def _check_cores_output_sizes(self):
for core_sizes in zip(*tuple(_get_flat_core_sizes(self._cores))):
first_core_list = core_sizes[0][1:]
for i, core_list in enumerate(core_sizes[1:]):
if core_list[1:] != first_core_list:
raise ValueError("The outputs of the provided cores are not able "
"to be concatenated along the first feature "
"dimension. Core 0 has shape %s, whereas Core %d "
"has shape %s - these must only differ in the first "
"dimension" % (core_sizes[0], i + 1, core_list))
|
Checks the output_sizes of the cores of the DeepRNN module.
Raises:
ValueError: if the outputs of the cores cannot be concatenated along their
first dimension.
|
def _contiguous_slices(self):
k = j = None
for i in self._sorted():
if k is None:
k = j = i
if i - j > 1:
yield slice(k, j + 1, 1)
k = i
j = i
if k is not None:
yield slice(k, j + 1, 1)
|
Internal iterator over contiguous slices in RangeSet.
|
def _hashes_match(self, a, b):
if len(a) != len(b):
return False
diff = 0
if six.PY2:
a = bytearray(a)
b = bytearray(b)
for x, y in zip(a, b):
diff |= x ^ y
return not diff
|
Constant time comparison of bytes for py3, strings for py2
|
def date_suggestions():
days_of_week = {
0: "Monday",
1: "Tuesday",
2: "Wednesday",
3: "Thursday",
4: "Friday",
5: "Saturday",
6: "Sunday"
}
dates = [
'today',
'tomorrow',
]
dow = datetime.date.today().weekday()
for i in range(dow + 2 % 7, dow + 7):
dates.append(days_of_week[i % 7])
dates += ["1w", "2w", "1m", "2m", "3m", "1y"]
return dates
|
Returns a list of relative dates that are presented to the user as
autocomplete suggestions.
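For example, when called on a Wednesday:
date_suggestions()
# -> ['today', 'tomorrow', 'Friday', 'Saturday', 'Sunday', 'Monday',
#     'Tuesday', '1w', '2w', '1m', '2m', '3m', '1y']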
|
def fix_parameters(self):
for W, b in zip(self.W_list, self.b_list):
W.fix()
b.fix()
|
Helper function that fixes all parameters
|
def get_grade_system_query_session(self):
if not self.supports_grade_system_query():
raise errors.Unimplemented()
return sessions.GradeSystemQuerySession(runtime=self._runtime)
|
Gets the ``OsidSession`` associated with the grade system query service.
return: (osid.grading.GradeSystemQuerySession) - a
``GradeSystemQuerySession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_system_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_query()`` is ``true``.*
|
def get_multi(keys):
if not keys:
return []
adapter = None
for key in keys:
if key.is_partial:
raise RuntimeError(f"Key {key!r} is partial.")
model = lookup_model_by_kind(key.kind)
if adapter is None:
adapter = model._adapter
model.pre_get_hook(key)
entities_data, entities = adapter.get_multi(keys), []
for key, entity_data in zip(keys, entities_data):
if entity_data is None:
entities.append(None)
continue
model = _known_models[key.kind]
entity = model._load(key, entity_data)
entities.append(entity)
entity.post_get_hook()
return entities
|
Get a set of entities from Datastore by their respective keys.
Note:
This uses the adapter that is tied to the first model in the
list. If the keys have disparate adapters this function may
behave in unexpected ways.
Warning:
You must pass a **list** and not a generator or some other kind
of iterable to this function as it has to iterate over the list
of keys multiple times.
Parameters:
keys(list[anom.Key]): The list of keys whose entities to get.
Raises:
RuntimeError: If the given set of keys have models that use
a disparate set of adapters or if any of the keys are
partial.
Returns:
list[Model]: Entities that do not exist are going to be None
in the result list. The order of results matches the order
of the input keys.
|
def _validate_no_rels(param, rels):
if param.field in rels:
raise InvalidQueryParams(**{
'detail': 'The sort query param value of "%s" is not '
'supported. Sorting on relationships is not '
'currently supported' % param.raw_field,
'links': LINK,
'parameter': PARAM,
})
|
Ensure the sortable field is not on a relationship
|
def rsync(local_path, remote_path, exclude=None, extra_opts=None):
if not local_path.endswith('/'):
local_path += '/'
exclude = exclude or []
exclude.extend(['*.egg-info', '*.pyc', '.git', '.gitignore',
'.gitmodules', '/build/', '/dist/'])
with hide('running'):
run("mkdir -p '{}'".format(remote_path))
return rsync_project(
remote_path, local_path, delete=True,
extra_opts='-i --omit-dir-times -FF ' +
(extra_opts if extra_opts else ''),
ssh_opts='-o StrictHostKeyChecking=no',
exclude=exclude)
|
Helper to rsync submodules across
|
def load_json(filename, **kwargs):
with open(filename, 'r', encoding='utf-8') as f:
return json.load(f, **kwargs)
|
Load a JSON object from the specified file.
Args:
filename: Path to the input JSON file.
**kwargs: Additional arguments to `json.load`.
Returns:
The object deserialized from JSON.
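Typical usage (the filename is a hypothetical example):
settings = load_json('settings.json')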
|
def mapTrace(trace, net, delta, verbose=False):
result = []
paths = {}
if verbose:
print("mapping trace with %s points" % len(trace))
for pos in trace:
newPaths = {}
candidates = net.getNeighboringEdges(pos[0], pos[1], delta)
if len(candidates) == 0 and verbose:
print("Found no candidate edges for %s,%s" % pos)
for edge, d in candidates:
if paths:
minDist = 1e400
minPath = None
for path, dist in paths.iteritems():
if dist < minDist:
if edge == path[-1]:
minPath = path
minDist = dist
elif edge in path[-1].getOutgoing():
minPath = path + (edge,)
minDist = dist
else:
minPath = path + (edge,)
minDist = dist + euclidean(
path[-1].getToNode().getCoord(),
edge.getFromNode().getCoord())
if minPath:
newPaths[minPath] = minDist + d * d
else:
newPaths[(edge,)] = d * d
if not newPaths:
if paths:
result += [e.getID() for e in _getMinPath(paths)]
paths = newPaths
if paths:
return result + [e.getID() for e in _getMinPath(paths)]
return result
|
matching a list of 2D positions to consecutive edges in a network
|
def _signature(self, cmd):
result = {}
client = cmd.get('client', 'minion')
if client == 'minion':
cmd['fun'] = 'sys.argspec'
cmd['kwarg'] = dict(module=cmd['module'])
result = self.run(cmd)
elif client == 'master':
parts = cmd['module'].split('.')
client = parts[0]
module = '.'.join(parts[1:])
if client == 'wheel':
functions = self.wheelClient.functions
elif client == 'runner':
functions = self.runnerClient.functions
result = {'master': salt.utils.args.argspec_report(functions, module)}
return result
|
Expects everything that signature does, plus a client type string.
The client can be either 'master' or 'minion'.
|
def blit(
self,
console: tcod.console.Console,
x: float,
y: float,
bg_blend: int,
scale_x: float,
scale_y: float,
angle: float,
) -> None:
lib.TCOD_image_blit(
self.image_c,
_console(console),
x,
y,
bg_blend,
scale_x,
scale_y,
angle,
)
|
Blit onto a Console using scaling and rotation.
Args:
console (Console): Blit destination Console.
x (float): Console X position for the center of the Image blit.
y (float): Console Y position for the center of the Image blit.
The Image blit is centered on this position.
bg_blend (int): Background blending mode to use.
scale_x (float): Scaling along Image x axis.
Set to 1 for no scaling. Must be over 0.
scale_y (float): Scaling along Image y axis.
Set to 1 for no scaling. Must be over 0.
angle (float): Rotation angle in radians. (Clockwise?)
|
def show_fit(w_fit, C_fit, r_fit, Xs):
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter([X[0] for X in Xs], [X[1] for X in Xs], [X[2] for X in Xs])
theta = np.arccos(np.dot(w_fit, np.array([0, 0, 1])))
phi = np.arctan2(w_fit[1], w_fit[0])
M = np.dot(rotation_matrix_from_axis_and_angle(np.array([0, 0, 1]), phi),
rotation_matrix_from_axis_and_angle(np.array([0, 1, 0]), theta))
delta = np.linspace(-np.pi, np.pi, 20)
z = np.linspace(-10, 10, 20)
Delta, Z = np.meshgrid(delta, z)
X = r_fit * np.cos(Delta)
Y = r_fit * np.sin(Delta)
for i in range(len(X)):
for j in range(len(X[i])):
p = np.dot(M, np.array([X[i][j], Y[i][j], Z[i][j]])) + C_fit
X[i][j] = p[0]
Y[i][j] = p[1]
Z[i][j] = p[2]
ax.plot_surface(X, Y, Z, alpha=0.2)
ax.quiver(C_fit[0], C_fit[1], C_fit[2],
r_fit * w_fit[0], r_fit * w_fit[1], r_fit * w_fit[2], color='red')
plt.show()
|
Plot the fitting given the fitted axis direction, the fitted
center, the fitted radius and the data points.
|
def _init():
connection.connect()
ready_data = utils.encode_data('host:track-devices')
connection.adb_socket.send(ready_data)
status = connection.adb_socket.recv(4)
if status != b'OKAY':
raise RuntimeError('adb server return "{}", not OKAY'.format(str(status)))
|
Build the connection and initialize it.
|
def predict_proba(self, time):
check_is_fitted(self, "unique_time_")
time = check_array(time, ensure_2d=False)
extends = time > self.unique_time_[-1]
if self.prob_[-1] > 0 and extends.any():
raise ValueError("time must be smaller than largest "
"observed time point: {}".format(self.unique_time_[-1]))
Shat = numpy.empty(time.shape, dtype=float)
Shat[extends] = 0.0
valid = ~extends
time = time[valid]
idx = numpy.searchsorted(self.unique_time_, time)
eps = numpy.finfo(self.unique_time_.dtype).eps
exact = numpy.absolute(self.unique_time_[idx] - time) < eps
idx[~exact] -= 1
Shat[valid] = self.prob_[idx]
return Shat
|
Return probability of an event after given time point.
:math:`\\hat{S}(t) = P(T > t)`
Parameters
----------
time : array, shape = (n_samples,)
Time to estimate probability at.
Returns
-------
prob : array, shape = (n_samples,)
Probability of an event.
|
def time_stats(self, **kwargs):
if 'time_stats' in self.attributes:
return self.attributes['time_stats']
path = '%s/%s/time_stats' % (self.manager.path, self.get_id())
return self.manager.gitlab.http_get(path, **kwargs)
|
Get time stats for the object.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabTimeTrackingError: If the time tracking update cannot be done
|
def formatargvalues(args, varargs, varkw, locals,
formatarg=str,
formatvarargs=lambda name: '*' + name,
formatvarkw=lambda name: '**' + name,
formatvalue=lambda value: '=' + repr(value),
join=joinseq):
def convert(name, locals=locals,
formatarg=formatarg, formatvalue=formatvalue):
return formatarg(name) + formatvalue(locals[name])
specs = []
for i in range(len(args)):
specs.append(strseq(args[i], convert, join))
if varargs:
specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
if varkw:
specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
return '(' + string.join(specs, ', ') + ')'
|
Format an argument spec from the 4 values returned by getargvalues.
The first four arguments are (args, varargs, varkw, locals). The
next four arguments are the corresponding optional formatting functions
that are called to turn names and values into strings. The ninth
argument is an optional function to format the sequence of arguments.
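Under Python 2 (where this helper and `string.join` live), a quick sketch of how it pairs with `inspect.getargvalues`:
import inspect
def f(a, b=2):
    return formatargvalues(*inspect.getargvalues(inspect.currentframe()))
f(1)   # -> '(a=1, b=2)'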
|
def _get_cursor(self):
_options = self._get_options()
conn = psycopg2.connect(host=_options['host'],
user=_options['user'],
password=_options['pass'],
dbname=_options['db'],
port=_options['port'])
cursor = conn.cursor()
try:
yield cursor
log.debug('Connected to POSTGRES DB')
except psycopg2.DatabaseError as err:
log.exception('Error in ext_pillar POSTGRES: %s', err.args)
finally:
conn.close()
|
Yield a POSTGRES cursor
|
def mmi_to_delimited_file(self, force_flag=True):
LOGGER.debug('mmi_to_delimited_text requested.')
csv_path = os.path.join(
self.output_dir, 'mmi.csv')
if os.path.exists(csv_path) and force_flag is not True:
return csv_path
csv_file = open(csv_path, 'w')
csv_file.write(self.mmi_to_delimited_text())
csv_file.close()
csvt_path = os.path.join(
self.output_dir, self.output_basename + '.csvt')
csvt_file = open(csvt_path, 'w')
csvt_file.write('"Real","Real","Real"')
csvt_file.close()
return csv_path
|
Save mmi_data to delimited text file suitable for gdal_grid.
The output file will be of the same format as strings returned from
:func:`mmi_to_delimited_text`.
:param force_flag: Whether to force the regeneration of the output
file. Defaults to True.
:type force_flag: bool
:returns: The absolute file system path to the delimited text file.
:rtype: str
.. note:: An accompanying .csvt will be created which gdal uses to
determine field types. The csvt will contain the following string:
"Real","Real","Real". These types will be used in other conversion
operations. For example to convert the csv to a shp you would do::
ogr2ogr -select mmi -a_srs EPSG:4326 mmi.shp mmi.vrt mmi
|
def allow_pgcodes(cr, *codes):
try:
with cr.savepoint():
with core.tools.mute_logger('odoo.sql_db'):
yield
except (ProgrammingError, IntegrityError) as error:
msg = "Code: {code}. Class: {class_}. Error: {error}.".format(
code=error.pgcode,
class_=errorcodes.lookup(error.pgcode[:2]),
error=errorcodes.lookup(error.pgcode))
if error.pgcode in codes or error.pgcode[:2] in codes:
logger.info(msg)
else:
logger.exception(msg)
raise
|
Context manager that will omit specified error codes.
E.g., suppose you expect a migration to produce unique constraint
violations and you want to ignore them. Then you could just do::
with allow_pgcodes(cr, psycopg2.errorcodes.UNIQUE_VIOLATION):
cr.execute("INSERT INTO me (name) SELECT name FROM you")
.. warning::
**All** sentences inside this context will be rolled back if **a single
error** is raised, so the above example would insert **nothing** if a
single row violates a unique constraint.
This would ignore duplicate files but insert the others::
cr.execute("SELECT name FROM you")
for row in cr.fetchall():
with allow_pgcodes(cr, psycopg2.errorcodes.UNIQUE_VIOLATION):
cr.execute("INSERT INTO me (name) VALUES (%s)", row[0])
:param *str codes:
Undefined amount of error codes found in :mod:`psycopg2.errorcodes`
that are allowed. Codes can have either 2 characters (indicating an
error class) or 5 (indicating a concrete error). Any other errors
will be raised.
|
def running_covar(xx=True, xy=False, yy=False, remove_mean=False, symmetrize=False, sparse_mode='auto',
modify_data=False, column_selection=None, diag_only=False, nsave=5):
return RunningCovar(compute_XX=xx, compute_XY=xy, compute_YY=yy, sparse_mode=sparse_mode, modify_data=modify_data,
remove_mean=remove_mean, symmetrize=symmetrize, column_selection=column_selection,
diag_only=diag_only, nsave=nsave)
|
Returns a running covariance estimator
Returns an estimator object that can be fed chunks of X and Y data, and
that can generate on-the-fly estimates of mean, covariance, running sum
and second moment matrix.
Parameters
----------
xx : bool
Estimate the covariance of X
xy : bool
Estimate the cross-covariance of X and Y
yy : bool
Estimate the covariance of Y
remove_mean : bool
Remove the data mean in the covariance estimation
symmetrize : bool
Use symmetric estimates with sum defined by sum_t x_t + y_t and
second moment matrices defined by X'X + Y'Y and Y'X + X'Y.
modify_data : bool
If remove_mean=True, the mean will be removed in the input data,
without creating an independent copy. This option is faster but should
only be selected if the input data is not used elsewhere.
sparse_mode : str
one of:
* 'dense' : always use dense mode
* 'sparse' : always use sparse mode if possible
* 'auto' : automatic
column_selection: ndarray(k, dtype=int) or None
Indices of those columns that are to be computed. If None, all columns are computed.
diag_only: bool
If True, the computation is restricted to the diagonal entries (autocorrelations) only.
nsave : int
Depth of Moment storage. Moments computed from each chunk will be
combined with Moments of similar statistical weight using the pairwise
combination algorithm described in [1]_.
References
----------
.. [1] http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
|
def set_wts_get_npred_wt(gta, maskname):
if is_null(maskname):
maskname = None
gta.set_weights_map(maskname)
for name in gta.like.sourceNames():
gta._init_source(name)
gta._update_roi()
return build_srcdict(gta, 'npred_wt')
|
Set a weights file and get the weighted npred for all the sources
Parameters
----------
gta : `fermipy.GTAnalysis`
The analysis object
maskname : str
The path to the file with the mask
Returns
-------
odict : dict
Dictionary mapping from source name to weighted npred
|
def money_flow(close_data, high_data, low_data, volume):
catch_errors.check_for_input_len_diff(
close_data, high_data, low_data, volume
)
mf = volume * tp(close_data, high_data, low_data)
return mf
|
Money Flow.
Formula:
MF = VOLUME * TYPICAL PRICE
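A rough worked example, assuming `tp` returns the typical price (high + low + close) / 3:
money_flow([10.0], [12.0], [8.0], [1000.0])
# typical price = (12 + 8 + 10) / 3 = 10, so MF = [10000.0]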
|
def send_webapi(self, text, attachments=None, as_user=True, thread_ts=None):
self._client.send_message(
self._body['channel'],
text,
attachments=attachments,
as_user=as_user,
thread_ts=thread_ts)
|
Send a reply using Web API
(This function supports formatted message
when using a bot integration)
|
def call(self):
headers, data = self.prepare()
if _LOG.isEnabledFor(logging.DEBUG):
_LOG.debug("Sending %s, %s", headers, prettify(data))
response = requests.post(
self.endpoint,
headers=headers,
data=data.encode('utf-8'),
**self.request_args
)
_LOG.debug("Received %s, %s", response.headers, response.text)
status = response.status_code
if status == 200:
tree = XML.fromstring(response.content)
body = tree.find(
"{http://schemas.xmlsoap.org/soap/envelope/}Body")[0]
return body
elif status == 500:
tree = XML.fromstring(response.content)
fault = tree.find(
'.//{http://schemas.xmlsoap.org/soap/envelope/}Fault'
)
if fault is None:
response.raise_for_status()
faultcode = fault.findtext("faultcode")
faultstring = fault.findtext("faultstring")
faultdetail = fault.find("detail")
raise SoapFault(faultcode, faultstring, faultdetail)
else:
response.raise_for_status()
return None
|
Call the SOAP method on the server.
Returns:
str: the decapsulated SOAP response from the server,
still encoded as utf-8.
Raises:
SoapFault: if a SOAP error occurs.
~requests.exceptions.HTTPError: if an http error occurs.
|
def find_json(raw):
ret = {}
lines = __split(raw)
for ind, _ in enumerate(lines):
try:
working = '\n'.join(lines[ind:])
except UnicodeDecodeError:
working = '\n'.join(salt.utils.data.decode(lines[ind:]))
try:
ret = json.loads(working)
except ValueError:
continue
if ret:
return ret
if not ret:
raise ValueError
|
Pass in a raw string and load the JSON from the point where it starts. This
allows a string that starts with garbage and ends with JSON to be cleanly loaded.
|
def delete_releasefile(self, release):
fp = release._releasefile.get_fullpath()
log.info("Deleting release file %s", fp)
delete_file(release._releasefile)
return ActionStatus(ActionStatus.SUCCESS,
msg="Deleted %s" % fp)
|
Delete the releasefile of the given release
This is intended to be used in an action unit.
:param release: the release with the releasefile
:type release: :class:`Release`
:returns: an action status
:rtype: :class:`ActionStatus`
:raises: None
|
def smoothed(self, iterations=1):
copy = self.Clone(shallow=True)
copy.Smooth(iterations)
return copy
|
Return a smoothed copy of this histogram
Parameters
----------
iterations : int, optional (default=1)
The number of smoothing iterations
Returns
-------
hist : asrootpy'd histogram
The smoothed histogram
|
def dot_v3(v, w):
return sum([x * y for x, y in zip(v, w)])
|
Return the dotproduct of two vectors.
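For example:
dot_v3([1, 2, 3], [4, 5, 6])   # -> 32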
|
def _darwin_current_arch(self):
if sys.platform == "darwin":
if sys.maxsize > 2 ** 32:
return platform.mac_ver()[2]
else:
return platform.processor()
|
Return the current architecture on Mac OS X (Darwin).
|
def exit(self, pub_id, *node_ids):
try:
pub = self['pubs'][pub_id]
except KeyError:
raise ValueError('Pub {} is not available'.format(pub_id))
for node_id in node_ids:
node = self.get_agent(node_id)
if pub_id == node['pub']:
del node['pub']
pub['occupancy'] -= 1
|
Agents will notify the pub they want to leave
|
def add_germline_variants(self, germline_nucs, coding_pos):
if len(germline_nucs) != len(coding_pos):
raise ValueError('Each germline nucleotide should have a coding position')
es = list(self.exon_seq)
for i in range(len(germline_nucs)):
gl_nuc, cpos = germline_nucs[i].upper(), coding_pos[i]
if not utils.is_valid_nuc(gl_nuc):
raise ValueError('{0} is not a valid nucleotide'.format(gl_nuc))
if cpos >= 0:
es[cpos] = gl_nuc
self.exon_seq = ''.join(es)
|
Add potential germline variants into the nucleotide sequence.
Sequenced individuals may potentially have a SNP at a somatic mutation position.
Therefore they may differ from the reference genome. This method updates the
germline gene sequence to match the actual individual.
Parameters
----------
germline_nucs : list of str
list of DNA nucleotides containing the germline letter
coding_pos : list of int
0-based nucleotide positions in the coding sequence
NOTE: the self.exon_seq attribute is updated, no return value
|
def _update_config(args, update_fn, allow_missing=False):
new_i = None
for i, arg in enumerate(args):
if (is_std_config_arg(arg) or is_nested_config_arg(arg) or
(isinstance(arg, (list, tuple)) and is_nested_config_arg(arg[0]))):
new_i = i
break
if new_i is None:
if allow_missing:
return args
else:
raise ValueError("Could not find configuration in args: %s" % str(args))
new_arg = args[new_i]
if is_nested_config_arg(new_arg):
new_arg["config"] = update_fn(copy.deepcopy(new_arg["config"]))
elif is_std_config_arg(new_arg):
new_arg = update_fn(copy.deepcopy(new_arg))
    elif isinstance(new_arg, (list, tuple)) and is_nested_config_arg(new_arg[0]):
new_arg_first = new_arg[0]
new_arg_first["config"] = update_fn(copy.deepcopy(new_arg_first["config"]))
new_arg = [new_arg_first] + new_arg[1:]
else:
raise ValueError("Unexpected configuration dictionary: %s" % new_arg)
args = list(args)[:]
args[new_i] = new_arg
return args
|
Update configuration, nested in argument list, with the provided update function.
|
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
if filename is None:
if self.filename is not None:
filename = self.filename
else:
raise ValueError(cookiejar.MISSING_FILENAME_TEXT)
go_cookies = []
now = time.time()
for cookie in self:
if not ignore_discard and cookie.discard:
continue
if not ignore_expires and cookie.is_expired(now):
continue
go_cookies.append(py_to_go_cookie(cookie))
with open(filename, "w") as f:
f.write(json.dumps(go_cookies))
|
Implement the FileCookieJar abstract method.
|
def getISOBufferList(self):
transfer_p = self.__transfer
transfer = transfer_p.contents
if transfer.type != TRANSFER_TYPE_ISOCHRONOUS:
raise TypeError(
'This method cannot be called on non-iso transfers.'
)
return libusb1.get_iso_packet_buffer_list(transfer_p)
|
Get individual ISO transfer's buffer.
Returns a list with one item per ISO transfer, with their
individually-configured sizes.
Returned list is consistent with getISOSetupList return value.
Should not be called on a submitted transfer.
See also iterISO.
|
def mi_chain_rule(X, y):
chain = np.zeros(len(X))
chain[0] = mi(X[0], y)
for i in range(1, len(X)):
chain[i] = cond_mi(X[i], y, X[:i])
return chain
|
Decompose the information between all X and y according to the chain rule and return all the terms in the chain rule.
Inputs:
-------
X: iterable of iterables. You should be able to compute [mi(x, y) for x in X]
y: iterable of symbols
output:
-------
ndarray: terms of the chain rule
Implementation notes:
I(X; y) = I(x0, x1, ..., xn; y)
= I(x0; y) + I(x1;y | x0) + I(x2; y | x0, x1) + ... + I(xn; y | x0, x1, ..., xn-1)
|
def calculate_clock_angle(inst):
clock_angle = np.degrees(np.arctan2(inst['BY_GSM'], inst['BZ_GSM']))
clock_angle[clock_angle < 0.0] += 360.0
inst['clock_angle'] = pds.Series(clock_angle, index=inst.data.index)
inst['BYZ_GSM'] = pds.Series(np.sqrt(inst['BY_GSM']**2 +
inst['BZ_GSM']**2),
index=inst.data.index)
return
|
Calculate IMF clock angle and magnitude of IMF in GSM Y-Z plane
Parameters
-----------
inst : pysat.Instrument
Instrument with OMNI HRO data
|
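As an illustration of the angle convention, the same arithmetic on plain numpy arrays (a standalone sketch, not a pysat Instrument; the field values are made up):

import numpy as np

by = np.array([5.0, -5.0])   # hypothetical IMF BY_GSM values, nT
bz = np.array([0.0,  0.0])   # hypothetical IMF BZ_GSM values, nT

clock_angle = np.degrees(np.arctan2(by, bz))
clock_angle[clock_angle < 0.0] += 360.0    # wrap into [0, 360)
byz_gsm = np.sqrt(by**2 + bz**2)           # magnitude in the GSM Y-Z plane

print(clock_angle)  # [ 90. 270.]
print(byz_gsm)      # [5. 5.]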
def set_load_resistance(self, resistance):
new_val = int(round(resistance * 100))
if not 0 <= new_val <= 50000:
raise ValueError("Load Resistance should be between 0-500 ohms")
self._load_mode = self.SET_TYPE_RESISTANCE
self._load_value = new_val
self.__set_parameters()
|
Changes load to resistance mode and sets resistance value.
Rounds to nearest 0.01 Ohms
:param resistance: Load Resistance in Ohms (0-500 ohms)
:return: None
|
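The rounding and range check can be exercised in isolation (a standalone sketch of the same arithmetic; the helper name is hypothetical):

def to_centiohms(resistance):
    # The setter stores resistance in units of 0.01 ohm, hence the factor 100.
    new_val = int(round(resistance * 100))
    if not 0 <= new_val <= 50000:
        raise ValueError("Load Resistance should be between 0-500 ohms")
    return new_val

print(to_centiohms(12.34))   # 1234, i.e. 12.34 ohms
print(to_centiohms(500.0))   # 50000, the upper limit
# to_centiohms(500.01) would raise ValueError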
def union(self, other):
if isinstance(other, self.__class__):
return self.client.sunion([self.name, other.name])
else:
return self._as_set().union(other)
|
Return the union of sets as a new set.
(i.e. all elements that are in either set.)
Operates on either redish.types.Set or __builtins__.set.
|
def _read24(self, register):
ret = 0.0
for b in self._read_register(register, 3):
ret *= 256.0
ret += float(b & 0xFF)
return ret
|
Read an unsigned 24-bit value as a floating point and return it.
|
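The byte arithmetic can be checked outside the driver (a standalone sketch, assuming the register yields three big-endian bytes):

def read24_from_bytes(data):
    # Accumulate three bytes, most significant first, as a float.
    ret = 0.0
    for b in data:
        ret *= 256.0
        ret += float(b & 0xFF)
    return ret

# 0x12 0x34 0x56 -> 0x123456 = 1193046
print(read24_from_bytes([0x12, 0x34, 0x56]))  # 1193046.0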
def _graph_add_edge(self, src_block_id, dst_block_id, **kwargs):
dst_node = self._graph_get_node(dst_block_id, terminator_for_nonexistent_node=True)
if src_block_id is None:
self.graph.add_node(dst_node)
else:
src_node = self._graph_get_node(src_block_id, terminator_for_nonexistent_node=True)
self.graph.add_edge(src_node, dst_node, **kwargs)
|
Add an edge onto the graph.
:param BlockID src_block_id: The block ID for source node.
:param BlockID dst_block_id: The block Id for destination node.
:param str jumpkind: The jumpkind of the edge.
:param exit_stmt_idx: ID of the statement in the source IRSB where this edge is created from. 'default'
refers to the default exit.
:return: None
|
def md5_of_file(abspath):
chunk_size = 1024 * 1024
m = hashlib.md5()
with open(abspath, "rb") as f:
while True:
data = f.read(chunk_size)
if not data:
break
m.update(data)
return m.hexdigest()
|
Md5 value of a file.
|
def aggregations(self):
prev_month_start = get_prev_month(self.end, self.query.interval_)
self.query.since(prev_month_start)
agg = super().aggregations()
if agg is None:
agg = 0
return agg
|
Get the single valued aggregations with respect to the
previous time interval.
|
def bss_eval_sources_framewise(reference_sources, estimated_sources,
window=30 * 44100, hop=15 * 44100,
compute_permutation=False):
(sdr, isr, sir, sar, perm) = \
bss_eval(
reference_sources, estimated_sources,
window=window, hop=hop,
compute_permutation=compute_permutation, filters_len=512,
framewise_filters=True,
bsseval_sources_version=True)
return (sdr, sir, sar, perm)
|
BSS Eval v3 bss_eval_sources_framewise
Wrapper to ``bss_eval`` with the right parameters.
The call to this function is not recommended. See the description for the
``bsseval_sources`` parameter of ``bss_eval``.
|
def getSkeletalTrackingLevel(self, action):
fn = self.function_table.getSkeletalTrackingLevel
pSkeletalTrackingLevel = EVRSkeletalTrackingLevel()
result = fn(action, byref(pSkeletalTrackingLevel))
return result, pSkeletalTrackingLevel
|
Reads the level of accuracy to which the controller is able to track the user to recreate a skeletal pose
|
def parse_hstring(hs):
name, value, comment = yield_three(
[val.strip().strip("'") for val in filter(None, re.split("[=/]+", hs))]
)
try:
len(comment)
except:
pass
else:
comment = '/'.join(comment)
return name, value, comment
|
Parse a single item from the telescope server into name, value, comment.
|
def _get_offset_day(self, other):
mstart = datetime(other.year, other.month, 1)
wday = mstart.weekday()
shift_days = (self.weekday - wday) % 7
return 1 + shift_days + self.week * 7
|
Find the day in the same month as other that has the same
weekday as self.weekday and is the self.week'th such day in the month.
Parameters
----------
other : datetime
Returns
-------
day : int
|
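For example, with self.weekday = 2 (Wednesday) and self.week = 1 (the second such day, counting from 0), the arithmetic for March 2021 works out as follows (standalone sketch):

from datetime import datetime

weekday, week = 2, 1                  # Wednesday, second occurrence
other = datetime(2021, 3, 15)         # any date inside the target month
mstart = datetime(other.year, other.month, 1)
shift_days = (weekday - mstart.weekday()) % 7   # days from the 1st to the first Wednesday
day = 1 + shift_days + week * 7
print(day)  # 10 -- the second Wednesday of March 2021 falls on the 10th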
def execute_pending_service_agreements(storage_path, account, actor_type, did_resolver_fn):
keeper = Keeper.get_instance()
for (agreement_id, did, _,
price, files, start_time, _) in get_service_agreements(storage_path):
ddo = did_resolver_fn(did)
for service in ddo.services:
if service.type != 'Access':
continue
consumer_provider_tuple = keeper.escrow_access_secretstore_template.get_agreement_data(
agreement_id)
if not consumer_provider_tuple:
continue
consumer, provider = consumer_provider_tuple
did = ddo.did
service_agreement = ServiceAgreement.from_service_dict(service.as_dictionary())
condition_ids = service_agreement.generate_agreement_condition_ids(
agreement_id, did, consumer, provider, keeper)
if actor_type == 'consumer':
assert account.address == consumer
process_agreement_events_consumer(
provider, agreement_id, did, service_agreement,
price, account, condition_ids, None)
else:
assert account.address == provider
process_agreement_events_publisher(
account, agreement_id, did, service_agreement,
price, consumer, condition_ids)
|
Iterates over pending service agreements recorded in the local storage,
fetches their service definitions, and subscribes to service agreement events.
:param storage_path: storage path for the internal db, str
:param account:
:param actor_type:
:param did_resolver_fn:
:return:
|
def line( loc, strg ):
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR+1:nextCR]
else:
return strg[lastCR+1:]
|
Returns the line of text containing loc within a string, counting newlines as line separators.
|
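A small check of the rfind/find bracketing (standalone, inlining the same two searches):

strg = "first line\nsecond line\nthird line"
loc = strg.index("second")
lastCR = strg.rfind("\n", 0, loc)   # newline before loc (or -1)
nextCR = strg.find("\n", loc)       # newline after loc (or -1)
print(strg[lastCR + 1:nextCR])      # "second line"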
def from_sqlite(cls, database_path, base_url, version='auto', client_id='ghost-admin'):
import os
import sqlite3
fd = os.open(database_path, os.O_RDONLY)
connection = sqlite3.connect('/dev/fd/%d' % fd)
os.close(fd)
try:
row = connection.execute(
'SELECT secret FROM clients WHERE slug = ?',
(client_id,)
).fetchone()
if row:
return cls(
base_url, version=version,
client_id=client_id, client_secret=row[0]
)
else:
raise GhostException(401, [{
'errorType': 'InternalError',
'message': 'No client_secret found for client_id: %s' % client_id
}])
finally:
connection.close()
|
Initialize a new Ghost API client,
reading the client ID and secret from the SQLite database.
:param database_path: The path to the database file.
:param base_url: The base url of the server
:param version: The server version to use (default: `auto`)
:param client_id: The client ID to look for in the database
:return: A new Ghost API client instance
|
def check_response(response):
if response.status_code < 200 or response.status_code > 300:
raise ServerError('API requests returned with error: %s'
% response.status_code)
try:
response_text = loads(response.text)
except ValueError:
        raise ServerError('The API did not return a JSON string.')
if not response_text:
raise EmptyResponse()
if 'failure' in response_text:
if response_text['failure'] == 'Falscher Dateityp':
raise UnsupportedFormat('Please look at picflash.org '
                                    'which formats are supported')
else:
raise UnkownError(response_text['failure'])
|
Check the response; if the server returned an error, raise an exception.
|
def load(self, name):
name = ctypes.util.find_library(name)
return ctypes.cdll.LoadLibrary(name)
|
Loads and returns foreign library.
|
def make_unpublished(self, request, queryset):
rows_updated = queryset.update(is_published=False)
self.message_user(request,
ungettext('%(count)d newsitem was unpublished',
'%(count)d newsitems were unpublished',
rows_updated) % {'count': rows_updated})
|
Marks selected news items as unpublished
|
def _substract_hemispheres(active, reference, active_sigma, reference_sigma, voxel_spacing):
active_kernel = _create_structure_array(active_sigma, voxel_spacing)
active_smoothed = gaussian_filter(active, sigma = active_kernel)
reference_kernel = _create_structure_array(reference_sigma, voxel_spacing)
reference_smoothed = gaussian_filter(reference, sigma = reference_kernel)
return active_smoothed - reference_smoothed
|
Helper function for `_extract_hemispheric_difference`.
Smooths both images and then subtracts the reference from the active image.
|
def reservoirLink(lines):
KEYWORDS = ('LINK',
'RESERVOIR',
'RES_MINWSE',
'RES_INITWSE',
'RES_MAXWSE',
'RES_NUMPTS',
'LAKE',
'MINWSE',
'INITWSE',
'MAXWSE',
'NUMPTS')
result = {'header': {'link': None,
'res_minwse': None,
'res_initwse': None,
'res_maxwse': None,
'res_numpts': None,
'minwse': None,
'initwse': None,
'maxwse': None,
'numpts': None},
'type': None,
'points': []}
pair = {'i': None,
'j': None}
chunks = pt.chunk(KEYWORDS, lines)
for key, chunkList in iteritems(chunks):
for chunk in chunkList:
schunk = chunk[0].strip().split()
if key in ('NUMPTS', 'RES_NUMPTS'):
result['header'][key.lower()] = schunk[1]
for idx in range(1, len(chunk)):
schunk = chunk[idx].strip().split()
for count, ordinate in enumerate(schunk):
if (count % 2) == 0:
pair['i'] = ordinate
else:
pair['j'] = ordinate
result['points'].append(pair)
pair = {'i': None,
'j': None}
elif key in ('LAKE', 'RESERVOIR'):
result['type'] = schunk[0]
else:
result['header'][key.lower()] = schunk[1]
return result
|
Parse RESERVOIR Link Method
|
def color_palette(palette=None, n_colors=None):
if palette is None:
palette = get_color_cycle()
if n_colors is None:
n_colors = len(palette)
elif not isinstance(palette, str):
if n_colors is None:
n_colors = len(palette)
else:
if palette.lower() not in PALETTES:
raise YellowbrickValueError(
"'{}' is not a recognized palette!".format(palette)
)
palette = PALETTES[palette.lower()]
if n_colors is None:
n_colors = len(palette)
pal_cycle = cycle(palette)
palette = [next(pal_cycle) for _ in range(n_colors)]
try:
palette = map(mpl.colors.colorConverter.to_rgb, palette)
palette = ColorPalette(palette)
except ValueError:
raise YellowbrickValueError(
"Could not generate a palette for %s" % str(palette)
)
return palette
|
Return a color palette object with color definition and handling.
Calling this function with ``palette=None`` will return the current
matplotlib color cycle.
This function can also be used in a ``with`` statement to temporarily
set the color cycle for a plot or set of plots.
Parameters
----------
palette : None or str or sequence
Name of a palette or ``None`` to return the current palette. If a
sequence the input colors are used but possibly cycled.
Available palette names from :py:mod:`yellowbrick.colors.palettes` are:
.. hlist::
:columns: 3
* :py:const:`accent`
* :py:const:`dark`
* :py:const:`paired`
* :py:const:`pastel`
* :py:const:`bold`
* :py:const:`muted`
* :py:const:`colorblind`
* :py:const:`sns_colorblind`
* :py:const:`sns_deep`
* :py:const:`sns_muted`
* :py:const:`sns_pastel`
* :py:const:`sns_bright`
* :py:const:`sns_dark`
* :py:const:`flatui`
* :py:const:`neural_paint`
n_colors : None or int
Number of colors in the palette. If ``None``, the default will depend
on how ``palette`` is specified. Named palettes default to 6 colors
which allow the use of the names "bgrmyck", though others do have more
or less colors; therefore reducing the size of the list can only be
done by specifying this parameter. Asking for more colors than exist
in the palette will cause it to cycle.
Returns
-------
list(tuple)
Returns a ColorPalette object, which behaves like a list, but can be
used as a context manager and possesses functions to convert colors.
.. seealso::
:func:`.set_palette`
Set the default color cycle for all plots.
:func:`.set_color_codes`
Reassign color codes like ``"b"``, ``"g"``, etc. to
colors from one of the yellowbrick palettes.
:func:`..colors.resolve_colors`
Resolve a color map or listed sequence of colors.
|
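A brief usage sketch (assuming a recent yellowbrick where the function is importable from yellowbrick.style):

from yellowbrick.style import color_palette

# Named palette; length defaults to the palette's own size.
flatui = color_palette("flatui")
print(len(flatui), flatui[0])

# None returns the current matplotlib color cycle, here cycled/truncated to 3 colors.
print(list(color_palette(None, n_colors=3)))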
def generate_unique_name(name_prefix, reservation_id=None):
if reservation_id and isinstance(reservation_id, str) and len(reservation_id) >= 4:
unique_id = str(uuid.uuid4())[:4] + "-" + reservation_id[-4:]
else:
unique_id = str(uuid.uuid4())[:8]
return name_prefix + "_" + unique_id
|
Generate a unique name.
The method generates a guid and appends the first 8 characters of the new guid to 'name_prefix'.
If a reservation id is provided, the first 4 chars of the generated guid are taken and the last 4
of the reservation id.
|
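A usage sketch (the function is repeated so the snippet runs standalone; the printed values are illustrative, since the uuid part is random):

import uuid

def generate_unique_name(name_prefix, reservation_id=None):
    if reservation_id and isinstance(reservation_id, str) and len(reservation_id) >= 4:
        unique_id = str(uuid.uuid4())[:4] + "-" + reservation_id[-4:]
    else:
        unique_id = str(uuid.uuid4())[:8]
    return name_prefix + "_" + unique_id

print(generate_unique_name("vm"))                   # e.g. vm_1a2b3c4d
print(generate_unique_name("vm", "res-0000-9f3e"))  # e.g. vm_1a2b-9f3e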
def console_print_rect_ex(
con: tcod.console.Console,
x: int,
y: int,
w: int,
h: int,
flag: int,
alignment: int,
fmt: str,
) -> int:
return int(
lib.TCOD_console_printf_rect_ex(
_console(con), x, y, w, h, flag, alignment, _fmt(fmt)
)
)
|
Print a string constrained to a rectangle with blend and alignment.
Returns:
int: The number of lines of text once word-wrapped.
.. deprecated:: 8.5
Use :any:`Console.print_rect` instead.
|