def _ranges_key(r, delta_indices):
idx = r.index_symbol
if idx in delta_indices:
return (r.index_symbol.primed, r.index_symbol.name)
else:
# ranges that are not in delta_indices should remain in the original
# order
return (0, ' ') | Sorting key for ranges.
When used with ``reverse=True``, this can be used to sort index ranges into
the order we would prefer to eliminate them by evaluating KroneckerDeltas:
First, eliminate primed indices, then indices with names higher in the alphabet. |
def _factors_for_expand_delta(expr):
from qnet.algebra.core.scalar_algebra import ScalarValue
from qnet.algebra.core.abstract_quantum_algebra import (
ScalarTimesQuantumExpression)
if isinstance(expr, ScalarTimesQuantumExpression):
yield from _factors_for_expand_delta(expr.coeff)
yield expr.term
elif isinstance(expr, ScalarValue):
yield from _factors_for_expand_delta(expr.val)
elif isinstance(expr, sympy.Basic) and expr.is_Mul:
yield from expr.args
else:
yield expr | Yield factors from `expr`, mixing sympy and QNET objects.
Auxiliary routine for :func:`_expand_delta`. |
def _expand_delta(expr, idx):
found_first_delta = False
summands = None
for factor in _factors_for_expand_delta(expr):
need_to_expand = False
if not found_first_delta and isinstance(factor, sympy.Basic):
if factor.is_Add and _has_simple_delta(factor, idx):
need_to_expand = True
if need_to_expand:
found_first_delta = True
if summands is None:
summands = list(factor.args)
else:
summands = [summands[0]*t for t in factor.args]
else:
if summands is None:
summands = [factor, ]
else:
summands = [t*factor for t in summands]
return summands | Expand the first :class:`sympy.Add` containing a simple
:class:`sympy.KroneckerDelta`.
Auxiliary routine for :func:`_deltasummation`. Adapted from SymPy. The
input `expr` may be a :class:`.QuantumExpression` or a
:class:`sympy.Basic` instance.
Returns a list of summands. The elements of the list may be
:class:`.QuantumExpression` or :class:`sympy.Basic` instances. There is
no guarantee of type stability: an input :class:`.QuantumExpression` may
result in a :class:`sympy.Basic` instance in the `summands`. |
def _split_sympy_quantum_factor(expr):
from qnet.algebra.core.abstract_quantum_algebra import (
QuantumExpression, ScalarTimesQuantumExpression)
from qnet.algebra.core.scalar_algebra import ScalarValue, ScalarTimes, One
if isinstance(expr, ScalarTimesQuantumExpression):
sympy_factor, quantum_factor = _split_sympy_quantum_factor(expr.coeff)
quantum_factor *= expr.term
elif isinstance(expr, ScalarValue):
sympy_factor = expr.val
quantum_factor = expr._one
elif isinstance(expr, ScalarTimes):
sympy_factor = sympy.S(1)
quantum_factor = expr._one
for op in expr.operands:
op_sympy, op_quantum = _split_sympy_quantum_factor(op)
sympy_factor *= op_sympy
quantum_factor *= op_quantum
elif isinstance(expr, sympy.Basic):
sympy_factor = expr
quantum_factor = One
else:
sympy_factor = sympy.S(1)
quantum_factor = expr
assert isinstance(sympy_factor, sympy.Basic)
assert isinstance(quantum_factor, QuantumExpression)
return sympy_factor, quantum_factor | Split a product into sympy and qnet factors
This is a helper routine for applying some sympy transformation on an
arbitrary product-like expression in QNET. The idea is this::
expr -> sympy_factor, quantum_factor
sympy_factor -> sympy_function(sympy_factor)
expr -> sympy_factor * quantum_factor |
def _extract_delta(expr, idx):
from qnet.algebra.core.abstract_quantum_algebra import QuantumExpression
from qnet.algebra.core.scalar_algebra import ScalarValue
sympy_factor, quantum_factor = _split_sympy_quantum_factor(expr)
delta, new_expr = _sympy_extract_delta(sympy_factor, idx)
if delta is None:
new_expr = expr
else:
new_expr = new_expr * quantum_factor
if isinstance(new_expr, ScalarValue._val_types):
new_expr = ScalarValue.create(new_expr)
assert isinstance(new_expr, QuantumExpression)
return delta, new_expr | Extract a "simple" Kronecker delta containing `idx` from `expr`.
Assuming `expr` can be written as the product of a Kronecker Delta and a
`new_expr`, return a tuple of the sympy.KroneckerDelta instance and
`new_expr`. Otherwise, return a tuple of None and the original `expr`
(possibly converted to a :class:`.QuantumExpression`).
On input, `expr` can be a :class:`QuantumExpression` or a
:class:`sympy.Basic` object. On output, `new_expr` is guaranteed to be a
:class:`QuantumExpression`. |
def derivative_via_diff(cls, ops, kwargs):
assert len(ops) == 1
op = ops[0]
derivs = kwargs['derivs']
vals = kwargs['vals']
# both `derivs` and `vals` are guaranteed to be tuples, via the conversion
# that's happening in `QuantumDerivative.create`
for (sym, n) in derivs:
if sym.free_symbols.issubset(op.free_symbols):
for k in range(n):
op = op._diff(sym)
else:
return op.__class__._zero
if vals is not None:
try:
# for QuantumDerivative instance
return op.evaluate_at(vals)
except AttributeError:
# for explicit Expression
return op.substitute(vals)
else:
return op | Implementation of the :meth:`QuantumDerivative.create` interface via the
use of :meth:`QuantumExpression._diff`.
Thus, by having :meth:`.QuantumExpression.diff` delegate to
:meth:`.QuantumDerivative.create`, instead of
:meth:`.QuantumExpression._diff` directly, we get automatic caching of
derivatives |
def word_groups_for_language(language_code):
if language_code not in LANGUAGE_CODES:
message = '{} is not an available language code'.format(language_code)
raise InvalidLanguageCodeException(message)
return MATH_WORDS[language_code] | Return the math word groups for a language code.
The language_code should be an ISO 639-2 language code.
https://www.loc.gov/standards/iso639-2/php/code_list.php |
def words_for_language(language_code):
word_groups = word_groups_for_language(language_code)
words = []
for group in word_groups:
words.extend(word_groups[group].keys())
return words | Return the math words for a language code.
The language_code should be an ISO 639-2 language code.
https://www.loc.gov/standards/iso639-2/php/code_list.php |
def invert_permutation(permutation):
return tuple([permutation.index(p) for p in range(len(permutation))]) | Compute the image tuple of the inverse permutation.
:param permutation: A valid (cf. :py:func:`check_permutation`) permutation.
:return: The inverse permutation tuple
:rtype: tuple |
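A doctest-style illustration (inverting twice recovers the original image tuple):
>>> invert_permutation((2, 0, 1))
(1, 2, 0)
>>> invert_permutation((1, 2, 0))
(2, 0, 1)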
def permutation_to_disjoint_cycles(permutation):
if not check_permutation(permutation):
raise BadPermutationError('Malformed permutation %r' % permutation)
p_index = 0
current_cycle = [0]
# keep track of all remaining/unvisited indices
permutation_nums = list(range(1,len(permutation)))
cycles = []
while True:
# find next image point in cycle
p_index = permutation[p_index]
# if back at start of cycle
if p_index == current_cycle[0]:
# store cycle
cycles.append(current_cycle)
try:
# retrieve the next lowest un-used image point
p_index = permutation_nums.pop(0)
current_cycle = [p_index]
except IndexError:
break
else:
permutation_nums.remove(p_index)
current_cycle.append(p_index)
return cycles | Any permutation sigma can be represented as a product of cycles.
A cycle (c_1, .. c_n) is a closed sequence of indices such that
sigma(c_1) == c_2, sigma(c_2) == sigma^2(c_1) == c_3, ..., sigma(c_(n-1)) == c_n, sigma(c_n) == c_1
Any single length-n cycle admits n equivalent representations in
correspondence with which element one defines as c_1.
(0,1,2) == (1,2,0) == (2,0,1)
A decomposition into *disjoint* cycles can be made unique, by requiring
that the cycles are sorted by their smallest element, which is also the
left-most element of each cycle. Note that permutations generated by
disjoint cycles commute. E.g.,
(1, 0, 3, 2) == ((1,0),(3,2)) --> ((0,1),(2,3)) normal form
:param permutation: A valid permutation image tuple
:type permutation: tuple
:return: A list of disjoint cycles that, when composed, yield the original permutation
:rtype: list
:raise: BadPermutationError |
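A doctest-style example, using the normal-form case from the docstring:
>>> permutation_to_disjoint_cycles((1, 0, 3, 2))
[[0, 1], [2, 3]]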
def permutation_from_disjoint_cycles(cycles, offset=0):
perm_length = sum(map(len, cycles))
res_perm = list(range(perm_length))
for c in cycles:
p1 = c[0] - offset
for p2 in c[1:]:
p2 = p2 - offset
res_perm[p1] = p2
p1 = p2
res_perm[p1] = c[0] - offset #close cycle
assert sorted(res_perm) == list(range(perm_length))
return tuple(res_perm) | Reconstruct a permutation image tuple from a list of disjoint cycles
:param cycles: sequence of disjoint cycles
:type cycles: list or tuple
:param offset: Offset to subtract from the resulting permutation image points
:type offset: int
:return: permutation image tuple
:rtype: tuple |
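As a doctest-style check, this inverts the decomposition from :py:func:`permutation_to_disjoint_cycles`:
>>> permutation_from_disjoint_cycles([[0, 1], [2, 3]])
(1, 0, 3, 2)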
def permutation_from_block_permutations(permutations):
offset = 0
new_perm = []
for p in permutations:
new_perm[offset: offset +len(p)] = [p_i + offset for p_i in p]
offset += len(p)
return tuple(new_perm) | Reverse operation to :py:func:`permutation_to_block_permutations`
Compute the concatenation of permutations
``(1,2,0) [+] (0,2,1) --> (1,2,0,3,5,4)``
:param permutations: A list of permutation tuples
``[t = (t_0,...,t_n1), u = (u_0,...,u_n2),..., z = (z_0,...,z_nm)]``
:type permutations: list of tuples
:return: permutation image tuple
``s = t [+] u [+] ... [+] z``
:rtype: tuple |
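The concatenation example from the docstring, written out as a doctest-style check:
>>> permutation_from_block_permutations([(1, 2, 0), (0, 2, 1)])
(1, 2, 0, 3, 5, 4)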
def permute(sequence, permutation):
if len(sequence) != len(permutation):
raise ValueError((sequence, permutation))
if not check_permutation(permutation):
raise BadPermutationError(str(permutation))
if isinstance(sequence, str):
# str(generator) would give the generator's repr rather than the
# permuted string, so join the permuted characters explicitly
return ''.join(sequence[p] for p in permutation)
if type(sequence) in (list, tuple):
constructor = type(sequence)
else:
constructor = list
return constructor(sequence[p] for p in permutation) | Apply a permutation sigma({j}) to an arbitrary sequence.
:param sequence: Any finite length sequence ``[l_1,l_2,...l_n]``. If it is a list, tuple or str, the return type will be the same.
:param permutation: permutation image tuple
:type permutation: tuple
:return: The permuted sequence ``[l_sigma(1), l_sigma(2), ..., l_sigma(n)]``
:raise: BadPermutationError or ValueError |
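Doctest-style examples (the return type follows the input type):
>>> permute(['a', 'b', 'c'], (2, 0, 1))
['c', 'a', 'b']
>>> permute('abc', (2, 0, 1))
'cab'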
def full_block_perm(block_permutation, block_structure):
fblockp = []
bp_inv = invert_permutation(block_permutation)
for k, block_length in enumerate(block_structure):
p_k = block_permutation[k]
offset = sum([block_structure[bp_inv[j]] for j in range(p_k)])
fblockp += range(offset, offset + block_length)
assert sorted(fblockp) == list(range(sum(block_structure)))
return tuple(fblockp) | Extend a permutation of blocks to a permutation for the internal signals of all blocks.
E.g., say we have two blocks of sizes ('block structure') ``(2, 3)``,
then a block permutation that switches the blocks would be given by the image tuple ``(1,0)``.
However, to get a permutation of all 2+3 = 5 channels that realizes that block permutation we would need
``(3, 4, 0, 1, 2)``
:param block_permutation: permutation image tuple of block indices
:type block_permutation: tuple
:param block_structure: The block channel dimensions, block structure
:type block_structure: tuple
:return: A single permutation for all channels of all blocks.
:rtype: tuple |
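A doctest-style check of the block-swap example above:
>>> full_block_perm((1, 0), (2, 3))
(3, 4, 0, 1, 2)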
def block_perm_and_perms_within_blocks(permutation, block_structure):
nblocks = len(block_structure)
offsets = [sum(block_structure[:k]) for k in range(nblocks)]
images = [permutation[offset: offset + length] for (offset, length) in zip(offsets, block_structure)]
images_mins = list(map(min, images))
key_block_perm_inv = lambda block_index: images_mins[block_index]
block_perm_inv = tuple(sorted(range(nblocks), key=key_block_perm_inv))
block_perm = invert_permutation(block_perm_inv)
assert images_mins[block_perm_inv[0]] == min(images_mins)
assert images_mins[block_perm_inv[-1]] == max(images_mins)
perms_within_blocks = []
for (offset, length, image) in zip(offsets, block_structure, images):
block_key = lambda elt_index: image[elt_index]
within_inv = sorted(range(length), key=block_key)
within = invert_permutation(tuple(within_inv))
assert permutation[within_inv[0] + offset] == min(image)
assert permutation[within_inv[-1] + offset] == max(image)
perms_within_blocks.append(within)
return block_perm, perms_within_blocks | Decompose a permutation into a block permutation and into permutations
acting within each block.
:param permutation: The overall permutation to be factored.
:type permutation: tuple
:param block_structure: The channel dimensions of the blocks
:type block_structure: tuple
:return: ``(block_permutation, permutations_within_blocks)``
where ``block_permutation`` is an image tuple for a permutation of the block indices
and ``permutations_within_blocks`` is a list of image tuples for the permutations of the channels
within each block
:rtype: tuple |
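A doctest-style example; this decomposition is the converse of :py:func:`full_block_perm` for the block-swap case above:
>>> block_perm_and_perms_within_blocks((3, 4, 0, 1, 2), (2, 3))
((1, 0), [(0, 1), (0, 1, 2)])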
def reverse(self, viewname, args=None, kwargs=None):
# TODO: django-fluent-pages needs a public API to get the current page.
current_page = getattr(self.request, '_current_fluent_page', None)
return blog_reverse(viewname, args=args, kwargs=kwargs, current_page=current_page) | Reverse a blog page, taking different configuration options into account.
For example, the blog can be mounted using *django-fluent-pages* on multiple nodes. |
def _check_kets(*ops, same_space=False, disjunct_space=False):
if not all([(isinstance(o, State) and o.isket) for o in ops]):
raise TypeError("All operands must be Kets")
if same_space:
if not len({o.space for o in ops if o is not ZeroKet}) == 1:
raise UnequalSpaces(str(ops))
if disjunct_space:
spc = TrivialSpace
for o in ops:
if o.space & spc > TrivialSpace:
raise OverlappingSpaces(str(ops))
spc *= o.space | Check that all operands are Kets, and optionally that they share the same Hilbert space (`same_space`) or live in disjoint Hilbert spaces (`disjunct_space`). |
def args(self):
if self.space.has_basis or isinstance(self.label, SymbolicLabelBase):
return (self.label, )
else:
return (self.index, ) | Tuple containing `label_or_index` as its only element. |
def next(self, n=1):
if isinstance(self.label, SymbolicLabelBase):
next_label = self.space.next_basis_label_or_index(
self.label, n)
return BasisKet(next_label, hs=self.space)
else:
try:
next_index = self.space.next_basis_label_or_index(
self.index, n)
return BasisKet(next_index, hs=self.space)
except IndexError:
return ZeroKet | Move up by `n` steps in the Hilbert space::
>>> hs = LocalSpace('tls', basis=('g', 'e'))
>>> ascii(BasisKet('g', hs=hs).next())
'|e>^(tls)'
>>> ascii(BasisKet(0, hs=hs).next())
'|e>^(tls)'
We can also go multiple steps:
>>> hs = LocalSpace('ten', dimension=10)
>>> ascii(BasisKet(0, hs=hs).next(2))
'|2>^(ten)'
An increment that leads out of the Hilbert space returns zero::
>>> BasisKet(0, hs=hs).next(10)
ZeroKet |
def to_fock_representation(self, index_symbol='n', max_terms=None):
phase_factor = sympy.exp(
sympy.Rational(-1, 2) * self.ampl * self.ampl.conjugate())
if not isinstance(index_symbol, IdxSym):
index_symbol = IdxSym(index_symbol)
n = index_symbol
if max_terms is None:
index_range = IndexOverFockSpace(n, hs=self._hs)
else:
index_range = IndexOverRange(n, 0, max_terms-1)
term = (
(self.ampl**n / sympy.sqrt(sympy.factorial(n))) *
BasisKet(FockIndex(n), hs=self._hs))
return phase_factor * KetIndexedSum(term, index_range) | Return the coherent state written out as an indexed sum over Fock
basis states |
def codemirror_script(self, inputid):
varname = "{}_codemirror".format(inputid)
html = self.get_codemirror_field_js()
opts = self.codemirror_config()
return html.format(varname=varname, inputid=inputid,
settings=json.dumps(opts, sort_keys=True)) | Build CodeMirror HTML script tag which contains CodeMirror init.
Arguments:
inputid (string): Input id.
Returns:
string: HTML for field CodeMirror instance. |
def render(self, name, value, attrs=None, renderer=None):
if not hasattr(self, "editor_manifest"):
self.editor_manifest = self.init_manifest(self.config_name)
config = self.editor_manifest.get_config(self.config_name)
if config.get('embed_config'):
self.embed_config = True
context = self.get_context(name, value, attrs)
return self._render(self.template_name, context, renderer) | Returns this Widget rendered as HTML, as a Unicode string. |
def media(self):
if not hasattr(self, "editor_manifest"):
self.editor_manifest = self.init_manifest(self.config_name)
return forms.Media(
css={"all": self.editor_manifest.css()},
js=self.editor_manifest.js()
) | Add the necessary asset files (JS/CSS) to the widget's media.
Returns:
django.forms.Media: Media object with all assets from registered
config. |
def _get_common_block_structure(lhs_bs, rhs_bs):
# for convenience the arguments may also be Circuit objects
if isinstance(lhs_bs, Circuit):
lhs_bs = lhs_bs.block_structure
if isinstance(rhs_bs, Circuit):
rhs_bs = rhs_bs.block_structure
if sum(lhs_bs) != sum(rhs_bs):
raise IncompatibleBlockStructures(
'Blockstructures have different total channel numbers.')
if len(lhs_bs) == len(rhs_bs) == 0:
return ()
i = j = 1
lsum = 0
while True:
lsum = sum(lhs_bs[:i])
rsum = sum(rhs_bs[:j])
if lsum < rsum:
i += 1
elif rsum < lsum:
j += 1
else:
break
return (lsum, ) + _get_common_block_structure(lhs_bs[i:], rhs_bs[j:]) | For two block structures ``aa = (a1, a2, ..., an)``, ``bb = (b1, b2,
..., bm)`` generate the maximal common block structure so that every block
from aa and bb is contained in exactly one block of the resulting
structure. This is useful for determining how to apply the distributive
law when feeding two concatenated Circuit objects into each other.
Examples:
``(1, 1, 1), (2, 1) -> (2, 1)``
``(1, 1, 2, 1), (2, 1, 2) -> (2, 3)``
Args:
lhs_bs (tuple): first block structure
rhs_bs (tuple): second block structure |
def _tensor_decompose_series(lhs, rhs):
if isinstance(rhs, CPermutation):
raise CannotSimplify()
lhs_structure = lhs.block_structure
rhs_structure = rhs.block_structure
res_struct = _get_common_block_structure(lhs_structure, rhs_structure)
if len(res_struct) > 1:
blocks, oblocks = (
lhs.get_blocks(res_struct),
rhs.get_blocks(res_struct))
parallel_series = [SeriesProduct.create(lb, rb)
for (lb, rb) in zip(blocks, oblocks)]
return Concatenation.create(*parallel_series)
raise CannotSimplify() | Simplification method for lhs << rhs
Decompose a series product of two reducible circuits with compatible block
structures into a concatenation of individual series products between
subblocks. This method raises CannotSimplify when rhs is a CPermutation in
order not to conflict with other _rules. |
def _factor_permutation_for_blocks(cperm, rhs):
rbs = rhs.block_structure
if rhs == cid(rhs.cdim):
return cperm
if len(rbs) > 1:
residual_lhs, transformed_rhs, carried_through_lhs \
= cperm._factorize_for_rhs(rhs)
if residual_lhs == cperm:
raise CannotSimplify()
return SeriesProduct.create(residual_lhs, transformed_rhs,
carried_through_lhs)
raise CannotSimplify() | Simplification method for cperm << rhs.
Decompose a series product of a channel permutation and a reducible circuit
with appropriate block structure by decomposing the permutation into a
permutation within each block of rhs and a block permutation and a residual
part. This allows for achieving something close to a normal form for
circuit expressions. |
def _pull_out_perm_lhs(lhs, rest, out_port, in_port):
out_inv, lhs_red = lhs._factor_lhs(out_port)
return lhs_red << Feedback.create(SeriesProduct.create(*rest),
out_port=out_inv, in_port=in_port) | Pull out a permutation from the Feedback of a SeriesProduct with itself.
Args:
lhs (CPermutation): The permutation circuit
rest (tuple): The other SeriesProduct operands
out_port (int): The feedback output port index
in_port (int): The feedback input port index
Returns:
Circuit: The simplified circuit |
def _pull_out_unaffected_blocks_lhs(lhs, rest, out_port, in_port):
_, block_index = lhs.index_in_block(out_port)
bs = lhs.block_structure
nbefore, nblock, nafter = (sum(bs[:block_index]),
bs[block_index],
sum(bs[block_index + 1:]))
before, block, after = lhs.get_blocks((nbefore, nblock, nafter))
if before != cid(nbefore) or after != cid(nafter):
outer_lhs = before + cid(nblock - 1) + after
inner_lhs = cid(nbefore) + block + cid(nafter)
return outer_lhs << Feedback.create(
SeriesProduct.create(inner_lhs, *rest),
out_port=out_port, in_port=in_port)
elif block == cid(nblock):
outer_lhs = before + cid(nblock - 1) + after
return outer_lhs << Feedback.create(
SeriesProduct.create(*rest),
out_port=out_port, in_port=in_port)
raise CannotSimplify() | In a self-Feedback of a series product, where the left-most operand is
reducible, pull all non-trivial blocks outside of the feedback.
Args:
lhs (Circuit): The reducible circuit
rest (tuple): The other SeriesProduct operands
out_port (int): The feedback output port index
in_port (int): The feedback input port index
Returns:
Circuit: The simplified circuit |
def _pull_out_perm_rhs(rest, rhs, out_port, in_port):
in_im, rhs_red = rhs._factor_rhs(in_port)
return (Feedback.create(
SeriesProduct.create(*rest),
out_port=out_port, in_port=in_im) << rhs_red) | Similar to :func:`_pull_out_perm_lhs` but on the RHS of a series
product self-feedback. |
def _pull_out_unaffected_blocks_rhs(rest, rhs, out_port, in_port):
_, block_index = rhs.index_in_block(in_port)
rest = tuple(rest)
bs = rhs.block_structure
(nbefore, nblock, nafter) = (sum(bs[:block_index]),
bs[block_index],
sum(bs[block_index + 1:]))
before, block, after = rhs.get_blocks((nbefore, nblock, nafter))
if before != cid(nbefore) or after != cid(nafter):
outer_rhs = before + cid(nblock - 1) + after
inner_rhs = cid(nbefore) + block + cid(nafter)
return Feedback.create(SeriesProduct.create(*(rest + (inner_rhs,))),
out_port=out_port, in_port=in_port) << outer_rhs
elif block == cid(nblock):
outer_rhs = before + cid(nblock - 1) + after
return Feedback.create(SeriesProduct.create(*rest),
out_port=out_port, in_port=in_port) << outer_rhs
raise CannotSimplify() | Similar to :func:`_pull_out_unaffected_blocks_lhs` but on the RHS of a
series product self-feedback. |
def _series_feedback(series, out_port, in_port):
series_s = series.series_inverse().series_inverse()
if series_s == series:
raise CannotSimplify()
return series_s.feedback(out_port=out_port, in_port=in_port) | Invert a series self-feedback twice to get rid of unnecessary
permutations. |
def properties_for_args(cls, arg_names='_arg_names'):
from qnet.algebra.core.scalar_algebra import Scalar
scalar_args = False
if hasattr(cls, '_scalar_args'):
scalar_args = cls._scalar_args
for arg_name in getattr(cls, arg_names):
def get_arg(self, name):
val = getattr(self, "_%s" % name)
if scalar_args:
assert isinstance(val, Scalar)
return val
prop = property(partial(get_arg, name=arg_name))
doc = "The `%s` argument" % arg_name
if scalar_args:
doc += ", as a :class:`.Scalar` instance."
else:
doc += "."
prop.__doc__ = doc
setattr(cls, arg_name, prop)
cls._has_properties_for_args = True
return cls | For a class with an attribute `arg_names` containing a list of names,
add a property for every name in that list.
It is assumed that there is an instance attribute ``self._<arg_name>``,
which is returned by the `arg_name` property. The decorator also adds a
class attribute :attr:`_has_properties_for_args` that may be used to ensure
that a class is decorated. |
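A minimal sketch of how the decorator might be used; the class and argument names here are hypothetical, and qnet must be importable since the decorator imports :class:`.Scalar` internally:
>>> @properties_for_args
... class Rotation:
...     _arg_names = ('angle',)
...     def __init__(self, angle):
...         self._angle = angle
>>> Rotation(0.5).angle
0.5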
def get_category(self, slug):
try:
return get_category_for_slug(slug)
except ObjectDoesNotExist as e:
raise Http404(str(e)) | Get the category object |
def validate_unique_slug(self, cleaned_data):
date_kwargs = {}
error_msg = _("The slug is not unique")
# The /year/month/slug/ URL determines when a slug can be unique.
pubdate = cleaned_data['publication_date'] or now()
if '{year}' in appsettings.FLUENT_BLOGS_ENTRY_LINK_STYLE:
date_kwargs['year'] = pubdate.year
error_msg = _("The slug is not unique within its publication year.")
if '{month}' in appsettings.FLUENT_BLOGS_ENTRY_LINK_STYLE:
date_kwargs['month'] = pubdate.month
error_msg = _("The slug is not unique within its publication month.")
if '{day}' in appsettings.FLUENT_BLOGS_ENTRY_LINK_STYLE:
date_kwargs['day'] = pubdate.day
error_msg = _("The slug is not unique within its publication day.")
date_range = get_date_range(**date_kwargs)
# Base filters are configurable for translation support.
dup_filters = self.get_unique_slug_filters(cleaned_data)
if date_range:
dup_filters['publication_date__range'] = date_range
dup_qs = EntryModel.objects.filter(**dup_filters)
if self.instance and self.instance.pk:
dup_qs = dup_qs.exclude(pk=self.instance.pk)
# Test whether the slug is unique in the current month
# Note: doesn't take changes to FLUENT_BLOGS_ENTRY_LINK_STYLE into account.
if dup_qs.exists():
raise ValidationError(error_msg) | Test whether the slug is unique within a given time period. |
def substitute(expr, var_map):
try:
if isinstance(expr, SympyBasic):
sympy_var_map = {
k: v for (k, v) in var_map.items()
if isinstance(k, SympyBasic)}
return expr.subs(sympy_var_map)
else:
return expr.substitute(var_map)
except AttributeError:
if expr in var_map:
return var_map[expr]
return expr | Substitute symbols or (sub-)expressions with the given replacements and
re-evaluate the result.
Args:
expr: The expression in which to perform the substitution
var_map (dict): The substitution dictionary. |
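A minimal doctest-style example with a plain SymPy expression (symbol names hypothetical):
>>> a, b = sympy.symbols('a b')
>>> substitute(a + b, {a: 2})
b + 2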
def _apply_rules_no_recurse(expr, rules):
try:
# `rules` is an OrderedDict key => (pattern, replacement)
items = rules.items()
except AttributeError:
# `rules` is a list of (pattern, replacement) tuples
items = enumerate(rules)
for key, (pat, replacement) in items:
matched = pat.match(expr)
if matched:
try:
return replacement(**matched)
except CannotSimplify:
pass
return expr | Non-recursively match `expr` against all `rules`. |
def _rules_attr(cls):
from qnet.algebra.core.algebraic_properties import (
match_replace, match_replace_binary)
if match_replace in cls.simplifications:
return '_rules'
elif match_replace_binary in cls.simplifications:
return '_binary_rules'
else:
raise TypeError(
"class %s does not have match_replace or "
"match_replace_binary in its simplifications" % cls.__name__) | Return the name of the attribute with rules for :meth:`create` |
def add_rule(cls, name, pattern, replacement, attr=None):
from qnet.utils.check_rules import check_rules_dict
if attr is None:
attr = cls._rules_attr()
if name in getattr(cls, attr):
raise ValueError(
"Duplicate key '%s': rule already exists" % name)
getattr(cls, attr).update(check_rules_dict(
[(name, (pattern, replacement))])) | Add an algebraic rule for :meth:`create` to the class
Args:
name (str): Name of the rule. This is used for debug logging to
allow an analysis of which rules were applied when creating an
expression. The `name` can be arbitrary, but it must be unique.
Built-in rules have names ``'Rxxx'`` where ``x`` is a digit
pattern (.Pattern): A pattern constructed by :func:`.pattern_head`
to match a :class:`.ProtoExpr`
replacement (callable): callable that takes the wildcard names
defined in `pattern` as keyword arguments and returns an
evaluated expression.
attr (None or str): Name of the class attribute to which to add the
rule. If None, one of ``'_rules'``, ``'_binary_rules'`` is
automatically chosen
Raises:
TypeError: if `name` is not a :class:`str` or `pattern` is not a
:class:`.Pattern` instance
ValueError: if `pattern` is not set up to match a
:class:`.ProtoExpr`; if there is already a rule with the
same `name`; if `replacement` is not a callable or does not
take all the wildcard names in `pattern` as arguments
AttributeError: If invalid `attr`
Note:
The "automatic" rules added by this method are applied *before*
expressions are instantiated (against a corresponding
:class:`.ProtoExpr`). In contrast,
:meth:`apply_rules`/:meth:`apply_rule` are applied to fully
instantiated objects.
The :func:`.temporary_rules` context manager may be used to create
a context in which rules may be defined locally. |
def show_rules(cls, *names, attr=None):
from qnet.printing import srepr
try:
if attr is None:
attr = cls._rules_attr()
rules = getattr(cls, attr)
except TypeError:
rules = {}
for (name, rule) in rules.items():
if len(names) > 0 and name not in names:
continue
pat, repl = rule
print(name)
print(" PATTERN:")
print(textwrap.indent(
textwrap.dedent(srepr(pat, indented=True)),
prefix=" "*8))
print(" REPLACEMENT:")
print(textwrap.indent(
textwrap.dedent(inspect.getsource(repl).rstrip()),
prefix=" "*8)) | Print algebraic rules used by :class:`create`
Print a summary of the algebraic rules with the given names, or all
rules if no names are given.
Args:
names (str): Names of rules to show
attr (None or str): Name of the class attribute from which to get
the rules. Cf. :meth:`add_rule`.
Raises:
AttributeError: If invalid `attr` |
def del_rules(cls, *names, attr=None):
if attr is None:
attr = cls._rules_attr()
if len(names) == 0:
getattr(cls, attr) # raise AttributeError if wrong attr
setattr(cls, attr, OrderedDict())
else:
for name in names:
del getattr(cls, attr)[name] | Delete algebraic rules used by :meth:`create`
Remove the rules with the given `names`, or all rules if no names are
given
Args:
names (str): Names of rules to delete
attr (None or str): Name of the class attribute from which to
delete the rules. Cf. :meth:`add_rule`.
Raises:
KeyError: If any rules in `names` does not exist
AttributeError: If invalid `attr` |
def rules(cls, attr=None):
try:
if attr is None:
attr = cls._rules_attr()
return getattr(cls, attr).keys()
except TypeError:
return () | Iterable of rule names used by :meth:`create`
Args:
attr (None or str): Name of the class attribute to which to get the
names. If None, one of ``'_rules'``, ``'_binary_rules'`` is
automatically chosen |
def kwargs(self):
# Subclasses must override this property if and only if they define
# keyword-only arguments in their __init__ method
if hasattr(self, '_has_kwargs') and self._has_kwargs:
raise NotImplementedError(
"Class %s does not provide a kwargs property"
% str(self.__class__.__name__))
return {} | The dictionary of keyword-only arguments for the instantiation of
the Expression |
def substitute(self, var_map):
if self in var_map:
return var_map[self]
return self._substitute(var_map) | Substitute sub-expressions
Args:
var_map (dict): Dictionary with entries of the form
``{expr: substitution}`` |
def _substitute(self, var_map, safe=False):
if self in var_map:
if not safe or (type(var_map[self]) == type(self)):
return var_map[self]
if isinstance(self.__class__, Singleton):
return self
new_args = [substitute(arg, var_map) for arg in self.args]
new_kwargs = {key: substitute(val, var_map)
for (key, val) in self.kwargs.items()}
if safe:
return self.__class__(*new_args, **new_kwargs)
else:
return self.create(*new_args, **new_kwargs) | Implementation of :meth:`substitute`.
For internal use, the `safe` keyword argument allows to perform a
substitution on the `args` and `kwargs` of the expression only,
guaranteeing that the type of the expression does not change, at the
cost of possibly not returning a maximally simplified expression. The
`safe` keyword is not handled recursively, i.e. any `args`/`kwargs`
will be fully simplified, possibly changing their types. |
def apply_rules(self, rules, recursive=True):
if recursive:
new_args = [_apply_rules(arg, rules) for arg in self.args]
new_kwargs = {
key: _apply_rules(val, rules)
for (key, val) in self.kwargs.items()}
else:
new_args = self.args
new_kwargs = self.kwargs
simplified = self.create(*new_args, **new_kwargs)
return _apply_rules_no_recurse(simplified, rules) | Rebuild the expression while applying a list of rules
The rules are applied against the instantiated expression, and any
sub-expressions if `recursive` is True. Rule application is best thought
of as a pattern-based substitution. This is different from the
*automatic* rules that :meth:`create` uses (see :meth:`add_rule`),
which are applied *before* expressions are instantiated.
Args:
rules (list or ~collections.OrderedDict): List of rules or
dictionary mapping names to rules, where each rule is a tuple
(:class:`Pattern`, replacement callable), cf.
:meth:`apply_rule`
recursive (bool): If true (default), apply rules to all arguments
and keyword arguments of the expression. Otherwise, only the
expression itself will be re-instantiated.
If `rules` is a dictionary, the keys (rules names) are used only for
debug logging, to allow an analysis of which rules lead to the final
form of an expression. |
def apply_rule(self, pattern, replacement, recursive=True):
return self.apply_rules([(pattern, replacement)], recursive=recursive) | Apply a single rules to the expression
This is equivalent to :meth:`apply_rules` with
``rules=[(pattern, replacement)]``
Args:
pattern (.Pattern): A pattern containing one or more wildcards
replacement (callable): A callable that takes the wildcard names in
`pattern` as keyword arguments, and returns a replacement for
any expression that `pattern` matches.
Example:
Consider the following Heisenberg Hamiltonian::
>>> tls = SpinSpace(label='s', spin='1/2')
>>> i, j, n = symbols('i, j, n', cls=IdxSym)
>>> J = symbols('J', cls=sympy.IndexedBase)
>>> def Sig(i):
... return OperatorSymbol(
... StrLabel(sympy.Indexed('sigma', i)), hs=tls)
>>> H = - Sum(i, tls)(Sum(j, tls)(
... J[i, j] * Sig(i) * Sig(j)))
>>> unicode(H)
'- (∑_{i,j ∈ ℌₛ} J_ij σ̂_i^(s) σ̂_j^(s))'
We can transform this into a classical Hamiltonian by replacing the
operators with scalars::
>>> H_classical = H.apply_rule(
... pattern(OperatorSymbol, wc('label', head=StrLabel)),
... lambda label: label.expr * IdentityOperator)
>>> unicode(H_classical)
'- (∑_{i,j ∈ ℌₛ} J_ij σ_i σ_j)'
def free_symbols(self):
if self._free_symbols is None:
res = set.union(
set([]), # dummy arg (union fails without arguments)
*[_free_symbols(val) for val in self.kwargs.values()])
res.update(
set([]), # dummy arg (update fails without arguments)
*[_free_symbols(arg) for arg in self.args])
self._free_symbols = res
return self._free_symbols | Set of free SymPy symbols contained within the expression. |
def bound_symbols(self):
if self._bound_symbols is None:
res = set.union(
set([]), # dummy arg (union fails without arguments)
*[_bound_symbols(val) for val in self.kwargs.values()])
res.update(
set([]), # dummy arg (update fails without arguments)
*[_bound_symbols(arg) for arg in self.args])
self._bound_symbols = res
return self._bound_symbols | Set of bound SymPy symbols in the expression |
def all_symbols(self):
if self._all_symbols is None:
self._all_symbols = self.free_symbols | self.bound_symbols
return self._all_symbols | Combination of :attr:`free_symbols` and :attr:`bound_symbols` |
def download(url, dest):
u = urllib.FancyURLopener()
logger.info("Downloading %s..." % url)
u.retrieve(url, dest)
logger.info('Done, see %s' % dest)
return dest | Platform-agnostic downloader. |
def requirements_check():
required_programs = [
('samtools',
'http://samtools.sourceforge.net/'),
('bedtools',
'http://bedtools.readthedocs.org/en/latest/'),
('bigWigToBedGraph',
'http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/'),
('bedGraphToBigWig',
'http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/'),
]
for req, url in required_programs:
try:
p = subprocess.Popen(
[req], stderr=subprocess.PIPE, stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
except OSError:
raise ValueError("Please install %s (%s)" % (req, url)) | Ensure we have programs needed to download/manipulate the data |
def _up_to_date(md5, fn):
if os.path.exists(fn):
if hashlib.md5(open(fn, 'rb').read()).hexdigest() == md5:
logger.info('md5sum match for %s' % fn)
return True
else:
logger.info('wrong md5sum for %s' % fn)
os.unlink(fn) | Make sure md5sum(fn) == md5, and if not, delete `fn`. |
def logged_command(cmds):
"helper function to log a command and then run it"
logger.info(' '.join(cmds))
os.system(' '.join(cmds)) | helper function to log a command and then run it |
def get_cufflinks():
"Download cufflinks GTF files"
for size, md5, url in cufflinks:
cuff_gtf = os.path.join(args.data_dir, os.path.basename(url))
if not _up_to_date(md5, cuff_gtf):
download(url, cuff_gtf) | Download cufflinks GTF files |
def get_bams():
for size, md5, url in bams:
bam = os.path.join(
args.data_dir,
os.path.basename(url).replace('.bam', '_%s.bam' % CHROM))
if not _up_to_date(md5, bam):
logger.info(
'Downloading reads on chromosome %s from %s to %s'
% (CHROM, url, bam))
cmds = ['samtools', 'view', '-b', url, COORD, '>', bam]
logged_command(cmds)
bai = bam + '.bai'
if not os.path.exists(bai):
logger.info('indexing %s' % bam)
cmds = [
'samtools',
'index',
bam]
logged_command(cmds)
if os.path.exists(os.path.basename(url) + '.bai'):
os.unlink(os.path.basename(url) + '.bai')
for size, md5, fn in bais:
if not _up_to_date(md5, fn):
# index the BAM that corresponds to this .bai file
cmds = [
'samtools', 'index', fn.replace('.bai', '')]
logged_command(cmds) | Download BAM files if needed, extract only chr17 reads, and regenerate .bai |
def get_gtf():
size, md5, url = GTF
full_gtf = os.path.join(args.data_dir, os.path.basename(url))
subset_gtf = os.path.join(
args.data_dir,
os.path.basename(url).replace('.gtf.gz', '_%s.gtf' % CHROM))
if not _up_to_date(md5, subset_gtf):
download(url, full_gtf)
cmds = [
'zcat', '<',
full_gtf,
'|', 'awk -F "\\t" \'{if ($1 == "%s") print $0}\''
% CHROM.replace('chr', ''),
'|', 'awk \'{print "chr"$0}\'', '>', subset_gtf]
logged_command(cmds) | Download GTF file from Ensembl, only keeping the chr17 entries. |
def make_db():
size, md5, fn = DB
if not _up_to_date(md5, fn):
gffutils.create_db(fn.replace('.db', ''), fn, verbose=True, force=True) | Create gffutils database |
def cufflinks_conversion():
for size, md5, fn in cufflinks_tables:
fn = os.path.join(args.data_dir, fn)
table = fn.replace('.gtf.gz', '.table')
if not _up_to_date(md5, table):
logger.info("Converting Cufflinks GTF %s to table" % fn)
fout = open(table, 'w')
fout.write('id\tscore\tfpkm\n')
x = pybedtools.BedTool(fn)
seen = set()
for i in x:
accession = i['transcript_id'].split('.')[0]
if accession not in seen:
seen.update([accession])
fout.write(
'\t'.join([accession, i.score, i['FPKM']]) + '\n')
fout.close() | Convert Cufflinks output GTF files into tables of score and FPKM. |
def plot(self, feature):
if isinstance(feature, gffutils.Feature):
feature = asinterval(feature)
self.make_fig()
axes = []
for ax, method in self.panels():
feature = method(ax, feature)
axes.append(ax)
return axes | Spawns a new figure showing data for `feature`.
:param feature: A `pybedtools.Interval` object
Using the pybedtools.Interval `feature`, creates figure specified in
:meth:`BaseMiniBrowser.make_fig` and plots data on panels according to
`self.panels()`. |
def make_fig(self):
self.fig = plt.figure(figsize=(8, 4))
self._all_figures.append(self.fig) | Figure constructor, called before `self.plot()` |
def example_panel(self, ax, feature):
txt = '%s:%s-%s' % (feature.chrom, feature.start, feature.stop)
ax.text(0.5, 0.5, txt, transform=ax.transAxes)
return feature | A example panel that just prints the text of the feature. |
def signal_panel(self, ax, feature):
for gs, kwargs in zip(self.genomic_signal_objs, self.plotting_kwargs):
x, y = gs.local_coverage(feature, **self.local_coverage_kwargs)
ax.plot(x, y, **kwargs)
ax.axis('tight')
return feature | Plots each genomic signal as a line using the corresponding
plotting_kwargs |
def panels(self):
ax1 = self.fig.add_subplot(211)
ax2 = self.fig.add_subplot(212, sharex=ax1)
return (ax2, self.gene_panel), (ax1, self.signal_panel) | Add 2 panels to the figure, top for signal and bottom for gene models |
def gene_panel(self, ax, feature):
from gffutils.contrib.plotting import Gene
extent = [feature.start, feature.stop]
nearby_genes = self.db.region(
(feature.chrom, feature.start, feature.stop), featuretype='gene')
ybase = 0
ngenes = 0
for nearby_gene in nearby_genes:
ngenes += 1
extent.extend([nearby_gene.start, nearby_gene.stop])
gene_collection = Gene(
self.db,
nearby_gene,
transcripts=['mRNA'],
cds=['CDS'],
utrs=['exon'],
ybase=ybase,
color="0.5", picker=5)
gene_collection.name = nearby_gene.id
gene_collection.add_to_ax(ax)
ybase += gene_collection.max_y
xmin = min(extent)
xmax = max(extent)
ymax = ngenes
# 1% padding seems to work well
padding = (xmax - xmin) * 0.01
ax.axis('tight')
# add lines indicating extent of current feature
vline_kwargs = dict(color='k', linestyle='--')
ax.axvline(feature.start, **vline_kwargs)
ax.axvline(feature.stop, **vline_kwargs)
# Make a new feature to represent the region plus surrounding genes
interval = pybedtools.create_interval_from_list(feature.fields)
interval.start = xmin - padding
interval.stop = xmax + padding
interval.strand = '.'
return interval | Plots gene models on an Axes.
Queries the database
:param ax: matplotlib.Axes object
:param feature: pybedtools.Interval |
def simple():
MAX_VALUE = 100
# Create our test progress bar
bar = Bar(max_value=MAX_VALUE, fallback=True)
bar.cursor.clear_lines(2)
# Before beginning to draw our bars, we save the position
# of our cursor so we can restore back to this position before writing
# the next time.
bar.cursor.save()
for i in range(MAX_VALUE + 1):
sleep(0.1 * random.random())
# We restore the cursor to saved position before writing
bar.cursor.restore()
# Now we draw the bar
bar.draw(value=i) | Simple example using just the Bar class
This example is intended to show usage of the Bar class at the lowest
level. |
def ci_plot(x, arr, conf=0.95, ax=None, line_kwargs=None, fill_kwargs=None):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
line_kwargs = line_kwargs or {}
fill_kwargs = fill_kwargs or {}
m, lo, hi = ci(arr, conf)
ax.plot(x, m, **line_kwargs)
ax.fill_between(x, lo, hi, **fill_kwargs)
return ax | Plots the mean and 95% ci for the given array on the given axes
Parameters
----------
x : 1-D array-like
x values for the plot
arr : 2-D array-like
The array to calculate mean and std for
conf : float [.5 - 1]
Confidence interval to use
ax : matplotlib.Axes
The axes object on which to plot
line_kwargs : dict
Additional kwargs passed to Axes.plot
fill_kwargs : dict
Additional kwargs passed to Axes.fill_between |
def add_labels_to_subsets(ax, subset_by, subset_order, text_kwargs=None,
add_hlines=True, hline_kwargs=None):
_text_kwargs = dict(transform=ax.get_yaxis_transform())
if text_kwargs:
_text_kwargs.update(text_kwargs)
_hline_kwargs = dict(color='k')
if hline_kwargs:
_hline_kwargs.update(hline_kwargs)
pos = 0
for label in subset_order:
ind = subset_by == label
last_pos = pos
pos += sum(ind)
if add_hlines:
ax.axhline(pos, **_hline_kwargs)
ax.text(
1.1,
last_pos + (pos - last_pos)/2.0,
label,
**_text_kwargs) | Helper function for adding labels to subsets within a heatmap.
Assumes that imshow() was called with `subsets` and `subset_order`.
Parameters
----------
ax : matplotlib.Axes
The axes to label. Generally you can use `fig.array_axes` attribute of
the Figure object returned by `metaseq.plotutils.imshow`.
subset_by, subset_order : array, list
See `metaseq.plotutils.imshow()` docstring; these should be the same
`subsets` and `subset_order` that were provided to that function. |
def ci(arr, conf=0.95):
m = arr.mean(axis=0)
n = len(arr)
se = arr.std(axis=0) / np.sqrt(n)
h = se * stats.t._ppf((1 + conf) / 2., n - 1)
return m, m - h, m + h | Column-wise confidence interval.
Parameters
----------
arr : array-like
conf : float
Confidence interval
Returns
-------
m : array
column-wise mean
lower : array
lower column-wise confidence bound
upper : array
upper column-wise confidence bound |
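A short usage sketch with hypothetical data:
>>> arr = np.random.rand(50, 10)
>>> m, lo, hi = ci(arr)        # column-wise mean and 95% bounds
>>> m.shape, lo.shape, hi.shape
((10,), (10,), (10,))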
def nice_log(x):
neg = x < 0
xi = np.log2(np.abs(x) + 1)
xi[neg] = -xi[neg]
return xi | Uses a log scale but with negative numbers.
:param x: NumPy array |
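A doctest-style example showing the symmetry around zero:
>>> nice_log(np.array([-3., 0., 3.]))
array([-2.,  0.,  2.])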
def tip_zscores(a):
weighted = a * a.mean(axis=0)
scores = weighted.sum(axis=1)
zscores = (scores - scores.mean()) / scores.std()
return zscores | Calculates the "target identification from profiles" (TIP) zscores
from Cheng et al. 2011, Bioinformatics 27(23):3221-3227.
:param a: NumPy array, where each row is the signal for a feature. |
def tip_fdr(a, alpha=0.05):
zscores = tip_zscores(a)
pvals = stats.norm.pdf(zscores)
rejected, fdrs = fdrcorrection(pvals)
return fdrs | Returns adjusted TIP p-values for a particular `alpha`.
(see :func:`tip_zscores` for more info)
:param a: NumPy array, where each row is the signal for a feature
:param alpha: False discovery rate |
def prepare_logged(x, y):
xi = np.log2(x)
yi = np.log2(y)
xv = np.isfinite(xi)
yv = np.isfinite(yi)
global_min = min(xi[xv].min(), yi[yv].min())
global_max = max(xi[xv].max(), yi[yv].max())
xi[~xv] = global_min
yi[~yv] = global_min
return xi, yi | Transform `x` and `y` to a log scale while dealing with zeros.
This function scales `x` and `y` such that the points that are zero in one
array are set to the min of the other array.
When plotting expression data, frequently one sample will have reads in
a particular feature but the other sample will not. Expression data also
tends to look better on a log scale, but log(0) is undefined and therefore
cannot be shown on a plot. This function allows these points to be shown,
piled up along one side of the plot.
:param x,y: NumPy arrays |
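A small sketch of the zero handling with hypothetical values (numpy will emit a divide-by-zero RuntimeWarning for the zeros):
>>> x = np.array([0., 2., 8.])
>>> y = np.array([1., 2., 0.])
>>> prepare_logged(x, y)       # zeros pile up at the global log-minimum
(array([0., 1., 3.]), array([0., 1., 0.]))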
def matrix_and_line_shell(figsize=(5, 12), strip=False):
fig = plt.figure(figsize=figsize)
# Constants to keep track
if strip:
STRIP_COLS = 1
else:
STRIP_COLS = 0
ROWS = 4
COLS = 8 + STRIP_COLS
MAT_COLS = 7
MAT_ROWS = 3
LINE_ROWS = ROWS - MAT_ROWS
mat_ax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(0, STRIP_COLS),
rowspan=MAT_ROWS,
colspan=MAT_COLS,
)
line_ax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(MAT_ROWS, STRIP_COLS),
rowspan=LINE_ROWS,
colspan=MAT_COLS,
sharex=mat_ax)
if strip:
strip_ax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(0, 0),
rowspan=MAT_ROWS,
colspan=STRIP_COLS,
sharey=mat_ax,
)
else:
strip_ax = None
cax = plt.subplot2grid(
shape=(ROWS, COLS),
loc=(ROWS - MAT_ROWS, MAT_COLS + STRIP_COLS),
rowspan=1,
colspan=1,
)
fig.subplots_adjust(hspace=0.1, wspace=0.2, right=0.88, left=0.23)
return fig, mat_ax, line_ax, strip_ax, cax | Helper function to construct an empty figure that has space for a matrix,
a summary line plot directly below it, a colorbar axis, and an optional
"strip" axis that parallels the matrix (and shares its y-axis) where data
can be added to create callbacks.
Returns a tuple of (fig, matrix_ax, line_ax, strip_ax, colorbar_ax) that
can then be used to plot upon.
:param figsize: Tuple of (width, height), in inches, for the figure to be created
:param strip: If `strip` is False, then the returned `strip_ax` will be
None and no strip axes will be created. |
def _updatecopy(orig, update_with, keys=None, override=False):
d = orig.copy()
if keys is None:
keys = update_with.keys()
for k in keys:
if k in update_with:
if k in d and not override:
continue
d[k] = update_with[k]
return d | Update a copy of `orig` with `update_with`. If `keys` is a list, then only update
with those keys. |
def add_legends(self, xhists=True, yhists=False, scatter=True, **kwargs):
axs = []
if xhists:
axs.extend(self.hxs)
if yhists:
axs.extend(self.hys)
if scatter:
axs.extend(self.ax)
for ax in axs:
ax.legend(**kwargs) | Add legends to axes. |
def genomic_signal(fn, kind):
try:
klass = _registry[kind.lower()]
except KeyError:
raise ValueError(
'No support for %s format, choices are %s'
% (kind, _registry.keys()))
m = klass(fn)
m.kind = kind
return m | Factory function that makes the right class for the file format.
Typically you'll only need this function to create a new genomic signal
object.
:param fn: Filename
:param kind:
String. Format of the file; see
metaseq.genomic_signal._registry.keys() |
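A usage sketch with a hypothetical filename (the file must exist, since the adapter opens it):
>>> sig = genomic_signal('reads.bam', 'bam')   # 'bam' selects the BAM adapter class
>>> sig.kind
'bam'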
def array(self, features, processes=None, chunksize=1, ragged=False,
**kwargs):
if processes is not None:
arrays = _array_parallel(
self.fn, self.__class__, features, processes=processes,
chunksize=chunksize, **kwargs)
else:
arrays = _array(self.fn, self.__class__, features, **kwargs)
if not ragged:
stacked_arrays = np.row_stack(arrays)
del arrays
return stacked_arrays
else:
return arrays | Creates an MxN NumPy array of genomic signal for the region defined by
each feature in `features`, where M=len(features) and N=(bins or
feature length)
Parameters
----------
features : iterable of interval-like objects
An iterable of interval-like objects; see docstring for
`local_coverage` method for more details.
processes : int or None
If not None, then create the array in parallel, giving each process
chunks of length `chunksize` to work on.
chunksize : int
`features` will be split into `chunksize` pieces, and each piece
will be given to a different process. The optimum value is
dependent on the size of the features and the underlying data set,
but `chunksize=100` is a good place to start.
ragged : bool
If False (default), then return a 2-D NumPy array. This requires
all rows to have the same number of columns, which you get when
supplying `bins` or if all features are of uniform length. If
True, then return a list of 1-D NumPy arrays
Notes
-----
Additional keyword args are passed to local_coverage() which performs
the work for each feature; see that method for more details. |
def genome(self):
# This gets the underlying pysam Samfile object
f = self.adapter.fileobj
d = {}
for ref, length in zip(f.references, f.lengths):
d[ref] = (0, length)
return d | "genome" dictionary ready for pybedtools, based on the BAM header. |
def mapped_read_count(self, force=False):
# Already run?
if self._readcount and not force:
return self._readcount
if os.path.exists(self.fn + '.mmr') and not force:
for line in open(self.fn + '.mmr'):
if line.startswith('#'):
continue
self._readcount = float(line.strip())
return self._readcount
cmds = ['samtools',
'view',
'-c',
'-F', '0x4',
self.fn]
p = subprocess.Popen(
cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if stderr:
sys.stderr.write('samtools says: %s' % stderr)
return None
mapped_reads = int(stdout)
# write to file so the next time you need the lib size you can access
# it quickly
if not os.path.exists(self.fn + '.mmr'):
fout = open(self.fn + '.mmr', 'w')
fout.write(str(mapped_reads) + '\n')
fout.close()
self._readcount = mapped_reads
return self._readcount | Counts total reads in a BAM file.
If a file self.fn + '.mmr' exists, then just read the first line of
that file that doesn't start with a "#". If such a file doesn't exist,
then it will be created with the number of reads as the first and only
line in the file.
The result is also stored in self._readcount so that the time-consuming
part only runs once; use force=True to force re-count.
Parameters
----------
force : bool
If True, then force a re-count; otherwise use cached data if
available. |
def print_row_perc_table(table, row_labels, col_labels):
r1c1, r1c2, r2c1, r2c2 = map(float, table)
row1 = r1c1 + r1c2
row2 = r2c1 + r2c2
blocks = [
(r1c1, row1),
(r1c2, row1),
(r2c1, row2),
(r2c2, row2)]
new_table = []
for cell, row in blocks:
try:
x = cell / row
except ZeroDivisionError:
x = 0
new_table.append(x)
s = print_2x2_table(new_table, row_labels, col_labels, fmt="%.2f")
s = s.splitlines(True)
del s[5]
return ''.join(s) | Given a table, print each cell as a percentage of its row rather than the totals. |
def print_col_perc_table(table, row_labels, col_labels):
r1c1, r1c2, r2c1, r2c2 = map(float, table)
col1 = r1c1 + r2c1
col2 = r1c2 + r2c2
blocks = [
(r1c1, col1),
(r1c2, col2),
(r2c1, col1),
(r2c2, col2)]
new_table = []
for cell, row in blocks:
try:
x = cell / row
except ZeroDivisionError:
x = 0
new_table.append(x)
s = print_2x2_table(new_table, row_labels, col_labels, fmt="%.2f")
s = s.splitlines(False)
last_space = s[0].rindex(" ")
new_s = [i[:last_space] for i in s]
return '\n'.join(new_s) | Given a table, print each cell as a percentage of its column rather than the totals. |
def table_maker(subset, ind1, ind2, row_labels, col_labels, title):
table = [
sum(subset & ind1 & ind2),
sum(subset & ind1 & ~ind2),
sum(subset & ~ind1 & ind2),
sum(subset & ~ind1 & ~ind2)
]
print()
print(title)
print('-' * len(title))
print(print_2x2_table(table, row_labels=row_labels, col_labels=col_labels))
print(print_row_perc_table(
table, row_labels=row_labels, col_labels=col_labels))
print(print_col_perc_table(
table, row_labels=row_labels, col_labels=col_labels))
print(fisher.pvalue(*table)) | `subset` provides a subsetted boolean of items to consider. If no subset,
you can use all with `np.ones_like(ind1) == 1`
`ind1` is used to subset rows, e.g., log2fc > 0. This is used for rows, so
row_label might be ['upregulated', 'others']
`ind2` is used to subset cols. For example, col_labels would be
['bound', 'unbound'] |
def draw(self, tree, bar_desc=None, save_cursor=True, flush=True):
if save_cursor:
self.cursor.save()
tree = deepcopy(tree)
# TODO: Automatically collapse hierarchy so something
# will always be displayable (well, unless the top-level)
# contains too many to display
lines_required = self.lines_required(tree)
ensure(lines_required <= self.cursor.term.height,
LengthOverflowError,
"Terminal is not long ({} rows) enough to fit all bars "
"({} rows).".format(self.cursor.term.height, lines_required))
bar_desc = BarDescriptor(type=Bar) if not bar_desc else bar_desc
self._calculate_values(tree, bar_desc)
self._draw(tree)
if flush:
self.cursor.flush() | Draw ``tree`` to the terminal
:type tree: dict
:param tree: ``tree`` should be a tree representing a hierarchy; each
key should be a string describing that hierarchy level and value
should also be ``dict`` except for leaves which should be
``BarDescriptors``. See ``BarDescriptor`` for a tree example.
:type bar_desc: BarDescriptor|NoneType
:param bar_desc: For describing non-leaf bars in that will be
drawn from ``tree``; certain attributes such as ``value``
and ``kwargs["max_value"]`` will of course be overridden
if provided.
:type flush: bool
:param flush: If this is set, output written will be flushed
:type save_cursor: bool
:param save_cursor: If this is set, cursor location will be saved before
drawing; this will OVERWRITE a previous save, so be sure to set
this accordingly (to your needs). |
def make_room(self, tree):
lines_req = self.lines_required(tree)
self.cursor.clear_lines(lines_req) | Clear lines in terminal below current cursor position as required
This is important to do before drawing to ensure sufficient
room at the bottom of your terminal.
:type tree: dict
:param tree: tree as described in ``BarDescriptor`` |
def lines_required(self, tree, count=0):
if all([
isinstance(tree, dict),
type(tree) != BarDescriptor
]):
return sum(self.lines_required(v, count=count)
for v in tree.values()) + 2
elif isinstance(tree, BarDescriptor):
if tree.get("kwargs", {}).get("title_pos") in ["left", "right"]:
return 1
else:
return 2 | Calculate number of lines required to draw ``tree`` |
def _calculate_values(self, tree, bar_d):
if all([
isinstance(tree, dict),
type(tree) != BarDescriptor
]):
# Calculate value and max_value
max_val = 0
value = 0
for k in tree:
# Get descriptor by recursing
bar_desc = self._calculate_values(tree[k], bar_d)
# Reassign to tuple of (new descriptor, tree below)
tree[k] = (bar_desc, tree[k])
value += bar_desc["value"].value
max_val += bar_desc.get("kwargs", {}).get("max_value", 100)
# Merge in values from ``bar_d`` before returning descriptor
kwargs = merge_dicts(
[bar_d.get("kwargs", {}),
dict(max_value=max_val)],
deepcopy=True
)
ret_d = merge_dicts(
[bar_d,
dict(value=Value(floor(value)), kwargs=kwargs)],
deepcopy=True
)
return BarDescriptor(ret_d)
elif isinstance(tree, BarDescriptor):
return tree
else:
raise TypeError("Unexpected type {}".format(type(tree))) | Calculate values for drawing bars of non-leafs in ``tree``
Recurses through ``tree``, replaces ``dict``s with
``(BarDescriptor, dict)`` so ``ProgressTree._draw`` can use
the ``BarDescriptor``s to draw the tree |
def _draw(self, tree, indent=0):
if all([
isinstance(tree, dict),
type(tree) != BarDescriptor
]):
for k, v in sorted(tree.items()):
bar_desc, subdict = v[0], v[1]
args = [self.cursor.term] + bar_desc.get("args", [])
kwargs = dict(title_pos="above", indent=indent, title=k)
kwargs.update(bar_desc.get("kwargs", {}))
b = Bar(*args, **kwargs)
b.draw(value=bar_desc["value"].value, flush=False)
self._draw(subdict, indent=indent + self.indent) | Recurse through ``tree`` and draw all nodes |
def merge_dicts(dicts, deepcopy=False):
assert isinstance(dicts, list) and all(isinstance(d, dict) for d in dicts)
return dict(chain(*[copy.deepcopy(d).items() if deepcopy else d.items()
for d in dicts])) | Merges dicts
In case of key conflicts, the value kept will be from the latter
dictionary in the list of dictionaries
:param dicts: [dict, ...]
:param deepcopy: deepcopy items within dicts |
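A doctest-style example of the conflict rule (later dicts win):
>>> merge_dicts([{'a': 1, 'b': 2}, {'b': 3}])
{'a': 1, 'b': 3}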
def load_features_and_arrays(prefix, mmap_mode='r'):
features = pybedtools.BedTool(prefix + '.features')
arrays = np.load(prefix + '.npz', mmap_mode=mmap_mode)
return features, arrays | Returns the features and NumPy arrays that were saved with
save_features_and_arrays.
Parameters
----------
prefix : str
Path to where data are saved
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}
Mode in which to memory-map the file. See np.load for details. |
def save_features_and_arrays(features, arrays, prefix, compressed=False,
                             link_features=False, overwrite=False):
    if link_features:
        if isinstance(features, pybedtools.BedTool):
            # `str` replaces the Python 2-only `basestring` check
            assert isinstance(features.fn, str)
            features_filename = features.fn
        else:
            assert isinstance(features, str)
            features_filename = features
        # Symlink the original features file instead of copying it
        cmds = ['ln', '-s']
        if overwrite:
            cmds.append('-f')
        cmds += [os.path.abspath(features_filename), prefix + '.features']
        os.system(' '.join(cmds))
else:
pybedtools.BedTool(features).saveas(prefix + '.features')
if compressed:
np.savez_compressed(
prefix,
**arrays)
else:
np.savez(prefix, **arrays) | Saves NumPy arrays of processed data, along with the features that
correspond to each row, to files for later use.
Two files will be saved, both starting with `prefix`:
prefix.features : a file of features. If GFF features were provided,
this will be in GFF format, if BED features were provided it will be in
BED format, and so on.
prefix.npz : A NumPy .npz file.
Parameters
----------
arrays : dict of NumPy arrays
Rows in each array should correspond to `features`. This dictionary is
passed to np.savez
features : iterable of Feature-like objects
These are usually the same features that were used to create the
arrays in the first place.
link_features : bool
If True, then assume that `features` is either a pybedtools.BedTool
pointing to a file, or a filename. In this case, instead of making
a copy, a symlink will be created to the original features. This helps
save disk space.
prefix : str
Path to where data will be saved.
compressed : bool
If True, saves arrays using np.savez_compressed rather than np.savez.
This will save disk space, but will be slower when accessing the data
later. |
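A round-trip sketch covering both save_features_and_arrays and
load_features_and_arrays; the example file comes from pybedtools' bundled
data, and the array shape is illustrative:

import numpy as np
import pybedtools

features = pybedtools.example_bedtool('a.bed')
arrays = {'signal': np.random.rand(len(features), 50)}
save_features_and_arrays(features, arrays, prefix='mydata')
features2, arrays2 = load_features_and_arrays('mydata')
assert np.allclose(arrays2['signal'], arrays['signal'])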
def list_all(fritz, args):
devices = fritz.get_devices()
for device in devices:
print('#' * 30)
print('name=%s' % device.name)
print(' ain=%s' % device.ain)
print(' id=%s' % device.identifier)
print(' productname=%s' % device.productname)
print(' manufacturer=%s' % device.manufacturer)
print(" present=%s" % device.present)
print(" lock=%s" % device.lock)
print(" devicelock=%s" % device.device_lock)
if device.present is False:
continue
if device.has_switch:
print(" Switch:")
print(" switch_state=%s" % device.switch_state)
        # assumes pyfritzhome's separate has_powermeter capability flag
        if device.has_powermeter:
print(" Powermeter:")
print(" power=%s" % device.power)
print(" energy=%s" % device.energy)
print(" voltage=%s" % device.voltage)
if device.has_temperature_sensor:
print(" Temperature:")
print(" temperature=%s" % device.temperature)
print(" offset=%s" % device.offset)
if device.has_thermostat:
print(" Thermostat:")
print(" battery_low=%s" % device.battery_low)
print(" battery_level=%s" % device.battery_level)
print(" actual=%s" % device.actual_temperature)
print(" target=%s" % device.target_temperature)
print(" comfort=%s" % device.comfort_temperature)
print(" eco=%s" % device.eco_temperature)
print(" window=%s" % device.window_open)
print(" summer=%s" % device.summer_active)
print(" holiday=%s" % device.holiday_active)
if device.has_alarm:
print(" Alert:")
print(" alert=%s" % device.alert_state) | Command that prints all device information. |
def device_statistics(fritz, args):
stats = fritz.get_device_statistics(args.ain)
print(stats) | Command that prints the device statistics. |
def chunker(f, n):
    f = iter(f)
    x = []
    while True:
        if len(x) < n:
            try:
                # next(f) works on any iterator in both Python 2 and 3
                # (the replaced f.next() was Python 2 only)
                x.append(next(f))
            except StopIteration:
                if len(x) > 0:
                    yield tuple(x)
                break
        else:
            yield tuple(x)
            x = [] | Utility function to split iterable `f` into chunks of size `n`
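A quick doctest-style sketch (note the final chunk may be shorter than `n`):

>>> list(chunker(range(7), 3))
[(0, 1, 2), (3, 4, 5), (6,)]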
def example_filename(fn):
fn = os.path.join(data_dir(), fn)
if not os.path.exists(fn):
raise ValueError("%s does not exist" % fn)
return fn | Return a bed file from the pybedtools examples directory. Use
:func:`list_example_files` to see a list of files that are included. |
def split_feature(f, n):
    if not isinstance(n, int):
        raise ValueError('n must be an integer')
    orig_feature = copy(f)
    # Integer division: range() requires an integer step in Python 3.
    # max(..., 1) guards against a zero step when n exceeds the length.
    step = max((f.stop - f.start) // n, 1)
    for i in range(f.start, f.stop, step):
        f = copy(orig_feature)
        start = i
        stop = min(i + step, orig_feature.stop)
        f.start = start
        f.stop = stop
        yield f
        if stop == orig_feature.stop:
            break | Split an interval into `n` roughly equal portions
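An illustrative sketch, assuming pybedtools is available:

import pybedtools

iv = pybedtools.create_interval_from_list(['chr1', '0', '100'])
for part in split_feature(iv, 4):
    print(part.start, part.stop)
# prints: 0 25, 25 50, 50 75, 75 100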
def tointerval(s):
    # `str` replaces the Python 2-only `basestring` check
    if isinstance(s, str):
        m = coord_re.search(s)
        if m is None:
            raise ValueError(
                "%r does not match the expected coordinate format" % s)
if m.group('strand'):
return pybedtools.create_interval_from_list([
m.group('chrom'),
m.group('start'),
m.group('stop'),
'.',
'0',
m.group('strand')])
else:
return pybedtools.create_interval_from_list([
m.group('chrom'),
m.group('start'),
m.group('stop'),
])
return s | If string, then convert to an interval; otherwise just return the input |
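A hedged sketch; the accepted syntax is defined by the module-level
``coord_re`` regex, assumed here to accept ``chrom:start-stop`` strings:

iv = tointerval("chr1:100-200")
print(iv.chrom, iv.start, iv.stop)  # chr1 100 200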
def max_width(self):
value, unit = float(self._width_str[:-1]), self._width_str[-1]
ensure(unit in ["c", "%"], ValueError,
"Width unit must be either 'c' or '%'")
if unit == "c":
ensure(value <= self.columns, ValueError,
"Terminal only has {} columns, cannot draw "
"bar of size {}.".format(self.columns, value))
retval = value
else: # unit == "%"
ensure(0 < value <= 100, ValueError,
"value=={} does not satisfy 0 < value <= 100".format(value))
dec = value / 100
retval = dec * self.columns
return floor(retval) | Get maximum width of progress bar
:rtype: int
:returns: Maximum column width of progress bar |
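Two worked cases, assuming an 80-column terminal:

# _width_str == "40c"  ->  absolute: 40 columns
# _width_str == "50%"  ->  relative: floor(0.50 * 80) == 40 columns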
def full_line_width(self):
bar_str_len = sum([
self._indent,
((len(self.title) + 1) if self._title_pos in ["left", "right"]
else 0), # Title if present
len(self.start_char),
self.max_width, # Progress bar
len(self.end_char),
1, # Space between end_char and amount_complete_str
len(str(self.max_value)) * 2 + 1 # 100/100
])
return bar_str_len | Find actual length of bar_str
e.g., Progress [ | ] 10/10 |
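A worked example, assuming indent=0, title="Progress" with
title_pos="left", start_char="[", end_char="]", max_width=10, and
max_value=10:

# indent + (title + space) + start + bar + end + space + "10/10"
#   0    +     (8 + 1)     +   1   +  10 +  1  +   1   + (2*2 + 1) == 27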
def _supports_colors(term, raise_err, colors):
for color in colors:
try:
if isinstance(color, str):
req_colors = 16 if "bright" in color else 8
ensure(term.number_of_colors >= req_colors,
ColorUnsupportedError,
"{} is unsupported by your terminal.".format(color))
elif isinstance(color, int):
ensure(term.number_of_colors >= color,
ColorUnsupportedError,
"{} is unsupported by your terminal.".format(color))
except ColorUnsupportedError as e:
if raise_err:
raise e
else:
return False
else:
return True | Check if ``term`` supports ``colors``
:raises ColorUnsupportedError: This is raised if ``raise_err``
    is ``True`` and a color in ``colors`` is unsupported by ``term``
:type raise_err: bool
:param raise_err: Set to ``False`` to return a ``bool`` indicating
color support rather than raising ColorUnsupportedError
:type colors: [str, ...] |
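A minimal sketch, assuming a ``blessings.Terminal`` instance:

from blessings import Terminal

term = Terminal()
# With raise_err=False a bool comes back instead of an exception
ok = _supports_colors(term, raise_err=False, colors=["red", "bright_green"])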
def _get_format_callable(term, color, back_color):
if isinstance(color, str):
ensure(
any(isinstance(back_color, t) for t in [str, type(None)]),
TypeError,
"back_color must be a str or NoneType"
)
if back_color:
return getattr(term, "_".join(
[color, "on", back_color]
))
elif back_color is None:
return getattr(term, color)
elif isinstance(color, int):
return term.on_color(color)
else:
raise TypeError("Invalid type {} for color".format(
type(color)
)) | Get string-coloring callable
Get callable for string output using ``color`` on ``back_color``
on ``term``
:param term: blessings.Terminal instance
:param color: Color that callable will color the string it's passed
:param back_color: Back color for the string
:returns: callable(s: str) -> str |
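A minimal sketch of retrieving and applying a formatter, reusing the
``blessings.Terminal`` instance from the sketch above:

fmt = _get_format_callable(term, "red", "white")
print(fmt("50%"))  # "50%" rendered red on a white background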