<SYSTEM_TASK:>
Return a modified renderer that returns the representation of expr, or '...' if
<END_TASK>
<USER_TASK:>
Description:
def _shorten_render(renderer, max_len):
"""Return a modified that returns the representation of expr, or '...' if
that representation is longer than `max_len`""" |
def short_renderer(expr):
res = renderer(expr)
if len(res) > max_len:
return '...'
else:
return res
return short_renderer |
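A minimal usage sketch, using the function as defined above::

    # Wrap the built-in repr so that any representation longer than
    # 10 characters collapses to '...'
    short_repr = _shorten_render(repr, max_len=10)
    assert short_repr([1, 2]) == '[1, 2]'
    assert short_repr(list(range(100))) == '...'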
<SYSTEM_TASK:>
Initialize the algebra system
<END_TASK>
<USER_TASK:>
Description:
def init_algebra(*, default_hs_cls='LocalSpace'):
"""Initialize the algebra system
Args:
default_hs_cls (str): The name of the :class:`.LocalSpace` subclass
that should be used when implicitly creating Hilbert spaces, e.g.
in :class:`.OperatorSymbol`
""" |
from qnet.algebra.core.hilbert_space_algebra import LocalSpace
from qnet.algebra.core.abstract_quantum_algebra import QuantumExpression
default_hs_cls = getattr(importlib.import_module('qnet'), default_hs_cls)
if issubclass(default_hs_cls, LocalSpace):
QuantumExpression._default_hs_cls = default_hs_cls
else:
raise TypeError("default_hs_cls must be a subclass of LocalSpace") |
<SYSTEM_TASK:>
Register configuration for an editor instance.
<END_TASK>
<USER_TASK:>
Description:
def register(self, name):
"""
Register configuration for an editor instance.
Arguments:
name (string): Config name from available ones in
``settings.CODEMIRROR_SETTINGS``.
Raises:
UnknowConfigError: If given config name does not exist in
``settings.CODEMIRROR_SETTINGS``.
Returns:
dict: Registered config dict.
""" |
if name not in settings.CODEMIRROR_SETTINGS:
msg = ("Given config name '{}' does not exists in "
"'settings.CODEMIRROR_SETTINGS'.")
raise UnknowConfigError(msg.format(name))
parameters = copy.deepcopy(self.default_internal_config)
parameters.update(copy.deepcopy(
settings.CODEMIRROR_SETTINGS[name]
))
# Add asset bundles name
if 'css_bundle_name' not in parameters:
css_template_name = settings.CODEMIRROR_BUNDLE_CSS_NAME
parameters['css_bundle_name'] = css_template_name.format(
settings_name=name
)
if 'js_bundle_name' not in parameters:
js_template_name = settings.CODEMIRROR_BUNDLE_JS_NAME
parameters['js_bundle_name'] = js_template_name.format(
settings_name=name
)
self.registry[name] = parameters
return parameters |
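A hypothetical usage sketch; ``'python'`` stands in for any key defined in
``settings.CODEMIRROR_SETTINGS``, and the manifest class name is an assumption::

    manifest = CodeMirrorManifest()
    config = manifest.register('python')
    # config now carries the merged parameters plus the generated bundle
    # names, e.g. config['css_bundle_name'] and config['js_bundle_name']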
<SYSTEM_TASK:>
Register many configuration names.
<END_TASK>
<USER_TASK:>
Description:
def register_many(self, *args):
"""
Register many configuration names.
Arguments:
*args: Config names as strings.
Returns:
list: List of registered configs.
""" |
params = []
for name in args:
params.append(self.register(name))
return params |
<SYSTEM_TASK:>
From given mode name, return mode file path from
<END_TASK>
<USER_TASK:>
Description:
def resolve_mode(self, name):
"""
From given mode name, return mode file path from
``settings.CODEMIRROR_MODES`` map.
Arguments:
name (string): Mode name.
Raises:
UnknowModeError: When given name does not exist in
``settings.CODEMIRROR_MODES``.
Returns:
string: Mode file path.
""" |
if name not in settings.CODEMIRROR_MODES:
msg = ("Given config name '{}' does not exists in "
"'settings.CODEMIRROR_MODES'.")
raise UnknowModeError(msg.format(name))
return settings.CODEMIRROR_MODES.get(name) |
<SYSTEM_TASK:>
From given theme name, return theme file path from
<END_TASK>
<USER_TASK:>
Description:
def resolve_theme(self, name):
"""
From given theme name, return theme file path from
``settings.CODEMIRROR_THEMES`` map.
Arguments:
name (string): Theme name.
Raises:
UnknowThemeError: When given name does not exist in
``settings.CODEMIRROR_THEMES``.
Returns:
string: Theme file path.
""" |
if name not in settings.CODEMIRROR_THEMES:
msg = ("Given theme name '{}' does not exists in "
"'settings.CODEMIRROR_THEMES'.")
raise UnknowThemeError(msg.format(name))
return settings.CODEMIRROR_THEMES.get(name) |
<SYSTEM_TASK:>
Returns registered configurations.
<END_TASK>
<USER_TASK:>
Description:
def get_configs(self, name=None):
"""
Returns registered configurations.
* If ``name`` argument is not given, default behavior is to return
every config from all registered configs;
* If ``name`` argument is given, just return its config and nothing
else;
Keyword Arguments:
name (string): Specific configuration name to return.
Raises:
NotRegisteredError: If given config name does not exist in
registry.
Returns:
dict: Configurations.
""" |
if name:
if name not in self.registry:
msg = "Given config name '{}' is not registered."
raise NotRegisteredError(msg.format(name))
return {name: self.registry[name]}
return self.registry |
<SYSTEM_TASK:>
Return a registered configuration for given config name.
<END_TASK>
<USER_TASK:>
Description:
def get_config(self, name):
"""
Return a registered configuration for given config name.
Arguments:
name (string): A registered config name.
Raises:
NotRegisteredError: If given config name does not exist in
registry.
Returns:
dict: Configuration.
""" |
if name not in self.registry:
msg = "Given config name '{}' is not registered."
raise NotRegisteredError(msg.format(name))
return copy.deepcopy(self.registry[name]) |
<SYSTEM_TASK:>
Return CodeMirror parameters for given configuration name.
<END_TASK>
<USER_TASK:>
Description:
def get_codemirror_parameters(self, name):
"""
Return CodeMirror parameters for given configuration name.
This is a reduced configuration from internal parameters.
Arguments:
name (string): Config name from available ones in
``settings.CODEMIRROR_SETTINGS``.
Returns:
dict: Parameters.
""" |
config = self.get_config(name)
return {k: config[k] for k in config if k not in self._internal_only} |
<SYSTEM_TASK:>
Commutator of `A` and `B`
<END_TASK>
<USER_TASK:>
Description:
def commutator(A, B=None):
"""Commutator of `A` and `B`
If ``B != None``, return the commutator :math:`[A,B]`, otherwise return
the super-operator :math:`[A,\cdot]`. The super-operator :math:`[A,\cdot]`
maps any other operator ``B`` to the commutator :math:`[A, B] = A B - B A`.
Args:
A: The first operator to form the commutator of.
B: The second operator to form the commutator of, or None.
Returns:
SuperOperator: The linear superoperator :math:`[A,\cdot]`
""" |
if B is not None:
return A * B - B * A
return SPre(A) - SPost(A) |
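A minimal sketch with two operator symbols on the same Hilbert space,
assuming qnet's ``OperatorSymbol``::

    A = OperatorSymbol('A', hs=1)
    B = OperatorSymbol('B', hs=1)
    comm_AB = commutator(A, B)   # the operator A * B - B * A
    super_A = commutator(A)      # the superoperator SPre(A) - SPost(A)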
<SYSTEM_TASK:>
r"""Return the Liouvillian super-operator associated with `H` and `Ls`
<END_TASK>
<USER_TASK:>
Description:
def liouvillian(H, Ls=None):
r"""Return the Liouvillian super-operator associated with `H` and `Ls`
The Liouvillian :math:`\mathcal{L}` generates the Markovian-dynamics of a
system via the Master equation:
.. math::
\dot{\rho} = \mathcal{L}\rho
= -i[H,\rho] + \sum_{j=1}^n \mathcal{D}[L_j] \rho
Args:
H (Operator): The associated Hamilton operator
Ls (sequence or Matrix): A sequence of Lindblad operators.
Returns:
SuperOperator: The Liouvillian super-operator.
""" |
if Ls is None:
Ls = []
elif isinstance(Ls, Matrix):
Ls = Ls.matrix.ravel().tolist()
summands = [-I * commutator(H), ]
summands.extend([lindblad(L) for L in Ls])
return SuperOperatorPlus.create(*summands) |
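A sketch for a single decaying mode, assuming qnet's ``Create``/``Destroy``
and sympy; the symbol names are illustrative::

    w, kappa = sympy.symbols('omega kappa', positive=True)
    H = w * Create(hs=1) * Destroy(hs=1)      # Hamiltonian w * a^dag * a
    L = sympy.sqrt(kappa) * Destroy(hs=1)     # Lindblad (collapse) operator
    lv = liouvillian(H, Ls=[L])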
<SYSTEM_TASK:>
r"""Generate the operator matrix with quadrants
<END_TASK>
<USER_TASK:>
Description:
def block_matrix(A, B, C, D):
r"""Generate the operator matrix with quadrants
.. math::
\begin{pmatrix} A & B \\ C & D \end{pmatrix}
Args:
A (Matrix): Matrix of shape ``(n, m)``
B (Matrix): Matrix of shape ``(n, k)``
C (Matrix): Matrix of shape ``(l, m)``
D (Matrix): Matrix of shape ``(l, k)``
Returns:
Matrix: The combined block matrix ``[[A, B], [C, D]]``.
""" |
return vstackm((hstackm((A, B)), hstackm((C, D)))) |
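A sketch assembling a 2x2 matrix from 1x1 blocks, assuming sympy symbols
as entries::

    a, b, c, d = sympy.symbols('a b c d')
    M = block_matrix(Matrix([[a]]), Matrix([[b]]),
                     Matrix([[c]]), Matrix([[d]]))
    # M is the Matrix [[a, b], [c, d]]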
<SYSTEM_TASK:>
r"""Return orthogonal permutation matrix for permutation tuple
<END_TASK>
<USER_TASK:>
Description:
def permutation_matrix(permutation):
r"""Return orthogonal permutation matrix for permutation tuple
Return an orthogonal permutation matrix :math:`M_\sigma`
for a permutation :math:`\sigma` defined by the image tuple
:math:`(\sigma(1), \sigma(2),\dots \sigma(n))`,
such that
.. math::
M_\sigma \vec{e}_i = \vec{e}_{\sigma(i)}
where :math:`\vec{e}_k` is the k-th standard basis vector.
This definition ensures a composition law:
.. math::
M_{\sigma \cdot \tau} = M_\sigma M_\tau.
The column form of :math:`M_\sigma` is thus given by
.. math::
M = (
\vec{e}_{\sigma(1)},
\vec{e}_{\sigma(2)},
\dots \vec{e}_{\sigma(n)}).
Args:
permutation (tuple): A permutation image tuple (zero-based indices!)
""" |
assert check_permutation(permutation)
n = len(permutation)
op_matrix = np_zeros((n, n), dtype=int)
for i, j in enumerate(permutation):
op_matrix[j, i] = 1
return Matrix(op_matrix) |
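For example, the image tuple ``(1, 2, 0)`` sends e_0 -> e_1, e_1 -> e_2,
and e_2 -> e_0::

    M = permutation_matrix((1, 2, 0))
    # M.matrix is [[0, 0, 1],
    #              [1, 0, 0],
    #              [0, 1, 0]]   (columns e_1, e_2, e_0)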
<SYSTEM_TASK:>
Are all elements of the matrix zero?
<END_TASK>
<USER_TASK:>
Description:
def is_zero(self):
"""Are all elements of the matrix zero?""" |
for o in self.matrix.ravel():
try:
if not o.is_zero:
return False
except AttributeError:
if not o == 0:
return False
return True |
<SYSTEM_TASK:>
The element-wise conjugate matrix
<END_TASK>
<USER_TASK:>
Description:
def conjugate(self):
"""The element-wise conjugate matrix
This is defined only if all the entries in the matrix have a defined
conjugate (i.e., they have a `conjugate` method). This is *not* the
case for a matrix of operators. In such a case, only an
:meth:`elementwise` :func:`adjoint` would be applicable, but this is
mathematically different from a complex conjugate.
Raises:
NoConjugateMatrix: if any entries have no `conjugate` method
""" |
try:
return Matrix(np_conjugate(self.matrix))
except AttributeError:
raise NoConjugateMatrix(
"Matrix %s contains entries that have no defined "
"conjugate" % str(self)) |
<SYSTEM_TASK:>
Element-wise real part
<END_TASK>
<USER_TASK:>
Description:
def real(self):
"""Element-wise real part
Raises:
NoConjugateMatrix: if entries have no `conjugate` method and no
other way to determine the real part
Note:
A mathematically equivalent way to obtain a real matrix from a
complex matrix ``M`` is::
(M.conjugate() + M) / 2
However, the result may not be identical to ``M.real``, as the
latter tries to convert elements of the matrix to real values
directly, if possible, and only uses the conjugate as a fall-back
""" |
def re(val):
if hasattr(val, 'real'):
return val.real
elif hasattr(val, 'as_real_imag'):
return val.as_real_imag()[0]
elif hasattr(val, 'conjugate'):
return (val.conjugate() + val) / 2
else:
raise NoConjugateMatrix(
"Matrix entry %s contains has no defined "
"conjugate" % str(val))
# Note: Do NOT use self.matrix.real! This will give wrong results, as
# numpy thinks of objects (Operators) as real, even if they have no
# defined real part
return self.element_wise(re) |
<SYSTEM_TASK:>
Element-wise imaginary part
<END_TASK>
<USER_TASK:>
Description:
def imag(self):
"""Element-wise imaginary part
Raises:
NoConjugateMatrix: if entries have no `conjugate` method and no
other way to determine the imaginary part
Note:
A mathematically equivalent way to obtain an imaginary matrix from
a complex matrix ``M`` is::
(M.conjugate() - M) / (I * 2)
with the same caveats as :attr:`real`.
""" |
def im(val):
if hasattr(val, 'imag'):
return val.imag
elif hasattr(val, 'as_real_imag'):
return val.as_real_imag()[1]
elif hasattr(val, 'conjugate'):
return (val.conjugate() - val) / (2 * I)
else:
raise NoConjugateMatrix(
"Matrix entry %s contains has no defined "
"conjugate" % str(val))
# Note: Do NOT use self.matrix.imag! This will give wrong results, as
# numpy thinks of objects (Operators) as real, even if they have no
# defined imaginary part
return self.element_wise(im) |
<SYSTEM_TASK:>
Apply a function to each matrix element and return the result in a
<END_TASK>
<USER_TASK:>
Description:
def element_wise(self, func, *args, **kwargs):
"""Apply a function to each matrix element and return the result in a
new operator matrix of the same shape.
Args:
func (FunctionType): A function to be applied to each element. It
must take the element as its first argument.
args: Additional positional arguments to be passed to `func`
kwargs: Additional keyword arguments to be passed to `func`
Returns:
Matrix: Matrix with results of `func`, applied element-wise.
""" |
s = self.shape
emat = [func(o, *args, **kwargs) for o in self.matrix.ravel()]
return Matrix(np_array(emat).reshape(s)) |
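A sketch, given an existing ``Matrix`` instance ``M``; extra positional
arguments are forwarded to the function::

    M_scaled = M.element_wise(lambda v, factor: v * factor, 2)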
<SYSTEM_TASK:>
Expand the matrix expression as a truncated power series in a scalar
<END_TASK>
<USER_TASK:>
Description:
def series_expand(self, param: Symbol, about, order: int):
"""Expand the matrix expression as a truncated power series in a scalar
parameter.
Args:
param: Expansion parameter.
about (.Scalar): Point about which to expand.
order: Maximum order of expansion >= 0
Returns:
tuple of length (order+1), where the entries are the expansion
coefficients.
""" |
s = self.shape
emats = zip(*[o.series_expand(param, about, order)
for o in self.matrix.ravel()])
return tuple((Matrix(np_array(em).reshape(s)) for em in emats)) |
<SYSTEM_TASK:>
Expand each matrix element distributively.
<END_TASK>
<USER_TASK:>
Description:
def expand(self):
"""Expand each matrix element distributively.
Returns:
Matrix: Expanded matrix.
""" |
return self.element_wise(
lambda o: o.expand() if isinstance(o, QuantumExpression) else o) |
<SYSTEM_TASK:>
Combined Hilbert space of all matrix elements.
<END_TASK>
<USER_TASK:>
Description:
def space(self):
"""Combined Hilbert space of all matrix elements.""" |
arg_spaces = [o.space for o in self.matrix.ravel()
if hasattr(o, 'space')]
if len(arg_spaces) == 0:
return TrivialSpace
else:
return ProductSpace.create(*arg_spaces) |
<SYSTEM_TASK:>
Simplify all scalar expressions appearing in the Matrix.
<END_TASK>
<USER_TASK:>
Description:
def simplify_scalar(self, func=sympy.simplify):
"""Simplify all scalar expressions appearing in the Matrix.""" |
def element_simplify(v):
if isinstance(v, sympy.Basic):
return func(v)
elif isinstance(v, QuantumExpression):
return v.simplify_scalar(func=func)
else:
return v
return self.element_wise(element_simplify) |
<SYSTEM_TASK:>
Try to find a demo source for the given mode, if any; if found, use it to
<END_TASK>
<USER_TASK:>
Description:
def get_initial(self):
"""
Try to find a demo source for the given mode, if any; if found, use it
to fill the demo textarea.
""" |
initial = {}
if self.kwargs.get('mode', None):
filename = "{}.txt".format(self.kwargs['mode'])
filepath = os.path.join(settings.BASE_DIR, 'demo_datas', filename)
if os.path.exists(filepath):
with io.open(filepath, 'r', encoding='utf-8') as fp:
initial['foo'] = fp.read()
return initial |
<SYSTEM_TASK:>
Return the order by syntax for a model.
<END_TASK>
<USER_TASK:>
Description:
def _get_order_by(order, orderby, order_by_fields):
"""
Return the order by syntax for a model.
Checks whether use ascending or descending order, and maps the fieldnames.
""" |
try:
# Find the actual database fieldnames for the keyword.
db_fieldnames = order_by_fields[orderby]
except KeyError:
raise ValueError("Invalid value for 'orderby': '{0}', supported values are: {1}".format(orderby, ', '.join(sorted(order_by_fields.keys()))))
# Default to descending for some fields, otherwise be ascending
is_desc = (not order and orderby in ORDER_BY_DESC) \
or (order or 'asc').lower() in ('desc', 'descending')
if is_desc:
return map(lambda name: '-' + name, db_fieldnames)
else:
return db_fieldnames |
<SYSTEM_TASK:>
Query the entries using a set of predefined filters.
<END_TASK>
<USER_TASK:>
Description:
def query_entries(
queryset=None,
year=None, month=None, day=None,
category=None, category_slug=None,
tag=None, tag_slug=None,
author=None, author_slug=None,
future=False,
order=None,
orderby=None,
limit=None,
):
"""
Query the entries using a set of predefined filters.
This interface is mainly used by the ``get_entries`` template tag.
""" |
if queryset is None:
queryset = get_entry_model().objects.all()
if appsettings.FLUENT_BLOGS_FILTER_SITE_ID:
queryset = queryset.parent_site(settings.SITE_ID)
if not future:
queryset = queryset.published()
if year:
queryset = queryset.filter(publication_date__year=year)
if month:
queryset = queryset.filter(publication_date__month=month)
if day:
queryset = queryset.filter(publication_date__day=day)
# The main category/tag/author filters
if category:
if isinstance(category, basestring):
queryset = queryset.categories(category)
elif isinstance(category, (int, long)):
queryset = queryset.filter(categories=category)
else:
raise ValueError("Expected slug or ID for the 'category' parameter")
if category_slug:
queryset = queryset.categories(category_slug)
if tag:
if isinstance(tag, basestring):
queryset = queryset.tagged(tag)
elif isinstance(tag, (int, long)):
queryset = queryset.filter(tags=tag)
else:
raise ValueError("Expected slug or ID for 'tag' parameter.")
if tag_slug:
queryset = queryset.tagged(tag_slug)
if author:
if isinstance(author, basestring):
queryset = queryset.authors(author)
elif isinstance(author, (int, long)):
queryset = queryset.filter(author=author)
else:
raise ValueError("Expected slug or ID for 'author' parameter.")
if author_slug:
queryset = queryset.authors(author_slug)
# Ordering
if orderby:
queryset = queryset.order_by(*_get_order_by(order, orderby, ENTRY_ORDER_BY_FIELDS))
else:
queryset = queryset.order_by('-publication_date')
# Limit
if limit:
queryset = queryset[:limit]
return queryset |
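A hypothetical call, mirroring what the ``get_entries`` template tag does;
the filter values are illustrative::

    entries = query_entries(year=2024, category_slug='django', limit=5)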
<SYSTEM_TASK:>
Query the tags, with usage count included.
<END_TASK>
<USER_TASK:>
Description:
def query_tags(order=None, orderby=None, limit=None):
"""
Query the tags, with usage count included.
This interface is mainly used by the ``get_tags`` template tag.
""" |
from taggit.models import Tag, TaggedItem # feature is still optional
# Get queryset filters for published entries
EntryModel = get_entry_model()
ct = ContentType.objects.get_for_model(EntryModel) # take advantage of local caching.
entry_filter = {
'status': EntryModel.PUBLISHED
}
if appsettings.FLUENT_BLOGS_FILTER_SITE_ID:
entry_filter['parent_site'] = settings.SITE_ID
entry_qs = EntryModel.objects.filter(**entry_filter).values_list('pk')
# get tags
queryset = Tag.objects.filter(
taggit_taggeditem_items__content_type=ct,
taggit_taggeditem_items__object_id__in=entry_qs
).annotate(
count=Count('taggit_taggeditem_items')
)
# Ordering
if orderby:
queryset = queryset.order_by(*_get_order_by(order, orderby, TAG_ORDER_BY_FIELDS))
else:
queryset = queryset.order_by('-count')
# Limit
if limit:
queryset = queryset[:limit]
return queryset |
<SYSTEM_TASK:>
Find the category for a given slug
<END_TASK>
<USER_TASK:>
Description:
def get_category_for_slug(slug, language_code=None):
"""
Find the category for a given slug
""" |
Category = get_category_model()
if issubclass(Category, TranslatableModel):
return Category.objects.active_translations(language_code, slug=slug).get()
else:
return Category.objects.get(slug=slug) |
<SYSTEM_TASK:>
Return a start..end range to query for a specific month, day or year.
<END_TASK>
<USER_TASK:>
Description:
def get_date_range(year=None, month=None, day=None):
"""
Return a start..end range to query for a specific month, day or year.
""" |
if year is None:
return None
if month is None:
# year only
start = datetime(year, 1, 1, 0, 0, 0, tzinfo=utc)
end = datetime(year, 12, 31, 23, 59, 59, 999999, tzinfo=utc)
return (start, end)
if day is None:
# year + month only
start = datetime(year, month, 1, 0, 0, 0, tzinfo=utc)
end = start + timedelta(days=monthrange(year, month)[1], microseconds=-1)
return (start, end)
else:
# Exact day
start = datetime(year, month, day, 0, 0, 0, tzinfo=utc)
end = start + timedelta(days=1, microseconds=-1)
return (start, end) |
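For example, the inclusive range covering March 2024::

    start, end = get_date_range(2024, 3)
    # start == datetime(2024, 3, 1, 0, 0, 0, tzinfo=utc)
    # end   == datetime(2024, 3, 31, 23, 59, 59, 999999, tzinfo=utc)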
<SYSTEM_TASK:>
'Flat' constructor for the Pattern class
<END_TASK>
<USER_TASK:>
Description:
def pattern(head, *args, mode=1, wc_name=None, conditions=None, **kwargs) \
-> Pattern:
"""'Flat' constructor for the Pattern class
Positional and keyword arguments are mapped into `args` and `kwargs`,
respectively. Useful for defining rules that match an instantiated
Expression with specific arguments
""" |
if len(args) == 0:
args = None
if len(kwargs) == 0:
kwargs = None
return Pattern(head, args, kwargs, mode=mode, wc_name=wc_name,
conditions=conditions) |
<SYSTEM_TASK:>
Recursively match `expr` with the given `expr_or_pattern`
<END_TASK>
<USER_TASK:>
Description:
def match_pattern(expr_or_pattern: object, expr: object) -> MatchDict:
"""Recursively match `expr` with the given `expr_or_pattern`
Args:
expr_or_pattern: either a direct expression (equal to `expr` for a
successful match), or an instance of :class:`Pattern`.
expr: the expression to be matched
""" |
try: # first try expr_or_pattern as a Pattern
return expr_or_pattern.match(expr)
except AttributeError: # expr_or_pattern is an expr, not a Pattern
if expr_or_pattern == expr:
return MatchDict() # success
else:
res = MatchDict()
res.success = False
res.reason = "Expressions '%s' and '%s' are not the same" % (
repr(expr_or_pattern), repr(expr))
return res |
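A minimal sketch for the non-Pattern branch, assuming a ``MatchDict`` is
truthy exactly if the match succeeded::

    assert match_pattern(42, 42)              # equal expressions match
    assert not match_pattern(42, 43).success  # mismatch records a reason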
<SYSTEM_TASK:>
Update dict with entries from each of `others`
<END_TASK>
<USER_TASK:>
Description:
def update(self, *others):
"""Update dict with entries from `other`
If `other` has an attribute ``success=False`` and ``reason``, those
attributes are copied as well
""" |
for other in others:
for key, val in other.items():
self[key] = val
try:
if not other.success:
self.success = False
self.reason = other.reason
except AttributeError:
pass |
<SYSTEM_TASK:>
Iterator over patterns for positional arguments to be matched
<END_TASK>
<USER_TASK:>
Description:
def extended_arg_patterns(self):
"""Iterator over patterns for positional arguments to be matched
This yields the elements of :attr:`args`, extended by their `mode`
value
""" |
for arg in self._arg_iterator(self.args):
if isinstance(arg, Pattern):
if arg.mode > self.single:
while True:
yield arg
else:
yield arg
else:
yield arg |
<SYSTEM_TASK:>
Return an iterator over all matches in `expr`
<END_TASK>
<USER_TASK:>
Description:
def finditer(self, expr):
"""Return an iterator over all matches in `expr`
Iterate over all :class:`MatchDict` results of matches for any
matching (sub-)expressions in `expr`. The order of the matches conforms
to the equivalent matched expressions returned by :meth:`findall`.
""" |
try:
for arg in expr.args:
for m in self.finditer(arg):
yield m
for arg in expr.kwargs.values():
for m in self.finditer(arg):
yield m
except AttributeError:
pass
m = self.match(expr)
if m:
yield m |
<SYSTEM_TASK:>
Set of all wildcard names occurring in the pattern
<END_TASK>
<USER_TASK:>
Description:
def wc_names(self):
"""Set of all wildcard names occurring in the pattern""" |
if self.wc_name is None:
res = set()
else:
res = set([self.wc_name])
if self.args is not None:
for arg in self.args:
if isinstance(arg, Pattern):
res.update(arg.wc_names)
if self.kwargs is not None:
for val in self.kwargs.values():
if isinstance(val, Pattern):
res.update(val.wc_names)
return res |
<SYSTEM_TASK:>
Instantiate proto-expression from the given Expression
<END_TASK>
<USER_TASK:>
Description:
def from_expr(cls, expr):
"""Instantiate proto-expression from the given Expression""" |
return cls(expr.args, expr.kwargs, cls=expr.__class__) |
<SYSTEM_TASK:>
Return the actual entry model that is in use.
<END_TASK>
<USER_TASK:>
Description:
def get_entry_model():
"""
Return the actual entry model that is in use.
This function reads the :ref:`FLUENT_BLOGS_ENTRY_MODEL` setting to find the model.
The model is automatically registered with *django-fluent-comments*
and *django-any-urlfield* when it's installed.
""" |
global _EntryModel
if _EntryModel is None:
# This method is likely called the first time when the admin initializes, the sitemaps module is imported, or BaseBlogMixin is used.
# Either way, it needs to happen after all apps have initialized, to make sure the model can be imported.
if not appsettings.FLUENT_BLOGS_ENTRY_MODEL:
_EntryModel = Entry
else:
app_label, model_name = appsettings.FLUENT_BLOGS_ENTRY_MODEL.rsplit('.', 1)
_EntryModel = apps.get_model(app_label, model_name)
if _EntryModel is None:
raise ImportError("{app_label}.{model_name} could not be imported.".format(app_label=app_label, model_name=model_name))
# Auto-register with django-fluent-comments moderation
if 'fluent_comments' in settings.INSTALLED_APPS and issubclass(_EntryModel, CommentsEntryMixin):
from fluent_comments.moderation import moderate_model
moderate_model(
_EntryModel,
publication_date_field='publication_date',
enable_comments_field='enable_comments',
)
# Auto-register with django-any-urlfield
if 'any_urlfield' in settings.INSTALLED_APPS:
from any_urlfield.models import AnyUrlField
from any_urlfield.forms.widgets import SimpleRawIdWidget
AnyUrlField.register_model(_EntryModel, widget=SimpleRawIdWidget(_EntryModel))
return _EntryModel |
<SYSTEM_TASK:>
Reverse a URL to the blog, taking various configuration options into account.
<END_TASK>
<USER_TASK:>
Description:
def blog_reverse(viewname, args=None, kwargs=None, current_app='fluent_blogs', **page_kwargs):
"""
Reverse a URL to the blog, taking various configuration options into account.
This is a compatibility function to allow django-fluent-blogs to operate stand-alone.
Either the app can be hooked in the URLconf directly, or it can be added as a pagetype of *django-fluent-pages*.
""" |
return mixed_reverse(viewname, args=args, kwargs=kwargs, current_app=current_app, **page_kwargs) |
<SYSTEM_TASK:>
Recursively expand commutators in `expr` according to the Leibniz rule.
<END_TASK>
<USER_TASK:>
Description:
def expand_commutators_leibniz(expr, expand_expr=True):
"""Recursively expand commutators in `expr` according to the Leibniz rule.
.. math::
[A B, C] = A [B, C] + [A, C] B
.. math::
[A, B C] = [A, B] C + B [A, C]
If `expand_expr` is True, expand products of sums in `expr`, as well as in
the result.
""" |
recurse = partial(expand_commutators_leibniz, expand_expr=expand_expr)
A = wc('A', head=Operator)
C = wc('C', head=Operator)
AB = wc('AB', head=OperatorTimes)
BC = wc('BC', head=OperatorTimes)
def leibniz_right(A, BC):
"""[A, BC] -> [A, B] C + B [A, C]"""
B = BC.operands[0]
C = OperatorTimes.create(*BC.operands[1:])
return Commutator.create(A, B) * C + B * Commutator.create(A, C)
def leibniz_left(AB, C):
"""[AB, C] -> A [B, C] C + [A, C] B"""
A = AB.operands[0]
B = OperatorTimes.create(*AB.operands[1:])
return A * Commutator.create(B, C) + Commutator.create(A, C) * B
rules = OrderedDict([
('leibniz1', (
pattern(Commutator, A, BC),
lambda A, BC: recurse(leibniz_right(A, BC).expand()))),
('leibniz2', (
pattern(Commutator, AB, C),
lambda AB, C: recurse(leibniz_left(AB, C).expand())))])
if expand_expr:
res = _apply_rules(expr.expand(), rules).expand()
else:
res = _apply_rules(expr, rules)
return res |
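A sketch expanding ``[A*B, C]``, assuming qnet's ``OperatorSymbol`` and
``Commutator``::

    A, B, C = (OperatorSymbol(s, hs=1) for s in ('A', 'B', 'C'))
    expr = Commutator.create(A * B, C)
    result = expand_commutators_leibniz(expr)
    # result is the fully expanded form of A*[B, C] + [A, C]*B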
<SYSTEM_TASK:>
Initialize the printing system.
<END_TASK>
<USER_TASK:>
Description:
def init_printing(*, reset=False, init_sympy=True, **kwargs):
"""Initialize the printing system.
This determines the behavior of the :func:`ascii`, :func:`unicode`,
and :func:`latex` functions, as well as the ``__str__`` and ``__repr__`` of
any :class:`.Expression`.
The routine may be called in one of two forms. First,
::
init_printing(
str_format=<str_fmt>, repr_format=<repr_fmt>,
caching=<use_caching>, **settings)
provides a simplified, "manual" setup with the following parameters.
Args:
str_format (str): Format for ``__str__`` representation of an
:class:`.Expression`. One of 'ascii', 'unicode', 'latex', 'srepr',
'indsrepr' ("indented `srepr`"), or 'tree'. The string
representation will be affected by the settings for the
corresponding print routine, e.g. :func:`unicode` for
``str_format='unicode'``
repr_format (str): Like `str_format`, but for ``__repr__``. This is
what gets displayed in an interactive (I)Python session.
caching (bool): By default, the printing functions (:func:`ascii`,
:func:`unicode`, :func:`latex`) cache their result for any
expression and sub-expression. This is both for efficiency and to
give the ability to supply custom strings for subexpressions by
passing a `cache` parameter to the printing functions. Initializing
the printing system with ``caching=False`` disables this
possibility.
settings: Any setting understood by any of the printing routines.
Second,
::
init_printing(inifile=<path_to_file>)
allows for more detailed settings through a config file, see the
:ref:`notes on using an INI file <ini_file_printing>`.
If `str_format` or `repr_format` are not given, they will be set to
'unicode' if the current terminal is known to support UTF-8 (according to
``sys.stdout.encoding``), and 'ascii' otherwise.
Generally, :func:`init_printing` should be called only once at the
beginning of a script or notebook. If it is called multiple times, any
settings accumulate. To avoid this and to reset the printing system to the
defaults, you may pass ``reset=True``. In a Jupyter notebook, expressions
are rendered graphically via LaTeX, using the settings as they affect the
:func:`latex` printer.
The :func:`sympy.init_printing()` routine is called automatically, unless
`init_sympy` is given as ``False``.
See also:
:func:`configure_printing` allows to temporarily change the printing
system from what was configured in :func:`init_printing`.
""" |
# return either None (default) or a dict of frozen attributes if
# ``_freeze=True`` is given as a keyword argument (internal use in
# `configure_printing` only)
logger = logging.getLogger(__name__)
if reset:
SympyPrinter._global_settings = {}
if init_sympy:
if kwargs.get('repr_format', '') == 'unicode':
sympy_init_printing(use_unicode=True)
elif kwargs.get('repr_format', '') == 'ascii':
sympy_init_printing(use_unicode=False)
else:
sympy_init_printing() # let sympy decide by itself
if 'inifile' in kwargs:
invalid_kwargs = False
if '_freeze' in kwargs:
_freeze = kwargs['_freeze']
if len(kwargs) != 2:
invalid_kwargs = True
else:
_freeze = False
if len(kwargs) != 1:
invalid_kwargs = True
if invalid_kwargs:
raise TypeError(
"The `inifile` argument cannot be combined with any "
"other keyword arguments")
logger.debug(
"Initializating printing from INI file %s", kwargs['inifile'])
return _init_printing_from_file(kwargs['inifile'], _freeze=_freeze)
else:
logger.debug(
"Initializating printing with direct settings: %s", repr(kwargs))
return _init_printing(**kwargs) |
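A typical one-time setup at the top of a script, using the manual form::

    init_printing(str_format='ascii', repr_format='unicode', caching=True)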
<SYSTEM_TASK:>
Context manager for temporarily changing the printing system.
<END_TASK>
<USER_TASK:>
Description:
def configure_printing(**kwargs):
"""Context manager for temporarily changing the printing system.
This takes the same parameters as :func:`init_printing`
Example:
>>> A = OperatorSymbol('A', hs=1); B = OperatorSymbol('B', hs=1)
>>> with configure_printing(show_hs_label=False):
... print(ascii(A + B))
A + B
>>> print(ascii(A + B))
A^(1) + B^(1)
""" |
freeze = init_printing(_freeze=True, **kwargs)
try:
yield
finally:
for obj, attr_map in freeze.items():
for attr, val in attr_map.items():
setattr(obj, attr, val) |
<SYSTEM_TASK:>
Convert a QNET expression to a qutip object
<END_TASK>
<USER_TASK:>
Description:
def convert_to_qutip(expr, full_space=None, mapping=None):
"""Convert a QNET expression to a qutip object
Args:
expr: a QNET expression
full_space (HilbertSpace): The
Hilbert space in which `expr` is defined. If not given,
``expr.space`` is used. The Hilbert space must have a well-defined
basis.
mapping (dict): A mapping of any (sub-)expression to either a
`qutip.Qobj` directly, or to a callable that will convert the
expression into a `qutip.Qobj`. Useful for e.g. supplying objects
for symbols
Raises:
ValueError: if `expr` is not in `full_space`, or if `expr` cannot be
converted.
""" |
if full_space is None:
full_space = expr.space
if not expr.space.is_tensor_factor_of(full_space):
raise ValueError(
"expr '%s' must be in full_space %s" % (expr, full_space))
if full_space == TrivialSpace:
raise AlgebraError(
"Cannot convert object in TrivialSpace to qutip. "
"You may pass a non-trivial `full_space`")
if mapping is not None:
if expr in mapping:
ret = mapping[expr]
if isinstance(ret, qutip.Qobj):
return ret
else:
assert callable(ret)
return ret(expr)
if expr is IdentityOperator:
local_spaces = full_space.local_factors
if len(local_spaces) == 0:
raise ValueError("full_space %s does not have local factors"
% full_space)
else:
return qutip.tensor(*[qutip.qeye(s.dimension)
for s in local_spaces])
elif expr is ZeroOperator:
return qutip.tensor(
*[qutip.Qobj(csr_matrix((s.dimension, s.dimension)))
for s in full_space.local_factors]
)
elif isinstance(expr, LocalOperator):
return _convert_local_operator_to_qutip(expr, full_space, mapping)
elif (isinstance(expr, Operator) and isinstance(expr, Operation)):
return _convert_operator_operation_to_qutip(expr, full_space, mapping)
elif isinstance(expr, OperatorTrace):
raise NotImplementedError('Cannot convert OperatorTrace to '
'qutip')
# actually, this is perfectly doable in principle, but requires a bit
# of work
elif isinstance(expr, State):
return _convert_ket_to_qutip(expr, full_space, mapping)
elif isinstance(expr, SuperOperator):
return _convert_superoperator_to_qutip(expr, full_space, mapping)
elif isinstance(expr, Operation):
# This is assumed to be an Operation on states, as we have handled all
# other Operations above. Eventually, a StateOperation should be
# defined as a common superclass for the Operations in the state
# algebra
return _convert_state_operation_to_qutip(expr, full_space, mapping)
elif isinstance(expr, SLH):
# SLH object cannot be converted to a single qutip object, only to a
# nested list of qutip object. That's why a separate routine
# SLH_to_qutip exists
raise ValueError("SLH objects can only be converted using "
"SLH_to_qutip routine")
else:
raise ValueError("Cannot convert '%s' of type %s"
% (str(expr), type(expr))) |
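A sketch converting a lowering operator on a three-dimensional Fock space,
assuming qnet's ``LocalSpace`` and ``Destroy``::

    hs = LocalSpace(1, dimension=3)
    a = Destroy(hs=hs)
    a_qutip = convert_to_qutip(a)   # equals qutip.destroy(3)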
<SYSTEM_TASK:>
Convert a LocalOperator instance to qutip
<END_TASK>
<USER_TASK:>
Description:
def _convert_local_operator_to_qutip(expr, full_space, mapping):
"""Convert a LocalOperator instance to qutip""" |
n = full_space.dimension
if full_space != expr.space:
all_spaces = full_space.local_factors
own_space_index = all_spaces.index(expr.space)
return qutip.tensor(
*([qutip.qeye(s.dimension)
for s in all_spaces[:own_space_index]] +
[convert_to_qutip(expr, expr.space, mapping=mapping), ] +
[qutip.qeye(s.dimension)
for s in all_spaces[own_space_index + 1:]])
)
if isinstance(expr, Create):
return qutip.create(n)
elif isinstance(expr, Jz):
return qutip.jmat((expr.space.dimension-1)/2., "z")
elif isinstance(expr, Jplus):
return qutip.jmat((expr.space.dimension-1)/2., "+")
elif isinstance(expr, Jminus):
return qutip.jmat((expr.space.dimension-1)/2., "-")
elif isinstance(expr, Destroy):
return qutip.destroy(n)
elif isinstance(expr, Phase):
arg = complex(expr.operands[1]) * arange(n)
d = np_cos(arg) + 1j * np_sin(arg)
return qutip.Qobj(np_diag(d))
elif isinstance(expr, Displace):
alpha = expr.operands[1]
return qutip.displace(n, alpha)
elif isinstance(expr, Squeeze):
eta = expr.operands[1]
return qutip.squeeze(n, eta)
elif isinstance(expr, LocalSigma):
j = expr.j
k = expr.k
if isinstance(j, str):
j = expr.space.basis_labels.index(j)
if isinstance(k, str):
k = expr.space.basis_labels.index(k)
ket = qutip.basis(n, j)
bra = qutip.basis(n, k).dag()
return ket * bra
else:
raise ValueError("Cannot convert '%s' of type %s"
% (str(expr), type(expr))) |
<SYSTEM_TASK:>
Convert a possibly time-dependent operator into the nested-list
<END_TASK>
<USER_TASK:>
Description:
def _time_dependent_to_qutip(
op, full_space=None, time_symbol=symbols("t", real=True),
convert_as='pyfunc'):
"""Convert a possiblty time-dependent operator into the nested-list
structure required by QuTiP""" |
if full_space is None:
full_space = op.space
if time_symbol in op.free_symbols:
op = op.expand()
if isinstance(op, OperatorPlus):
result = []
for o in op.operands:
if time_symbol not in o.free_symbols:
if len(result) == 0:
result.append(convert_to_qutip(o,
full_space=full_space))
else:
result[0] += convert_to_qutip(o, full_space=full_space)
for o in op.operands:
if time_symbol in o.free_symbols:
result.append(_time_dependent_to_qutip(o, full_space,
time_symbol, convert_as))
return result
elif (
isinstance(op, ScalarTimesOperator) and
isinstance(op.coeff, ScalarValue)):
if convert_as == 'pyfunc':
func_no_args = lambdify(time_symbol, op.coeff.val)
if {time_symbol, } == op.coeff.free_symbols:
def func(t, args):
# args are ignored for increased efficiency, since we
# know there are no free symbols except t
return func_no_args(t)
else:
def func(t, args):
return func_no_args(t).subs(args)
coeff = func
elif convert_as == 'str':
# a bit of a hack to replace imaginary unit
# TODO: we can probably use one of the sympy code generation
# routines, or lambdify with 'numexpr' to implement this in a
# more robust way
coeff = re.sub("I", "(1.0j)", str(op.coeff.val))
else:
raise ValueError(("Invalid value '%s' for `convert_as`, must "
"be one of 'str', 'pyfunc'") % convert_as)
return [convert_to_qutip(op.term, full_space), coeff]
else:
raise ValueError("op cannot be expressed in qutip. It must have "
"the structure op = sum_i f_i(t) * op_i")
else:
return convert_to_qutip(op, full_space=full_space) |
<SYSTEM_TASK:>
Left-justify text to a total of `width`
<END_TASK>
<USER_TASK:>
Description:
def ljust(text, width, fillchar=' '):
"""Left-justify text to a total of `width`
The `width` is based on graphemes::
>>> s = 'A\u0302'  # the grapheme 'Â' as two code points
>>> s.ljust(2)
'Â'
>>> ljust(s, 2)
'Â '
""" |
len_text = grapheme_len(text)
return text + fillchar * (width - len_text) |
<SYSTEM_TASK:>
Right-justify text for a total of `width` graphemes
<END_TASK>
<USER_TASK:>
Description:
def rjust(text, width, fillchar=' '):
"""Right-justify text for a total of `width` graphemes
The `width` is based on graphemes::
>>> s = 'A\u0302'  # the grapheme 'Â' as two code points
>>> s.rjust(2)
'Â'
>>> rjust(s, 2)
' Â'
""" |
len_text = grapheme_len(text)
return fillchar * (width - len_text) + text |
<SYSTEM_TASK:>
Kronecker delta symbol
<END_TASK>
<USER_TASK:>
Description:
def KroneckerDelta(i, j, simplify=True):
"""Kronecker delta symbol
Return :class:`One` (if `i` equals `j`), :class:`Zero` (if `i` and `j` are
non-symbolic and unequal), or a :class:`ScalarValue` wrapping SymPy's
:class:`~sympy.functions.special.tensor_functions.KroneckerDelta`.
>>> i, j = IdxSym('i'), IdxSym('j')
>>> KroneckerDelta(i, i)
One
>>> KroneckerDelta(1, 2)
Zero
>>> KroneckerDelta(i, j)
KroneckerDelta(i, j)
By default, the Kronecker delta is returned in a simplified form, e.g::
>>> KroneckerDelta((i+1)/2, (j+1)/2)
KroneckerDelta(i, j)
This may be suppressed by setting `simplify` to False::
>>> KroneckerDelta((i+1)/2, (j+1)/2, simplify=False)
KroneckerDelta(i/2 + 1/2, j/2 + 1/2)
Raises:
TypeError: if `i` or `j` is not an integer or sympy expression. There
is no automatic sympification of `i` and `j`.
""" |
from qnet.algebra.core.scalar_algebra import ScalarValue, One
if not isinstance(i, (int, sympy.Basic)):
raise TypeError(
"i is not an integer or sympy expression: %s" % type(i))
if not isinstance(j, (int, sympy.Basic)):
raise TypeError(
"j is not an integer or sympy expression: %s" % type(j))
if i == j:
return One
else:
delta = sympy.KroneckerDelta(i, j)
if simplify:
delta = _simplify_delta(delta)
return ScalarValue.create(delta) |
<SYSTEM_TASK:>
Instantiate the product while applying simplification rules
<END_TASK>
<USER_TASK:>
Description:
def create(cls, *operands, **kwargs):
"""Instantiate the product while applying simplification rules""" |
converted_operands = []
for op in operands:
if not isinstance(op, Scalar):
op = ScalarValue.create(op)
converted_operands.append(op)
return super().create(*converted_operands, **kwargs) |
<SYSTEM_TASK:>
Complex conjugate of the product
<END_TASK>
<USER_TASK:>
Description:
def conjugate(self):
"""Complex conjugate of of the product""" |
return self.__class__.create(
*[arg.conjugate() for arg in reversed(self.args)]) |
<SYSTEM_TASK:>
Instantiate the indexed sum while applying simplification rules
<END_TASK>
<USER_TASK:>
Description:
def create(cls, term, *ranges):
"""Instantiate the indexed sum while applying simplification rules""" |
if not isinstance(term, Scalar):
term = ScalarValue.create(term)
return super().create(term, *ranges) |
<SYSTEM_TASK:>
Complex conjugate of the indexed sum
<END_TASK>
<USER_TASK:>
Description:
def conjugate(self):
"""Complex conjugate of of the indexed sum""" |
return self.__class__.create(self.term.conjugate(), *self.ranges) |
<SYSTEM_TASK:>
Collect summands that occur multiple times into a single summand
<END_TASK>
<USER_TASK:>
Description:
def collect_summands(cls, ops, kwargs):
"""Collect summands that occur multiple times into a single summand
Also filters out zero-summands.
Example:
>>> A, B, C = (OperatorSymbol(s, hs=0) for s in ('A', 'B', 'C'))
>>> collect_summands(
... OperatorPlus, (A, B, C, ZeroOperator, 2 * A, B, -C) , {})
((3 * A^(0), 2 * B^(0)), {})
>>> collect_summands(OperatorPlus, (A, -A), {})
ZeroOperator
>>> collect_summands(OperatorPlus, (B, A, -B), {})
A^(0)
""" |
from qnet.algebra.core.abstract_quantum_algebra import (
ScalarTimesQuantumExpression)
coeff_map = OrderedDict()
for op in ops:
if isinstance(op, ScalarTimesQuantumExpression):
coeff, term = op.coeff, op.term
else:
coeff, term = 1, op
if term in coeff_map:
coeff_map[term] += coeff
else:
coeff_map[term] = coeff
fops = []
for (term, coeff) in coeff_map.items():
op = coeff * term
if not op.is_zero:
fops.append(op)
if len(fops) == 0:
return cls._zero
elif len(fops) == 1:
return fops[0]
else:
return tuple(fops), kwargs |
<SYSTEM_TASK:>
combine two fully reduced lists a, b
<END_TASK>
<USER_TASK:>
Description:
def _match_replace_binary_combine(cls, a: list, b: list) -> list:
"""combine two fully reduced lists a, b""" |
if len(a) == 0 or len(b) == 0:
return a + b
r = _get_binary_replacement(a[-1], b[0], cls)
if r is None:
return a + b
if r == cls._neutral_element:
return _match_replace_binary_combine(cls, a[:-1], b[1:])
if isinstance(r, cls):
r = list(r.args)
else:
r = [r, ]
return _match_replace_binary_combine(
cls,
_match_replace_binary_combine(cls, a[:-1], r),
b[1:]) |
<SYSTEM_TASK:>
Return ZeroOperator if all the operators in `ops` have a disjunct
<END_TASK>
<USER_TASK:>
Description:
def disjunct_hs_zero(cls, ops, kwargs):
"""Return ZeroOperator if all the operators in `ops` have a disjunct
Hilbert space, or an unchanged `ops`, `kwargs` otherwise
""" |
from qnet.algebra.core.hilbert_space_algebra import TrivialSpace
from qnet.algebra.core.operator_algebra import ZeroOperator
hilbert_spaces = []
for op in ops:
try:
hs = op.space
except AttributeError: # scalars
hs = TrivialSpace
for hs_prev in hilbert_spaces:
if not hs.isdisjoint(hs_prev):
return ops, kwargs
hilbert_spaces.append(hs)
return ZeroOperator |
<SYSTEM_TASK:>
Use the anti-commutative property of the commutator to enforce a standard
<END_TASK>
<USER_TASK:>
Description:
def commutator_order(cls, ops, kwargs):
"""Apply anti-commutative property of the commutator to apply a standard
ordering of the commutator arguments
""" |
from qnet.algebra.core.operator_algebra import Commutator
assert len(ops) == 2
if cls.order_key(ops[1]) < cls.order_key(ops[0]):
return -1 * Commutator.create(ops[1], ops[0])
else:
return ops, kwargs |
<SYSTEM_TASK:>
Accept operands that are all bras, and turn that into the bra of the
<END_TASK>
<USER_TASK:>
Description:
def accept_bras(cls, ops, kwargs):
"""Accept operands that are all bras, and turn that into to bra of the
operation applied to all corresponding kets""" |
from qnet.algebra.core.state_algebra import Bra
kets = []
for bra in ops:
if isinstance(bra, Bra):
kets.append(bra.ket)
else:
return ops, kwargs
return Bra.create(cls.create(*kets, **kwargs)) |
<SYSTEM_TASK:>
Sorting key for ranges.
<END_TASK>
<USER_TASK:>
Description:
def _ranges_key(r, delta_indices):
"""Sorting key for ranges.
When used with ``reverse=True``, this can be used to sort index ranges into
the order we would prefer to eliminate them by evaluating KroneckerDeltas:
First, eliminate primed indices, then indices whose names are higher in the alphabet.
""" |
idx = r.index_symbol
if idx in delta_indices:
return (r.index_symbol.primed, r.index_symbol.name)
else:
# ranges that are not in delta_indices should remain in the original
# order
return (0, ' ') |
<SYSTEM_TASK:>
Yield factors from expr, mixing sympy and QNET
<END_TASK>
<USER_TASK:>
Description:
def _factors_for_expand_delta(expr):
"""Yield factors from expr, mixing sympy and QNET
Auxiliary routine for :func:`_expand_delta`.
""" |
from qnet.algebra.core.scalar_algebra import ScalarValue
from qnet.algebra.core.abstract_quantum_algebra import (
ScalarTimesQuantumExpression)
if isinstance(expr, ScalarTimesQuantumExpression):
yield from _factors_for_expand_delta(expr.coeff)
yield expr.term
elif isinstance(expr, ScalarValue):
yield from _factors_for_expand_delta(expr.val)
elif isinstance(expr, sympy.Basic) and expr.is_Mul:
yield from expr.args
else:
yield expr |
<SYSTEM_TASK:>
Split a product into sympy and qnet factors
<END_TASK>
<USER_TASK:>
Description:
def _split_sympy_quantum_factor(expr):
"""Split a product into sympy and qnet factors
This is a helper routine for applying some sympy transformation on an
arbitrary product-like expression in QNET. The idea is this::
expr -> sympy_factor, quantum_factor
sympy_factor -> sympy_function(sympy_factor)
expr -> sympy_factor * quantum_factor
""" |
from qnet.algebra.core.abstract_quantum_algebra import (
QuantumExpression, ScalarTimesQuantumExpression)
from qnet.algebra.core.scalar_algebra import ScalarValue, ScalarTimes, One
if isinstance(expr, ScalarTimesQuantumExpression):
sympy_factor, quantum_factor = _split_sympy_quantum_factor(expr.coeff)
quantum_factor *= expr.term
elif isinstance(expr, ScalarValue):
sympy_factor = expr.val
quantum_factor = expr._one
elif isinstance(expr, ScalarTimes):
sympy_factor = sympy.S(1)
quantum_factor = expr._one
for op in expr.operands:
op_sympy, op_quantum = _split_sympy_quantum_factor(op)
sympy_factor *= op_sympy
quantum_factor *= op_quantum
elif isinstance(expr, sympy.Basic):
sympy_factor = expr
quantum_factor = One
else:
sympy_factor = sympy.S(1)
quantum_factor = expr
assert isinstance(sympy_factor, sympy.Basic)
assert isinstance(quantum_factor, QuantumExpression)
return sympy_factor, quantum_factor |
<SYSTEM_TASK:>
Extract a "simple" Kronecker delta containing `idx` from `expr`.
<END_TASK>
<USER_TASK:>
Description:
def _extract_delta(expr, idx):
"""Extract a "simple" Kronecker delta containing `idx` from `expr`.
Assuming `expr` can be written as the product of a Kronecker Delta and a
`new_expr`, return a tuple of the sympy.KroneckerDelta instance and
`new_expr`. Otherwise, return a tuple of None and the original `expr`
(possibly converted to a :class:`.QuantumExpression`).
On input, `expr` can be a :class:`QuantumExpression` or a
:class:`sympy.Basic` object. On output, `new_expr` is guaranteed to be a
:class:`QuantumExpression`.
""" |
from qnet.algebra.core.abstract_quantum_algebra import QuantumExpression
from qnet.algebra.core.scalar_algebra import ScalarValue
sympy_factor, quantum_factor = _split_sympy_quantum_factor(expr)
delta, new_expr = _sympy_extract_delta(sympy_factor, idx)
if delta is None:
new_expr = expr
else:
new_expr = new_expr * quantum_factor
if isinstance(new_expr, ScalarValue._val_types):
new_expr = ScalarValue.create(new_expr)
assert isinstance(new_expr, QuantumExpression)
return delta, new_expr |
<SYSTEM_TASK:>
Partially execute a summation for `term` with a Kronecker Delta for one
<END_TASK>
<USER_TASK:>
Description:
def _deltasummation(term, ranges, i_range):
"""Partially execute a summation for `term` with a Kronecker Delta for one
of the summation indices.
This implements the solution to the core sub-problem in
:func:`indexed_sum_over_kronecker`
Args:
term (QuantumExpression): term of the sum
ranges (list): list of all summation index ranges
(:class:`IndexRangeBase` instances)
i_range (int): list-index of element in `ranges` which should be
eliminated
Returns:
``(result, flag)`` where `result` is a list
of ``(new_term, new_ranges)`` tuples and `flag` is an integer.
There are three possible cases, indicated by the returned `flag`. Consider
the following setup::
>>> i, j, k = symbols('i, j, k', cls=IdxSym)
>>> i_range = IndexOverList(i, (0, 1))
>>> j_range = IndexOverList(j, (0, 1))
>>> ranges = [i_range, j_range]
>>> def A(i, j):
... from sympy import IndexedBase
... return OperatorSymbol(StrLabel(IndexedBase('A')[i, j]), hs=0)
1. If executing the sum produces a single non-zero term, result will be
``[(new_term, new_ranges)]`` where `new_ranges` contains the input `ranges`
without the eliminated range specified by `i_range`. This should be the
most common case for calls to:func:`_deltasummation`::
>>> term = KroneckerDelta(i, j) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 1)
>>> assert result == [(A(i, i), [i_range])]
>>> assert flag == 1
2. If executing the sum for the index symbol specified via `i_range`
does not reduce the sum, the result will be the list ``[(term, ranges)]``
with unchanged `term` and `ranges`::
>>> term = KroneckerDelta(j, k) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 0)
>>> assert result == [(term, [i_range, j_range])]
>>> assert flag == 2
This case also applies if there is no Kronecker delta in the term::
>>> term = A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 0)
>>> assert result == [(term, [i_range, j_range])]
>>> assert flag == 2
3. If `term` does not contain a Kronecker delta as a direct factor, but
contains one inside a sum that can be expanded, the result will be a list of
``[(summand1, ranges), (summand2, ranges), ...]`` for the summands of that
expansion. In this case, :func:`_deltasummation` should be called again
for every tuple in the list, with the same `i_range`::
>>> term = (KroneckerDelta(i, j) + 1) * A(i, j)
>>> result, flag = _deltasummation(term, [i_range, j_range], 1)
>>> assert result == [
... (A(i, j), [i_range, j_range]),
... (KroneckerDelta(i,j) * A(i, j), [i_range, j_range])]
>>> assert flag == 3
""" |
from qnet.algebra.core.abstract_quantum_algebra import QuantumExpression
idx = ranges[i_range].index_symbol
summands = _expand_delta(term, idx)
if len(summands) > 1:
return [(summand, ranges) for summand in summands], 3
else:
delta, expr = _extract_delta(summands[0], idx)
if not delta:
return [(term, ranges)], 2
solns = sympy.solve(delta.args[0] - delta.args[1], idx)
assert len(solns) > 0 # I can't think of an example that might cause this
# if len(solns) == 0:
# return [(term._zero, [])], 4
if len(solns) != 1:
return [(term, ranges)], 2
value = solns[0]
new_term = expr.substitute({idx: value})
if _RESOLVE_KRONECKER_WITH_PIECEWISE:
new_term *= ranges[i_range].piecewise_one(value)
assert isinstance(new_term, QuantumExpression)
return [(new_term, ranges[:i_range] + ranges[i_range+1:])], 1 |
<SYSTEM_TASK:>
If possible, decompose a permutation into a sequence of permutations
<END_TASK>
<USER_TASK:>
Description:
def permutation_to_block_permutations(permutation):
"""If possible, decompose a permutation into a sequence of permutations
each acting on individual ranges of the full range of indices.
E.g.
``(1,2,0,3,5,4) --> (1,2,0) [+] (0,2,1)``
:param permutation: A valid permutation image tuple ``s = (s_0,...s_n)`` with ``n > 0``
:type permutation: tuple
:return: A list of permutation tuples ``[t = (t_0,...,t_n1), u = (u_0,...,u_n2),..., z = (z_0,...,z_nm)]`` such that ``s = t [+] u [+] ... [+] z``
:rtype: list of tuples
:raise: ValueError
""" |
if len(permutation) == 0 or not check_permutation(permutation):
raise BadPermutationError()
cycles = permutation_to_disjoint_cycles(permutation)
if len(cycles) == 1:
return (permutation,)
current_block_start = cycles[0][0]
current_block_end = max(cycles[0])
current_block_cycles = [cycles[0]]
res_permutations = []
for c in cycles[1:]:
if c[0] > current_block_end:
res_permutations.append(permutation_from_disjoint_cycles(current_block_cycles, current_block_start))
assert sum(map(len, current_block_cycles)) == current_block_end - current_block_start + 1
current_block_start = c[0]
current_block_end = max(c)
current_block_cycles = [c]
else:
current_block_cycles.append(c)
if max(c) > current_block_end:
current_block_end = max(c)
res_permutations.append(permutation_from_disjoint_cycles(current_block_cycles, current_block_start))
assert sum(map(len, current_block_cycles)) == current_block_end - current_block_start + 1
assert sum(map(len, res_permutations)) == len(permutation)
return res_permutations |
<SYSTEM_TASK:>
Decompose a permutation into a block permutation and into permutations
<END_TASK>
<USER_TASK:>
Description:
def block_perm_and_perms_within_blocks(permutation, block_structure):
"""Decompose a permutation into a block permutation and into permutations
acting within each block.
:param permutation: The overall permutation to be factored.
:type permutation: tuple
:param block_structure: The channel dimensions of the blocks
:type block_structure: tuple
:return: ``(block_permutation, permutations_within_blocks)``
Where ``block_permutations`` is an image tuple for a permutation of the block indices
and ``permutations_within_blocks`` is a list of image tuples for the permutations of the channels
within each block
:rtype: tuple
""" |
nblocks = len(block_structure)
offsets = [sum(block_structure[:k]) for k in range(nblocks)]
images = [permutation[offset: offset + length] for (offset, length) in zip(offsets, block_structure)]
images_mins = list(map(min, images))
key_block_perm_inv = lambda block_index: images_mins[block_index]
block_perm_inv = tuple(sorted(range(nblocks), key = key_block_perm_inv))
# print(images_mins)
# print(permutation, block_structure, "-->", block_perm, invert_permutation(block_perm))
block_perm = invert_permutation(block_perm_inv)
assert images_mins[block_perm_inv[0]] == min(images_mins)
assert images_mins[block_perm_inv[-1]] == max(images_mins)
# block_perm = tuple(invert_permutation(block_perm_inv))
perms_within_blocks = []
for (offset, length, image) in zip(offsets, block_structure, images):
block_key = lambda elt_index: image[elt_index]
within_inv = sorted(range(length), key = block_key)
within = invert_permutation(tuple(within_inv))
assert permutation[within_inv[0] + offset] == min(image)
assert permutation[within_inv[-1] + offset] == max(image)
perms_within_blocks.append(within)
return block_perm, perms_within_blocks |
<SYSTEM_TASK:>
Check that all operands are Kets, optionally from the same or disjoint Hilbert spaces.
<END_TASK>
<USER_TASK:>
Description:
def _check_kets(*ops, same_space=False, disjunct_space=False):
"""Check that all operands are Kets from the same Hilbert space.""" |
if not all([(isinstance(o, State) and o.isket) for o in ops]):
raise TypeError("All operands must be Kets")
if same_space:
if not len({o.space for o in ops if o is not ZeroKet}) == 1:
raise UnequalSpaces(str(ops))
if disjunct_space:
spc = TrivialSpace
for o in ops:
if o.space & spc > TrivialSpace:
raise OverlappingSpaces(str(ops))
spc *= o.space |
<SYSTEM_TASK:>
Tuple containing `label_or_index` as its only element.
<END_TASK>
<USER_TASK:>
Description:
def args(self):
"""Tuple containing `label_or_index` as its only element.""" |
if self.space.has_basis or isinstance(self.label, SymbolicLabelBase):
return (self.label, )
else:
return (self.index, ) |
<SYSTEM_TASK:>
Return the coherent state written out as an indexed sum over Fock
<END_TASK>
<USER_TASK:>
Description:
def to_fock_representation(self, index_symbol='n', max_terms=None):
"""Return the coherent state written out as an indexed sum over Fock
basis states""" |
phase_factor = sympy.exp(
sympy.Rational(-1, 2) * self.ampl * self.ampl.conjugate())
if not isinstance(index_symbol, IdxSym):
index_symbol = IdxSym(index_symbol)
n = index_symbol
if max_terms is None:
index_range = IndexOverFockSpace(n, hs=self._hs)
else:
index_range = IndexOverRange(n, 0, max_terms-1)
term = (
(self.ampl**n / sympy.sqrt(sympy.factorial(n))) *
BasisKet(FockIndex(n), hs=self._hs))
return phase_factor * KetIndexedSum(term, index_range) |
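A sketch, assuming qnet's ``CoherentStateKet``::

    alpha = sympy.symbols('alpha')
    psi = CoherentStateKet(alpha, hs=1)
    psi.to_fock_representation(max_terms=4)   # truncated sum over n = 0..3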
<SYSTEM_TASK:>
Set the default algebraic rules for scalars
<END_TASK>
<USER_TASK:>
Description:
def _algebraic_rules_scalar():
"""Set the default algebraic rules for scalars""" |
a = wc("a", head=SCALAR_VAL_TYPES)
b = wc("b", head=SCALAR_VAL_TYPES)
x = wc("x", head=SCALAR_TYPES)
y = wc("y", head=SCALAR_TYPES)
z = wc("z", head=SCALAR_TYPES)
indranges__ = wc("indranges__", head=IndexRangeBase)
ScalarTimes._binary_rules.update(check_rules_dict([
('R001', (
pattern_head(a, b),
lambda a, b: a * b)),
('R002', (
pattern_head(x, x),
lambda x: x**2)),
('R003', (
pattern_head(Zero, x),
lambda x: Zero)),
('R004', (
pattern_head(x, Zero),
lambda x: Zero)),
('R005', (
pattern_head(
pattern(ScalarPower, x, y),
pattern(ScalarPower, x, z)),
lambda x, y, z: x**(y+z))),
('R006', (
pattern_head(x, pattern(ScalarPower, x, -1)),
lambda x: One)),
]))
ScalarPower._rules.update(check_rules_dict([
('R001', (
pattern_head(a, b),
lambda a, b: a**b)),
('R002', (
pattern_head(x, 0),
lambda x: One)),
('R003', (
pattern_head(x, 1),
lambda x: x)),
('R004', (
pattern_head(pattern(ScalarPower, x, y), z),
lambda x, y, z: x**(y*z))),
]))
def pull_constfactor_from_sum(x, y, indranges):
bound_symbols = set([r.index_symbol for r in indranges])
if len(x.free_symbols.intersection(bound_symbols)) == 0:
return x * ScalarIndexedSum.create(y, *indranges)
else:
raise CannotSimplify()
ScalarIndexedSum._rules.update(check_rules_dict([
('R001', ( # sum over zero -> zero
pattern_head(Zero, indranges__),
lambda indranges: Zero)),
('R002', ( # pull constant prefactor out of sum
pattern_head(pattern(ScalarTimes, x, y), indranges__),
lambda x, y, indranges:
pull_constfactor_from_sum(x, y, indranges))),
])) |
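Rule R002 above encodes the identity sum_i c*f(i) = c * sum_i f(i) whenever c does not depend on the summation index. A quick, self-contained sympy check of the same identity:
import sympy
i, n = sympy.symbols('i n', integer=True, nonnegative=True)
c = sympy.symbols('c')
lhs = sympy.Sum(c * i, (i, 0, n)).doit()
rhs = (c * sympy.Sum(i, (i, 0, n))).doit()
assert sympy.simplify(lhs - rhs) == 0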
<SYSTEM_TASK:>
Simplification method for lhs << rhs
<END_TASK>
<USER_TASK:>
Description:
def _tensor_decompose_series(lhs, rhs):
"""Simplification method for lhs << rhs
Decompose a series product of two reducible circuits with compatible block
structures into a concatenation of individual series products between
subblocks. This method raises CannotSimplify when rhs is a CPermutation in
order not to conflict with other _rules.
""" |
if isinstance(rhs, CPermutation):
raise CannotSimplify()
lhs_structure = lhs.block_structure
rhs_structure = rhs.block_structure
res_struct = _get_common_block_structure(lhs_structure, rhs_structure)
if len(res_struct) > 1:
blocks, oblocks = (
lhs.get_blocks(res_struct),
rhs.get_blocks(res_struct))
parallel_series = [SeriesProduct.create(lb, rb)
for (lb, rb) in zip(blocks, oblocks)]
return Concatenation.create(*parallel_series)
raise CannotSimplify() |
<SYSTEM_TASK:>
Simplification method for cperm << rhs.
<END_TASK>
<USER_TASK:>
Description:
def _factor_permutation_for_blocks(cperm, rhs):
"""Simplification method for cperm << rhs.
    Decompose a series product of a channel permutation and a reducible circuit
    with appropriate block structure by decomposing the permutation into a
    residual permutation, a block permutation, and permutations within each
    block of rhs. This allows for achieving something close to a normal form
    for circuit expressions.
""" |
rbs = rhs.block_structure
if rhs == cid(rhs.cdim):
return cperm
if len(rbs) > 1:
residual_lhs, transformed_rhs, carried_through_lhs \
= cperm._factorize_for_rhs(rhs)
if residual_lhs == cperm:
raise CannotSimplify()
return SeriesProduct.create(residual_lhs, transformed_rhs,
carried_through_lhs)
raise CannotSimplify() |
<SYSTEM_TASK:>
Pull out a permutation from the Feedback of a SeriesProduct with itself.
<END_TASK>
<USER_TASK:>
Description:
def _pull_out_perm_lhs(lhs, rest, out_port, in_port):
"""Pull out a permutation from the Feedback of a SeriesProduct with itself.
Args:
lhs (CPermutation): The permutation circuit
rest (tuple): The other SeriesProduct operands
out_port (int): The feedback output port index
in_port (int): The feedback input port index
Returns:
Circuit: The simplified circuit
""" |
out_inv, lhs_red = lhs._factor_lhs(out_port)
return lhs_red << Feedback.create(SeriesProduct.create(*rest),
out_port=out_inv, in_port=in_port) |
<SYSTEM_TASK:>
In a self-Feedback of a series product, where the left-most operand is
<END_TASK>
<USER_TASK:>
Description:
def _pull_out_unaffected_blocks_lhs(lhs, rest, out_port, in_port):
"""In a self-Feedback of a series product, where the left-most operand is
reducible, pull all non-trivial blocks outside of the feedback.
Args:
lhs (Circuit): The reducible circuit
rest (tuple): The other SeriesProduct operands
out_port (int): The feedback output port index
in_port (int): The feedback input port index
Returns:
Circuit: The simplified circuit
""" |
_, block_index = lhs.index_in_block(out_port)
bs = lhs.block_structure
nbefore, nblock, nafter = (sum(bs[:block_index]),
bs[block_index],
sum(bs[block_index + 1:]))
before, block, after = lhs.get_blocks((nbefore, nblock, nafter))
if before != cid(nbefore) or after != cid(nafter):
outer_lhs = before + cid(nblock - 1) + after
inner_lhs = cid(nbefore) + block + cid(nafter)
return outer_lhs << Feedback.create(
SeriesProduct.create(inner_lhs, *rest),
out_port=out_port, in_port=in_port)
elif block == cid(nblock):
outer_lhs = before + cid(nblock - 1) + after
return outer_lhs << Feedback.create(
SeriesProduct.create(*rest),
out_port=out_port, in_port=in_port)
raise CannotSimplify() |
<SYSTEM_TASK:>
Invert a series self-feedback twice to get rid of unnecessary
<END_TASK>
<USER_TASK:>
Description:
def _series_feedback(series, out_port, in_port):
"""Invert a series self-feedback twice to get rid of unnecessary
permutations.""" |
series_s = series.series_inverse().series_inverse()
if series_s == series:
raise CannotSimplify()
return series_s.feedback(out_port=out_port, in_port=in_port) |
<SYSTEM_TASK:>
For a class with an attribute `arg_names` containing a list of names,
<END_TASK>
<USER_TASK:>
Description:
def properties_for_args(cls, arg_names='_arg_names'):
"""For a class with an attribute `arg_names` containing a list of names,
add a property for every name in that list.
It is assumed that there is an instance attribute ``self._<arg_name>``,
which is returned by the `arg_name` property. The decorator also adds a
class attribute :attr:`_has_properties_for_args` that may be used to ensure
that a class is decorated.
""" |
from qnet.algebra.core.scalar_algebra import Scalar
scalar_args = False
if hasattr(cls, '_scalar_args'):
scalar_args = cls._scalar_args
for arg_name in getattr(cls, arg_names):
def get_arg(self, name):
val = getattr(self, "_%s" % name)
if scalar_args:
assert isinstance(val, Scalar)
return val
prop = property(partial(get_arg, name=arg_name))
doc = "The `%s` argument" % arg_name
if scalar_args:
doc += ", as a :class:`.Scalar` instance."
else:
doc += "."
prop.__doc__ = doc
setattr(cls, arg_name, prop)
cls._has_properties_for_args = True
return cls |
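A minimal sketch of the decorator in use (the Gate class is hypothetical; qnet must be importable, since the decorator itself imports Scalar):
@properties_for_args
class Gate:
    _arg_names = ('theta', 'phi')
    def __init__(self, theta, phi):
        self._theta = theta
        self._phi = phi

g = Gate(0.1, 0.2)
print(g.theta, g.phi)   # -> 0.1 0.2
assert Gate._has_properties_for_args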
<SYSTEM_TASK:>
Test whether the slug is unique within a given time period.
<END_TASK>
<USER_TASK:>
Description:
def validate_unique_slug(self, cleaned_data):
"""
Test whether the slug is unique within a given time period.
""" |
date_kwargs = {}
error_msg = _("The slug is not unique")
# The /year/month/slug/ URL determines when a slug can be unique.
pubdate = cleaned_data['publication_date'] or now()
    if '{year}' in appsettings.FLUENT_BLOGS_ENTRY_LINK_STYLE:
        date_kwargs['year'] = pubdate.year
        error_msg = _("The slug is not unique within its publication year.")
    if '{month}' in appsettings.FLUENT_BLOGS_ENTRY_LINK_STYLE:
        date_kwargs['month'] = pubdate.month
        error_msg = _("The slug is not unique within its publication month.")
    if '{day}' in appsettings.FLUENT_BLOGS_ENTRY_LINK_STYLE:
        date_kwargs['day'] = pubdate.day
        error_msg = _("The slug is not unique within its publication day.")
date_range = get_date_range(**date_kwargs)
# Base filters are configurable for translation support.
dup_filters = self.get_unique_slug_filters(cleaned_data)
if date_range:
dup_filters['publication_date__range'] = date_range
dup_qs = EntryModel.objects.filter(**dup_filters)
if self.instance and self.instance.pk:
dup_qs = dup_qs.exclude(pk=self.instance.pk)
# Test whether the slug is unique in the current month
# Note: doesn't take changes to FLUENT_BLOGS_ENTRY_LINK_STYLE into account.
if dup_qs.exists():
raise ValidationError(error_msg) |
<SYSTEM_TASK:>
Non-recursively match expr against all rules
<END_TASK>
<USER_TASK:>
Description:
def _apply_rules_no_recurse(expr, rules):
"""Non-recursively match expr again all rules""" |
try:
# `rules` is an OrderedDict key => (pattern, replacement)
items = rules.items()
except AttributeError:
# `rules` is a list of (pattern, replacement) tuples
items = enumerate(rules)
for key, (pat, replacement) in items:
matched = pat.match(expr)
if matched:
try:
return replacement(**matched)
except CannotSimplify:
pass
return expr |
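A self-contained illustration with a stand-in pattern object (not the real qnet Pattern API; anything exposing .match(expr) that returns a truthy dict of wildcard bindings will work):
class EvenPattern:
    # Matches even integers, binding them to the wildcard name 'n'.
    def match(self, expr):
        if isinstance(expr, int) and expr % 2 == 0:
            return {'n': expr}
        return None

rules = [(EvenPattern(), lambda n: n // 2)]
assert _apply_rules_no_recurse(4, rules) == 2   # rule fired: 4 -> 2
assert _apply_rules_no_recurse(3, rules) == 3   # no match: unchanged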
<SYSTEM_TASK:>
Instantiate while applying automatic simplifications
<END_TASK>
<USER_TASK:>
Description:
def create(cls, *args, **kwargs):
"""Instantiate while applying automatic simplifications
Instead of directly instantiating `cls`, it is recommended to use
:meth:`create`, which applies simplifications to the args and keyword
arguments according to the :attr:`simplifications` class attribute, and
returns an appropriate object (which may or may not be an instance of
the original `cls`).
Two simplifications of particular importance are :func:`.match_replace`
and :func:`.match_replace_binary` which apply rule-based
simplifications.
The :func:`.temporary_rules` context manager may be used to allow
temporary modification of the automatic simplifications that
:meth:`create` uses, in particular the rules for
:func:`.match_replace` and :func:`.match_replace_binary`. Inside the
managed context, the :attr:`simplifications` class attribute may be
modified and rules can be managed with :meth:`add_rule` and
:meth:`del_rules`.
""" |
global LEVEL
if LOG:
logger = logging.getLogger('QNET.create')
logger.debug(
"%s%s.create(*args, **kwargs); args = %s, kwargs = %s",
(" " * LEVEL), cls.__name__, args, kwargs)
LEVEL += 1
key = cls._get_instance_key(args, kwargs)
try:
if cls.instance_caching:
instance = cls._instances[key]
if LOG:
LEVEL -= 1
logger.debug("%s(cached)-> %s", (" " * LEVEL), instance)
return instance
except KeyError:
pass
for i, simplification in enumerate(cls.simplifications):
if LOG:
try:
simpl_name = simplification.__name__
except AttributeError:
simpl_name = "simpl%d" % i
simplified = simplification(cls, args, kwargs)
try:
args, kwargs = simplified
if LOG:
logger.debug(
"%s(%s)-> args = %s, kwargs = %s", (" " * LEVEL),
simpl_name, args, kwargs)
except (TypeError, ValueError):
# We assume that if the simplification didn't return a tuple,
# the result is a fully instantiated object
if cls.instance_caching:
cls._instances[key] = simplified
if cls._create_idempotent and cls.instance_caching:
try:
key2 = simplified._instance_key
if key2 != key:
cls._instances[key2] = simplified # simplified key
except AttributeError:
# simplified might e.g. be a scalar and not have
# _instance_key
pass
if LOG:
LEVEL -= 1
logger.debug(
"%s(%s)-> %s", (" " * LEVEL), simpl_name, simplified)
return simplified
if len(kwargs) > 0:
cls._has_kwargs = True
instance = cls(*args, **kwargs)
if cls.instance_caching:
cls._instances[key] = instance
if cls._create_idempotent and cls.instance_caching:
key2 = cls._get_instance_key(args, kwargs)
if key2 != key:
cls._instances[key2] = instance # instantiated key
if LOG:
LEVEL -= 1
logger.debug("%s -> %s", (" " * LEVEL), instance)
return instance |
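A hedged sketch of the instance caching this enables, assuming a concrete qnet expression class such as OperatorSymbol (label plus hs keyword):
op1 = OperatorSymbol.create('a', hs=1)
op2 = OperatorSymbol.create('a', hs=1)
assert op1 is op2   # the same cached instance, not merely an equal one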
<SYSTEM_TASK:>
The dictionary of keyword-only arguments for the instantiation of
<END_TASK>
<USER_TASK:>
Description:
def kwargs(self):
"""The dictionary of keyword-only arguments for the instantiation of
the Expression""" |
# Subclasses must override this property if and only if they define
# keyword-only arguments in their __init__ method
if hasattr(self, '_has_kwargs') and self._has_kwargs:
raise NotImplementedError(
"Class %s does not provide a kwargs property"
% str(self.__class__.__name__))
return {} |
<SYSTEM_TASK:>
Substitute sub-expressions
<END_TASK>
<USER_TASK:>
Description:
def substitute(self, var_map):
"""Substitute sub-expressions
Args:
var_map (dict): Dictionary with entries of the form
``{expr: substitution}``
""" |
if self in var_map:
return var_map[self]
return self._substitute(var_map) |
<SYSTEM_TASK:>
Rebuild the expression while applying a list of rules
<END_TASK>
<USER_TASK:>
Description:
def apply_rules(self, rules, recursive=True):
"""Rebuild the expression while applying a list of rules
The rules are applied against the instantiated expression, and any
    sub-expressions if `recursive` is True. Rule application is best
    thought of as a pattern-based substitution. This is different from the
*automatic* rules that :meth:`create` uses (see :meth:`add_rule`),
which are applied *before* expressions are instantiated.
Args:
rules (list or ~collections.OrderedDict): List of rules or
dictionary mapping names to rules, where each rule is a tuple
(:class:`Pattern`, replacement callable), cf.
:meth:`apply_rule`
recursive (bool): If true (default), apply rules to all arguments
and keyword arguments of the expression. Otherwise, only the
expression itself will be re-instantiated.
If `rules` is a dictionary, the keys (rules names) are used only for
debug logging, to allow an analysis of which rules lead to the final
form of an expression.
""" |
if recursive:
new_args = [_apply_rules(arg, rules) for arg in self.args]
new_kwargs = {
key: _apply_rules(val, rules)
for (key, val) in self.kwargs.items()}
else:
new_args = self.args
new_kwargs = self.kwargs
simplified = self.create(*new_args, **new_kwargs)
return _apply_rules_no_recurse(simplified, rules) |
<SYSTEM_TASK:>
Apply a single rule to the expression
<END_TASK>
<USER_TASK:>
Description:
def apply_rule(self, pattern, replacement, recursive=True):
"""Apply a single rules to the expression
This is equivalent to :meth:`apply_rules` with
``rules=[(pattern, replacement)]``
Args:
pattern (.Pattern): A pattern containing one or more wildcards
replacement (callable): A callable that takes the wildcard names in
`pattern` as keyword arguments, and returns a replacement for
any expression that `pattern` matches.
Example:
Consider the following Heisenberg Hamiltonian::
>>> tls = SpinSpace(label='s', spin='1/2')
>>> i, j, n = symbols('i, j, n', cls=IdxSym)
>>> J = symbols('J', cls=sympy.IndexedBase)
>>> def Sig(i):
... return OperatorSymbol(
... StrLabel(sympy.Indexed('sigma', i)), hs=tls)
>>> H = - Sum(i, tls)(Sum(j, tls)(
... J[i, j] * Sig(i) * Sig(j)))
>>> unicode(H)
        '- (∑_{i,j ∈ ℌₛ} J_ij σ̂_i^(s) σ̂_j^(s))'
We can transform this into a classical Hamiltonian by replacing the
operators with scalars::
>>> H_classical = H.apply_rule(
... pattern(OperatorSymbol, wc('label', head=StrLabel)),
... lambda label: label.expr * IdentityOperator)
>>> unicode(H_classical)
        '- (∑_{i,j ∈ ℌₛ} J_ij σ_i σ_j)'
""" |
return self.apply_rules([(pattern, replacement)], recursive=recursive) |
<SYSTEM_TASK:>
Download BAM files if needed, extract only chr17 reads, and regenerate .bai
<END_TASK>
<USER_TASK:>
Description:
def get_bams():
"""
Download BAM files if needed, extract only chr17 reads, and regenerate .bai
""" |
for size, md5, url in bams:
bam = os.path.join(
args.data_dir,
os.path.basename(url).replace('.bam', '_%s.bam' % CHROM))
if not _up_to_date(md5, bam):
logger.info(
'Downloading reads on chromosome %s from %s to %s'
% (CHROM, url, bam))
cmds = ['samtools', 'view', '-b', url, COORD, '>', bam]
logged_command(cmds)
        bai = bam + '.bai'
        if not os.path.exists(bai):
            logger.info('indexing %s' % bam)
            cmds = [
                'samtools',
                'index',
                bam]
            logger.info(' '.join(cmds))
            logged_command(cmds)
if os.path.exists(os.path.basename(url) + '.bai'):
os.unlink(os.path.basename(url) + '.bai')
for size, md5, fn in bais:
if not _up_to_date(md5, fn):
            cmds = [
                'samtools', 'index', fn.replace('.bai', '')]
logged_command(cmds) |
<SYSTEM_TASK:>
Download GTF file from Ensembl, only keeping the chr17 entries.
<END_TASK>
<USER_TASK:>
Description:
def get_gtf():
"""
Download GTF file from Ensembl, only keeping the chr17 entries.
""" |
size, md5, url = GTF
full_gtf = os.path.join(args.data_dir, os.path.basename(url))
subset_gtf = os.path.join(
args.data_dir,
os.path.basename(url).replace('.gtf.gz', '_%s.gtf' % CHROM))
if not _up_to_date(md5, subset_gtf):
download(url, full_gtf)
cmds = [
'zcat', '<',
full_gtf,
'|', 'awk -F "\\t" \'{if ($1 == "%s") print $0}\''
% CHROM.replace('chr', ''),
'|', 'awk \'{print "chr"$0}\'', '>', subset_gtf]
logged_command(cmds) |
<SYSTEM_TASK:>
Convert Cufflinks output GTF files into tables of score and FPKM.
<END_TASK>
<USER_TASK:>
Description:
def cufflinks_conversion():
"""
    Convert Cufflinks output GTF files into tables of score and FPKM.
""" |
for size, md5, fn in cufflinks_tables:
fn = os.path.join(args.data_dir, fn)
table = fn.replace('.gtf.gz', '.table')
if not _up_to_date(md5, table):
logger.info("Converting Cufflinks GTF %s to table" % fn)
fout = open(table, 'w')
fout.write('id\tscore\tfpkm\n')
x = pybedtools.BedTool(fn)
seen = set()
for i in x:
accession = i['transcript_id'].split('.')[0]
if accession not in seen:
seen.update([accession])
fout.write(
'\t'.join([accession, i.score, i['FPKM']]) + '\n')
fout.close() |
<SYSTEM_TASK:>
Spawns a new figure showing data for `feature`.
<END_TASK>
<USER_TASK:>
Description:
def plot(self, feature):
"""
Spawns a new figure showing data for `feature`.
:param feature: A `pybedtools.Interval` object
Using the pybedtools.Interval `feature`, creates figure specified in
:meth:`BaseMiniBrowser.make_fig` and plots data on panels according to
`self.panels()`.
""" |
if isinstance(feature, gffutils.Feature):
feature = asinterval(feature)
self.make_fig()
axes = []
for ax, method in self.panels():
feature = method(ax, feature)
axes.append(ax)
return axes |
<SYSTEM_TASK:>
An example panel that just prints the text of the feature.
<END_TASK>
<USER_TASK:>
Description:
def example_panel(self, ax, feature):
"""
    An example panel that just prints the text of the feature.
""" |
txt = '%s:%s-%s' % (feature.chrom, feature.start, feature.stop)
ax.text(0.5, 0.5, txt, transform=ax.transAxes)
return feature |
<SYSTEM_TASK:>
Plots each genomic signal as a line using the corresponding
<END_TASK>
<USER_TASK:>
Description:
def signal_panel(self, ax, feature):
"""
Plots each genomic signal as a line using the corresponding
plotting_kwargs
""" |
for gs, kwargs in zip(self.genomic_signal_objs, self.plotting_kwargs):
x, y = gs.local_coverage(feature, **self.local_coverage_kwargs)
ax.plot(x, y, **kwargs)
ax.axis('tight')
return feature |
<SYSTEM_TASK:>
Add 2 panels to the figure, top for signal and bottom for gene models
<END_TASK>
<USER_TASK:>
Description:
def panels(self):
"""
Add 2 panels to the figure, top for signal and bottom for gene models
""" |
ax1 = self.fig.add_subplot(211)
ax2 = self.fig.add_subplot(212, sharex=ax1)
return (ax2, self.gene_panel), (ax1, self.signal_panel) |
<SYSTEM_TASK:>
Simple example using just the Bar class
<END_TASK>
<USER_TASK:>
Description:
def simple():
"""Simple example using just the Bar class
This example is intended to show usage of the Bar class at the lowest
level.
""" |
MAX_VALUE = 100
# Create our test progress bar
bar = Bar(max_value=MAX_VALUE, fallback=True)
bar.cursor.clear_lines(2)
# Before beginning to draw our bars, we save the position
# of our cursor so we can restore back to this position before writing
# the next time.
bar.cursor.save()
for i in range(MAX_VALUE + 1):
sleep(0.1 * random.random())
# We restore the cursor to saved position before writing
bar.cursor.restore()
# Now we draw the bar
bar.draw(value=i) |
<SYSTEM_TASK:>
Example showing tree progress view
<END_TASK>
<USER_TASK:>
Description:
def tree():
"""Example showing tree progress view""" |
#############
# Test data #
#############
# For this example, we're obviously going to be feeding fictitious data
# to ProgressTree, so here it is
leaf_values = [Value(0) for i in range(6)]
bd_defaults = dict(type=Bar, kwargs=dict(max_value=10))
test_d = {
"Warp Jump": {
"1) Prepare fuel": {
"Load Tanks": {
"Tank 1": BarDescriptor(value=leaf_values[0], **bd_defaults),
"Tank 2": BarDescriptor(value=leaf_values[1], **bd_defaults),
},
"Refine tylium ore": BarDescriptor(
value=leaf_values[2], **bd_defaults
),
},
"2) Calculate jump co-ordinates": {
"Resolve common name to co-ordinates": {
"Querying resolution from baseship": BarDescriptor(
value=leaf_values[3], **bd_defaults
),
},
},
"3) Perform jump": {
"Check FTL drive readiness": BarDescriptor(
value=leaf_values[4], **bd_defaults
),
"Juuuuuump!": BarDescriptor(value=leaf_values[5],
**bd_defaults)
}
}
}
# We'll use this function to bump up the leaf values
def incr_value(obj):
for val in leaf_values:
if val.value < 10:
val.value += 1
break
# And this to check if we're to stop drawing
def are_we_done(obj):
return all(val.value == 10 for val in leaf_values)
###################
# The actual code #
###################
# Create blessings.Terminal instance
t = Terminal()
# Initialize a ProgressTree instance
n = ProgressTree(term=t)
# We'll use the make_room method to make sure the terminal
# is filled out with all the room we need
n.make_room(test_d)
while not are_we_done(test_d):
sleep(0.2 * random.random())
# After the cursor position is first saved (in the first draw call)
# this will restore the cursor back to the top so we can draw again
n.cursor.restore()
# We use our incr_value method to bump the fake numbers
incr_value(test_d)
# Actually draw out the bars
n.draw(test_d, BarDescriptor(bd_defaults)) |
<SYSTEM_TASK:>
Plots the mean and 95% ci for the given array on the given axes
<END_TASK>
<USER_TASK:>
Description:
def ci_plot(x, arr, conf=0.95, ax=None, line_kwargs=None, fill_kwargs=None):
"""
Plots the mean and 95% ci for the given array on the given axes
Parameters
----------
x : 1-D array-like
x values for the plot
arr : 2-D array-like
The array to calculate mean and std for
conf : float [.5 - 1]
Confidence interval to use
ax : matplotlib.Axes
The axes object on which to plot
line_kwargs : dict
Additional kwargs passed to Axes.plot
fill_kwargs : dict
        Additional kwargs passed to Axes.fill_between
""" |
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
line_kwargs = line_kwargs or {}
fill_kwargs = fill_kwargs or {}
m, lo, hi = ci(arr, conf)
ax.plot(x, m, **line_kwargs)
ax.fill_between(x, lo, hi, **fill_kwargs)
return ax |
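A quick usage sketch with synthetic data (assumes numpy and matplotlib are installed):
import numpy as np
x = np.linspace(0, 1, 50)
arr = 0.3 * np.random.randn(20, 50) + np.sin(2 * np.pi * x)
ax = ci_plot(
    x, arr, conf=0.95,
    line_kwargs=dict(color='k'),
    fill_kwargs=dict(color='0.8'))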
<SYSTEM_TASK:>
Helper function for adding labels to subsets within a heatmap.
<END_TASK>
<USER_TASK:>
Description:
def add_labels_to_subsets(ax, subset_by, subset_order, text_kwargs=None,
add_hlines=True, hline_kwargs=None):
"""
Helper function for adding labels to subsets within a heatmap.
Assumes that imshow() was called with `subsets` and `subset_order`.
Parameters
----------
ax : matplotlib.Axes
The axes to label. Generally you can use `fig.array_axes` attribute of
the Figure object returned by `metaseq.plotutils.imshow`.
subset_by, subset_order : array, list
See `metaseq.plotutils.imshow()` docstring; these should be the same
`subsets` and `subset_order` that were provided to that function.
""" |
_text_kwargs = dict(transform=ax.get_yaxis_transform())
if text_kwargs:
_text_kwargs.update(text_kwargs)
_hline_kwargs = dict(color='k')
if hline_kwargs:
_hline_kwargs.update(hline_kwargs)
pos = 0
for label in subset_order:
ind = subset_by == label
last_pos = pos
pos += sum(ind)
if add_hlines:
ax.axhline(pos, **_hline_kwargs)
ax.text(
1.1,
last_pos + (pos - last_pos)/2.0,
label,
**_text_kwargs) |
<SYSTEM_TASK:>
Calculate limits for a group of arrays in a flexible manner.
<END_TASK>
<USER_TASK:>
Description:
def calculate_limits(array_dict, method='global', percentiles=None, limit=()):
"""
Calculate limits for a group of arrays in a flexible manner.
Returns a dictionary of calculated (vmin, vmax), with the same keys as
`array_dict`.
Useful for plotting heatmaps of multiple datasets, and the vmin/vmax values
of the colormaps need to be matched across all (or a subset) of heatmaps.
Parameters
----------
array_dict : dict of np.arrays
method : {'global', 'independent', callable}
If method="global", then use the global min/max values across all
arrays in array_dict. If method="independent", then each array will
    have its own min/max calculated. If a callable, then it will be used to
group the keys of `array_dict`, and each group will have its own
group-wise min/max calculated.
percentiles : None or list
If not None, a list of (lower, upper) percentiles in the range [0,100].
""" |
if percentiles is not None:
for percentile in percentiles:
if not 0 <= percentile <= 100:
                raise ValueError(
                    "percentile (%s) not between [0, 100]" % percentile)
if method == 'global':
all_arrays = np.concatenate(
            [i.ravel() for i in array_dict.values()]
)
if percentiles:
vmin = mlab.prctile(
all_arrays, percentiles[0])
vmax = mlab.prctile(
all_arrays, percentiles[1])
else:
vmin = all_arrays.min()
vmax = all_arrays.max()
d = dict([(i, (vmin, vmax)) for i in array_dict.keys()])
elif method == 'independent':
d = {}
        for k, v in array_dict.items():
d[k] = (v.min(), v.max())
elif hasattr(method, '__call__'):
d = {}
sorted_keys = sorted(array_dict.keys(), key=method)
for group, keys in itertools.groupby(sorted_keys, method):
keys = list(keys)
all_arrays = np.concatenate([array_dict[i] for i in keys])
if percentiles:
vmin = mlab.prctile(
all_arrays, percentiles[0])
vmax = mlab.prctile(
all_arrays, percentiles[1])
else:
vmin = all_arrays.min()
vmax = all_arrays.max()
for key in keys:
d[key] = (vmin, vmax)
return d |
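Usage sketch with hypothetical arrays; with method='global' every entry shares one (vmin, vmax) pair:
import numpy as np
arrays = {
    'treatment': np.random.randn(10, 10),
    'control': 5 * np.random.randn(10, 10),
}
limits = calculate_limits(arrays, method='global')
assert limits['treatment'] == limits['control']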
<SYSTEM_TASK:>
Column-wise confidence interval.
<END_TASK>
<USER_TASK:>
Description:
def ci(arr, conf=0.95):
"""
Column-wise confidence interval.
Parameters
----------
arr : array-like
conf : float
Confidence interval
Returns
-------
m : array
column-wise mean
lower : array
lower column-wise confidence bound
upper : array
upper column-wise confidence bound
""" |
m = arr.mean(axis=0)
n = len(arr)
se = arr.std(axis=0) / np.sqrt(n)
    h = se * stats.t.ppf((1 + conf) / 2., n - 1)
return m, m - h, m + h |
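For example, column-wise bounds for 100 samples of a 5-dimensional measurement (assumes numpy and scipy.stats are imported as in this module):
import numpy as np
arr = 2.0 + np.random.randn(100, 5)
m, lo, hi = ci(arr, conf=0.95)
# m, lo and hi each have shape (5,), with lo < m < hi column-wise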
<SYSTEM_TASK:>
Uses a log scale but with negative numbers.
<END_TASK>
<USER_TASK:>
Description:
def nice_log(x):
"""
Uses a log scale but with negative numbers.
:param x: NumPy array
""" |
neg = x < 0
xi = np.log2(np.abs(x) + 1)
xi[neg] = -xi[neg]
return xi |
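For example, the transform is symmetric about zero and maps zero to zero:
import numpy as np
nice_log(np.array([-7.0, 0.0, 7.0]))
# -> array([-3., 0., 3.]), since log2(7 + 1) == 3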
<SYSTEM_TASK:>
Returns adjusted TIP p-values for a particular `alpha`.
<END_TASK>
<USER_TASK:>
Description:
def tip_fdr(a, alpha=0.05):
"""
Returns adjusted TIP p-values for a particular `alpha`.
(see :func:`tip_zscores` for more info)
:param a: NumPy array, where each row is the signal for a feature
:param alpha: False discovery rate
""" |
zscores = tip_zscores(a)
pvals = stats.norm.pdf(zscores)
rejected, fdrs = fdrcorrection(pvals)
return fdrs |
<SYSTEM_TASK:>
Transform `x` and `y` to a log scale while dealing with zeros.
<END_TASK>
<USER_TASK:>
Description:
def prepare_logged(x, y):
"""
Transform `x` and `y` to a log scale while dealing with zeros.
This function scales `x` and `y` such that the points that are zero in one
array are set to the min of the other array.
When plotting expression data, frequently one sample will have reads in
a particular feature but the other sample will not. Expression data also
tends to look better on a log scale, but log(0) is undefined and therefore
cannot be shown on a plot. This function allows these points to be shown,
piled up along one side of the plot.
:param x,y: NumPy arrays
""" |
xi = np.log2(x)
yi = np.log2(y)
xv = np.isfinite(xi)
yv = np.isfinite(yi)
global_min = min(xi[xv].min(), yi[yv].min())
global_max = max(xi[xv].max(), yi[yv].max())
xi[~xv] = global_min
yi[~yv] = global_min
return xi, yi |
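A small worked example; the zero in each array is pinned to the global finite minimum:
import numpy as np
xi, yi = prepare_logged(np.array([0.0, 2.0, 8.0]), np.array([4.0, 0.0, 8.0]))
# xi -> [1., 1., 3.] and yi -> [2., 1., 3.]; the global finite min is log2(2) == 1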
<SYSTEM_TASK:>
Update a copy of dest with source. If `keys` is a list, then only update
<END_TASK>
<USER_TASK:>
Description:
def _updatecopy(orig, update_with, keys=None, override=False):
"""
Update a copy of dest with source. If `keys` is a list, then only update
with those keys.
""" |
d = orig.copy()
if keys is None:
keys = update_with.keys()
for k in keys:
if k in update_with:
if k in d and not override:
continue
d[k] = update_with[k]
return d |
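For example, with keys restricting which entries may be copied and override left at False:
base = dict(color='k', lw=1)
merged = _updatecopy(base, dict(color='r', alpha=0.5), keys=['color', 'alpha'])
# -> {'color': 'k', 'lw': 1, 'alpha': 0.5}; 'color' is kept because override=False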
<SYSTEM_TASK:>
Adds a new scatter to self.scatter_ax as well as marginal histograms
<END_TASK>
<USER_TASK:>
Description:
def append(self, x, y, scatter_kwargs, hist_kwargs=None, xhist_kwargs=None,
yhist_kwargs=None, num_ticks=3, labels=None, hist_share=False,
marginal_histograms=True):
"""
Adds a new scatter to self.scatter_ax as well as marginal histograms
    for the same data, borrowing additional room from the axes.
Parameters
----------
x, y : array-like
Data to be plotted
scatter_kwargs : dict
Keyword arguments that are passed directly to scatter().
hist_kwargs : dict
Keyword arguments that are passed directly to hist(), for both the
top and side histograms.
xhist_kwargs, yhist_kwargs : dict
Additional, margin-specific kwargs for the x or y histograms
respectively. These are used to update `hist_kwargs`
num_ticks : int
How many tick marks to use in each histogram's y-axis
labels : array-like
Optional NumPy array of labels that will be set on the collection
so that they can be accessed by a callback function.
hist_share : bool
If True, then all histograms will share the same frequency axes.
Useful for showing relative heights if you don't want to use the
hist_kwarg `normed=True`
marginal_histograms : bool
Set to False in order to disable marginal histograms and just use
as a normal scatterplot.
""" |
scatter_kwargs = scatter_kwargs or {}
hist_kwargs = hist_kwargs or {}
xhist_kwargs = xhist_kwargs or {}
yhist_kwargs = yhist_kwargs or {}
yhist_kwargs.update(dict(orientation='horizontal'))
# Plot the scatter
coll = self.scatter_ax.scatter(x, y, **scatter_kwargs)
coll.labels = labels
if not marginal_histograms:
return
xhk = _updatecopy(hist_kwargs, xhist_kwargs)
yhk = _updatecopy(hist_kwargs, yhist_kwargs)
axhistx = self.divider.append_axes(
'top', size=self.hist_size,
pad=self.pad, sharex=self.scatter_ax, sharey=self.xfirst_ax)
axhisty = self.divider.append_axes(
'right', size=self.hist_size,
pad=self.pad, sharey=self.scatter_ax, sharex=self.yfirst_ax)
axhistx.yaxis.set_major_locator(
MaxNLocator(nbins=num_ticks, prune='both'))
axhisty.xaxis.set_major_locator(
MaxNLocator(nbins=num_ticks, prune='both'))
if not self.xfirst_ax and hist_share:
self.xfirst_ax = axhistx
if not self.yfirst_ax and hist_share:
self.yfirst_ax = axhisty
# Keep track of which axes are which, because looking into fig.axes
# list will get awkward....
self.top_hists.append(axhistx)
self.right_hists.append(axhisty)
# Scatter will deal with NaN, but hist will not. So clean the data
# here.
hx = _clean(x)
hy = _clean(y)
self.hxs.append(hx)
self.hys.append(hy)
# Only plot hists if there's valid data
if len(hx) > 0:
if len(hx) == 1:
_xhk = _updatecopy(orig=xhk, update_with=dict(bins=[hx[0], hx[0]]), keys=['bins'])
axhistx.hist(hx, **_xhk)
else:
axhistx.hist(hx, **xhk)
if len(hy) > 0:
if len(hy) == 1:
_yhk = _updatecopy(orig=yhk, update_with=dict(bins=[hy[0], hy[0]]), keys=['bins'])
axhisty.hist(hy, **_yhk)
else:
axhisty.hist(hy, **yhk)
# Turn off unnecessary labels -- for these, use the scatter's axes
# labels
for txt in axhisty.get_yticklabels() + axhistx.get_xticklabels():
txt.set_visible(False)
for txt in axhisty.get_xticklabels():
txt.set_rotation(-90) |
<SYSTEM_TASK:>
Factory function that makes the right class for the file format.
<END_TASK>
<USER_TASK:>
Description:
def genomic_signal(fn, kind):
"""
Factory function that makes the right class for the file format.
Typically you'll only need this function to create a new genomic signal
object.
:param fn: Filename
:param kind:
String. Format of the file; see
metaseq.genomic_signal._registry.keys()
""" |
try:
klass = _registry[kind.lower()]
except KeyError:
raise ValueError(
'No support for %s format, choices are %s'
% (kind, _registry.keys()))
m = klass(fn)
m.kind = kind
return m |
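Usage sketch (the filename is hypothetical, and 'bam' must be a key in _registry; the underlying class may try to open the file on construction):
sig = genomic_signal('reads_chr17.bam', 'bam')
print(sig.kind)   # -> 'bam'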
<SYSTEM_TASK:>
"genome" dictionary ready for pybedtools, based on the BAM header.
<END_TASK>
<USER_TASK:>
Description:
def genome(self):
"""
"genome" dictionary ready for pybedtools, based on the BAM header.
""" |
# This gets the underlying pysam Samfile object
f = self.adapter.fileobj
d = {}
for ref, length in zip(f.references, f.lengths):
d[ref] = (0, length)
return d |
<SYSTEM_TASK:>
Counts total reads in a BAM file.
<END_TASK>
<USER_TASK:>
Description:
def mapped_read_count(self, force=False):
"""
Counts total reads in a BAM file.
        If a file self.fn + '.mmr' exists, then just read the first line of
        that file that doesn't start with a "#". If such a file doesn't exist,
then it will be created with the number of reads as the first and only
line in the file.
The result is also stored in self._readcount so that the time-consuming
part only runs once; use force=True to force re-count.
Parameters
----------
force : bool
If True, then force a re-count; otherwise use cached data if
available.
""" |
# Already run?
if self._readcount and not force:
return self._readcount
if os.path.exists(self.fn + '.mmr') and not force:
for line in open(self.fn + '.mmr'):
if line.startswith('#'):
continue
self._readcount = float(line.strip())
return self._readcount
cmds = ['samtools',
'view',
'-c',
'-F', '0x4',
self.fn]
p = subprocess.Popen(
cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if stderr:
sys.stderr.write('samtools says: %s' % stderr)
return None
mapped_reads = int(stdout)
# write to file so the next time you need the lib size you can access
# it quickly
if not os.path.exists(self.fn + '.mmr'):
fout = open(self.fn + '.mmr', 'w')
fout.write(str(mapped_reads) + '\n')
fout.close()
self._readcount = mapped_reads
return self._readcount |