Code | Summary
---|---|
Please provide a description of the function:def flatten_template_loaders(templates):
for loader in templates:
if not isinstance(loader, string_types):
for subloader in flatten_template_loaders(loader):
yield subloader
else:
yield loader | [
"\n Given a collection of template loaders, unwrap them into one flat iterable.\n\n :param templates: template loaders to unwrap\n :return: template loaders as an iterable of strings.\n :rtype: generator expression\n "
] |
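A minimal, self-contained sketch of the same unwrapping pattern (Python 3's `str` stands in for the six-style `string_types` used above; the sample paths are illustrative):

```python
def flatten(templates):
    """Unwrap arbitrarily nested collections of loader names into a flat stream."""
    for loader in templates:
        if isinstance(loader, str):
            yield loader
        else:
            yield from flatten(loader)


print(list(flatten(['base.html', ('detail.html', ['list.html'])])))
# ['base.html', 'detail.html', 'list.html']
```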
Please provide a description of the function:def template_choices(templates, display_names=None, suffix=False):
# allow for global template names, as well as usage-local ones.
if display_names is None:
display_names = getattr(settings, 'TEMPLATEFINDER_DISPLAY_NAMES', {})
to_space_re = re.compile(r'[^a-zA-Z0-9\-]+')
def fix_display_title(template_path):
if template_path in display_names:
return display_names[template_path]
# take the last part from the template path; works even if there is no /
lastpart = template_path.rpartition('/')[-1]
# take everything to the left of the rightmost . (the file extension)
if suffix:
lastpart_with_suffix = lastpart
return capfirst(lastpart_with_suffix)
else:
lastpart_minus_suffix = lastpart.rpartition('.')[0]
# convert most non-alphanumeric characters into spaces, with the
# exception of hyphens.
lastpart_spaces = to_space_re.sub(' ', lastpart_minus_suffix)
return capfirst(lastpart_spaces)
return ((template, fix_display_title(template)) for template in templates) | [
"\n Given an iterable of `templates`, calculate human-friendly display names\n for each of them, optionally using the `display_names` provided, or a\n global dictionary (`TEMPLATEFINDER_DISPLAY_NAMES`) stored in the Django\n project's settings.\n\n .. note:: As the resulting iterable is a lazy generator, if it needs to be\n consumed more than once, it should be turned into a `set`, `tuple`\n or `list`.\n\n :param list templates: an iterable of template paths, as returned by\n `find_all_templates`\n :param display_names: If given, should be a dictionary where each key\n represents a template path in `templates`, and each\n value is the display text.\n :type display_names: dictionary or None\n :return: an iterable of two-tuples representing value (0) & display text (1)\n :rtype: generator expression\n "
] |
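To illustrate how a display title is derived from a template path without pulling in Django, here is a hedged, self-contained sketch; `str.capitalize()` approximates `capfirst()` (the real helper only uppercases the first character) and the example path is made up:

```python
import re

to_space_re = re.compile(r'[^a-zA-Z0-9\-]+')


def display_title(template_path):
    # Last path component, minus the file extension.
    name = template_path.rpartition('/')[-1].rpartition('.')[0]
    # Most non-alphanumeric characters (hyphens excepted) become spaces.
    return to_space_re.sub(' ', name).capitalize()


print(display_title('shop/product_detail.html'))  # 'Product detail'
```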
Please provide a description of the function:def readlines(self, sizehint=None):
wrapped = self.wrapped
try:
readlines = wrapped.readlines
except AttributeError:
lines = []
while 1:
line = wrapped.readline()
if line:
lines.append(line)
else:
break
return lines
return readlines() if sizehint is None else readlines(sizehint) | [
"Reads until EOF using :meth:`readline()`.\n\n :param sizehint: if it's present, instead of reading up to EOF,\n whole lines totalling approximately ``sizehint``\n bytes (or more to accommodate a final whole line)\n :type sizehint: :class:`numbers.Integral`\n :returns: a list containing the lines read\n :rtype: :class:`~typing.List`\\ [:class:`bytes`]\n\n "
] |
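The fallback branch matters when the wrapped object only exposes `readline()`. A self-contained sketch of that loop (the `MinimalReader` class is invented for the demo):

```python
import io


class MinimalReader:
    """A wrapped object that only knows readline(), forcing the fallback path."""

    def __init__(self, data):
        self._buf = io.BytesIO(data)

    def readline(self):
        return self._buf.readline()


def read_all_lines(wrapped):
    lines = []
    while True:
        line = wrapped.readline()
        if not line:  # b'' signals EOF
            break
        lines.append(line)
    return lines


print(read_all_lines(MinimalReader(b'a\nb\nc\n')))  # [b'a\n', b'b\n', b'c\n']
```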
Please provide a description of the function:def seek(self, offset, whence=os.SEEK_SET):
self.wrapped.seek(offset, whence) | [
"Sets the file's current position.\n\n :param offset: the offset to set\n :type offset: :class:`numbers.Integral`\n :param whence: see the docs of :meth:`file.seek()`.\n default is :const:`os.SEEK_SET`\n\n "
] |
Please provide a description of the function:def get_current_context_id():
global get_current_context_id
if greenlet is not None:
if stackless is None:
get_current_context_id = greenlet.getcurrent
return greenlet.getcurrent()
return greenlet.getcurrent(), stackless.getcurrent()
elif stackless is not None:
get_current_context_id = stackless.getcurrent
return stackless.getcurrent()
get_current_context_id = _thread.get_ident
return _thread.get_ident() | [
"Identifis which context it is (greenlet, stackless, or thread).\n\n :returns: the identifier of the current context.\n\n "
] |
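The interesting trick above is that the function rebinds its own module-level name after the first call, so later calls skip the greenlet/stackless feature detection entirely. A self-contained sketch of the pattern using only threads (the greenlet/stackless branches are deliberately omitted):

```python
import _thread


def get_context_id():
    global get_context_id
    # After the first call, point the module-level name straight at the
    # cheap identifier function so feature detection never runs again.
    get_context_id = _thread.get_ident
    return _thread.get_ident()


print(get_context_id())  # goes through the wrapper once
print(get_context_id())  # now calls _thread.get_ident directly
```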
Please provide a description of the function:def store_context(store):
if not isinstance(store, Store):
raise TypeError('store must be an instance of sqlalchemy_imageattach.'
'store.Store, not ' + repr(store))
push_store_context(store)
yield store
pop_store_context() | [
"Sets the new (nested) context of the current image storage::\n\n with store_context(store):\n print current_store\n\n It could be set nestedly as well::\n\n with store_context(store1):\n print current_store # store1\n with store_context(store2):\n print current_store # store2\n print current_store # store1 back\n\n :param store: the image store to set to the :data:`current_store`\n :type store: :class:`~sqlalchemy_imageattach.store.Store`\n\n "
] |
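A hedged usage sketch, assuming the package's `FileSystemStore(path, base_url)` backend and an existing directory; the path and URL are illustrative, not from the source:

```python
from sqlalchemy_imageattach.context import current_store, store_context
from sqlalchemy_imageattach.stores.fs import FileSystemStore

store = FileSystemStore('/var/lib/myapp/images', 'http://img.example.com/')

with store_context(store):
    # Inside the block, APIs that default to current_store (from_file(),
    # locate(), ...) resolve to `store` without passing it explicitly.
    print(current_store)
```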
Please provide a description of the function:def migrate(session, declarative_base, source, destination):
if not isinstance(session, Session):
raise TypeError('session must be an instance of sqlalchemy.orm.'
'session.Session, not ' + repr(session))
elif not isinstance(declarative_base, DeclarativeMeta):
raise TypeError('declarative_base must be an instance of sqlalchemy.'
'ext.declarative.api.DeclarativeMeta, not ' +
repr(declarative_base))
elif not isinstance(source, Store):
raise TypeError('source must be an instance of sqlalchemy_imageattach'
'.store.Store, not ' + repr(source))
elif not isinstance(destination, Store):
raise TypeError('destination must be an instance of '
'sqlalchemy_imageattach.store.Store, not ' +
repr(destination))
classes = set(
cls
for cls in declarative_base._decl_class_registry.values()
if isinstance(cls, type) and issubclass(cls, Image)
)
# FIXME: it's not aware of single table inheritance
@MigrationPlan
def result():
for cls in classes:
for instance in migrate_class(session, cls, source, destination):
yield instance
return result | [
"Migrate all image data from ``source`` storage to ``destination``\n storage. All data in ``source`` storage are *not* deleted.\n\n It does not execute migration by itself alone. You need to\n :meth:`~MigrationPlan.execute()` the plan it returns::\n\n migrate(session, Base, source, destination).execute()\n\n Or iterate it using :keyword:`for` statement::\n\n for i in migrate(session, Base, source, destination):\n # i is an image just done migration\n print(i)\n\n :param session: SQLAlchemy session\n :type session: :class:`sqlalchemy.orm.session.Session`\n :param declarative_base:\n declarative base class created by\n :func:`sqlalchemy.ext.declarative.declarative_base`\n :type declarative_base:\n :class:`sqlalchemy.ext.declarative.api.DeclarativeMeta`\n :param source: the storage to copy image data from\n :type source: :class:`~sqlalchemy_imageattach.store.Store`\n :param destination: the storage to copy image data to\n :type destination: :class:`~sqlalchemy_imageattach.store.Store`\n :returns: iterable migration plan which is not executed yet\n :rtype: :class:`MigrationPlan`\n\n "
] |
Please provide a description of the function:def migrate_class(session, cls, source, destination):
if not isinstance(session, Session):
raise TypeError('session must be an instance of sqlalchemy.orm.'
'session.Session, not ' + repr(session))
elif not isinstance(cls, DeclarativeMeta):
raise TypeError('cls must be an instance of sqlalchemy.'
'ext.declarative.api.DeclarativeMeta, not ' +
repr(cls))
elif not isinstance(source, Store):
raise TypeError('source must be an instance of sqlalchemy_imageattach'
'.store.Store, not ' + repr(source))
elif not isinstance(destination, Store):
raise TypeError('destination must be an instance of '
'sqlalchemy_imageattach.store.Store, not ' +
repr(destination))
@MigrationPlan
def result():
for instance in session.query(cls):
with source.open(instance) as f:
destination.store(instance, f)
yield instance
return result | [
"Migrate all image data of ``cls`` from ``source`` storage to\n ``destination`` storage. All data in ``source`` storage are *not*\n deleted.\n\n It does not execute migration by itself alone. You need to\n :meth:`~MigrationPlan.execute()` the plan it returns::\n\n migrate_class(session, UserPicture, source, destination).execute()\n\n Or iterate it using :keyword:`for` statement::\n\n for i in migrate_class(session, UserPicture, source, destination):\n # i is an image just done migration\n print(i)\n\n :param session: SQLAlchemy session\n :type session: :class:`sqlalchemy.orm.session.Session`\n :param cls: declarative mapper class\n :type cls: :class:`sqlalchemy.ext.declarative.api.DeclarativeMeta`\n :param source: the storage to copy image data from\n :type source: :class:`~sqlalchemy_imageattach.store.Store`\n :param destination: the storage to copy image data to\n :type destination: :class:`~sqlalchemy_imageattach.store.Store`\n :returns: iterable migration plan which is not executed yet\n :rtype: :class:`MigrationPlan`\n\n "
] |
Please provide a description of the function:def execute(self, callback=None):
if callback is None:
for _ in self:
pass
elif not callable(callback):
raise TypeError('callback must be callable, not ' +
repr(callback))
else:
for instance in self:
callback(instance) | [
"Execute the plan. If optional ``callback`` is present,\n it is invoked with an :class:`~sqlalchemy_imageattach.entity.Image`\n instance for every migrated image.\n\n :param callback: an optional callback that takes\n an :class:`~sqlalchemy_imageattach.entity.Image`\n instance. it's called zero or more times\n :type callback: :class:`~typing.Callable`\\ [[:class:`~.entity.Image`],\n :const:`None`]\n\n "
] |
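The callback form is not shown in the surrounding docstrings, so here is a hedged sketch; `session`, `Base`, `old_store`, and `new_store` are placeholder names assumed to exist in the caller's scope:

```python
from sqlalchemy_imageattach.migration import migrate


def report(image):
    # Invoked once per migrated image.
    print('copied', image.object_type, image.object_id, image.width, image.height)


migrate(session, Base, old_store, new_store).execute(callback=report)
```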
Please provide a description of the function:def put_file(self, file, object_type, object_id, width, height, mimetype,
reproducible):
raise NotImplementedError('put_file() has to be implemented') | [
"Puts the ``file`` of the image.\n\n :param file: the image file to put\n :type file: file-like object, :class:`file`\n :param object_type: the object type of the image to put\n e.g. ``'comics.cover'``\n :type object_type: :class:`str`\n :param object_id: the object identifier number of the image to put\n :type object_id: :class:`numbers.Integral`\n :param width: the width of the image to put\n :type width: :class:`numbers.Integral`\n :param height: the height of the image to put\n :type height: :class:`numbers.Integral`\n :param mimetype: the mimetype of the image to put\n e.g. ``'image/jpeg'``\n :type mimetype: :class:`str`\n :param reproducible: :const:`True` only if it's reproducible by\n computing e.g. resized thumbnails.\n :const:`False` if it cannot be reproduced\n e.g. original images\n :type reproducible: :class:`bool`\n\n .. note::\n\n This is an abstract method which has to be implemented\n (overridden) by subclasses.\n\n It's not for consumers but implementations, so consumers\n should use :meth:`store()` method instead of this.\n\n "
] |
Please provide a description of the function:def store(self, image, file):
from .entity import Image
if not isinstance(image, Image):
raise TypeError('image must be a sqlalchemy_imageattach.entity.'
'Image instance, not ' + repr(image))
elif not callable(getattr(file, 'read', None)):
raise TypeError('file must be a readable file-like object that '
'implements read() method, not ' + repr(file))
self.put_file(file, image.object_type, image.object_id,
image.width, image.height, image.mimetype,
not image.original) | [
"Stores the actual data ``file`` of the given ``image``.\n ::\n\n with open(imagefile, 'rb') as f:\n store.store(image, f)\n\n :param image: the image to store its actual data file\n :type image: :class:`sqlalchemy_imageattach.entity.Image`\n :param file: the image file to put\n :type file: file-like object, :class:`file`\n\n "
] |
Please provide a description of the function:def delete(self, image):
from .entity import Image
if not isinstance(image, Image):
raise TypeError('image must be a sqlalchemy_imageattach.entity.'
'Image instance, not ' + repr(image))
self.delete_file(image.object_type, image.object_id,
image.width, image.height, image.mimetype) | [
"Delete the file of the given ``image``.\n\n :param image: the image to delete\n :type image: :class:`sqlalchemy_imageattach.entity.Image`\n\n "
] |
Please provide a description of the function:def open(self, image, use_seek=False):
from .entity import Image
if not isinstance(image, Image):
raise TypeError('image must be a sqlalchemy_imageattach.entity.'
'Image instance, not ' + repr(image))
elif image.object_id is None:
raise TypeError('image.object_id must be set; it is currently '
'None however')
elif not isinstance(image.object_id, numbers.Integral):
raise TypeError('image.object_id must be integer, not ' +
repr(image.object_id))
f = self.get_file(image.object_type, image.object_id,
image.width, image.height, image.mimetype)
for method in 'read', 'readline', 'readlines':
if not callable(getattr(f, method, None)):
raise TypeError(
'{0!r}.get_file() must return file-like object which '
'has {1}() method, not {2!r}'.format(self, method, f)
)
ctxt = (callable(getattr(f, '__enter__', None)) and
callable(getattr(f, '__exit__', None)))
if use_seek:
if not callable(getattr(f, 'seek', None)):
f2 = io.BytesIO()
shutil.copyfileobj(f, f2)
f2.seek(0)
return f2
if ctxt:
return f
return SeekableFileProxy(f)
if ctxt:
return f
return FileProxy(f) | [
"Opens the file-like object of the given ``image``.\n Returned file-like object guarantees:\n\n - context manager protocol\n - :class:`collections.abc.Iterable` protocol\n - :class:`collections.abc.Iterator` protocol\n - :meth:`~io.RawIOBase.read()` method\n - :meth:`~io.IOBase.readline()` method\n - :meth:`~io.IOBase.readlines()` method\n\n To sum up: you definitely can read the file, in :keyword:`with`\n statement and :keyword:`for` loop.\n\n Plus, if ``use_seek`` option is :const:`True`:\n\n - :meth:`~io.IOBase.seek()` method\n - :meth:`~io.IOBase.tell()` method\n\n For example, if you want to make a local copy of\n the image::\n\n import shutil\n\n with store.open(image) as src:\n with open(filename, 'wb') as dst:\n shutil.copyfileobj(src, dst)\n\n :param image: the image to get its file\n :type image: :class:`sqlalchemy_imageattach.entity.Image`\n :param use_seek: whether the file should seekable.\n if :const:`True` it maybe buffered in the memory.\n default is :const:`False`\n :type use_seek: :class:`bool`\n :returns: the file-like object of the image, which is a context\n manager (plus, also seekable only if ``use_seek``\n is :const:`True`)\n :rtype: :class:`file`, :class:`~sqlalchemy_imageattach.file.FileProxy`,\n file-like object\n :raise IOError: when such file doesn't exist\n\n "
] |
Please provide a description of the function:def locate(self, image):
from .entity import Image
if not isinstance(image, Image):
raise TypeError('image must be a sqlalchemy_imageattach.entity.'
'Image instance, not ' + repr(image))
url = self.get_url(image.object_type, image.object_id,
image.width, image.height, image.mimetype)
if '?' in url:
fmt = '{0}&_ts={1}'
else:
fmt = '{0}?_ts={1}'
return fmt.format(url, image.created_at.strftime('%Y%m%d%H%M%S%f')) | [
"Gets the URL of the given ``image``.\n\n :param image: the image to get its url\n :type image: :class:`sqlalchemy_imageattach.entity.Image`\n :returns: the url of the image\n :rtype: :class:`str`\n\n "
] |
Please provide a description of the function:def get_minimum_indent(docstring, ignore_before=1):
indent_re = re.compile(r'^\s*')
indents = [indent_re.match(line).group(0)
for line in docstring.splitlines()[ignore_before:]
if line.strip()]
return min(indents, key=len) if indents else '' | [
"Gets the minimum indent string from the ``docstring``:\n\n >>> get_minimum_indent('Hello')\n ''\n >>> get_minimum_indent('Hello\\n world::\\n yeah')\n ' '\n\n :param docstring: the docstring to find its minimum indent\n :type docstring: :class:`str`\n :param ignore_before: ignore lines before this line.\n usually docstrings which follow :pep:`8`\n have no indent for the first line,\n so its default value is 1\n :type ignore_before: :class:`numbers.Integral`\n :returns: the minimum indent string which consists of only\n whitespaces (tabs and/or spaces)\n :rtype: :class:`str`\n\n "
] |
Please provide a description of the function:def append_docstring(docstring, *lines):
shallowest = get_minimum_indent(docstring)
appender = []
for line in lines:
appender.append('\n')
if line.strip():
appender.append(shallowest)
appender.append(line)
return docstring + ''.join(appender) | [
"Appends the ``docstring`` with given ``lines``::\n\n function.__doc__ = append_docstring(\n function.__doc__,\n '.. note::'\n '',\n ' Appended docstring!'\n )\n\n :param docstring: a docstring to be appended\n :param \\*lines: lines of trailing docstring\n :returns: new docstring which is appended\n :rtype: :class:`str`\n\n "
] |
Please provide a description of the function:def append_docstring_attributes(docstring, locals):
docstring = docstring or ''
for attr, val in locals.items():
doc = val.__doc__
if not doc:
continue
doc = get_minimum_indent(doc) + doc
lines = [' ' + l for l in textwrap.dedent(doc).splitlines()]
docstring = append_docstring(
docstring,
'',
'.. attribute:: ' + attr,
'',
*lines
)
return docstring | [
"Manually appends class' ``docstring`` with its attribute docstrings.\n For example::\n\n class Entity(object):\n # ...\n\n __doc__ = append_docstring_attributes(\n __doc__,\n dict((k, v) for k, v in locals()\n if isinstance(v, MyDescriptor))\n )\n\n :param docstring: class docstring to be appended\n :type docstring: :class:`str`\n :param locals: attributes dict\n :type locals: :class:`~typing.Mapping`\\ [:class:`str`, :class:`object`]\n :returns: appended docstring\n :rtype: :class:`str`\n\n "
] |
Please provide a description of the function:def image_attachment(*args, **kwargs):
if kwargs.get('uselist', False):
kwargs.setdefault('query_class', MultipleImageSet)
else:
kwargs.setdefault('query_class', SingleImageSet)
kwargs['uselist'] = True
kwargs.setdefault('lazy', 'dynamic')
kwargs.setdefault('cascade', 'all, delete-orphan')
return relationship(*args, **kwargs) | [
"The helper function, decorates raw\n :func:`~sqlalchemy.orm.relationship()` function, sepcialized for\n relationships between :class:`Image` subtypes.\n\n It takes the same parameters as :func:`~sqlalchemy.orm.relationship()`.\n\n If ``uselist`` is :const:`True`, it becomes possible to attach multiple\n image sets. In order to attach multiple image sets, image entity types\n must have extra discriminating primary key columns to group each image set.\n\n If ``uselist`` is :const:`False` (which is default), it becomes\n possible to attach only a single image.\n\n :param \\*args: the same arguments as\n :func:`~sqlalchemy.orm.relationship()`\n :param \\*\\*kwargs: the same keyword arguments as\n :func:`~sqlalchemy.orm.relationship()`\n :returns: the relationship property\n :rtype: :class:`sqlalchemy.orm.properties.RelationshipProperty`\n\n .. versionadded:: 1.0.0\n The ``uselist`` parameter.\n\n .. todo::\n\n It currently doesn't support population (eager loading) on\n :func:`image_attachment()` relationships yet.\n\n We seem to need to work something on attribute instrumental\n implementation.\n\n "
] |
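A declarative-model sketch of how `image_attachment()` is typically wired up (adapted from the library's usual pattern; the `User`/`UserPicture` names and columns are illustrative):

```python
from sqlalchemy import Column, ForeignKey, Integer, Unicode
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy_imageattach.entity import Image, image_attachment

Base = declarative_base()


class User(Base):
    """Owner of a single attached picture set."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode, nullable=False)
    picture = image_attachment('UserPicture')


class UserPicture(Base, Image):
    """Image entity; the Image mixin contributes the width/height key columns."""
    __tablename__ = 'user_picture'
    user_id = Column(Integer, ForeignKey('user.id'), primary_key=True)
    user = relationship('User')
```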
Please provide a description of the function:def object_id(self):
pk = self.identity_attributes()
if len(pk) == 1:
pk_value = getattr(self, pk[0])
if isinstance(pk_value, numbers.Integral):
return pk_value
elif isinstance(pk_value, uuid.UUID):
return pk_value.int
raise NotImplementedError('object_id property has to be implemented') | [
"(:class:`numbers.Integral`) The identifier number of the image.\n It uses the primary key if it's integer, but can be overridden,\n and must be implemented when the primary key is not integer or\n composite key.\n\n .. versionchanged:: 1.1.0\n Since 1.1.0, it provides a more default implementation for\n :class:`~uuid.UUID` primary keys. If a primary key is not\n composite and :class:`~uuid.UUID` type, :attr:`object_id\n <sqlalchemy_imageattach.entity.Image.object_id>` for that doesn't\n have to be implemented.\n\n "
] |
Please provide a description of the function:def identity_attributes(cls):
columns = inspect(cls).primary_key
names = [c.name for c in columns if c.name not in ('width', 'height')]
return names | [
"A list of the names of primary key fields.\n\n :returns: A list of the names of primary key fields\n :rtype: :class:`typing.Sequence`\\ [:class:`str`]\n\n .. versionadded:: 1.0.0\n\n "
] |
Please provide a description of the function:def identity_map(self):
pk = self.identity_attributes()
values = {}
for name in pk:
values[name] = getattr(self, name)
return values | [
"(:class:`typing.Mapping`\\ [:class:`str`, :class:`object`])\n A dictionary of the values of primary key fields with their names.\n\n .. versionadded:: 1.0.0\n\n "
] |
Please provide a description of the function:def make_blob(self, store=current_store):
with self.open_file(store) as f:
return f.read() | [
"Gets the byte string of the image from the ``store``.\n\n :param store: the storage which contains the image.\n :data:`~sqlalchemy_imageattach.context.current_store`\n by default\n :type store: :class:`~sqlalchemy_imageattach.store.Store`\n :returns: the binary data of the image\n :rtype: :class:`str`\n\n "
] |
Please provide a description of the function:def open_file(self, store=current_store, use_seek=False):
if not isinstance(store, Store):
raise TypeError('store must be an instance of '
'sqlalchemy_imageattach.store.Store, not ' +
repr(store))
if Session.object_session(self) is None:
try:
file = self.file
except AttributeError:
raise IOError('no stored original image file')
return ReusableFileProxy(file)
return store.open(self, use_seek) | [
"Opens the file-like object which is a context manager\n (that means it can used for :keyword:`with` statement).\n\n If ``use_seek`` is :const:`True` (though :const:`False` by default)\n it guarentees the returned file-like object is also seekable\n (provides :meth:`~file.seek()` method).\n\n :param store: the storage which contains image files.\n :data:`~sqlalchemy_imageattach.context.current_store`\n by default\n :type store: :class:`~sqlalchemy_imageattach.store.Store`\n :returns: the file-like object of the image, which is a context\n manager (plus, also seekable only if ``use_seek``\n is :const:`True`)\n :rtype: :class:`file`,\n :class:`~sqlalchemy_imageattach.file.FileProxy`,\n file-like object\n\n "
] |
Please provide a description of the function:def locate(self, store=current_store):
if not isinstance(store, Store):
raise TypeError('store must be an instance of '
'sqlalchemy_imageattach.store.Store, not ' +
repr(store))
return store.locate(self) | [
"Gets the URL of the image from the ``store``.\n\n :param store: the storage which contains the image.\n :data:`~sqlalchemy_imageattach.context.current_store`\n by default\n :type store: :class:`~sqlalchemy_imageattach.store.Store`\n :returns: the url of the image\n :rtype: :class:`str`\n\n "
] |
Please provide a description of the function:def _mark_image_file_stored(cls, mapper, connection, target):
try:
file_ = target.file
except AttributeError:
raise TypeError('sqlalchemy_imageattach.entity.Image which is '
'to be inserted must have file to store')
try:
try:
store = target.store
except AttributeError:
raise TypeError('sqlalchemy_imageattach.entity.Image which is '
'to be inserted must have store for the file')
store.store(target, file_)
cls._stored_images.add((target, store))
del target.file, target.store
finally:
file_.close() | [
"When the session flushes, stores actual image files into\n the storage. Note that these files could be deleted back\n if the ongoing transaction has done rollback. See also\n :meth:`_delete_image_file()`.\n\n "
] |
Please provide a description of the function:def _mark_image_file_deleted(cls, mapper, connection, target):
cls._deleted_images.add((target, get_current_store())) | [
"When the session flushes, marks images as deleted.\n The files of this marked images will be actually deleted\n in the image storage when the ongoing transaction succeeds.\n If it fails the :attr:`_deleted_images` queue will be just\n empty.\n\n "
] |
Please provide a description of the function:def _images_failed(cls, session, previous_transaction):
for image, store in cls._stored_images:
store.delete(image)
cls._stored_images.clear()
cls._deleted_images.clear() | [
"Deletes the files of :attr:`_stored_images` back and clears\n the :attr:`_stored_images` and :attr:`_deleted_images` set\n when the ongoing transaction has done rollback.\n\n "
] |
Please provide a description of the function:def _images_succeeded(cls, session):
for image, store in cls._deleted_images:
for stored_image, _ in cls._stored_images:
if stored_image.object_type == image.object_type and \
stored_image.object_id == image.object_id and \
stored_image.width == image.width and \
stored_image.height == image.height and \
stored_image.mimetype == image.mimetype:
break
else:
store.delete(image)
cls._stored_images.clear()
cls._deleted_images.clear() | [
"Clears the :attr:`_stored_images` set and deletes actual\n files that are marked as deleted in the storage\n if the ongoing transaction has committed.\n\n "
] |
Please provide a description of the function:def _original_images(self, **kwargs):
def test(image):
if not image.original:
return False
for filter, value in kwargs.items():
if getattr(image, filter) != value:
return False
return True
if Session.object_session(self.instance) is None:
images = []
for image, store in self._stored_images:
if test(image):
images.append(image)
state = instance_state(self.instance)
try:
added = state.committed_state[self.attr.key].added_items
except KeyError:
pass
else:
for image in added:
if test(image):
images.append(image)
if self.session:
for image in self.session.new:
if test(image):
images.append(image)
else:
query = self.filter_by(original=True, **kwargs)
images = query.all()
return images | [
"A list of the original images.\n\n :returns: A list of the original images.\n :rtype: :class:`typing.Sequence`\\ [:class:`Image`]\n "
] |
Please provide a description of the function:def from_raw_file(self, raw_file, store=current_store, size=None,
mimetype=None, original=True, extra_args=None,
extra_kwargs=None):
query = self.query
cls = query.column_descriptions[0]['type']
if not (isinstance(cls, type) and issubclass(cls, Image)):
raise TypeError('the first entity must be a subtype of '
'sqlalchemy_imageattach.entity.Image')
if original and query.session:
if store is current_store:
for existing in query:
test_data = existing.identity_map.copy()
test_data.update(self.identity_map)
if existing.identity_map == test_data:
query.remove(existing)
query.session.flush()
else:
with store_context(store):
for existing in query:
test_data = existing.identity_map.copy()
test_data.update(self.identity_map)
if existing.identity_map == test_data:
query.remove(existing)
query.session.flush()
if size is None or mimetype is None:
with WandImage(file=raw_file) as wand:
size = size or wand.size
mimetype = mimetype or wand.mimetype
if mimetype.startswith('image/x-'):
mimetype = 'image/' + mimetype[8:]
if extra_kwargs is None:
extra_kwargs = {}
extra_kwargs.update(self.identity_map)
if extra_args is None:
extra_args = ()
image = cls(size=size, mimetype=mimetype, original=original,
*extra_args, **extra_kwargs)
raw_file.seek(0)
image.file = raw_file
image.store = store
query.append(image)
return image | [
"Similar to :meth:`from_file()` except it's lower than that.\n It assumes that ``raw_file`` is readable and seekable while\n :meth:`from_file()` only assumes the file is readable.\n Also it doesn't make any in-memory buffer while\n :meth:`from_file()` always makes an in-memory buffer and copy\n the file into the buffer.\n\n If ``size`` and ``mimetype`` are passed, it won't try to read\n image and will use these values instead.\n\n It's used for implementing :meth:`from_file()` and\n :meth:`from_blob()` methods that are higher than it.\n\n :param raw_file: the seekable and readable file of the image\n :type raw_file: file-like object, :class:`file`\n :param store: the storage to store the file.\n :data:`~sqlalchemy_imageattach.context.current_store`\n by default\n :type store: :class:`~sqlalchemy_imageattach.store.Store`\n :param size: an optional size of the image.\n automatically detected if it's omitted\n :type size: :class:`tuple`\n :param mimetype: an optional mimetype of the image.\n automatically detected if it's omitted\n :type mimetype: :class:`str`\n :param original: an optional flag which represents whether\n it is an original image or not.\n defualt is :const:`True` (meaning original)\n :type original: :class:`bool`\n :param extra_args: additional arguments to pass to the model's\n constructor.\n :type extra_args: :class:`collections.abc.Sequence`\n :param extra_kwargs: additional keyword arguments to pass to the\n model's constructor.\n :type extra_kwargs: :class:`typing.Mapping`\\ [:class:`str`,\n :class:`object`]\n :returns: the created image instance\n :rtype: :class:`Image`\n\n .. versionadded:: 1.0.0\n The ``extra_args`` and ``extra_kwargs`` options.\n\n "
] |
Please provide a description of the function:def from_blob(self, blob, store=current_store,
extra_args=None, extra_kwargs=None):
data = io.BytesIO(blob)
return self.from_raw_file(data, store, original=True,
extra_args=extra_args,
extra_kwargs=extra_kwargs) | [
"Stores the ``blob`` (byte string) for the image\n into the ``store``.\n\n :param blob: the byte string for the image\n :type blob: :class:`str`\n :param store: the storage to store the image data.\n :data:`~sqlalchemy_imageattach.context.current_store`\n by default\n :type store: :class:`~sqlalchemy_imageattach.store.Store`\n :param extra_args: additional arguments to pass to the model's\n constructor.\n :type extra_args: :class:`collections.abc.Sequence`\n :param extra_kwargs: additional keyword arguments to pass to the\n model's constructor.\n :type extra_kwargs: :class:`typing.Mapping`\\ [:class:`str`,\n :class:`object`]\n :returns: the created image instance\n :rtype: :class:`Image`\n\n .. versionadded:: 1.0.0\n The ``extra_args`` and ``extra_kwargs`` options.\n\n "
] |
Please provide a description of the function:def from_file(self, file, store=current_store,
extra_args=None, extra_kwargs=None):
if isinstance(file, cgi.FieldStorage):
file = file.file
data = io.BytesIO()
shutil.copyfileobj(file, data)
data.seek(0)
return self.from_raw_file(data, store, original=True,
extra_args=extra_args,
extra_kwargs=extra_kwargs) | [
"Stores the ``file`` for the image into the ``store``.\n\n :param file: the readable file of the image\n :type file: file-like object, :class:`file`\n :param store: the storage to store the file.\n :data:`~sqlalchemy_imageattach.context.current_store`\n by default\n :type store: :class:`~sqlalchemy_imageattach.store.Store`\n :param extra_args: additional arguments to pass to the model's\n constructor.\n :type extra_args: :class:`collections.abc.Sequence`\n :param extra_kwargs: additional keyword arguments to pass to the\n model's constructor.\n :type extra_kwargs: :class:`typing.Mapping`\\ [:class:`str`,\n :class:`object`]\n :returns: the created image instance\n :rtype: :class:`Image`\n\n .. versionadded:: 1.0.0\n The ``extra_args`` and ``extra_kwargs`` options.\n\n "
] |
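A hedged end-to-end sketch of attaching an original image, assuming the `User`/`UserPicture` models from the earlier declarative sketch, an open SQLAlchemy `session`, and a configured `store`:

```python
from sqlalchemy_imageattach.context import store_context

with store_context(store):
    user = session.query(User).first()
    with open('portrait.jpg', 'rb') as f:   # file name is illustrative
        user.picture.from_file(f)
    session.commit()
```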
Please provide a description of the function:def generate_thumbnail(self, ratio=None, width=None, height=None,
filter='undefined', store=current_store,
_preprocess_image=None, _postprocess_image=None):
params = ratio, width, height
param_count = sum(p is not None for p in params)
if not param_count:
raise TypeError('pass an argument ratio, width, or height')
elif param_count > 1:
raise TypeError('pass only one argument in ratio, width, or '
'height; these parameters are exclusive for '
'each other')
query = self.query
transient = Session.object_session(query.instance) is None
state = instance_state(query.instance)
try:
added = state.committed_state[query.attr.key].added_items
except KeyError:
added = []
if width is not None:
if not isinstance(width, numbers.Integral):
raise TypeError('width must be integer, not ' + repr(width))
elif width < 1:
raise ValueError('width must be natural number, not ' +
repr(width))
# find the same-but-already-generated thumbnail
for image in added:
if image.width == width:
return image
if not transient:
q = query.filter_by(width=width)
try:
return q.one()
except NoResultFound:
pass
def height(sz):
return sz[1] * (width / sz[0])
elif height is not None:
if not isinstance(height, numbers.Integral):
raise TypeError('height must be integer, not ' + repr(height))
elif height < 1:
raise ValueError('height must be natural number, not ' +
repr(height))
# find the same-but-already-generated thumbnail
for image in added:
if image.height == height:
return image
if not transient:
q = query.filter_by(height=height)
try:
return q.one()
except NoResultFound:
pass
def width(sz):
return sz[0] * (height / sz[1])
elif ratio is not None:
if not isinstance(ratio, numbers.Real):
raise TypeError('ratio must be an instance of numbers.Real, '
'not ' + repr(ratio))
def width(sz):
return sz[0] * ratio
def height(sz):
return sz[1] * ratio
data = io.BytesIO()
image = self.require_original()
with image.open_file(store=store) as f:
if _preprocess_image is None:
img = WandImage(file=f)
else:
with WandImage(file=f) as img:
img = _preprocess_image(img)
with img:
if img.mimetype in VECTOR_TYPES:
img.format = 'png'
original_size = img.size
if callable(width):
width = width(original_size)
if callable(height):
height = height(original_size)
width = int(width)
height = int(height)
# find the same-but-already-generated thumbnail
for image in added:
if image.width == width and image.height == height:
return image
if not transient:
q = query.filter_by(width=width, height=height)
try:
return q.one()
except NoResultFound:
pass
if len(img.sequence) > 1:
img_ctx = img.sequence[0].clone()
img_ctx.resize(width, height, filter=filter)
img_ctx.strip()
else:
img_ctx = NoopContext(img)
with img_ctx as single_img:
single_img.resize(width, height, filter=filter)
single_img.strip()
if _postprocess_image is None:
mimetype = img.mimetype
single_img.save(file=data)
else:
with _postprocess_image(img) as img:
mimetype = img.mimetype
single_img.save(file=data)
return self.from_raw_file(data, store,
size=(width, height),
mimetype=mimetype,
original=False) | [
"Resizes the :attr:`original` (scales up or down) and\n then store the resized thumbnail into the ``store``.\n\n :param ratio: resize by its ratio. if it's greater than 1\n it scales up, and if it's less than 1 it scales\n down. exclusive for ``width`` and ``height``\n parameters\n :type ratio: :class:`numbers.Real`\n :param width: resize by its width. exclusive for ``ratio``\n and ``height`` parameters\n :type width: :class:`numbers.Integral`\n :param height: resize by its height. exclusive for ``ratio``\n and ``width`` parameters\n :type height: :class:`numbers.Integral`\n :param filter: a filter type to use for resizing. choose one in\n :const:`wand.image.FILTER_TYPES`. default is\n ``'undefined'`` which means ImageMagick will try\n to guess best one to use\n :type filter: :class:`str`, :class:`numbers.Integral`\n :param store: the storage to store the resized image file.\n :data:`~sqlalchemy_imageattach.context.current_store`\n by default\n :type store: :class:`~sqlalchemy_imageattach.store.Store`\n :param _preprocess_image: internal-use only option for preprocessing\n original image before resizing\n :type _preprocess_image:\n :class:`typing.Callable`\\ [[:class:`wand.image.Image`],\n :class:`wand.image.Image`]\n :param _postprocess_image: internal-use only option for preprocessing\n original image before resizing\n :type _postprocess_image:\n :class:`typing.Callable`\\ [[:class:`wand.image.Image`],\n :class:`wand.image.Image`]\n :returns: the resized thumbnail image. it might be an already\n existing image if the same size already exists\n :rtype: :class:`Image`\n :raise IOError: when there's no :attr:`original` image yet\n\n "
] |
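A hedged thumbnail sketch continuing the same assumed setup; note that exactly one of `ratio`, `width`, or `height` may be passed:

```python
from sqlalchemy_imageattach.context import store_context

with store_context(store):
    thumbnail = user.picture.generate_thumbnail(width=320)
    session.commit()
    print(thumbnail.width, thumbnail.height)
    # The same thumbnail can be looked up again later by size:
    print(user.picture.find_thumbnail(width=320).locate())
```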
Please provide a description of the function:def original(self):
images = self.query._original_images(**self.identity_map)
if images:
return images[0] | [
"(:class:`Image`) The original image. It could be :const:`None`\n if there are no stored images yet.\n\n "
] |
Please provide a description of the function:def find_thumbnail(self, width=None, height=None):
if width is None and height is None:
raise TypeError('required width and/or height')
q = self
if width is not None:
q = q.filter_by(width=width)
if height is not None:
q = q.filter_by(height=height)
try:
return q.one()
except NoResultFound:
if width is not None and height is not None:
msg = 'size: ' + repr((width, height))
elif width is not None:
msg = 'width: ' + repr(width)
else:
msg = 'height: ' + repr(height)
raise NoResultFound('no thumbnail image of such ' + msg) | [
"Finds the thumbnail of the image with the given ``width``\n and/or ``height``.\n\n :param width: the thumbnail width\n :type width: :class:`numbers.Integral`\n :param height: the thumbnail height\n :type height: :class:`numbers.Integral`\n :returns: the thumbnail image\n :rtype: :class:`Image`\n :raises sqlalchemy.orm.exc.NoResultFound:\n when there's no image of such size\n\n "
] |
Please provide a description of the function:def open_file(self, store=current_store, use_seek=False):
original = self.require_original()
return original.open_file(store, use_seek) | [
"The shorthand of :meth:`~Image.open_file()` for\n the :attr:`original`.\n\n :param store: the storage which contains the image files\n :data:`~sqlalchemy_imageattach.context.current_store`\n by default\n :type store: :class:`~sqlalchemy_imageattach.store.Store`\n :param use_seek: whether the file should seekable.\n if :const:`True` it maybe buffered in the memory.\n default is :const:`False`\n :type use_seek: :class:`bool`\n :returns: the file-like object of the image, which is a context\n manager (plus, also seekable only if ``use_seek``\n is :const:`True`)\n :rtype: :class:`file`,\n :class:`~sqlalchemy_imageattach.file.FileProxy`,\n file-like object\n\n "
] |
Please provide a description of the function:def image_sets(self):
images = self._original_images()
for image in images:
yield ImageSubset(self, **image.identity_map) | [
"(:class:`typing.Iterable`\\ [:class:`ImageSubset`]) The set of\n attached image sets.\n\n "
] |
Please provide a description of the function:def wsgi_middleware(self, app, cors=False):
_app = StaticServerMiddleware(app, '/' + self.prefix, self.path,
cors=self.cors)
def app(environ, start_response):
if not hasattr(self, 'host_url'):
self.host_url = (environ['wsgi.url_scheme'] + '://' +
environ['HTTP_HOST'] + '/')
return _app(environ, start_response)
return app | [
"WSGI middlewares that wraps the given ``app`` and serves\n actual image files. ::\n\n fs_store = HttpExposedFileSystemStore('userimages', 'images/')\n app = fs_store.wsgi_middleware(app)\n\n :param app: the wsgi app to wrap\n :type app: :class:`~typing.Callable`\\ [[],\n :class:`~typing.Iterable`\\ [:class:`bytes`]]\n :returns: the another wsgi app that wraps ``app``\n :rtype: :class:`StaticServerMiddleware`\n\n "
] |
Please provide a description of the function:def clear_database(self) -> None:
self._next_entity_id = 0
self._dead_entities.clear()
self._components.clear()
self._entities.clear()
self.clear_cache() | [
"Remove all Entities and Components from the World."
] |
Please provide a description of the function:def add_processor(self, processor_instance: Processor, priority=0) -> None:
assert issubclass(processor_instance.__class__, Processor)
processor_instance.priority = priority
processor_instance.world = self
self._processors.append(processor_instance)
self._processors.sort(key=lambda proc: proc.priority, reverse=True) | [
"Add a Processor instance to the World.\n\n :param processor_instance: An instance of a Processor,\n subclassed from the Processor class\n :param priority: A higher number is processed first.\n "
] |
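A hedged esper sketch showing a Processor subclass being registered; the `Position`/`Velocity` component classes and the `dt` argument are invented for the example:

```python
import esper


class Position:
    def __init__(self, x=0.0, y=0.0):
        self.x, self.y = x, y


class Velocity:
    def __init__(self, dx=0.0, dy=0.0):
        self.dx, self.dy = dx, dy


class MovementProcessor(esper.Processor):
    def process(self, dt):
        # Iterate every Entity that has both a Position and a Velocity.
        for ent, (pos, vel) in self.world.get_components(Position, Velocity):
            pos.x += vel.dx * dt
            pos.y += vel.dy * dt


world = esper.World()
world.add_processor(MovementProcessor(), priority=5)
```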
Please provide a description of the function:def remove_processor(self, processor_type: Processor) -> None:
for processor in self._processors:
if type(processor) == processor_type:
processor.world = None
self._processors.remove(processor) | [
"Remove a Processor from the World, by type.\n\n :param processor_type: The class type of the Processor to remove.\n "
] |
Please provide a description of the function:def get_processor(self, processor_type: Type[P]) -> P:
for processor in self._processors:
if type(processor) == processor_type:
return processor | [
"Get a Processor instance, by type.\n\n This method returns a Processor instance by type. This could be\n useful in certain situations, such as wanting to call a method on a\n Processor, from within another Processor.\n\n :param processor_type: The type of the Processor you wish to retrieve.\n :return: A Processor instance that has previously been added to the World.\n "
] |
Please provide a description of the function:def create_entity(self, *components) -> int:
self._next_entity_id += 1
# TODO: duplicate add_component code here for performance
for component in components:
self.add_component(self._next_entity_id, component)
# self.clear_cache()
return self._next_entity_id | [
"Create a new Entity.\n\n This method returns an Entity ID, which is just a plain integer.\n You can optionally pass one or more Component instances to be\n assigned to the Entity.\n\n :param components: Optional components to be assigned to the\n entity on creation.\n :return: The next Entity ID in sequence.\n "
] |
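Continuing the sketch above, creating Entities and attaching/querying components (Entity IDs are plain integers):

```python
player = world.create_entity(Position(10, 10), Velocity(1, 0))
enemy = world.create_entity()                    # components can be added later
world.add_component(enemy, Position(50, 50))

print(world.component_for_entity(player, Position).x)   # 10
print(world.has_component(enemy, Velocity))              # False
```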
Please provide a description of the function:def delete_entity(self, entity: int, immediate=False) -> None:
if immediate:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self.clear_cache()
else:
self._dead_entities.add(entity) | [
"Delete an Entity from the World.\n\n Delete an Entity and all of it's assigned Component instances from\n the world. By default, Entity deletion is delayed until the next call\n to *World.process*. You can request immediate deletion, however, by\n passing the \"immediate=True\" parameter. This should generally not be\n done during Entity iteration (calls to World.get_component/s).\n\n Raises a KeyError if the given entity does not exist in the database.\n :param entity: The Entity ID you wish to delete.\n :param immediate: If True, delete the Entity immediately.\n "
] |
Please provide a description of the function:def component_for_entity(self, entity: int, component_type: Type[C]) -> C:
return self._entities[entity][component_type] | [
"Retrieve a Component instance for a specific Entity.\n\n Retrieve a Component instance for a specific Entity. In some cases,\n it may be necessary to access a specific Component instance.\n For example: directly modifying a Component to handle user input.\n\n Raises a KeyError if the given Entity and Component do not exist.\n :param entity: The Entity ID to retrieve the Component for.\n :param component_type: The Component instance you wish to retrieve.\n :return: The Component instance requested for the given Entity ID.\n "
] |
Please provide a description of the function:def components_for_entity(self, entity: int) -> Tuple[C, ...]:
return tuple(self._entities[entity].values()) | [
"Retrieve all Components for a specific Entity, as a Tuple.\n\n Retrieve all Components for a specific Entity. The method is probably\n not appropriate to use in your Processors, but might be useful for\n saving state, or passing specific Components between World instances.\n Unlike most other methods, this returns all of the Components as a\n Tuple in one batch, instead of returning a Generator for iteration.\n\n Raises a KeyError if the given entity does not exist in the database.\n :param entity: The Entity ID to retrieve the Components for.\n :return: A tuple of all Component instances that have been\n assigned to the passed Entity ID.\n "
] |
Please provide a description of the function:def has_component(self, entity: int, component_type: Any) -> bool:
return component_type in self._entities[entity] | [
"Check if a specific Entity has a Component of a certain type.\n\n :param entity: The Entity you are querying.\n :param component_type: The type of Component to check for.\n :return: True if the Entity has a Component of this type,\n otherwise False\n "
] |
Please provide a description of the function:def add_component(self, entity: int, component_instance: Any) -> None:
component_type = type(component_instance)
if component_type not in self._components:
self._components[component_type] = set()
self._components[component_type].add(entity)
if entity not in self._entities:
self._entities[entity] = {}
self._entities[entity][component_type] = component_instance
self.clear_cache() | [
"Add a new Component instance to an Entity.\n\n Add a Component instance to an Entiy. If a Component of the same type\n is already assigned to the Entity, it will be replaced.\n\n :param entity: The Entity to associate the Component with.\n :param component_instance: A Component instance.\n "
] |
Please provide a description of the function:def remove_component(self, entity: int, component_type: Any) -> int:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity][component_type]
if not self._entities[entity]:
del self._entities[entity]
self.clear_cache()
return entity | [
"Remove a Component instance from an Entity, by type.\n\n A Component instance can be removed by providing it's type.\n For example: world.delete_component(enemy_a, Velocity) will remove\n the Velocity instance from the Entity enemy_a.\n\n Raises a KeyError if either the given entity or Component type does\n not exist in the database.\n :param entity: The Entity to remove the Component from.\n :param component_type: The type of the Component to remove.\n "
] |
Please provide a description of the function:def _get_component(self, component_type: Type[C]) -> Iterable[Tuple[int, C]]:
entity_db = self._entities
for entity in self._components.get(component_type, []):
yield entity, entity_db[entity][component_type] | [
"Get an iterator for Entity, Component pairs.\n\n :param component_type: The Component type to retrieve.\n :return: An iterator for (Entity, Component) tuples.\n "
] |
Please provide a description of the function:def _get_components(self, *component_types: Type)-> Iterable[Tuple[int, ...]]:
entity_db = self._entities
comp_db = self._components
try:
for entity in set.intersection(*[comp_db[ct] for ct in component_types]):
yield entity, [entity_db[entity][ct] for ct in component_types]
except KeyError:
pass | [
"Get an iterator for Entity and multiple Component sets.\n\n :param component_types: Two or more Component types.\n :return: An iterator for Entity, (Component1, Component2, etc)\n tuples.\n "
] |
Please provide a description of the function:def try_component(self, entity: int, component_type: Type):
if component_type in self._entities[entity]:
yield self._entities[entity][component_type]
else:
return None | [
"Try to get a single component type for an Entity.\n \n This method will return the requested Component if it exists, but\n will pass silently if it does not. This allows a way to access optional\n Components that may or may not exist.\n\n :param entity: The Entity ID to retrieve the Component for.\n :param component_type: The Component instance you wish to retrieve.\n :return: A iterator containg the single Component instance requested,\n which is empty if the component doesn't exist.\n "
] |
Please provide a description of the function:def _clear_dead_entities(self):
for entity in self._dead_entities:
for component_type in self._entities[entity]:
self._components[component_type].discard(entity)
if not self._components[component_type]:
del self._components[component_type]
del self._entities[entity]
self._dead_entities.clear()
self.clear_cache() | [
"Finalize deletion of any Entities that are marked dead.\n \n In the interest of performance, this method duplicates code from the\n `delete_entity` method. If that method is changed, those changes should\n be duplicated here as well.\n "
] |
Please provide a description of the function:def _timed_process(self, *args, **kwargs):
for processor in self._processors:
start_time = _time.process_time()
processor.process(*args, **kwargs)
process_time = int(round((_time.process_time() - start_time) * 1000, 2))
self.process_times[processor.__class__.__name__] = process_time | [
"Track Processor execution time for benchmarking."
] |
Please provide a description of the function:def process(self, *args, **kwargs):
self._clear_dead_entities()
self._process(*args, **kwargs) | [
"Call the process method on all Processors, in order of their priority.\n\n Call the *process* method on all assigned Processors, respecting their\n optional priority setting. In addition, any Entities that were marked\n for deletion since the last call to *World.process*, will be deleted\n at the start of this method call.\n\n :param args: Optional arguments that will be passed through to the\n *process* method of all Processors.\n "
] |
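And a minimal main loop, continuing the same sketch; the entity deleted without `immediate=True` is only removed at the start of the next `process()` call, and positional arguments are forwarded to every Processor's `process()`:

```python
world.delete_entity(enemy)          # deferred until the next world.process()

for _ in range(3):
    world.process(1 / 60)           # MovementProcessor.process(dt) receives 1/60
```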
Please provide a description of the function:def texture_from_image(renderer, image_name):
soft_surface = ext.load_image(image_name)
texture = SDL_CreateTextureFromSurface(renderer.renderer, soft_surface)
SDL_FreeSurface(soft_surface)
return texture | [
"Create an SDL2 Texture from an image file"
] |
Please provide a description of the function:def setup_tree(ctx, verbose=None, root=None, tree_dir=None, modules_dir=None):
''' Sets up the SDSS tree enviroment '''
print('Setting up the tree')
ctx.run('python bin/setup_tree.py -t {0} -r {1} -m {2}'.format(tree_dir, root, modules_dir)) | [] |
Please provide a description of the function:def set_roots(self, uproot_with=None):
''' Set the roots of the tree in the os environment
Parameters:
uproot_with (str):
A new TREE_DIR path used to override an existing TREE_DIR environment variable
'''
# Check for TREE_DIR
self.treedir = os.environ.get('TREE_DIR', None) if not uproot_with else uproot_with
if not self.treedir:
treefilepath = os.path.dirname(os.path.abspath(__file__))
if 'python/' in treefilepath:
self.treedir = treefilepath.rsplit('/', 2)[0]
else:
self.treedir = treefilepath
self.treedir = treefilepath
os.environ['TREE_DIR'] = self.treedir
# Check sas_base_dir
if 'SAS_BASE_DIR' in os.environ:
self.sasbasedir = os.environ["SAS_BASE_DIR"]
else:
self.sasbasedir = os.path.expanduser('~/sas')
# make the directories
if not os.path.isdir(self.sasbasedir):
os.makedirs(self.sasbasedir) | [] |
Please provide a description of the function:def load_config(self, config=None):
''' loads a config file
Parameters:
config (str):
Optional name of manual config file to load
'''
# Read the config file
cfgname = (config or self.config_name)
cfgname = 'sdsswork' if cfgname is None else cfgname
assert isinstance(cfgname, six.string_types), 'config name must be a string'
config_name = cfgname if cfgname.endswith('.cfg') else '{0}.cfg'.format(cfgname)
self.configfile = os.path.join(self.treedir, 'data', config_name)
assert os.path.isfile(self.configfile) is True, 'configfile {0} must exist in the proper directory'.format(self.configfile)
self._cfg = SafeConfigParser()
try:
self._cfg.read(self.configfile.decode('utf-8'))
except AttributeError:
self._cfg.read(self.configfile)
# create the local tree environment
self.environ = OrderedDict()
self.environ['default'] = self._cfg.defaults()
# set the filesystem envvar to sas_base_dir
self._file_replace = '@FILESYSTEM@'
if self.environ['default']['filesystem'] == self._file_replace:
self.environ['default']['filesystem'] = self.sasbasedir | [] |
Please provide a description of the function:def branch_out(self, limb=None):
''' Set the individual section branches
This adds the various sections of the config file into the
tree environment for access later. Optionally, one can specify a specific
branch. This does not yet load them into the os environment.
Parameters:
limb (str/list):
The name of the section of the config to add into the environ
or a list of strings
'''
# Filter on sections
if not limb:
limbs = self._cfg.sections()
else:
# we must have the general always + secton
limb = limb if isinstance(limb, list) else [limb]
limbs = ['general']
limbs.extend(limb)
# add all limbs into the tree environ
for leaf in limbs:
leaf = leaf if leaf in self._cfg.sections() else leaf.upper()
self.environ[leaf] = OrderedDict()
options = self._cfg.options(leaf)
for opt in options:
if opt in self.environ['default']:
continue
val = self._cfg.get(leaf, opt)
if val.find(self._file_replace) == 0:
val = val.replace(self._file_replace, self.sasbasedir)
self.environ[leaf][opt] = val | [] |
Please provide a description of the function:def add_limbs(self, key=None):
''' Add a new section from the tree into the existing os environment
Parameters:
key (str):
The section name to grab from the environment
'''
self.branch_out(limb=key)
self.add_paths_to_os(key=key) | [] |
Please provide a description of the function:def get_paths(self, key):
''' Retrieve a set of environment paths from the config
Parameters:
key (str):
The section name to grab from the environment
Returns:
self.environ[newkey] (OrderedDict):
An ordered dict containing all of the paths from the
specified section, as key:val = name:path
'''
newkey = key if key in self.environ else key.upper() if key.upper() \
in self.environ else None
if newkey:
return self.environ[newkey]
else:
raise KeyError('Key {0} not found in tree environment'.format(key)) | [] |
Please provide a description of the function:def add_paths_to_os(self, key=None, update=None):
''' Add the paths in tree environ into the os environ
This code goes through the tree environ and checks
for existence in the os environ, then adds them
Parameters:
key (str):
The section name to check against / add
update (bool):
If True, overwrites existing tree environment variables in your
local environment. Default is False.
'''
if key is not None:
allpaths = key if isinstance(key, list) else [key]
else:
allpaths = [k for k in self.environ.keys() if 'default' not in k]
for key in allpaths:
paths = self.get_paths(key)
self.check_paths(paths, update=update) | [] |
Please provide a description of the function:def check_paths(self, paths, update=None):
''' Check if the path is in the os environ, and if not add it
Parameters:
paths (OrderedDict):
An ordered dict containing all of the paths from the
a given section, as key:val = name:path
update (bool):
If True, overwrites existing tree environment variables in your
local environment. Default is False.
'''
# set up the exclusion list
exclude = [] if not self.exclude else self.exclude \
if isinstance(self.exclude, list) else [self.exclude]
# check the path names
for pathname, path in paths.items():
if update and pathname.upper() not in exclude:
os.environ[pathname.upper()] = os.path.normpath(path)
elif pathname.upper() not in os.environ:
os.environ[pathname.upper()] = os.path.normpath(path) | [] |
Please provide a description of the function:def replant_tree(self, config=None, exclude=None):
''' Replant the tree with a different config setup
Parameters:
config (str):
The config name to reload
exclude (list):
A list of environment variables to exclude
from forced updates
'''
# reinitialize a new Tree with a new config
self.__init__(key=self.key, config=config, update=True, exclude=exclude) | [] |
Please provide a description of the function:def print_exception_formatted(type, value, tb):
tbtext = ''.join(traceback.format_exception(type, value, tb))
lexer = get_lexer_by_name('pytb', stripall=True)
formatter = TerminalFormatter()
sys.stderr.write(highlight(tbtext, lexer, formatter)) | [
"A custom hook for printing tracebacks with colours."
] |
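A hedged usage note: the function is meant to be installed as the global exception hook (it relies on the `pygments` package for highlighting), roughly like this:

```python
import sys

# Assumes print_exception_formatted (defined above) and pygments are importable.
sys.excepthook = print_exception_formatted

# From now on, any uncaught exception is rendered as a highlighted traceback
# on stderr, e.g.:
# raise ValueError('boom')
```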
Please provide a description of the function:def colored_formatter(record):
colours = {'info': ('blue', 'normal'),
'debug': ('magenta', 'normal'),
'warning': ('yellow', 'normal'),
'print': ('green', 'normal'),
'error': ('red', 'bold')}
levelname = record.levelname.lower()
if levelname == 'error':
return
if levelname.lower() in colours:
levelname_color = colours[levelname][0]
header = color_text('[{}]: '.format(levelname.upper()), levelname_color)
message = '{0}'.format(record.msg)
warning_category = re.match(r'^(\w+Warning:).*', message)
if warning_category is not None:
warning_category_colour = color_text(warning_category.groups()[0], 'cyan')
message = message.replace(warning_category.groups()[0], warning_category_colour)
sub_level = re.match(r'(\[.+\]:)(.*)', message)
if sub_level is not None:
sub_level_name = color_text(sub_level.groups()[0], 'red')
message = '{}{}'.format(sub_level_name, ''.join(sub_level.groups()[1:]))
# if len(message) > 79:
# tw = TextWrapper()
# tw.width = 79
# tw.subsequent_indent = ' ' * (len(record.levelname) + 2)
# tw.break_on_hyphens = False
# message = '\n'.join(tw.wrap(message))
sys.__stdout__.write('{}{}\n'.format(header, message))
sys.__stdout__.flush()
return | [
"Prints log messages with colours."
] |
Please provide a description of the function:def _catch_exceptions(self, exctype, value, tb):
# Now we log it.
self.error('Uncaught exception', exc_info=(exctype, value, tb))
# First, we print to stdout with some colouring.
print_exception_formatted(exctype, value, tb) | [
"Catches all exceptions and logs them."
] |
Please provide a description of the function:def _set_defaults(self, log_level=logging.INFO, redirect_stdout=False):
# Remove all previous handlers
for handler in self.handlers[:]:
self.removeHandler(handler)
# Set levels
self.setLevel(logging.DEBUG)
# Set up the stdout handler
self.fh = None
self.sh = logging.StreamHandler()
self.sh.emit = colored_formatter
self.addHandler(self.sh)
self.sh.setLevel(log_level)
# warnings.showwarning = self._show_warning
# Redirects all stdout to the logger
if redirect_stdout:
sys.stdout = LoggerStdout(self._print)
# Catches exceptions
sys.excepthook = self._catch_exceptions | [
"Reset logger to its initial state."
] |
Please provide a description of the function:def start_file_logger(self, name, log_file_level=logging.DEBUG, log_file_path='./'):
        log_file_path = pathlib.Path(log_file_path).expanduser() / '{}.log'.format(name)  # pathlib keeps the / and .parent usage below valid
logdir = log_file_path.parent
try:
logdir.mkdir(parents=True, exist_ok=True)
# If the log file exists, backs it up before creating a new file handler
if log_file_path.exists():
strtime = datetime.datetime.utcnow().strftime('%Y-%m-%d_%H:%M:%S')
                shutil.move(str(log_file_path), str(log_file_path) + '.' + strtime)
self.fh = TimedRotatingFileHandler(str(log_file_path), when='midnight', utc=True)
self.fh.suffix = '%Y-%m-%d_%H:%M:%S'
except (IOError, OSError) as ee:
warnings.warn('log file {0!r} could not be opened for writing: '
'{1}'.format(log_file_path, ee), RuntimeWarning)
else:
self.fh.setFormatter(fmt)
self.addHandler(self.fh)
self.fh.setLevel(log_file_level)
self.log_filename = log_file_path | [
"Start file logging."
] |
Please provide a description of the function:def create_index_table(environ, envdir):
''' create an html table
Parameters:
environ (dict):
A tree environment dictionary
envdir (str):
The filepath for the env directory
Returns:
An html table definition string
'''
table_header =
table_footer =
# create table
table = table_header
# loop over the environment
for section, values in environ.items():
if section == 'default':
continue
for tree_name, tree_path in values.items():
skipmsg = 'Skipping {0} for {1}'.format(tree_name, section)
if '_root' in tree_name:
continue
# create the src and target links
src = tree_path
link = os.path.join(envdir, tree_name.upper())
# get the local time of the symlink
try:
stattime = time.strftime('%d-%b-%Y %H:%M', time.localtime(os.stat(src).st_mtime))
except OSError:
print("{0} does not appear to exist, skipping...".format(src))
_remove_link(link)
continue
# skip the sas_base_dir
if section == 'general' and 'sas_base_dir' in tree_name:
print(skipmsg)
continue
# only create symlinks
if section == 'general' and tree_name in ['cas_load', 'staging_data']:
# only create links here if the target exist
if os.path.exists(src):
make_symlink(src, link)
else:
print(skipmsg)
else:
print('Processing {0} for {1}'.format(tree_name, section))
make_symlink(src, link)
# create the table entry
if os.path.exists(link):
table += ' <tr><td><a href="{0}/">{0}/</a></td><td>-</td><td>{1}</td></tr>\n'.format(tree_name.upper(), stattime)
table += table_footer
return table | [
"<table id=\"list\" cellpadding=\"0.1em\" cellspacing=\"0\">\n<colgroup><col width=\"55%\"/><col width=\"20%\"/><col width=\"25%\"/></colgroup>\n<thead>\n <tr><th><a href=\"?C=N&O=A\">File Name</a> <a href=\"?C=N&O=D\"> ↓ </a></th><th><a href=\"?C=S&O=A\">File Size</a> <a href=\"?C=S&O=D\"> ↓ </a></th><th><a href=\"?C=M&O=A\">Date</a> <a href=\"?C=M&O=D\"> ↓ </a></th></tr>\n</thead><tbody>\n <tr><td><a href=\"../\">Parent directory/</a></td><td>-</td><td>-</td></tr>",
"</tbody></table>"
] |
Please provide a description of the function:def create_index_page(environ, defaults, envdir):
''' create the env index html page
Builds the index.html page containing a table of symlinks
to datamodel directories
Parameters:
environ (dict):
A tree environment dictionary
defaults (dict):
The defaults dictionary from environ['default']
envdir (str):
The filepath for the env directory
Returns:
        A string definition of an html page
'''
# header of index file
header =
# footer of index file
footer =
# create index html file
index = header.format(**defaults)
index += create_index_table(environ, envdir)
index += footer.format(**defaults)
return index | [
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\" \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n<html xmlns=\"http://www.w3.org/1999/xhtml\">\n<head><meta name=\"viewport\" content=\"width=device-width\"/><meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\"/><style type=\"text/css\">body,html {{background:#fff;font-family:\"Bitstream Vera Sans\",\"Lucida Grande\",\"Lucida Sans Unicode\",Lucidux,Verdana,Lucida,sans-serif;}}tr:nth-child(even) {{background:#f4f4f4;}}th,td {{padding:0.1em 0.5em;}}th {{text-align:left;font-weight:bold;background:#eee;border-bottom:1px solid #aaa;}}#list {{border:1px solid #aaa;width:100%%;}}a {{color:#a33;}}a:hover {{color:#e33;}}</style>\n<link rel=\"stylesheet\" href=\"{url}/css/sas.css\" type=\"text/css\"/>\n<title>Index of /sas/{name}/env/</title>\n</head><body><h1>Index of /sas/{name}/env/</h1>\n",
"<h3><a href='{url}/sas/'>{location}</a></h3>\n<p>This directory contains links to the contents of\nenvironment variables defined by the tree product, version {name}.\nTo examine the <em>types</em> of files contained in each environment variable\ndirectory, visit <a href=\"/datamodel/files/\">the datamodel.</a></p>\n</body></html>\n"
] |
Please provide a description of the function:def create_env(environ, mirror=None, verbose=None):
''' create the env symlink directory structure
Creates the env folder filled with symlinks to datamodel directories
for a given tree config file.
Parameters:
environ (dict):
A tree environment dictionary
mirror (bool):
If True, use the SAM url location
verbose (bool):
If True, print more information
'''
defaults = environ['default'].copy()
defaults['url'] = "https://data.mirror.sdss.org" if mirror else "https://data.sdss.org"
defaults['location'] = "SDSS-IV Science Archive Mirror (SAM)" if mirror else "SDSS-IV Science Archive Server (SAS)"
if not os.path.exists(environ['general']['sas_root']):
if verbose:
print("{0} doesn't exist, skipping env link creation.".format(environ['general']['sas_root']))
return
if verbose:
print("Found {0}.".format(environ['general']['sas_root']))
# sets and creates envdir
envdir = os.path.join(environ['general']['sas_root'], 'env')
if not os.path.exists(envdir):
os.makedirs(envdir)
if not os.access(envdir, os.W_OK):
return
# create index html
index = create_index_page(environ, defaults, envdir)
# write the index file
indexfile = os.path.join(envdir, 'index.html')
with open(indexfile, 'w') as f:
f.write(index) | [] |
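A sketch of building the env/ symlink directory for a loaded config; it assumes a Tree instance as in the earlier sketch, and create_env simply returns if the sas_root path does not exist:
from tree.tree import Tree

my_tree = Tree(config='dr15')                             # assumed config name
create_env(my_tree.environ, mirror=False, verbose=True)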
Please provide a description of the function:def check_sas_base_dir(root=None):
''' Check for the SAS_BASE_DIR environment variable
Will set the SAS_BASE_DIR in your local environment
        or prompt you to define one if it is undefined
Parameters:
root (str):
Optional override of the SAS_BASE_DIR envvar
'''
sasbasedir = root or os.getenv("SAS_BASE_DIR")
if not sasbasedir:
sasbasedir = input('Enter a path for SAS_BASE_DIR: ')
os.environ['SAS_BASE_DIR'] = sasbasedir | [] |
Please provide a description of the function:def write_header(term='bash', tree_dir=None, name=None):
''' Write proper file header in a given shell format
Parameters:
term (str):
The type of shell header to write, can be "bash", "tsch", or "modules"
tree_dir (str):
The path to this repository
name (str):
The name of the configuration
Returns:
A string header to insert
'''
    assert term in ['bash', 'tsch', 'modules'], 'term must be either bash, tsch, or modules'
product_dir = tree_dir.rstrip('/')
base = 'export' if term == 'bash' else 'setenv'
if term != 'modules':
hdr = .format(name, term, base, product_dir)
else:
hdr = .format(product_dir, name)
return hdr.strip() | [
"# Set up tree/{0} for {1}\n{2} TREE_DIR {3}\n{2} TREE_VER {1}\n{2} PATH $TREE_DIR/bin:$PATH\n{2} PYTHONPATH $TREE_DIR/python:$PYTHONPATH\n ",
"#%Module1.0\nproc ModulesHelp {{ }} {{\n global product version\n puts stderr \"This module adds $product/$version to various paths\"\n}}\nset name tree\nset product tree\nset version {1}\nconflict $product\nmodule-whatis \"Sets up $product/$version in your environment\"\n\nset PRODUCT_DIR {0}\nsetenv [string toupper $product]_DIR $PRODUCT_DIR\nsetenv [string toupper $product]_VER $version\nprepend-path PATH $PRODUCT_DIR/bin\nprepend-path PYTHONPATH $PRODUCT_DIR/python\n\n "
] |
Please provide a description of the function:def write_file(environ, term='bash', out_dir=None, tree_dir=None):
''' Write a tree environment file
    Loops over the tree environ and writes it out to a bash, tsch, or
modules file
Parameters:
environ (dict):
The tree dictionary environment
term (str):
The type of shell header to write, can be "bash", "tsch", or "modules"
tree_dir (str):
The path to this repository
out_dir (str):
The output path to write the files (default is etc/)
'''
# get the proper name, header and file extension
name = environ['default']['name']
header = write_header(term=term, name=name, tree_dir=tree_dir)
exts = {'bash': '.sh', 'tsch': '.csh', 'modules': '.module'}
ext = exts[term]
# shell command
if term == 'bash':
cmd = 'export {0}={1}\n'
else:
cmd = 'setenv {0} {1}\n'
# write the environment config files
filename = os.path.join(out_dir, name + ext)
with open(filename, 'w') as f:
f.write(header + '\n')
for key, values in environ.items():
if key != 'default':
# write separator
f.write('#\n# {0}\n#\n'.format(key))
# write tree names and paths
for tree_name, tree_path in values.items():
f.write(cmd.format(tree_name.upper(), tree_path))
# write default .version file for modules
modules_version = write_version(name)
if term == 'modules' and environ['default']['current']:
version_name = os.path.join(out_dir, '.version')
with open(version_name, 'w') as f:
f.write(modules_version) | [] |
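A sketch of calling write_file directly; the environ layout below (a 'default' block plus sections of name:path pairs) is a hand-built stand-in for a loaded config, and write_version/write_header are assumed to be importable alongside write_file:
environ = {
    'default': {'name': 'dr15', 'current': True},
    'general': {'sas_base_dir': '/sas', 'sas_root': '/sas/dr15'},
}
for shell in ('bash', 'tsch', 'modules'):
    write_file(environ, term=shell, out_dir='/tmp', tree_dir='/opt/sdss/tree')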
Please provide a description of the function:def get_tree(config=None):
''' Get the tree for a given config
Parameters:
config (str):
The name of the tree config to load
Returns:
a Python Tree instance
'''
path = os.path.dirname(os.path.abspath(__file__))
pypath = os.path.realpath(os.path.join(path, '..', 'python'))
if pypath not in sys.path:
sys.path.append(pypath)
os.chdir(pypath)
from tree.tree import Tree
tree = Tree(config=config)
return tree | [] |
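For illustration, loading a config through this helper (the config name is an assumption):
tree = get_tree(config='dr15')             # assumed config name
print(tree.environ['default']['name'])     # the environ sections behave like plain dicts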
Please provide a description of the function:def copy_modules(filespath=None, modules_path=None, verbose=None):
''' Copy over the tree module files into your path '''
# find or define a modules path
if not modules_path:
modulepath = os.getenv("MODULEPATH")
if not modulepath:
modules_path = input('Enter the root path for your module files:')
else:
split_mods = modulepath.split(':')
if len(split_mods) > 1:
if verbose:
print('Multiple module paths found. Finding all that contain a tree directory.')
for mfile in split_mods:
if os.path.exists(os.path.join(mfile, 'tree')):
copy_modules(filespath=filespath, modules_path=mfile, verbose=verbose)
else:
return
else:
modules_path = split_mods[0]
# check for the tree module directory
tree_mod = os.path.join(modules_path, 'tree')
if not os.path.isdir(tree_mod):
os.makedirs(tree_mod)
# copy the modules into the tree
if verbose:
print('Copying modules from etc/ into {0}'.format(tree_mod))
module_files = glob.glob(os.path.join(filespath, '*.module'))
for mfile in module_files:
base = os.path.splitext(os.path.basename(mfile))[0]
tree_out = os.path.join(tree_mod, base)
shutil.copy2(mfile, tree_out)
# copy the default version into the tree
version = os.path.join(filespath, '.version')
if os.path.isfile(version):
shutil.copy2(version, tree_mod) | [] |
Please provide a description of the function:def parse_args():
''' Parse the arguments '''
parser = argparse.ArgumentParser(prog='setup_tree_modules', usage='%(prog)s [opts]')
parser.add_argument('-v', '--verbose', action='store_true', dest='verbose',
help='Print extra information.', default=False)
parser.add_argument('-r', '--root', action='store', dest='root', default=os.getenv('SAS_BASE_DIR'),
help='Override the value of $SAS_BASE_DIR.', metavar='SAS_BASE_DIR')
parser.add_argument('-t', '--treedir', action='store', dest='treedir', default=os.getenv('TREE_DIR'),
help='Override the value of $TREE_DIR.', metavar='TREE_DIR')
parser.add_argument('-m', '--modulesdir', action='store', dest='modulesdir', default=os.getenv('MODULES_DIR'),
help='Your modules directory', metavar='MODULES_DIR')
parser.add_argument('-e', '--env', action='store_true', dest='env',
help='Create tree environment symlinks.', default=False)
parser.add_argument('-i', '--mirror', action='store_true', dest='mirror',
help='Use the mirror site (SAM) instead.')
parser.add_argument('-o', '--only', action='store', dest='only', metavar='[xxx].cfg',
default=None, help='create links for only the specified tree config.')
opts = parser.parse_args()
return opts | [] |
Please provide a description of the function:def _indent(text, level=1):
''' Does a proper indenting for Sphinx rst '''
prefix = ' ' * (4 * level)
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if line.strip() else line)
return ''.join(prefixed_lines()) | [] |
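The helper simply prefixes every non-blank line with four spaces per level, for example:
print(_indent(':widths: 20 50\n:header-rows: 1\n'))
#     :widths: 20 50
#     :header-rows: 1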
Please provide a description of the function:def _format_command(name, envvars, base=None):
''' Creates a list-table directive
for a set of defined environment variables
Parameters:
name (str):
The name of the config section
envvars (dict):
A dictionary of the environment variable definitions from the config
base (str):
The SAS_BASE to remove from the filepaths
Yields:
A string rst-formated list-table directive
'''
yield '.. list-table:: {0}'.format(name)
yield _indent(':widths: 20 50')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Path')
for envvar, path in envvars.items():
tail = path.split(base)[1] if base and base in path else path
tail = envvar.upper() if envvar.upper() == 'SAS_BASE_DIR' else tail
yield _indent('* - {0}'.format(envvar.upper()))
yield _indent(' - {0}'.format(tail))
yield '' | [] |
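A sketch of consuming the generator; the variable names and paths are made up:
lines = list(_format_command('general', {'sas_base_dir': '/sas', 'mangawork': '/sas/mangawork'}, base='/sas'))
# lines[0] == '.. list-table:: general'; the rest are the indented ':widths:' and
# ':header-rows:' options plus one '* - NAME' / ' - path' pair per variable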
Please provide a description of the function:def _generate_section(self, name, config, cfg_section='default', remove_sasbase=False):
# the source name
source_name = name
# Title
section = nodes.section(
'',
nodes.title(text=cfg_section),
ids=[nodes.make_id(cfg_section)],
names=[nodes.fully_normalize_name(cfg_section)])
# Summarize
result = statemachine.ViewList()
base = config['default']['filesystem'] if remove_sasbase else None
lines = _format_command(cfg_section, config[cfg_section], base=base)
for line in lines:
result.append(line, source_name)
self.state.nested_parse(result, 0, section)
return [section] | [
"Generate the relevant Sphinx nodes.\n\n Generates a section for the Tree datamodel. Formats a tree section\n as a list-table directive.\n\n Parameters:\n name (str):\n The name of the config to be documented, e.g. 'sdsswork'\n config (dict):\n The tree dictionary of the loaded config environ\n cfg_section (str):\n The section of the config to load\n remove_sasbase (bool):\n If True, removes the SAS_BASE_DIR from the beginning of each path\n\n Returns:\n A section docutil node\n\n "
] |
Please provide a description of the function:def get_requirements(opts):
''' Get the proper requirements file based on the optional argument '''
if opts.dev:
name = 'requirements_dev.txt'
elif opts.doc:
name = 'requirements_doc.txt'
else:
name = 'requirements.txt'
requirements_file = os.path.join(os.path.dirname(__file__), name)
install_requires = [line.strip().replace('==', '>=') for line in open(requirements_file)
if not line.strip().startswith('#') and line.strip() != '']
return install_requires | [] |
Please provide a description of the function:def remove_args(parser):
''' Remove custom arguments from the parser '''
arguments = []
for action in list(parser._get_optional_actions()):
if '--help' not in action.option_strings:
arguments += action.option_strings
for arg in arguments:
if arg in sys.argv:
sys.argv.remove(arg) | [] |
Please provide a description of the function:def clean(ctx):
ctx.run(f'python setup.py clean')
dist = ROOT.joinpath('dist')
print(f'[clean] Removing {dist}')
if dist.exists():
shutil.rmtree(str(dist)) | [
"Clean previously built package artifacts.\n "
] |
Please provide a description of the function:def _render_log():
config = load_config(ROOT)
definitions = config['types']
fragments, fragment_filenames = find_fragments(
pathlib.Path(config['directory']).absolute(),
config['sections'],
None,
definitions,
)
rendered = render_fragments(
pathlib.Path(config['template']).read_text(encoding='utf-8'),
config['issue_format'],
split_fragments(fragments, definitions),
definitions,
config['underlines'][1:],
)
return rendered | [
"Totally tap into Towncrier internals to get an in-memory result.\n "
] |
Please provide a description of the function:def adjust_name_for_printing(name):
if name is not None:
name2 = name
name = name.replace(" ", "_").replace(".", "_").replace("-", "_m_")
name = name.replace("+", "_p_").replace("!", "_I_")
name = name.replace("**", "_xx_").replace("*", "_x_")
name = name.replace("/", "_l_").replace("@", '_at_')
name = name.replace("(", "_of_").replace(")", "")
if re.match(r'^[a-zA-Z_][a-zA-Z0-9-_]*$', name) is None:
raise NameError("name {} converted to {} cannot be further converted to valid python variable name!".format(name2, name))
return name
return '' | [
"\n Make sure a name can be printed, alongside used as a variable name.\n "
] |
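A few behaviour examples, derived directly from the replacements above:
adjust_name_for_printing('rbf.lengthscale')   # -> 'rbf_lengthscale'
adjust_name_for_printing('noise var**2')      # -> 'noise_var_xx_2'
adjust_name_for_printing(None)                # -> ''
adjust_name_for_printing('bad,name')          # raises NameError: the comma survives every substitution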
Please provide a description of the function:def name(self, name):
from_name = self.name
assert isinstance(name, str)
self._name = name
if self.has_parent():
self._parent_._name_changed(self, from_name) | [
"\n Set the name of this object.\n Tell the parent if the name has changed.\n "
] |
Please provide a description of the function:def hierarchy_name(self, adjust_for_printing=True):
if adjust_for_printing: adjust = lambda x: adjust_name_for_printing(x)
else: adjust = lambda x: x
if self.has_parent():
return self._parent_.hierarchy_name() + "." + adjust(self.name)
return adjust(self.name) | [
"\n return the name for this object with the parents names attached by dots.\n\n :param bool adjust_for_printing: whether to call :func:`~adjust_for_printing()`\n on the names, recursively\n \n "
] |
Please provide a description of the function:def link_parameter(self, param, index=None):
if param in self.parameters and index is not None:
self.unlink_parameter(param)
return self.link_parameter(param, index)
# elif param.has_parent():
# raise HierarchyError, "parameter {} already in another model ({}), create new object (or copy) for adding".format(param._short(), param._highest_parent_._short())
elif param not in self.parameters:
if param.has_parent():
def visit(parent, self):
if parent is self:
raise HierarchyError("You cannot add a parameter twice into the hierarchy")
param.traverse_parents(visit, self)
param._parent_.unlink_parameter(param)
# make sure the size is set
if index is None:
start = sum(p.size for p in self.parameters)
for name, iop in self._index_operations.items():
iop.shift_right(start, param.size)
iop.update(param._index_operations[name], self.size)
param._parent_ = self
param._parent_index_ = len(self.parameters)
self.parameters.append(param)
else:
start = sum(p.size for p in self.parameters[:index])
for name, iop in self._index_operations.items():
iop.shift_right(start, param.size)
iop.update(param._index_operations[name], start)
param._parent_ = self
param._parent_index_ = index if index>=0 else len(self.parameters[:index])
for p in self.parameters[index:]:
p._parent_index_ += 1
self.parameters.insert(index, param)
param.add_observer(self, self._pass_through_notify_observers, -np.inf)
parent = self
while parent is not None:
parent.size += param.size
parent = parent._parent_
self._notify_parent_change()
if not self._in_init_ and self._highest_parent_._model_initialized_:
#self._connect_parameters()
#self._notify_parent_change()
self._highest_parent_._connect_parameters()
self._highest_parent_._notify_parent_change()
self._highest_parent_._connect_fixes()
return param
else:
raise HierarchyError() | [
"\n :param parameters: the parameters to add\n :type parameters: list of or one :py:class:`paramz.param.Param`\n :param [index]: index of where to put parameters\n\n Add all parameters to this param class, you can insert parameters\n at any given index using the :func:`list.insert` syntax\n ",
"Parameter exists already, try making a copy"
] |
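A minimal linking sketch with paramz (assuming paramz and numpy are installed; the parameter names are arbitrary):
import numpy as np
from paramz import Parameterized, Param

parent = Parameterized('model')
weights = Param('weights', np.zeros(3))
parent.link_parameter(weights)                          # append at the end
parent.link_parameter(Param('bias', np.zeros(1)), 0)    # or insert at a given index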
Please provide a description of the function:def unlink_parameter(self, param):
if not param in self.parameters:
try:
raise HierarchyError("{} does not belong to this object {}, remove parameters directly from their respective parents".format(param._short(), self.name))
except AttributeError:
raise HierarchyError("{} does not seem to be a parameter, remove parameters directly from their respective parents".format(str(param)))
start = sum([p.size for p in self.parameters[:param._parent_index_]])
self.size -= param.size
del self.parameters[param._parent_index_]
self._remove_parameter_name(param)
param._disconnect_parent()
param.remove_observer(self, self._pass_through_notify_observers)
for name, iop in self._index_operations.items():
iop.shift_left(start, param.size)
self._connect_parameters()
self._notify_parent_change()
parent = self._parent_
while parent is not None:
parent.size -= param.size
parent = parent._parent_
self._highest_parent_._connect_parameters()
self._highest_parent_._connect_fixes()
self._highest_parent_._notify_parent_change() | [
"\n :param param: param object to remove from being a parameter of this parameterized object.\n "
] |
Please provide a description of the function:def grep_param_names(self, regexp):
if not isinstance(regexp, _pattern_type): regexp = compile(regexp)
found_params = []
def visit(innerself, regexp):
if (innerself is not self) and regexp.match(innerself.hierarchy_name().partition('.')[2]):
found_params.append(innerself)
self.traverse(visit, regexp)
return found_params | [
"\n create a list of parameters, matching regular expression regexp\n "
] |
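Reusing the parent object built in the link_parameter sketch above, matching parameters by name looks like:
matches = parent.grep_param_names('weights')   # list of Param objects whose hierarchy name matches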
Please provide a description of the function:def _repr_html_(self, header=True):
name = adjust_name_for_printing(self.name) + "."
names = self.parameter_names()
desc = self._description_str
iops = OrderedDict()
for opname in self._index_operations:
iop = []
for p in self.parameters:
iop.extend(p.get_property_string(opname))
iops[opname] = iop
format_spec = self._format_spec(name, names, desc, iops, False)
to_print = []
if header:
to_print.append("<tr><th><b>" + '</b></th><th><b>'.join(format_spec).format(name=name, desc='value', **dict((name, name) for name in iops)) + "</b></th></tr>")
format_spec = "<tr><td class=tg-left>" + format_spec[0] + '</td><td class=tg-right>' + format_spec[1] + '</td><td class=tg-center>' + '</td><td class=tg-center>'.join(format_spec[2:]) + "</td></tr>"
for i in range(len(names)):
to_print.append(format_spec.format(name=names[i], desc=desc[i], **dict((name, iops[name][i]) for name in iops)))
style =
return style + '\n' + '<table class="tg">' + '\n'.join(to_print) + '\n</table>' | [
"Representation of the parameters in html for notebook display.",
"<style type=\"text/css\">\n.tg {font-family:\"Courier New\", Courier, monospace !important;padding:2px 3px;word-break:normal;border-collapse:collapse;border-spacing:0;border-color:#DCDCDC;margin:0px auto;width:100%;}\n.tg td{font-family:\"Courier New\", Courier, monospace !important;font-weight:bold;color:#444;background-color:#F7FDFA;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}\n.tg th{font-family:\"Courier New\", Courier, monospace !important;font-weight:normal;color:#fff;background-color:#26ADE4;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}\n.tg .tg-left{font-family:\"Courier New\", Courier, monospace !important;font-weight:normal;text-align:left;}\n.tg .tg-center{font-family:\"Courier New\", Courier, monospace !important;font-weight:normal;text-align:center;}\n.tg .tg-right{font-family:\"Courier New\", Courier, monospace !important;font-weight:normal;text-align:right;}\n</style>"
] |
Please provide a description of the function:def build_pydot(self, G=None): # pragma: no cover
import pydot # @UnresolvedImport
iamroot = False
if G is None:
G = pydot.Dot(graph_type='digraph', bgcolor=None)
iamroot=True
node = pydot.Node(id(self), shape='box', label=self.name)#, color='white')
G.add_node(node)
for child in self.parameters:
child_node = child.build_pydot(G)
G.add_edge(pydot.Edge(node, child_node))#, color='white'))
for _, o, _ in self.observers:
label = o.name if hasattr(o, 'name') else str(o)
observed_node = pydot.Node(id(o), label=label)
if str(id(o)) not in G.obj_dict['nodes']:
G.add_node(observed_node)
edge = pydot.Edge(str(id(self)), str(id(o)), color='darkorange2', arrowhead='vee')
G.add_edge(edge)
if iamroot:
return G
return node | [
"\n Build a pydot representation of this model. This needs pydot installed.\n\n Example Usage::\n\n np.random.seed(1000)\n X = np.random.normal(0,1,(20,2))\n beta = np.random.uniform(0,1,(2,1))\n Y = X.dot(beta)\n m = RidgeRegression(X, Y)\n G = m.build_pydot()\n G.write_png('example_hierarchy_layout.png')\n\n The output looks like:\n\n .. image:: ./example_hierarchy_layout.png\n\n Rectangles are parameterized objects (nodes or leafs of hierarchy).\n\n Trapezoids are param objects, which represent the arrays for parameters.\n\n Black arrows show parameter hierarchical dependence. The arrow points\n from parents towards children.\n\n Orange arrows show the observer pattern. Self references (here) are\n the references to the call to parameters changed and references upwards\n are the references to tell the parents they need to update.\n "
] |
Please provide a description of the function:def gradient(self):
if getattr(self, '_gradient_array_', None) is None:
self._gradient_array_ = np.empty(self._realshape_, dtype=np.float64)
return self._gradient_array_ | [
"\n Return a view on the gradient, which is in the same shape as this parameter is.\n Note: this is not the real gradient array, it is just a view on it.\n\n To work on the real gradient array use: self.full_gradient\n "
] |
Please provide a description of the function:def _setup_observers(self):
if self.has_parent():
self.add_observer(self._parent_, self._parent_._pass_through_notify_observers, -np.inf) | [
"\n Setup the default observers\n\n 1: pass through to parent, if present\n "
] |
Please provide a description of the function:def _repr_html_(self, indices=None, iops=None, lx=None, li=None, lls=None):
filter_ = self._current_slice_
vals = self.flat
if indices is None: indices = self._indices(filter_)
if iops is None:
ravi = self._raveled_index(filter_)
iops = OrderedDict([name, iop.properties_for(ravi)] for name, iop in self._index_operations.items())
if lls is None: lls = [self._max_len_names(iop, name) for name, iop in iops.items()]
header_format =
header = header_format.format(x=self.hierarchy_name(), i=__index_name__, iops="</b></th><th><b>".join(list(iops.keys()))) # nice header for printing
to_print = []
to_print.append('<table class="tg">')
to_print.append(header)
format_spec = self._format_spec(indices, iops, lx, li, lls, False)
format_spec[:2] = ["<tr><td class=tg-left>{i}</td>".format(i=format_spec[0]), "<td class=tg-right>{i}</td>".format(i=format_spec[1])]
for i in range(2, len(format_spec)):
format_spec[i] = '<td class=tg-left>{c}</td>'.format(c=format_spec[i])
format_spec = "".join(format_spec) + '</tr>'
for i in range(self.size):
to_print.append(format_spec.format(index=indices[i], value="{1:.{0}f}".format(__precision__, vals[i]), **dict((name, ' '.join(map(str, iops[name][i]))) for name in iops)))
return '\n'.join(to_print) | [
"Representation of the parameter in html for notebook display.",
"\n<tr>\n <th><b>{i}</b></th>\n <th><b>{x}</b></th>\n <th><b>{iops}</b></th>\n</tr>",
"<style type=\"text/css\">\n.tg {padding:2px 3px;word-break:normal;border-collapse:collapse;border-spacing:0;border-color:#DCDCDC;margin:0px auto;width:100%;}\n.tg td{font-family:\"Courier New\", Courier, monospace !important;font-weight:bold;color:#444;background-color:#F7FDFA;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}\n.tg th{font-family:\"Courier New\", Courier, monospace !important;font-weight:normal;color:#fff;background-color:#26ADE4;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}\n.tg .tg-left{font-family:\"Courier New\", Courier, monospace !important;font-weight:normal;text-align:left;}\n.tg .tg-right{font-family:\"Courier New\", Courier, monospace !important;font-weight:normal;text-align:right;}\n</style>"
] |
Please provide a description of the function:def build_pydot(self,G): # pragma: no cover
import pydot
node = pydot.Node(id(self), shape='trapezium', label=self.name)#, fontcolor='white', color='white')
G.add_node(node)
for _, o, _ in self.observers:
label = o.name if hasattr(o, 'name') else str(o)
observed_node = pydot.Node(id(o), label=label)
if str(id(o)) not in G.obj_dict['nodes']: # pragma: no cover
G.add_node(observed_node)
edge = pydot.Edge(str(id(self)), str(id(o)), color='darkorange2', arrowhead='vee')
G.add_edge(edge)
return node | [
"\n Build a pydot representation of this model. This needs pydot installed.\n\n Example Usage:\n\n np.random.seed(1000)\n X = np.random.normal(0,1,(20,2))\n beta = np.random.uniform(0,1,(2,1))\n Y = X.dot(beta)\n m = RidgeRegression(X, Y)\n G = m.build_pydot()\n G.write_png('example_hierarchy_layout.png')\n\n The output looks like:\n\n .. image:: example_hierarchy_layout.png\n\n Rectangles are parameterized objects (nodes or leafs of hierarchy).\n\n Trapezoids are param objects, which represent the arrays for parameters.\n\n Black arrows show parameter hierarchical dependence. The arrow points\n from parents towards children.\n\n Orange arrows show the observer pattern. Self references (here) are\n the references to the call to parameters changed and references upwards\n are the references to tell the parents they need to update.\n "
] |
Please provide a description of the function:def add_observer(self, observer, callble, priority=0):
self.observers.add(priority, observer, callble) | [
"\n Add an observer `observer` with the callback `callble`\n and priority `priority` to this observers list.\n "
] |
Please provide a description of the function:def remove_observer(self, observer, callble=None):
to_remove = []
for poc in self.observers:
_, obs, clble = poc
if callble is not None:
if (obs is observer) and (callble == clble):
to_remove.append(poc)
else:
if obs is observer:
to_remove.append(poc)
for r in to_remove:
self.observers.remove(*r) | [
"\n Either (if callble is None) remove all callables,\n which were added alongside observer,\n or remove callable `callble` which was added alongside\n the observer `observer`.\n "
] |
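A self-contained sketch of the observer calls (again assuming paramz and numpy); the callback takes *args because the exact call signature is an implementation detail:
import numpy as np
from paramz import Param

x = Param('x', np.ones(2))

class Watcher(object):
    def notify(self, *args, **kwargs):
        print('x was updated')

w = Watcher()
x.add_observer(w, w.notify)     # register with the default priority of 0
x.remove_observer(w)            # no callable given, so every callable added for w is dropped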