__author__ = 'Paul Landes'
import logging
from typing import List, Callable
from abc import abstractmethod, ABC, ABCMeta
import sys
import re
import itertools as it
import parse
from copy import copy
import pickle
import time as tm
from pathlib import Path
import shelve as sh
from zensols.actioncli.time import time
logger = logging.getLogger(__name__)
# class level persistence
class PersistedWork(object):
"""This class automatically caches work that's serialized to the disk.
In order, it first looks for the data in ``owner``, then in globals (if
``cache_global`` is True), then it looks for the data on the file system.
If it can't find it after all of this it invokes function ``worker`` to
create the data and then pickles it to the disk.
This class is a callable itself, which is invoked to get or create the
work.
There are two ways to implement the data/work creation: pass a ``worker``
to the ``__init__`` method or extend this class and override
``__do_work__``.
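    For example (a minimal sketch; the path and worker function are
    hypothetical):
        class SomeClass(object):
            def __init__(self):
                self.counter = PersistedWork(Path('counter.dat'), owner=self)
                self.counter.worker = lambda: tuple(range(5))
        data = SomeClass().counter()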
"""
def __init__(self, path, owner, cache_global=False, transient=False):
"""Create an instance of the class.
        :param path: if of type ``pathlib.Path``, then use disk storage to
        cache the pickled data; otherwise a string used to store in the owner
        :type path: pathlib.Path or str
        :param owner: an owning class used to get and retrieve the cached
        value as an attribute
        :param cache_global: cache the data in globals; this shares data
        across instances but not classes
        :param transient: if ``True``, do not pickle the cached value when the
        owner instance is pickled
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug('pw inst: path={}, global={}'.format(path, cache_global))
self.owner = owner
self.cache_global = cache_global
self.transient = transient
self.worker = None
if isinstance(path, Path):
self.path = path
self.use_disk = True
fname = re.sub(r'[ /\\.]', '_', str(self.path.absolute()))
else:
self.path = Path(path)
self.use_disk = False
fname = str(path)
cstr = owner.__module__ + '.' + owner.__class__.__name__
self.varname = f'_{cstr}_{fname}_pwvinst'
def _info(self, msg, *args):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(self.varname + ': ' + msg, *args)
def clear_global(self):
"""Clear only any cached global data.
"""
vname = self.varname
if logger.isEnabledFor(logging.DEBUG):
            logger.debug(f'global clearing {vname}')
if vname in globals():
if logger.isEnabledFor(logging.DEBUG):
logger.debug('removing global instance var: {}'.format(vname))
del globals()[vname]
def clear(self):
"""Clear the data, and thus, force it to be created on the next fetch. This is
done by removing the attribute from ``owner``, deleting it from globals
and removing the file from the disk.
"""
vname = self.varname
if self.path.exists():
if logger.isEnabledFor(logging.DEBUG):
logger.debug('deleting cached work: {}'.format(self.path))
self.path.unlink()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'owner exists: {self.owner is not None} ' +
f'has {vname}: {hasattr(self.owner, vname)}')
if self.owner is not None and hasattr(self.owner, vname):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('removing instance var: {}'.format(vname))
delattr(self.owner, vname)
self.clear_global()
def _do_work(self, *argv, **kwargs):
t0 = tm.time()
obj = self.__do_work__(*argv, **kwargs)
if logger.isEnabledFor(logging.INFO):
            self._info('created work in {:.2f}s, saving to {}'.format(
                (tm.time() - t0), self.path))
return obj
def _load_or_create(self, *argv, **kwargs):
"""Invoke the file system operations to get the data, or create work.
        If the file does not exist, call ``__do_work__`` and save the result.
"""
if self.path.exists():
self._info('loading work from {}'.format(self.path))
with open(self.path, 'rb') as f:
obj = pickle.load(f)
else:
self._info('saving work to {}'.format(self.path))
with open(self.path, 'wb') as f:
obj = self._do_work(*argv, **kwargs)
pickle.dump(obj, f)
return obj
def set(self, obj):
"""Set the contents of the object on the owner as if it were persisted from the
source. If this is a global cached instance, then add it to global
memory.
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'saving in memory value {type(obj)}')
vname = self.varname
setattr(self.owner, vname, obj)
if self.cache_global:
if vname not in globals():
globals()[vname] = obj
def __getstate__(self):
"""We must null out the owner and worker as they are not pickelable.
:seealso: PersistableContainer
"""
d = copy(self.__dict__)
d['owner'] = None
d['worker'] = None
return d
def __call__(self, *argv, **kwargs):
"""Return the cached data if it doesn't yet exist. If it doesn't exist, create
it and cache it on the file system, optionally ``owner`` and optionally
the globals.
"""
vname = self.varname
obj = None
if logger.isEnabledFor(logging.DEBUG):
logger.debug('call with vname: {}'.format(vname))
if self.owner is not None and hasattr(self.owner, vname):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('found in instance')
obj = getattr(self.owner, vname)
if obj is None and self.cache_global:
if vname in globals():
if logger.isEnabledFor(logging.DEBUG):
logger.debug('found in globals')
obj = globals()[vname]
if obj is None:
if self.use_disk:
obj = self._load_or_create(*argv, **kwargs)
else:
self._info('invoking worker')
obj = self._do_work(*argv, **kwargs)
self.set(obj)
return obj
def __do_work__(self, *argv, **kwargs):
"""You can extend this class and overriding this method. This method will
invoke the worker to do the work.
"""
return self.worker(*argv, **kwargs)
def pprint(self, writer=sys.stdout, indent=0, include_content=False):
sp = ' ' * indent
writer.write(f'{sp}{self}:\n')
sp = ' ' * (indent + 1)
writer.write(f'{sp}global: {self.cache_global}\n')
writer.write(f'{sp}transient: {self.transient}\n')
writer.write(f'{sp}type: {type(self())}\n')
if include_content:
writer.write(f'{sp}content: {self()}\n')
def __str__(self):
return self.varname
class PersistableContainerMetadata(object):
def __init__(self, container):
self.container = container
@property
def persisted(self):
"""Return all ``PersistedWork`` instances on this object as a ``dict``.
"""
pws = {}
for k, v in self.container.__dict__.items():
if isinstance(v, PersistedWork):
pws[k] = v
return pws
def pprint(self, writer=sys.stdout, indent=0,
include_content=False, recursive=False):
sp = ' ' * indent
spe = ' ' * (indent + 1)
for k, v in self.container.__dict__.items():
if isinstance(v, PersistedWork):
v.pprint(writer, indent, include_content)
else:
writer.write(f'{sp}{k}:\n')
writer.write(f'{spe}type: {type(v)}\n')
if include_content:
writer.write(f'{spe}content: {v}\n')
if recursive and isinstance(v, PersistableContainer):
cmeta = v._get_persistable_metadata()
                    cmeta.pprint(writer, indent + 2, include_content, True)
def clear(self):
"""Clear all ``PersistedWork`` instances on this object.
"""
for pw in self.persisted.values():
pw.clear()
class PersistableContainer(object):
"""Classes can extend this that want to persist ``PersistableWork`` instances,
which otherwise are not persistable.
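    For example (a minimal sketch using the ``persisted`` decorator defined
    below; the cached value is recomputed after unpickling since it is marked
    transient):
        class SomeClass(PersistableContainer):
            @property
            @persisted('_counter', transient=True)
            def counter(self):
                return tuple(range(5))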
"""
def __getstate__(self):
state = copy(self.__dict__)
removes = []
for k, v in state.items():
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'container get state: {k} => {type(v)}')
if isinstance(v, PersistedWork):
if v.transient:
removes.append(v.varname)
for k in removes:
state[k] = None
return state
def __setstate__(self, state):
"""Set the owner to containing instance and the worker function to the owner's
function by name.
"""
self.__dict__.update(state)
for k, v in state.items():
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'container set state: {k} => {type(v)}')
if isinstance(v, PersistedWork):
setattr(v, 'owner', self)
def _get_persistable_metadata(self) -> PersistableContainerMetadata:
"""Return the metadata for this container.
"""
return PersistableContainerMetadata(self)
class persisted(object):
"""Class level annotation to further simplify usage with PersistedWork.
For example:
class SomeClass(object):
@property
@persisted('counter', 'tmp.dat')
def someprop(self):
return tuple(range(5))
"""
def __init__(self, attr_name, path=None, cache_global=False,
transient=False):
logger.debug('persisted decorator on attr: {}, global={}'.format(
attr_name, cache_global))
self.attr_name = attr_name
self.path = path
self.cache_global = cache_global
self.transient = transient
def __call__(self, fn):
logger.debug(f'call: {fn}:{self.attr_name}:{self.path}:' +
f'{self.cache_global}')
def wrapped(*argv, **kwargs):
inst = argv[0]
logger.debug(f'wrap: {fn}:{self.attr_name}:{self.path}:' +
f'{self.cache_global}')
if hasattr(inst, self.attr_name):
pwork = getattr(inst, self.attr_name)
else:
if self.path is None:
path = self.attr_name
else:
path = Path(self.path)
pwork = PersistedWork(
path, owner=inst, cache_global=self.cache_global,
transient=self.transient)
setattr(inst, self.attr_name, pwork)
pwork.worker = fn
return pwork(*argv, **kwargs)
return wrapped
# resource/sql
class resource(object):
"""This annotation uses a template pattern to (de)allocate resources. For
example, you can declare class methods to create database connections and
then close them. This example looks like this:
class CrudManager(object):
def _create_connection(self):
return sqlite3.connect(':memory:')
def _dispose_connection(self, conn):
conn.close()
@resource('_create_connection', '_dispose_connection')
def commit_work(self, conn, obj):
conn.execute(...)
"""
def __init__(self, create_method_name, destroy_method_name):
"""Create the instance based annotation.
:param create_method_name: the name of the method that allocates
:param destroy_method_name: the name of the method that deallocates
"""
logger.debug(f'connection decorator {create_method_name} ' +
f'destructor method name: {destroy_method_name}')
self.create_method_name = create_method_name
self.destroy_method_name = destroy_method_name
def __call__(self, fn):
logger.debug(f'connection call with fn: {fn}')
def wrapped(*argv, **kwargs):
logger.debug(f'in wrapped {self.create_method_name}')
inst = argv[0]
resource = getattr(inst, self.create_method_name)()
try:
result = fn(inst, resource, *argv[1:], **kwargs)
finally:
getattr(inst, self.destroy_method_name)(resource)
return result
return wrapped
class chunks(object):
"""An iterable that chunks any other iterable in to chunks. Each element
returned is a list of elemnets of the given size or smaller. That element
that might be smaller is the remainer of the iterable once it is exhausted.
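    For example (a minimal sketch):
        for chunk in chunks(range(5), 2):
            print(chunk)
        # prints [0, 1], then [2, 3], then [4]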
"""
def __init__(self, iterable: iter, size: int, enum: bool = False):
"""Initialize the chunker.
        :param iterable: any iterable object
        :param size: the size of each chunk
        :param enum: if ``True``, each element is a tuple of its index within
        the chunk and the element itself
"""
self.iterable = iterable
self.size = size
self.enum = enum
def __iter__(self):
self.iterable_session = iter(self.iterable)
return self
def __next__(self):
ds = []
for e in range(self.size):
try:
obj = next(self.iterable_session)
except StopIteration:
break
if self.enum:
obj = (e, obj)
ds.append(obj)
if len(ds) == 0:
raise StopIteration()
return ds
# collections
class Stash(ABC):
"""Pure virtual classes that represents CRUDing data that uses ``dict``
semantics. The data is usually CRUDed to the file system but need not be.
Instance can be used as iterables or dicsts. If the former, each item is
returned as a key/value tuple.
Note that while the functionality might appear similar to a dict when used
as such, there are subtle differences. For example, when indexing
obtaining the value is sometimes *forced* by using some mechanism to create
the item. When using ``get`` it relaxes this creation mechanism for some
implementations.
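    For example (a minimal sketch using the ``DictionaryStash`` implementation
    defined later in this module):
        stash = DictionaryStash()
        stash.dump('k1', [1, 2, 3])
        assert stash['k1'] == [1, 2, 3]
        assert 'k1' in stash
        assert stash.get('missing', 'default') == 'default'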
"""
@abstractmethod
def load(self, name: str):
"""Load a data value from the pickled data with key ``name``.
"""
pass
def get(self, name: str, default=None):
"""Load an object or a default if key ``name`` doesn't exist.
"""
ret = self.load(name)
if ret is None:
return default
else:
return ret
@abstractmethod
def exists(self, name: str) -> bool:
"""Return ``True`` if data with key ``name`` exists.
"""
pass
@abstractmethod
def dump(self, name: str, inst):
"Persist data value ``inst`` with key ``name``."
pass
@abstractmethod
def delete(self, name=None):
"""Delete the resource for data pointed to by ``name`` or the entire resource
if ``name`` is not given.
"""
pass
def clear(self):
"""Delete all data from the from the stash.
*Important*: Exercise caution with this method, of course.
"""
for k in self.keys():
self.delete(k)
@abstractmethod
def keys(self) -> List[str]:
"""Return an iterable of keys in the collection.
"""
pass
def key_groups(self, n):
"Return an iterable of groups of keys, each of size at least ``n``."
return chunks(self.keys(), n)
def values(self):
"""Return the values in the hash.
"""
return map(lambda k: self.__getitem__(k), self.keys())
def items(self):
"""Return an iterable of all stash items."""
return map(lambda k: (k, self.__getitem__(k)), self.keys())
def __getitem__(self, key):
exists = self.exists(key)
item = self.load(key)
if item is None:
raise KeyError(key)
if not exists:
self.dump(key, item)
return item
def __setitem__(self, key, value):
self.dump(key, value)
def __delitem__(self, key):
self.delete(key)
def __contains__(self, key):
return self.exists(key)
def __iter__(self):
return map(lambda x: (x, self.__getitem__(x),), self.keys())
def __len__(self):
return len(tuple(self.keys()))
class CloseableStash(Stash):
"""Any stash that has a resource that needs to be closed.
"""
@abstractmethod
def close(self):
"Close all resources created by the stash."
pass
class DelegateStash(CloseableStash, metaclass=ABCMeta):
"""Delegate pattern. It can also be used as a no-op if no delegate is given.
A minimum functioning implementation needs the ``load`` and ``keys``
methods overriden. Inheriting and implementing a ``Stash`` such as this is
usually used as the ``factory`` in a ``FactoryStash``.
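    For example (a minimal sketch of a stash that computes values on demand):
        class DoubleStash(DelegateStash):
            def load(self, name: str):
                return int(name) * 2
            def keys(self):
                return map(str, range(3))
        assert DoubleStash()['4'] == 8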
"""
def __init__(self, delegate: Stash = None):
if delegate is not None and not isinstance(delegate, Stash):
raise ValueError(f'not a stash: {delegate}')
self.delegate = delegate
def __getattr__(self, attr, default=None):
try:
delegate = super(DelegateStash, self).__getattribute__('delegate')
except AttributeError:
            raise AttributeError(
                f"'{self.__class__.__name__}' object has no attribute "
                f"'{attr}'; delegate not set")
if delegate is not None:
return delegate.__getattribute__(attr)
        raise AttributeError(
            f"'{self.__class__.__name__}' object has no attribute '{attr}'")
def load(self, name: str):
if self.delegate is not None:
return self.delegate.load(name)
def get(self, name: str, default=None):
if self.delegate is None:
return super(DelegateStash, self).get(name, default)
else:
return self.delegate.get(name, default)
def exists(self, name: str):
if self.delegate is not None:
return self.delegate.exists(name)
else:
return False
def dump(self, name: str, inst):
if self.delegate is not None:
return self.delegate.dump(name, inst)
def delete(self, name=None):
if self.delegate is not None:
self.delegate.delete(name)
def keys(self):
if self.delegate is not None:
return self.delegate.keys()
return ()
def clear(self):
super(DelegateStash, self).clear()
if self.delegate is not None:
self.delegate.clear()
def close(self):
if self.delegate is not None:
return self.delegate.close()
class KeyLimitStash(DelegateStash):
"""A stash that limits the number of generated keys useful for debugging.
For most stashes, this also limits the iteration output since that is based
on key mapping.
"""
def __init__(self, delegate: Stash, n_limit=10):
super(KeyLimitStash, self).__init__(delegate)
self.n_limit = n_limit
def keys(self):
ks = super(KeyLimitStash, self).keys()
return it.islice(ks, self.n_limit)
class PreemptiveStash(DelegateStash):
"""Provide support for preemptively creating data in a stash.
"""
@property
def has_data(self):
"""Return whether or not the stash has any data available or not.
"""
return self._calculate_has_data()
def _calculate_has_data(self):
"""Return ``True`` if the delegate has keys.
"""
if not hasattr(self, '_has_data'):
try:
next(iter(self.delegate.keys()))
self._has_data = True
except StopIteration:
self._has_data = False
return self._has_data
def _reset_has_data(self):
"""Reset the state of whether the stash has data or not.
"""
if hasattr(self, '_has_data'):
delattr(self, '_has_data')
def _set_has_data(self, has_data=True):
"""Set the state of whether the stash has data or not.
"""
self._has_data = has_data
def clear(self):
if self._calculate_has_data():
super(PreemptiveStash, self).clear()
self._reset_has_data()
class FactoryStash(PreemptiveStash):
"""A stash that defers to creation of new items to another ``factory`` stash.
"""
def __init__(self, delegate, factory, enable_preemptive=True):
"""Initialize.
:param delegate: the stash used for persistence
:type delegate: Stash
:param factory: the stash used to create using ``load`` and ``keys``
:type factory: Stash
"""
super(FactoryStash, self).__init__(delegate)
self.factory = factory
self.enable_preemptive = enable_preemptive
def _calculate_has_data(self) -> bool:
if self.enable_preemptive:
return super(FactoryStash, self)._calculate_has_data()
else:
return False
def load(self, name: str):
item = super(FactoryStash, self).load(name)
if item is None:
self._reset_has_data()
item = self.factory.load(name)
return item
def keys(self) -> List[str]:
if self.has_data:
ks = super(FactoryStash, self).keys()
else:
ks = self.factory.keys()
return ks
class OneShotFactoryStash(PreemptiveStash, metaclass=ABCMeta):
"""A stash that is populated by a callable or an iterable 'worker'. The data
is generated by the worker and dumped to the delegate.
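    For example (a minimal sketch using a generator function as the worker and
    a ``DictionaryStash`` as the delegate):
        def make_data():
            return ((str(i), i * i) for i in range(3))
        stash = OneShotFactoryStash(make_data, DictionaryStash())
        assert stash['2'] == 4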
"""
def __init__(self, worker, *args, **kwargs):
"""Initialize the stash.
        :param worker: either a callable (i.e. a function) or an iterable that
        returns tuples or lists of (key, object) pairs
"""
super(OneShotFactoryStash, self).__init__(*args, **kwargs)
self.worker = worker
def _process_work(self):
"""Invoke the worker to generate the data and dump it to the delegate.
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'processing with {type(self.worker)}')
if callable(self.worker):
itr = self.worker()
else:
itr = self.worker
for id, obj in itr:
self.delegate.dump(id, obj)
def prime(self):
has_data = self.has_data
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'asserting data: {has_data}')
if not has_data:
with time(f'processing work in {self}'):
self._process_work()
self._reset_has_data()
def get(self, name: str, default=None):
self.prime()
return super(OneShotFactoryStash, self).get(name, default)
def load(self, name: str):
self.prime()
return super(OneShotFactoryStash, self).load(name)
def keys(self):
self.prime()
return super(OneShotFactoryStash, self).keys()
class OrderedKeyStash(DelegateStash):
"""Specify an ordering to how keys in a stash are returned. This usually also
has an impact on the order in which values are iterated since a call to get
the keys determins it.
"""
def __init__(self, delegate: Stash, order_function: Callable = int):
super(OrderedKeyStash, self).__init__(delegate)
self.order_function = order_function
def keys(self) -> List[str]:
keys = super(OrderedKeyStash, self).keys()
if self.order_function:
keys = sorted(keys, key=self.order_function)
else:
keys = sorted(keys)
return keys
class DictionaryStash(DelegateStash):
"""Use a dictionary as a backing store to the stash. If one is not provided in
the initializer a new ``dict`` is created.
"""
def __init__(self, data: dict = None):
super(DictionaryStash, self).__init__()
if data is None:
self._data = {}
else:
self._data = data
@property
def data(self):
return self._data
def load(self, name: str):
return self.data.get(name)
def get(self, name: str, default=None):
return self.data.get(name, default)
def exists(self, name: str):
return name in self.data
def dump(self, name: str, inst):
self.data[name] = inst
def delete(self, name=None):
del self.data[name]
def keys(self):
return self.data.keys()
def clear(self):
self.data.clear()
super(DictionaryStash, self).clear()
def __getitem__(self, key):
return self.data[key]
class CacheStash(DelegateStash):
"""Provide a dictionary based caching based stash.
"""
def __init__(self, delegate, cache_stash=None, read_only=False):
"""Initialize.
:param delegate: the underlying persistence stash
:param cache_stash: a stash used for caching (defaults to
``DictionaryStash``)
:param read_only: if ``True``, make no changes to ``delegate``
"""
super(CacheStash, self).__init__(delegate)
if cache_stash is None:
self.cache_stash = DictionaryStash()
else:
self.cache_stash = cache_stash
self.read_only = read_only
def load(self, name: str):
if self.cache_stash.exists(name):
return self.cache_stash.load(name)
else:
obj = self.delegate.load(name)
self.cache_stash.dump(name, obj)
return obj
def exists(self, name: str):
return self.cache_stash.exists(name) or self.delegate.exists(name)
def delete(self, name=None):
if self.cache_stash.exists(name):
self.cache_stash.delete(name)
if not self.read_only:
self.delegate.delete(name)
def clear(self):
if not self.read_only:
super(CacheStash, self).clear()
self.cache_stash.clear()
class DirectoryStash(Stash):
"""Creates a pickeled data file with a file name in a directory with a given
pattern across all instances.
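    For example (a minimal sketch; ``target`` is a hypothetical directory that
    is created if it does not exist):
        stash = DirectoryStash(Path('target'))
        stash.dump('k1', {'a': 1})
        assert stash.load('k1') == {'a': 1}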
"""
def __init__(self, create_path: Path, pattern='{name}.dat'):
"""Create a stash.
:param create_path: the directory of where to store the files
:param pattern: the file name portion with ``name`` populating to the
key of the data value
"""
self.pattern = pattern
self.create_path = create_path
def _create_path_dir(self):
self.create_path.mkdir(parents=True, exist_ok=True)
def _get_instance_path(self, name):
"Return a path to the pickled data with key ``name``."
fname = self.pattern.format(**{'name': name})
logger.debug(f'path {self.create_path}: {self.create_path.exists()}')
self._create_path_dir()
return Path(self.create_path, fname)
def load(self, name):
path = self._get_instance_path(name)
inst = None
if path.exists():
logger.info(f'loading instance from {path}')
with open(path, 'rb') as f:
inst = pickle.load(f)
logger.debug(f'loaded instance: {inst}')
return inst
def exists(self, name):
path = self._get_instance_path(name)
return path.exists()
def keys(self):
def path_to_key(path):
p = parse.parse(self.pattern, path.name).named
if 'name' in p:
return p['name']
if not self.create_path.is_dir():
keys = ()
else:
keys = filter(lambda x: x is not None,
map(path_to_key, self.create_path.iterdir()))
return keys
def dump(self, name, inst):
logger.info(f'saving instance: {inst}')
path = self._get_instance_path(name)
with open(path, 'wb') as f:
pickle.dump(inst, f)
def delete(self, name):
logger.info(f'deleting instance: {name}')
path = self._get_instance_path(name)
if path.exists():
path.unlink()
def close(self):
pass
class ShelveStash(CloseableStash):
"""Stash that uses Python's shelve library to store key/value pairs in dbm
(like) databases.
"""
def __init__(self, create_path: Path, writeback=False):
"""Initialize.
:param create_path: a file to be created to store and/or load for the
data storage
:param writeback: the writeback parameter given to ``shelve``
"""
self.create_path = create_path
self.writeback = writeback
self.is_open = False
@property
@persisted('_shelve')
def shelve(self):
"""Return an opened shelve object.
"""
logger.info('creating shelve data')
fname = str(self.create_path.absolute())
inst = sh.open(fname, writeback=self.writeback)
self.is_open = True
return inst
def load(self, name):
if self.exists(name):
return self.shelve[name]
def dump(self, name, inst):
self.shelve[name] = inst
def exists(self, name):
return name in self.shelve
def keys(self):
return self.shelve.keys()
def delete(self, name=None):
"Delete the shelve data file."
logger.info('clearing shelve data')
self.close()
for path in Path(self.create_path.parent, self.create_path.name), \
Path(self.create_path.parent, self.create_path.name + '.db'):
logger.debug(f'clearing {path} if exists: {path.exists()}')
if path.exists():
path.unlink()
break
def close(self):
"Close the shelve object, which is needed for data consistency."
if self.is_open:
logger.info('closing shelve data')
try:
self.shelve.close()
self._shelve.clear()
except Exception:
self.is_open = False
def clear(self):
if self.create_path.exists():
self.create_path.unlink()
# utility functions
class shelve(object):
"""Object used with a ``with`` scope that creates the closes a shelve object.
For example, the following opens a file ``path``, sets a temporary variable
``stash``, prints all the data from the shelve, and then closes it.
with shelve(path) as stash:
for id, val in stash, 30:
print(f'{id}: {val}')
"""
def __init__(self, *args, **kwargs):
self.shelve = ShelveStash(*args, **kwargs)
def __enter__(self):
return self.shelve
def __exit__(self, type, value, traceback):
        self.shelve.close()


# end of file: zensols/actioncli/persist.py
from typing import Iterable, Tuple
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
import logging
import math
from multiprocessing import Pool
from zensols.actioncli.time import time
from zensols.actioncli import (
Stash,
Configurable,
PreemptiveStash,
StashFactory,
chunks,
)
logger = logging.getLogger(__name__)
class StashMapReducer(object):
def __init__(self, stash: Stash, n_workers: int = 10):
self.stash = stash
self.n_workers = n_workers
@property
def key_group_size(self):
n_items = len(self.stash)
return math.ceil(n_items / self.n_workers)
def _map(self, id: str, val):
return (id, val)
def _reduce(self, vals):
return vals
def _reduce_final(self, reduced_vals):
return reduced_vals
def _map_ids(self, id_sets):
return tuple(map(lambda id: self._map(id, self.stash[id]), id_sets))
def map(self):
id_sets = self.stash.key_groups(self.key_group_size)
pool = Pool(self.n_workers)
return pool.map(self._map_ids, id_sets)
def __call__(self):
mapval = self.map()
reduced = map(self._reduce, mapval)
return self._reduce_final(reduced)
class FunctionStashMapReducer(StashMapReducer):
def __init__(self, stash: Stash, func, n_workers: int = 10):
super(FunctionStashMapReducer, self).__init__(stash, n_workers)
self.func = func
def _map(self, id: str, val):
return self.func(id, val)
@staticmethod
def map_func(*args, **kwargs):
mr = FunctionStashMapReducer(*args, **kwargs)
return mr.map()
@dataclass
class ChunkProcessor(object):
"""Represents a chunk of work created by the parent and processed on the child.
:param config: the application context configuration used to create the
parent stash
:param name: the name of the parent stash used to create the chunk, and
subsequently process this chunk
:param chunk_id: the nth chunk
:param data: the data created by the parent to be processed
"""
config: Configurable
name: str
chunk_id: int
data: object
def _create_stash(self):
return StashFactory(self.config).instance(self.name)
def process(self):
"""Create the stash used to process the data, then persisted in the stash.
"""
stash = self._create_stash()
cnt = 0
for id, inst in stash._process(self.data):
stash.delegate.dump(id, inst)
cnt += 1
return cnt
def __str__(self):
return f'{self.name}: data: {type(self.data)}'
class MultiProcessStash(PreemptiveStash, metaclass=ABCMeta):
"""A stash that forks processes to process data in a distributed fashion. The
stash is typically created by a ``StashFactory`` in the child process.
Work is chunked (grouped) and then sent to child processes. In each, a new
instance of this same stash is created using the ``StashFactory`` and then
an abstract method is called to dump the data.
    To implement, the ``_create_data`` and ``_process`` methods must be
    implemented.
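    For example (a minimal sketch of a subclass; the configuration, section
    name, delegate and worker counts are supplied by the ``StashFactory``):
        class SquareStash(MultiProcessStash):
            def _create_data(self) -> list:
                return list(range(100))
            def _process(self, chunk: list):
                return map(lambda i: (str(i), i * i), chunk)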
"""
def __init__(self, config: Configurable, name: str, delegate: Stash,
chunk_size: int, workers: int):
"""Initialize the stash from a ``StashFactory``.
        This class is abstract and subclasses are usually created by a
        ``StashFactory`` since it needs to be able to be created by the
        factory in the child process.
:param config: the application context configuration used to create the
parent stash
        :param delegate: the stash in charge of the actual persistence
:param name: the name of the stash in the configuration, which is
created by a ``StashFactory``
        :param chunk_size: the size of each group of data sent to the child
                           process to be handled; in some cases the child
                           process will get a chunk of data smaller than this
                           (the last) but never more
        :param workers: the number of child worker processes to spawn
"""
super(MultiProcessStash, self).__init__(delegate)
self.config = config
self.name = name
self.chunk_size = chunk_size
self.workers = workers
@abstractmethod
def _create_data(self) -> list:
"""Create data in the parent process to be processed in the child process(es)
in chunks.
"""
pass
@abstractmethod
    def _process(self, chunks: list) -> Iterable[Tuple[str, object]]:
"""Process a chunk of data, each created by ``_create_data`` and then grouped.
"""
pass
@staticmethod
def _process_work(chunk: ChunkProcessor) -> int:
"""Process a chunk of data in the child process that was created by the parent
process.
"""
return chunk.process()
def _create_chunk_processor(self, chunk_id: int, data: object):
"""Factory method to create the ``ChunkProcessor`` instance.
"""
return ChunkProcessor(self.config, self.name, chunk_id, data)
def _spawn_work(self) -> int:
"""Chunks and invokes a multiprocessing pool to invokes processing on the
children.
"""
data = map(lambda x: self._create_chunk_processor(*x),
enumerate(chunks(self._create_data(), self.chunk_size)))
        logger.debug(f'spawning chunks of size {self.chunk_size} across ' +
f'{self.workers} workers')
with Pool(self.workers) as p:
with time('processed chunks'):
cnt = sum(p.map(self.__class__._process_work, data))
return cnt
def prime(self):
"""If the delegate stash data does not exist, use this implementation to
generate the data and process in children processes.
"""
has_data = self.has_data
logger.debug(f'asserting data: {has_data}')
if not has_data:
            with time(f'spawning work in {self}'):
self._spawn_work()
self._reset_has_data()
def get(self, name: str, default=None):
self.prime()
return super(MultiProcessStash, self).get(name, default)
def load(self, name: str):
self.prime()
return super(MultiProcessStash, self).load(name)
def keys(self):
self.prime()
        return super(MultiProcessStash, self).keys()


# end of file: zensols/actioncli/multi.py
import logging
import sys
import threading
from io import StringIO
class LoggerStream(object):
"""
Each line of standard out/error becomes a logged line
"""
def __init__(self, logger, log_level=logging.INFO):
self.logger = logger
self.log_level = log_level
self.linebuf = ''
def write(self, c):
if c == '\n':
self.logger.log(self.log_level, self.linebuf.rstrip())
self.linebuf = ''
else:
self.linebuf += c
def flush(self):
if len(self.linebuf) > 0:
self.write('\n')
class LogLevelSetFilter(object):
def __init__(self, levels):
self.levels = levels
def filter(self, record):
return record.levelno in self.levels
class StreamLogDumper(threading.Thread):
"Redirect stream output to a logger in a running thread."
def __init__(self, stream, logger, level):
super(StreamLogDumper, self).__init__()
self.stream = stream
self.logger = logger
self.level = level
def run(self):
with self.stream as s:
for line in iter(s.readline, b''):
line = line.decode('utf-8')
line = line.rstrip()
self.logger.log(self.level, line)
@classmethod
def dump(clz, stdout, stderr, logger):
StreamLogDumper(stdout, logger, logging.INFO).start()
StreamLogDumper(stderr, logger, logging.ERROR).start()
class LogConfigurer(object):
"""
Configure logging to go to a file or Graylog.
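    For example (a minimal sketch; ``app.log`` is a hypothetical file name):
        conf = LogConfigurer(level=logging.INFO)
        conf.config_basic()
        conf.config_file('app.log')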
"""
def __init__(self, logger=logging.getLogger(None),
log_format='%(asctime)s %(levelname)s %(message)s',
level=None):
self.log_format = log_format
self.logger = logger
if level is not None:
self.logger.setLevel(level)
self.level = level
def config_handler(self, handler):
if self.log_format is not None:
formatter = logging.Formatter(self.log_format)
handler.setFormatter(formatter)
self.logger.addHandler(handler)
return handler
def config_stream(self, stdout_stream, stderr_stream=None):
out = logging.StreamHandler(stdout_stream)
if stderr_stream is not None:
err = logging.StreamHandler(stderr_stream)
#err.setLevel(logging.DEBUG)
#out.setLevel(logging.DEBUG)
err.addFilter(LogLevelSetFilter({logging.ERROR}))
out.addFilter(LogLevelSetFilter(
{logging.WARNING, logging.INFO, logging.DEBUG}))
self.config_handler(err)
self.config_handler(out)
def config_buffer(self):
log_stream = StringIO()
self.config_stream(log_stream)
return log_stream
def config_file(self, file_name):
return self.config_handler(logging.FileHandler(file_name))
def config_basic(self):
logging.basicConfig(format=self.log_format, level=self.level)
def capture(self,
stdout_logger=logging.getLogger('STDOUT'),
stderr_logger=logging.getLogger('STDERR')):
if stdout_logger is not None:
sys.stdout = LoggerStream(stdout_logger, logging.INFO)
if stderr_logger is not None:
            sys.stderr = LoggerStream(stderr_logger, logging.INFO)


# end of file: zensols/actioncli/log.py
import logging
from abc import ABC
from typing import Dict
import inspect
import importlib
import re
from functools import reduce
from time import time
from zensols.actioncli import (
Configurable,
Stash,
)
logger = logging.getLogger(__name__)
class ClassResolver(ABC):
"""Used to resolve a class from a string.
"""
def find_class(self, class_name: str) -> type:
"""Return a class given the name of the class.
:param class_name: represents the class name, which might or might not
have the module as part of that name
"""
pass
class DictionaryClassResolver(ClassResolver):
"""Resolve a class name from a list of registered class names without the
module part. This is used with the ``register`` method on
``ConfigFactory``.
:see: ConfigFactory.register
"""
def __init__(self, instance_classes: Dict[str, type]):
self.instance_classes = instance_classes
def find_class(self, class_name):
classes = {}
classes.update(globals())
classes.update(self.instance_classes)
logger.debug(f'looking up class: {class_name}')
if class_name not in classes:
raise ValueError(
f'class {class_name} is not registered in factory {self}')
cls = classes[class_name]
logger.debug(f'found class: {cls}')
return cls
class ClassImporter(object):
"""Utility class that reloads a module and instantiates a class from a string
class name. This is handy for prototyping code in a Python REPL.
"""
CLASS_REGEX = re.compile(r'^(.+)\.(.+?)$')
def __init__(self, class_name: str, reload: bool = True):
"""Initialize with the class name.
        :param class_name: the fully qualified name of the class (including the
module portion of the class name)
:param reload: if ``True`` then reload the module before returning the
class
"""
self.class_name = class_name
self.reload = reload
def parse_module_class(self):
"""Parse the module and class name part of the fully qualifed class name.
"""
cname = self.class_name
match = re.match(self.CLASS_REGEX, cname)
if not match:
raise ValueError(f'not a fully qualified class name: {cname}')
return match.groups()
def get_module_class(self):
"""Return the module and class as a tuple of the given class in the
initializer.
        If :obj:`reload` is ``True``, then the module is reloaded before
        returning the class.
"""
pkg, cname = self.parse_module_class()
logger.debug(f'pkg: {pkg}, class: {cname}')
pkg = pkg.split('.')
mod = reduce(lambda m, n: getattr(m, n), pkg[1:], __import__(pkg[0]))
logger.debug(f'mod: {mod}, reloading: {self.reload}')
if self.reload:
importlib.reload(mod)
cls = getattr(mod, cname)
logger.debug(f'class: {cls}')
return mod, cls
def instance(self, *args, **kwargs):
"""Create an instance of the specified class in the initializer.
:param args: the arguments given to the initializer of the new class
:param kwargs: the keyword arguments given to the initializer of the
new class
"""
mod, cls = self.get_module_class()
try:
inst = cls(*args, **kwargs)
except Exception as e:
msg = f'could not instantiate {cls}({args}, {kwargs})'
            logger.error(f'{msg}: {e}')
raise e
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'inst: {inst}')
return inst
def set_log_level(self, level=logging.INFO):
"""Convenciene method to set the log level of the module given in the
initializer of this class.
:param level: and instance of ``logging.<level>``
"""
mod, cls = self.parse_module_class()
logging.getLogger(mod).setLevel(level)
class ImportClassResolver(ClassResolver):
"""Resolve a class name from a list of registered class names without the
module part. This is used with the ``register`` method on
``ConfigFactory``.
:see: ConfigFactory.register
"""
def __init__(self, reload: bool = False):
self.reload = reload
def find_class(self, class_name):
class_importer = ClassImporter(class_name, reload=self.reload)
return class_importer.get_module_class()[1]
class ConfigFactory(object):
"""Creates new instances of classes and configures them given data in a
configuration ``Configurable`` instance.
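    For example (a minimal sketch; the ``some_sec`` section and ``SomeClass``
    are hypothetical, and ``SomeClass`` must be registered via :meth:`register`
    on a factory subclass that defines ``INSTANCE_CLASSES``), a configuration
    section such as:
        [some_sec]
        class_name = SomeClass
        level = 5
    creates a configured instance with:
        factory = ConfigFactory(config)
        inst = factory.instance('some_sec')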
"""
def __init__(self, config: Configurable, pattern: str = '{name}',
config_param_name: str = 'config',
name_param_name: str = 'name', default_name: str = 'default',
class_importer: ClassImporter = None):
"""Initialize a new factory instance.
:param config: the configuration used to create the instance; all data
from the corresponding section is given to the
``__init__`` method
:param pattern: section pattern used to find the values given to the
``__init__`` method
:param config_param_name: the ``__init__`` parameter name used for the
configuration object given to the factory's
``instance`` method; defaults to ``config``
        :param name_param_name: the ``__init__`` parameter name used for the
                                instance name given to the factory's
                                ``instance`` method; defaults to ``name``
"""
self.config = config
self.pattern = pattern
self.config_param_name = config_param_name
self.name_param_name = name_param_name
self.default_name = default_name
if class_importer is None:
self.class_importer = DictionaryClassResolver(self.INSTANCE_CLASSES)
else:
self.class_importer = class_importer
@classmethod
def register(cls, instance_class, name=None):
"""Register a class with the factory. This method assumes the factory instance
was created with a (default) ``DictionaryClassResolver``.
:param instance_class: the class to register with the factory (not a
string)
:param name: the name to use as the key for instance class lookups;
defaults to the name of the class
"""
if name is None:
name = instance_class.__name__
logger.debug(f'registering: {instance_class} for {cls} -> {name}')
cls.INSTANCE_CLASSES[name] = instance_class
def _find_class(self, class_name):
"Resolve the class from the name."
return self.class_importer.find_class(class_name)
def _class_name_params(self, name):
"Get the class name and parameters to use for ``__init__``."
sec = self.pattern.format(**{'name': name})
logger.debug(f'section: {sec}')
params = {}
params.update(self.config.populate({}, section=sec))
class_name = params['class_name']
del params['class_name']
return class_name, params
def _has_init_config(self, cls):
"""Return whether the class has a ``config`` parameter in the ``__init__``
method.
"""
args = inspect.signature(cls.__init__)
return self.config_param_name in args.parameters
def _has_init_name(self, cls):
"""Return whether the class has a ``name`` parameter in the ``__init__``
method.
"""
args = inspect.signature(cls.__init__)
return self.name_param_name in args.parameters
def _instance(self, cls, *args, **kwargs):
"""Return the instance.
:param cls: the class to create the instance from
:param args: given to the ``__init__`` method
:param kwargs: given to the ``__init__`` method
"""
logger.debug(f'args: {args}, kwargs: {kwargs}')
try:
return cls(*args, **kwargs)
except Exception as e:
            logger.error(f'could not create class {cls}({args})({kwargs}): {e}')
raise e
def instance(self, name=None, *args, **kwargs):
"""Create a new instance using key ``name``.
:param name: the name of the class (by default) or the key name of the
class used to find the class
:param args: given to the ``__init__`` method
:param kwargs: given to the ``__init__`` method
"""
logger.info(f'new instance of {name}')
t0 = time()
name = self.default_name if name is None else name
logger.debug(f'creating instance of {name}')
class_name, params = self._class_name_params(name)
cls = self._find_class(class_name)
params.update(kwargs)
if self._has_init_config(cls):
logger.debug(f'found config parameter')
params['config'] = self.config
if self._has_init_name(cls):
logger.debug(f'found name parameter')
params['name'] = name
        if logger.isEnabledFor(logging.DEBUG):
for k, v in params.items():
logger.debug(f'populating {k} -> {v} ({type(v)})')
inst = self._instance(cls, *args, **params)
logger.info(f'created {name} instance of {cls.__name__} ' +
f'in {(time() - t0):.2f}s')
return inst
class ImportConfigFactory(ConfigFactory):
"""Import a class by the fully qualified class name (includes the module).
This is a convenience class for setting the parent class ``class_importer``
parameter.
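    For example (a minimal sketch; the sections and class are hypothetical), a
    configuration section such as:
        [some_sec]
        class_name = some.module.SomeClass
        child = instance: other_sec
    creates ``SomeClass`` with its ``child`` parameter set to the instance
    built from the ``other_sec`` section:
        factory = ImportConfigFactory(config)
        inst = factory.instance('some_sec')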
"""
CHILD_REGEXP = re.compile(r'^instance:\s*(.+)$')
def __init__(self, *args, reload: bool = False, **kwargs):
"""Initialize the configuration factory.
:param reload: whether or not to reload the module when resolving the
class, which is useful for debugging in a REPL
"""
logger.debug(f'creating import config factory with reload: {reload}')
class_importer = ImportClassResolver(reload=reload)
super(ImportConfigFactory, self).__init__(
*args, **kwargs, class_importer=class_importer)
def _class_name_params(self, name):
class_name, params = super(ImportConfigFactory, self).\
_class_name_params(name)
insts = {}
for k, v in params.items():
if isinstance(v, str):
m = self.CHILD_REGEXP.match(v)
if m:
section = m.group(1)
insts[k] = self.instance(section)
params.update(insts)
return class_name, params
class ConfigChildrenFactory(ConfigFactory):
"""Like ``ConfigFactory``, but create children defined with the configuration
key ``CREATE_CHILDREN_KEY``. For each of these defined in the comma
separated property child property is set and then passed on to the
initializer of the object created.
In addition, any parameters passed to the initializer of the instance
method are passed on the comma separate list ``<name>_pass_param`` where
``name`` is the name of the next object to instantiate per the
configuraiton.
"""
CREATE_CHILDREN_KEY = 'create_children'
def _process_pass_params(self, name, kwargs):
passkw = {}
kname = f'{name}_pass_param'
if kname in kwargs:
for k in kwargs[kname].split(','):
logger.debug(f'passing parameter {k}')
passkw[k] = kwargs[k]
del kwargs[kname]
return passkw
def _instance_children(self, kwargs):
if self.CREATE_CHILDREN_KEY in kwargs:
for k in kwargs[self.CREATE_CHILDREN_KEY].split(','):
passkw = self._process_pass_params(k, kwargs)
logger.debug(f'create {k}: {kwargs}')
if k in kwargs:
kwargs[k] = self.instance(kwargs[k], **passkw)
for pk in passkw.keys():
del kwargs[pk]
del kwargs[self.CREATE_CHILDREN_KEY]
def _instance(self, cls, *args, **kwargs):
logger.debug(f'stash create: {cls}({args})({kwargs})')
self._instance_children(kwargs)
return super(ConfigChildrenFactory, self)._instance(
cls, *args, **kwargs)
class ConfigManager(ConfigFactory):
"""Like ``ConfigFactory`` base saves off instances (really CRUDs).
"""
def __init__(self, config: Configurable, stash: Stash, *args, **kwargs):
"""Initialize.
:param config: the configuration object used to configure the new
instance
:param stash: the stash object used to persist instances
"""
super(ConfigManager, self).__init__(config, *args, **kwargs)
self.stash = stash
def load(self, name=None, *args, **kwargs):
"Load the instance of the object from the stash."
inst = self.stash.load(name)
if inst is None:
inst = self.instance(name, *args, **kwargs)
logger.debug(f'loaded (conf mng) instance: {inst}')
return inst
def exists(self, name: str):
"Return ``True`` if data with key ``name`` exists."
return self.stash.exists(name)
def keys(self):
"""Return an iterable of keys in the collection."""
return self.stash.keys()
def dump(self, name: str, inst):
"Save the object instance to the stash."
self.stash.dump(name, inst)
def delete(self, name=None):
"Delete the object instance from the backing store."
self.stash.delete(name)
class SingleClassConfigManager(ConfigManager):
"""A configuration manager that specifies a single class. This is useful when
you don't want to specify the class in the configuration.
"""
def __init__(self, config: Configurable, cls, *args, **kwargs):
"""Initialize.
:param config: the configuration object
:param cls: the class used to create each instance
"""
super(SingleClassConfigManager, self).__init__(config, *args, **kwargs)
self.cls = cls
def _find_class(self, class_name):
return self.cls
def _class_name_params(self, name):
sec = self.pattern.format(**{'name': name})
logger.debug(f'section: {sec}')
params = {}
params.update(self.config.populate({}, section=sec))
return None, params
class CachingConfigFactory(object):
"""Just like ``ConfigFactory`` but caches instances in memory by name.
"""
def __init__(self, delegate: ConfigFactory):
"""Initialize.
:param delegate: the delegate factory to use for the actual instance
creation
"""
self.delegate = delegate
self.insts = {}
def instance(self, name=None, *args, **kwargs):
logger.debug(f'cache config instance for {name}')
if name in self.insts:
logger.debug(f'reusing cached instance of {name}')
return self.insts[name]
else:
logger.debug(f'creating new instance of {name}')
inst = self.delegate.instance(name, *args, **kwargs)
self.insts[name] = inst
return inst
def load(self, name=None, *args, **kwargs):
if name in self.insts:
logger.debug(f'reusing (load) cached instance of {name}')
return self.insts[name]
else:
logger.debug(f'load new instance of {name}')
inst = self.delegate.load(name, *args, **kwargs)
self.insts[name] = inst
return inst
def exists(self, name: str):
return self.delegate.exists(name)
def dump(self, name: str, inst):
self.delegate.dump(name, inst)
def delete(self, name):
self.delegate.delete(name)
self.evict(name)
def evict(self, name):
if name in self.insts:
del self.insts[name]
def evict_all(self):
        self.insts.clear()


# end of file: zensols/actioncli/factory.py
__author__ = 'Paul Landes'
from typing import Dict, List, Tuple, Optional
from dataclasses import dataclass, field
from datetime import datetime
import logging
import re
import dateparser
from zensols.config import ConfigFactory
from zensols.persist import persisted
from . import BibstractError, Converter, ConverterLibrary, DestructiveConverter
logger = logging.getLogger(__name__)
@dataclass
class DateToYearConverter(DestructiveConverter):
"""Converts the year part of a date field to a year. This is useful when using
Zotero's Better Biblatex extension that produces BibLatex formats, but you
need BibTex entries.
"""
NAME = 'date_year'
"""The name of the converter."""
    source_field: str = field(default='date')
    """The field containing the date to parse."""
update_fields: Tuple[str] = field(default=('year',))
"""The fields to update using the new date format."""
format: str = field(default='%Y')
"""The :meth:`datetime.datetime.strftime` formatted time, which defaults to
a four digit year.
"""
def __post_init__(self):
import warnings
m = 'The localize method is no longer necessary, as this time zone'
warnings.filterwarnings("ignore", message=m)
def _convert(self, entry: Dict[str, str]):
if self.source_field in entry:
dt_str = entry[self.source_field]
dt: datetime = dateparser.parse(dt_str)
if dt is None:
raise BibstractError(
f"Could not parse date: {dt_str} for entry {entry['ID']}")
dtfmt = dt.strftime(self.format)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f"{entry['date']} -> {dt} -> {dtfmt}")
for update_field in self.update_fields:
entry[update_field] = dtfmt
if self.destructive:
del entry['date']
@dataclass
class CopyOrMoveKeyConverter(DestructiveConverter):
"""Copy or move one or more fields in the entry. This is useful when your
bibliography style expects one key, but the output (i.e.BibLatex) outputs a
different named field).
When :obj:``destructive`` is set to ``True``, this copy operation becomes a
move.
"""
NAME = 'copy'
"""The name of the converter."""
fields: Dict[str, str] = field(default_factory=dict)
"""The source to target list of fields specifying which keys to keys get copied
or moved.
"""
def _convert(self, entry: Dict[str, str]):
for src, dst in self.fields.items():
if src in entry:
entry[dst] = entry[src]
if self.destructive:
del entry[src]
@dataclass
class RemoveConverter(DestructiveConverter):
"""Remove entries that match a regular expression.
"""
NAME = 'remove'
"""The name of the converter."""
keys: Tuple[str] = field(default=())
"""A list of regular expressions, that if match the entry key, will remove the
entry.
"""
def __post_init__(self):
self.keys = tuple(map(lambda r: re.compile(r), self.keys))
def _convert(self, entry: Dict[str, str]):
entry_keys_to_del = set()
for kreg in self.keys:
for k, v in entry.items():
km: Optional[re.Match] = kreg.match(k)
if km is not None:
entry_keys_to_del.add(k)
for k in entry_keys_to_del:
del entry[k]
@dataclass
class UpdateOrAddValue(Converter):
"""Update (clobber) or add a new mapping in an entry.
"""
NAME = 'update'
fields: List[Tuple[str, str]] = field(default_factory=list)
"""A list of tuples, each tuple having the key to add and the value to update
or add using Python interpolation syntax from existing entry keys.
"""
def _convert(self, entry: Dict[str, str]):
for src, dst in self.fields:
if src is None:
src = self.ENTRY_TYPE
try:
val = dst.format(**entry)
except KeyError as e:
msg = f'Can not execute update/add converter for {entry["ID"]}'
raise BibstractError(msg) from e
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'{src} -> {val}')
entry[src] = val
@dataclass
class ReplaceValue(Converter):
"""Replace values of entries by regular expression.
"""
NAME = 'replace'
fields: List[Tuple[str, str, str]] = field(default_factory=list)
"""A list of tuples, each tuple having the key of the entry to modify, a string
regular expression of what to change, and the replacement string.
"""
def _convert(self, entry: Dict[str, str]):
for src, regex, repl in self.fields:
if src is None:
src = self.ENTRY_TYPE
try:
old = entry[src]
new = re.sub(regex, repl, old)
if old != new:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'{src} -> {new}')
entry[src] = new
except KeyError as e:
msg = f'Can not execute update/add converter for {entry["ID"]}'
raise BibstractError(msg) from e
@dataclass
class ConditionalConverter(Converter):
"""A converter that invokes a list of other converters if a certain entry
key/value pair matches.
"""
NAME = 'conditional_converter'
config_factory: ConfigFactory = field()
"""The configuration factory used to create this converter and used to get
referenced converters.
"""
converters: List[str] = field(default_factory=list)
"""The list of converters to inovke if the predicate condition is satisfied.
"""
includes: Dict[str, str] = field(default_factory=dict)
"""The key/values that must match in the entry to invoke the converters
referenced by :obj:`converters`.
"""
excludes: Dict[str, str] = field(default_factory=dict)
"""The key/values that can *not* match in the entry to invoke the converters
referenced by :obj:`converters`.
"""
@persisted('_converter_insts')
def _get_converters(self):
lib: ConverterLibrary = self.config_factory('converter_library')
return tuple(map(lambda n: lib[n], self.converters))
def _matches(self, entry: Dict[str, str], crit: Dict[str, str],
negate: bool) -> bool:
matches = True
for k, v in crit.items():
k = self.ENTRY_TYPE if k is None else k
val = entry.get(k)
if val is None:
if negate:
matches = False
break
else:
is_match = re.match(v, val)
if negate:
is_match = not is_match
if is_match:
matches = False
break
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'matches: {matches}: {crit} ' +
f'{"!=" if negate else "=="} {entry}')
return matches
def _convert(self, entry: Dict[str, str]):
if self._matches(entry, self.includes, True) and \
self._matches(entry, self.excludes, False):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'matches on {entry["ID"]}: {self.includes}')
for conv in self._get_converters():
                entry.update(conv.convert(entry))


# end of file: zensols/bibstract/converter.py
__author__ = 'Paul Landes'
from typing import Dict, Set
from dataclasses import dataclass, field
import logging
from pathlib import Path
from zensols.config import ConfigFactory
from . import Extractor, ConverterLibrary, PackageFinder
logger = logging.getLogger(__name__)
@dataclass
class Exporter(object):
"""This utility extracts Bib(La)Tex references from a (La)Tex.
"""
CLI_META = {'mnemonic_overrides': {'print_bibtex_ids': 'showbib',
'print_texfile_refs': 'showtex',
'print_extracted_ids': 'showextract',
'print_entry': 'entry'},
'option_includes': {'libpath', 'output', 'inverse',
'package_regex', 'no_extension'},
'option_overrides': {'output': {'long_name': 'output',
'short_name': 'o'},
'package_regex': {'long_name': 'filter',
'short_name': 'f'},
'no_extension': {'long_name': 'noext',
'short_name': None}}}
config_factory: ConfigFactory = field()
"""The configuration factory used to create this instance."""
converter_library: ConverterLibrary = field()
"""The converter library used to print what's available."""
log_name: str = field()
"""The name of the package logger."""
def _get_extractor(self, texpath: str) -> Extractor:
return self.config_factory.new_instance('extractor', texpaths=texpath)
def _get_package_finder(self, texpath: str, package_regex: str,
library_dir: str, inverse: bool = False) -> \
PackageFinder:
return self.config_factory.new_instance(
'package_finder',
texpaths=texpath,
package_regex=package_regex,
library_dirs=library_dir,
inverse=inverse,
)
def converters(self):
"""List converters and their information."""
self.converter_library.write()
def print_bibtex_ids(self):
"""Print BibTex citation keys."""
extractor = self._get_extractor()
extractor.print_bibtex_ids()
def print_texfile_refs(self, texpath: Path):
"""Print citation references.
:param texpath: either a file or directory to recursively scan for
files with LaTex citation references
"""
        extractor = self._get_extractor(texpath)
extractor.print_texfile_refs()
def print_extracted_ids(self, texpath: Path):
"""Print BibTex export citation keys.
:param texpath: either a file or directory to recursively scan for
files with LaTex citation references
"""
        extractor = self._get_extractor(texpath)
extractor.print_extracted_ids()
def print_entry(self, citation_key: str):
"""Print a single BibTex entry as it would be given in the output.
:param citation_key: the citation key of entry to print out
"""
extractor = self._get_extractor()
entry: Dict[str, Dict[str, str]] = extractor.get_entry(citation_key)
extractor.write_entry(entry)
def export(self, texpath: str, output: Path = None):
"""Export the derived BibTex file.
:param texpath: a path separated (':' on Linux) list of files or
directories to export
:param output: the output path name, or standard out if not given
"""
extractor = self._get_extractor(texpath)
if output is None:
extractor.extract()
else:
with open(output, 'w') as f:
extractor.extract(writer=f)
def package(self, texpath: str, libpath: str = None,
package_regex: str = None, no_extension: bool = False,
inverse: bool = False):
"""Return a list of all packages.
:param texpath: a path separated (':' on Linux) list of files or
directories to export
:param libpath: a path separated (':' on Linux) list of files or
directories of libraries to not include in results
:param package_regex: the regular expression used to filter packages
:param no_extension: do not add the .sty extension
"""
        pkg_finder: PackageFinder = self._get_package_finder(
            texpath, package_regex, libpath, inverse=inverse)
pkgs: Set[str] = pkg_finder.get_packages()
pkgs = sorted(pkgs)
if not no_extension:
pkgs = tuple(map(lambda s: f'{s}.sty', pkgs))
        print('\n'.join(pkgs))


# end of file: zensols/bibstract/app.py
__author__ = 'Paul Landes'
from typing import Set, Union, Iterable, Sequence
from dataclasses import dataclass, field
import logging
import re
from pathlib import Path
from . import TexPathIterator, RegexFileParser
logger = logging.getLogger(__name__)
@dataclass
class PackageFinder(TexPathIterator):
"""Find packages used in the tex path.
"""
package_regex: Union[str, re.Pattern] = field(default=re.compile(r'.*'))
"""The regular expression used to filter what to return."""
library_dirs: Union[str, Path, Sequence[Path]] = field(default=None)
"""The list of library paths. Each path is not traversed to find packages.
"""
inverse: bool = field(default=False)
"""Whether to invert the packages with all those packages found in
:obj:`library_dirs`.
"""
def __post_init__(self):
super().__post_init__()
self.library_dirs = set(self._to_path_seq(self.library_dirs))
if isinstance(self.package_regex, str):
self.package_regex = re.compile(self.package_regex)
def _get_tex_paths(self) -> Iterable[Path]:
paths = super()._get_tex_paths()
paths = filter(lambda p: p.parent not in self.library_dirs, paths)
return paths
def _get_use_packages(self) -> Set[str]:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'finding packages in {self.texpaths}')
pattern: re.Pattern = re.compile(r'\\usepackage{([a-zA-Z0-9,-]+?)\}')
parser = RegexFileParser(pattern=pattern)
path: Path
for path in self._get_tex_paths():
with open(path) as f:
parser.find(f)
return parser.collector
def get_packages(self) -> Set[str]:
pks: Set[str] = self._get_use_packages()
if self.package_regex is not None:
pks = set(filter(
lambda s: self.package_regex.match(s) is not None, pks))
if self.inverse:
lps = super()._get_tex_paths(self.library_dirs)
lps = set(map(lambda p: p.stem, lps))
pks = lps - pks
return pks | zensols.bibstract | /zensols.bibstract-1.1.0-py3-none-any.whl/zensols/bibstract/pkg.py | pkg.py |
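# The block below is an illustrative sketch only (not part of the API).  It
# shows how the ``\usepackage`` pattern used by ``_get_use_packages`` pulls
# package names out of LaTeX source with a ``RegexFileParser``; run it as a
# module (e.g. ``python -m <package>.pkg``) so the relative imports resolve.
if __name__ == '__main__':
    from io import StringIO
    _demo = StringIO('\\usepackage{booktabs}\n\\usepackage{amsmath,zentable}\n')
    _parser = RegexFileParser(
        pattern=re.compile(r'\\usepackage\{([a-zA-Z0-9,-]+?)\}'))
    _parser.find(_demo)
    # prints the found package names: booktabs, amsmath and zentable
    print(_parser.collector)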
__author__ = 'Paul Landes'
from typing import Set, Dict, List, Tuple, Sequence, Union, Iterable
from dataclasses import dataclass, field
import os
import logging
import sys
import itertools as it
from pathlib import Path
from io import TextIOBase
import re
from zensols.persist import persisted
from zensols.config import Writable, ConfigFactory
from zensols.introspect import ClassImporter, ClassInspector, Class
from zensols.cli import ApplicationError
logger = logging.getLogger(__name__)
class BibstractError(ApplicationError):
"""Application level error."""
pass
@dataclass
class TexPathIterator(object):
"""Base class that finds LaTeX files (``.tex``, ``.sty``, etc).
"""
TEX_FILE_REGEX = re.compile(r'.+\.(?:tex|sty|cls)$')
texpaths: Union[str, Path, Sequence[Path]] = field()
"""Either a file or directory to recursively scan for files with LaTex citation
references.
"""
def __post_init__(self):
self.texpaths = self._to_path_seq(self.texpaths)
def _to_path_seq(self, path_thing: Union[str, Path, Sequence[Path]]) -> \
Sequence[Path]:
"""Create a path sequence from a string, path or sequence of paths."""
if path_thing is None:
path_thing = ()
if isinstance(path_thing, Path):
path_thing = (path_thing,)
elif isinstance(path_thing, str):
path_thing = tuple(map(Path, path_thing.split(os.pathsep)))
return path_thing
def _iterate_path(self, par: Path) -> Iterable[Path]:
"""Recursively find LaTeX files."""
childs: Iterable[Path]
if par.is_file():
childs = (par,)
elif par.is_dir():
childs = it.chain.from_iterable(
map(self._iterate_path, par.iterdir()))
else:
childs = ()
logger.warning(f'unknown file type: {par}--skipping')
return childs
def _get_tex_paths(self, paths: Sequence[Path] = None) -> Iterable[Path]:
"""Recursively find LaTeX files in all directories/files in ``paths``.
"""
paths = self.texpaths if paths is None else paths
files = it.chain.from_iterable(map(self._iterate_path, paths))
return filter(self._is_tex_file, files)
def _is_tex_file(self, path: Path) -> bool:
"""Return whether or not path is a file that might contain citation references.
"""
return path.is_file() and \
self.TEX_FILE_REGEX.match(path.name) is not None
@dataclass
class RegexFileParser(object):
"""Finds all instances of the citation references in a file.
"""
REF_REGEX = re.compile(r'\\cite\{(.+?)\}|\{([a-zA-Z0-9,-]+?)\}')
"""The default regular expression used to find citation references in sty and
tex files (i.e. ``\\cite`` commands).
"""
MULTI_REF_REGEX = re.compile(r'[^,\s]+')
"""The regular expression used to find comma separated lists of citations
commands (i.e. ``\\cite``).
"""
pattern: re.Pattern = field(default=REF_REGEX)
"""The regular expression pattern used to find the references."""
collector: Set[str] = field(default_factory=set)
"""The set to add found references."""
def find(self, fileobj: TextIOBase):
def map_match(t: Union[str, Tuple[str, str]]) -> Iterable[str]:
if not isinstance(t, str):
t = t[0] if len(t[0]) > 0 else t[1]
return filter(lambda s: not s.startswith('\\'),
re.findall(self.MULTI_REF_REGEX, t))
for line in fileobj.readlines():
refs: List[Tuple[str, str]] = self.pattern.findall(line)
refs = it.chain.from_iterable(map(map_match, refs))
self.collector.update(refs)
@dataclass
class Converter(object):
"""A base class to convert fields of a BibTex entry (which is of type ``dict``)
to another field.
Subclasses should override :meth:`_convert`.
"""
ENTRY_TYPE = 'ENTRYTYPE'
name: str = field()
"""The name of the converter."""
def convert(self, entry: Dict[str, str]) -> Dict[str, str]:
"""Convert and return a new entry.
:param entry: the source data to transform
:return: a new instance of a ``dict`` with the transformed data
"""
entry = dict(entry)
self._convert(entry)
return entry
def _convert(self, entry: Dict[str, str]):
"""The templated method subclasses should extend. The default base class
implementation is to return what's given as an identity mapping.
"""
return entry
def __str__(self) -> str:
return f'converter: {self.name}'
@dataclass
class DestructiveConverter(Converter):
"""A converter that can optionally remove or modify entries.
"""
destructive: bool = field(default=False)
"""If true, remove the original field if converting from one key to another in
the Bibtex entry.
"""
@dataclass
class ConverterLibrary(Writable):
config_factory: ConfigFactory = field()
"""The configuration factory used to create the converters."""
converter_class_names: List[str] = field()
"""The list of converter class names currently available."""
converter_names: List[str] = field(default=None)
"""A list of converter names used to convert to BibTex entries."""
def __post_init__(self):
self.converter_names = list(filter(
lambda x: x != 'identity', self.converter_names))
self._unregistered = {}
def _create_converter(self, name: str) -> Converter:
conv = self.config_factory(f'{name}_converter')
conv.name = name
return conv
@property
@persisted('_converters')
def converters(self) -> Tuple[Converter]:
return tuple(map(self._create_converter, self.converter_names))
@property
@persisted('_by_name')
def converters_by_name(self) -> Dict[str, Converter]:
convs = self.converters
return {c.name: c for c in convs}
def __getitem__(self, key: str):
conv = self.converters_by_name.get(key)
if conv is None:
conv = self._unregistered.get(key)
if conv is None:
conv = self._create_converter(key)
self._unregistered[key] = conv
if conv is None:
raise BibstractError(f'No such converter: {key}')
return conv
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
markdown_depth: int = 1):
for cname in self.converter_class_names:
cls = ClassImporter(cname).get_class()
inspector = ClassInspector(cls)
mcls: Class = inspector.get_class()
header = '#' * markdown_depth
self._write_line(f'{header} Converter {cls.NAME}', depth, writer)
writer.write('\n')
self._write_line(mcls.doc.text, depth, writer)
writer.write('\n\n') | zensols.bibstract | /zensols.bibstract-1.1.0-py3-none-any.whl/zensols/bibstract/domain.py | domain.py |
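# The block below is an illustrative sketch only (not part of the API).  It
# demonstrates two of the building blocks above: finding citation keys with
# ``RegexFileParser`` and transforming a BibTex entry with a small
# ``Converter`` subclass.  The added ``note`` field is hypothetical; run as a
# module so the relative imports resolve.
if __name__ == '__main__':
    from io import StringIO
    _parser = RegexFileParser()
    _parser.find(StringIO('see \\cite{smith2020,jones2021} for details'))
    # prints the citation keys: smith2020 and jones2021
    print(_parser.collector)
    @dataclass
    class _NoteConverter(Converter):
        """Add a (hypothetical) note field to each converted entry."""
        def _convert(self, entry: Dict[str, str]):
            entry['note'] = 'converted'
    _conv = _NoteConverter(name='note')
    print(_conv.convert({'ID': 'smith2020', 'title': 'A Title'}))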
__author__ = 'plandes'
from typing import Dict, Tuple, Iterable
from dataclasses import dataclass, field
import sys
import logging
from pathlib import Path
from io import TextIOWrapper
import bibtexparser
from bibtexparser.bibdatabase import BibDatabase
from bibtexparser.bwriter import BibTexWriter
from bibtexparser.bparser import BibTexParser
from zensols.persist import persisted
from . import (
TexPathIterator, RegexFileParser, Converter, ConverterLibrary
)
logger = logging.getLogger(__name__)
@dataclass
class Extractor(TexPathIterator):
"""Extracts references, parses the BibTex master source file, and extracts
    matching references from the LaTeX file(s).
"""
converter_library: ConverterLibrary = field()
"""The converter library used to print what's available."""
master_bib: Path = field()
"""The path to the master BibTex file."""
@property
def converters(self) -> Tuple[Converter]:
return self.converter_library.converters
@property
@persisted('_database')
def database(self) -> BibDatabase:
"""Return the BibTex Python object representation of master file.
"""
if logger.isEnabledFor(logging.INFO):
logger.info(f'parsing master bibtex file: {self.master_bib}')
parser = BibTexParser()
parser.ignore_nonstandard_types = False
with open(self.master_bib) as f:
db = bibtexparser.load(f, parser)
return db
@property
def bibtex_ids(self) -> iter:
"""Return all BibTex string IDs. These could be BetterBibtex citation
references.
"""
return map(lambda e: e['ID'], self.database.entries)
@property
@persisted('_tex_refs')
def tex_refs(self) -> set:
"""Return the set of parsed citation references.
"""
parser = RegexFileParser()
for path in self._get_tex_paths():
with open(path) as f:
parser.find(f)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'parsed refs: {", ".join(parser.collector)}')
return parser.collector
@property
def extract_ids(self) -> set:
"""Return the set of BibTex references to be extracted.
"""
bib = set(self.bibtex_ids)
trefs = self.tex_refs
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'extracted: {trefs}')
return bib & trefs
def print_bibtex_ids(self):
logging.getLogger('bibtexparser').setLevel(logging.ERROR)
for id in self.bibtex_ids:
print(id)
def print_texfile_refs(self):
for ref in self.tex_refs:
print(ref)
def print_extracted_ids(self):
for id in self.extract_ids:
print(id)
def _convert_dict(self, db: Dict[str, Dict[str, str]],
keys: Iterable[str]) -> Dict[str, str]:
entries = {}
for did in sorted(keys):
entry = db[did]
for conv in self.converters:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'applying {conv}')
entry = conv.convert(entry)
entries[did] = entry
return entries
@property
@persisted('_entries', cache_global=True)
def entries(self) -> Dict[str, Dict[str, str]]:
"""The BibTex entries parsed from the master bib file."""
db = self.database.get_entry_dict()
return self._convert_dict(db, db.keys())
    @persisted('_all_entries')
    def _get_all_entries(self) -> Dict[str, Dict[str, str]]:
        return {}
def get_entry(self, citation_key: str) -> Dict[str, Dict[str, str]]:
entries = self._get_all_entries()
entry: Dict[str, str] = entries.get(citation_key)
if entry is None:
db = self.database.get_entry_dict()
entry = db[citation_key]
converted = self._convert_dict(
{citation_key: entry}, [citation_key])
entry = converted[citation_key]
entries[citation_key] = entry
return entry
@property
@persisted('_extracted_entries')
def extracted_entries(self) -> Dict[str, Dict[str, str]]:
"""The BibTex entries parsed from the master bib file."""
db = self.database.get_entry_dict()
return self._convert_dict(db, self.extract_ids)
def extract(self, writer: TextIOWrapper = sys.stdout,
extracted_entries: Dict[str, Dict[str, str]] = None):
"""Extract the master source BibTex matching citation references from the LaTex
file(s) and write them to ``writer``.
:param writer: the BibTex entry data sink
"""
bwriter = BibTexWriter()
if extracted_entries is None:
extracted_entries = self.extracted_entries
for bid, entry in extracted_entries.items():
if logger.isEnabledFor(logging.DEBUG):
estr = str(entry)[:80]
logger.debug(f'extracting: {bid}: <{estr}>')
if logger.isEnabledFor(logging.INFO):
logger.info(f'writing entry {bid}')
self.write_entry(entry, bwriter, writer)
writer.flush()
def write_entry(self, entry: Dict[str, str],
bwriter: BibTexWriter = None,
writer: TextIOWrapper = sys.stdout):
bwriter = BibTexWriter() if bwriter is None else bwriter
writer.write(bwriter._entry_to_bibtex(entry)) | zensols.bibstract | /zensols.bibstract-1.1.0-py3-none-any.whl/zensols/bibstract/extractor.py | extractor.py |
# Python to Clojure Bridge
[![Travis CI Build Status][travis-badge]][travis-link]
Python to Clojure Bridge using a Py4J Gateway. This simple library aims to
make it easy and trivial to invoke [Clojure] from [Python] using the [py4j]
gateway server and API.
If you think this sounds convoluted, it is. However, it also avoids a lot of
the issues that come with using a JVM API and proxying in the same process.
This method keeps the JVM and Python in separate processes so they don't step
on each other's feet (think memory and resource allocation issues). On the
downside, the setup is more complex.
The end to end request looks like:
1. Invoke the [clojure Python library](python/clojure/api.py).
2. Marshal the RPC request via the py4j Python library
3. Request is sent via the network
4. The request is received by the py4j Java Gateway server
5. `zensols.py4j.gateway` (this library) invokes the actual Clojure request
6. Return the result back all the way up the chain.
<!-- markdown-toc start - Don't edit this section. Run M-x markdown-toc-refresh-toc -->
## Table of Contents
- [Usage](#usage)
- [NLP Complex Example](#nlp-complex-example)
- [Installing and Running](#installing-and-running)
- [Obtaining](#obtaining)
- [Documentation](#documentation)
- [Binaries](#binaries)
- [Building](#building)
- [Changelog](#changelog)
- [License](#license)
<!-- markdown-toc end -->
## Usage
First [download, install and run the server](#installing-and-running):
```python
from zensols.clojure import Clojure
def test():
cw = Clojure('taoensso.nippy')
try:
cw.add_depenedency('com.taoensso', 'nippy', '2.13.0')
dat = cw.invoke('freeze', [123, 'strarg', 1.2])
thawed = cw.invoke('thaw', dat)
for i in thawed:
print('thawed item: %s' % i)
finally:
cw.close()
>>> test()
>>> thawed item: 123
thawed item: strarg
thawed item: 1.2
```
See the [test cases](test/python/tests.py) for more examples.
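For quick one-off calls there is also a `Clojure.call` class method that opens
the connection, invokes the function and closes the gateway in one shot; the
example below mirrors the call shown in the library's docstring:
```python
from zensols.clojure import Clojure

# joins the arguments with '||' using clojure.string/join and prints the result
print(Clojure.call('clojure.string', 'join', '||', ['test', 'one', 2234]))
```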
### NLP Complex Example
This example uses the [NLP Clojure Project] parse function. The `py4jgw`
(gateway) needs
the [models installed](https://github.com/plandes/clj-nlp-parse#setup) and the
following system property set by adding it to the environment setup script:
```bash
$ echo 'JAVA_OPTS="-Dzensols.model=${HOME}/opt/nlp/model"' > py4jgw/bin/setupenv
$ /bin/bash py4jgw/bin/py4jgw
```
The following example parses an utterance and prints out what could be used as
features in a [machine learning](https://github.com/plandes/clj-ml-model)
model:
```python
import json
from zensols.clojure import Clojure
def test():
parse = Clojure('zensols.nlparse.parse')
cjson = Clojure('clojure.data.json')
try:
parse.add_depenedency('com.zensols.nlp', 'parse', '0.1.4')
cjson.add_depenedency('org.clojure', 'data.json', '0.2.6')
panon = parse.invoke('parse', """I LOVE Bill Joy and he's the smartest guy in the world!""")
jstr = cjson.invoke('write-str', panon)
parsed = json.loads(jstr)
print('sentiment: %s' % parsed['sentiment'])
ment = parsed['mentions'][0]
print("'%s' is a %s" % (ment['text'], ment['ner-tag']))
#print(json.dumps(parsed, indent=2, sort_keys=True))
finally:
parse.close()
cjson.close()
>>> test()
>>> sentiment: 1
'Bill Joy' is a PERSON
{
"mentions": [
{
"char-range": [
7,
15
],
...
```
### Installing and Running
1. Download the binary:
`$ wget https://github.com/plandes/clj-py4j/releases/download/v0.0.1/py4jgw.zip`
2. Extract: `$ unzip py4jgw.zip`
3. Run the server: `$ /bin/bash ./py4jgw/bin/py4jgw` (or `py4jgw\bin\py4jgw.bat`)
4. Install the Python library: `$ pip install zensols.clojure`
5. [Hack!](#usage)
## Obtaining
In your `project.clj` file, add:
[](https://clojars.org/com.zensols.py4j/gateway/)
### Binaries
The latest release binaries are
available [here](https://github.com/plandes/clj-py4j/releases/latest).
## Documentation
* [Clojure](https://plandes.github.io/clj-py4j/codox/index.html)
* [Java](https://plandes.github.io/clj-py4j/apidocs/index.html)
## Building
To build from source, do the following:
- Install [Leiningen](http://leiningen.org) (this is just a script)
- Install [GNU make](https://www.gnu.org/software/make/)
- Install [Git](https://git-scm.com)
- Download the source: `git clone https://github.com/plandes/clj-py4j && cd clj-py4j`
- Download the make include files:
```bash
mkdir ../clj-zenbuild && wget -O - https://api.github.com/repos/plandes/clj-zenbuild/tarball | tar zxfv - -C ../clj-zenbuild --strip-components 1
```
- Build the software: `make jar`
- Build the distribution binaries: `make dist`
- Build the Python egg/wheel distribution libraries: `make pydist`
Note that you can also build a single jar file with all the dependencies with: `make uber`
## Changelog
An extensive changelog is available [here](CHANGELOG.md).
## License
Copyright © 2017 Paul Landes
Apache License version 2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0)
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
<!-- links-->
[NLP Clojure Project]: https://github.com/plandes/clj-nlp-parse
[py4j]: https://www.py4j.org
[Clojure]: https://clojure.org
[Python]: https://www.python.org
[travis-link]: https://travis-ci.org/plandes/clj-py4j
[travis-badge]: https://travis-ci.org/plandes/clj-py4j.svg?branch=master
| zensols.clojure | /zensols.clojure-0.5.tar.gz/zensols.clojure-0.5/README.md | README.md |
import py4j
from py4j.java_gateway import JavaGateway, GatewayParameters
import logging
logger = logging.getLogger('py4j.clojure')
class Clojure(object):
"""
    Invoke Clojure calls via a py4j gateway.
usage:
Clojure.call("clojure.string", "join", "||", ["test", "one", 2234])
"""
def __init__(self, namespace=None, address='127.0.0.1', port=25333,
gateway=None):
self.ns_name = namespace
self.ns_obj = None
self.params = GatewayParameters(address=address, port=port)
self.gateway = gateway
def _java_object(self, o):
"""Convert **o** to a Java object usable by the gateway."""
if isinstance(o, list) or isinstance(o, tuple):
return self._java_array(o)
elif isinstance(o, dict):
return self._java_map(o)
return o
def _java_array(self, args):
"""Convert args from a python list to a Java array."""
alen = len(args)
jargs = self._gw.new_array(self._jvm.Object, alen)
for i in range(alen):
jargs[i] = self._java_object(args[i])
return jargs
def _java_map(self, odict):
m = self._jvm.java.util.HashMap()
for k, v in odict.items():
m.put(k, v)
return m
def _python_array(self, arr):
size = len(arr)
pa = size * [None]
for i in range(size):
pa[i] = self.python_object(arr[i])
return pa
def _python_dict(self, m):
pm = self.eval("""(->> m (map (fn [[k v]] {(name k) v})) (apply merge))""",
{'m': m})
pd = {}
for e in pm.entrySet():
pd[e.getKey()] = self.python_object(e.getValue())
return pd
def python_object(self, o):
if isinstance(o, py4j.java_collections.JavaList):
return self._python_array(o)
elif isinstance(o, py4j.java_collections.JavaMap):
return self._python_dict(o)
elif isinstance(o, py4j.java_gateway.JavaObject) and \
o.getClass().getName() == 'clojure.lang.Keyword':
return o.toString()
        else:
            return o
@property
def _gw(self):
"""Return the gateway"""
        if self.gateway is None:
logger.info('connecting to %s' % self.params)
self.gateway = JavaGateway(gateway_parameters=self.params)
return self.gateway
@property
def _jvm(self):
"""Return the gateway's JVM"""
return self._gw.jvm
def set_namespace(self, namespace):
"""Set the Clojure namespace"""
self.ns_name = namespace
self.ns_obj = None
def _new_namespace(self, namespace):
"""Create a new `com.zensols.py4j.InvokableNamespace` instance."""
logger.debug('creating namespace: %s' % namespace)
return self._jvm.com.zensols.py4j.InvokableNamespace.instance(namespace)
def get_namespace(self):
"""Return a `com.zensols.py4j.InvokableNamespace` instance."""
        if self.ns_obj is None:
self.ns_obj = self._new_namespace(self.ns_name)
return self.ns_obj
def invoke(self, function_name, *args):
"""Invoke a function in the namespace."""
jargs = self._java_array(args)
return self.get_namespace().invoke(function_name, jargs)
def eval(self, code, context=None):
context = self._java_object(context)
return self.get_namespace().eval(code, context)
def close(self):
"""Close the gateway."""
logger.info('shutting down')
        if self.gateway is not None:
self.gateway.close()
self.gateway = None
def shutdown(self):
"""Shutdown the py4j gateway server (careful!)."""
self._gw.shutdown()
self.close()
def add_depenedency(self, group_id, artifact_id, version):
"""Download and classload a maven coordinate."""
self._jvm.com.zensols.py4j.InvokableNamespace.addDependency(group_id, artifact_id, version)
@property
def entry_point(self):
"""Return the gateway entry point."""
return self._gw.entry_point
@classmethod
def call(cls, namespace, function_name, *args):
"""One shot convenience method to that invokes a function."""
inst = Clojure(namespace)
try:
return inst.invoke(function_name, *args)
finally:
inst.close()
@classmethod
def eval_default(cls, code, context=None):
"""One shot convenience method to evaulate in the default namespace."""
inst = Clojure()
try:
return inst.eval(code, context)
finally:
inst.close()
@classmethod
def kill_server(cls):
"""Stop the Clojure p4j gateway server (careful!)."""
Clojure.eval_default('(zensols.py4j.gateway/shutdown)') | zensols.clojure | /zensols.clojure-0.5.tar.gz/zensols.clojure-0.5/zensols/clojure.py | clojure.py |
__author__ = 'Paul Landes'
from typing import List, ClassVar, Set
from dataclasses import dataclass, field
import sys
import logging
from itertools import chain
import re
import yaml
from datetime import datetime
from pathlib import Path
from io import TextIOWrapper
from zensols.persist import persisted
from zensols.config import Writable
from zensols.introspect import ClassImporter
from . import LatexTableError, Table
logger = logging.getLogger(__name__)
@dataclass
class CsvToLatexTable(Writable):
"""Generate a Latex table from a CSV file.
"""
tables: List[Table] = field()
"""A list of table instances to create Latex table definitions."""
package_name: str = field()
"""The name Latex .sty package."""
def _write_header(self, depth: int, writer: TextIOWrapper):
date = datetime.now().strftime('%Y/%m/%d')
writer.write("""\\NeedsTeXFormat{LaTeX2e}
\\ProvidesPackage{%(package_name)s}[%(date)s Tables]
""" % {'date': date, 'package_name': self.package_name})
uses: Set[str] = set(chain.from_iterable(
map(lambda t: t.uses, self.tables)))
for use in sorted(uses):
writer.write(f'\\usepackage{{{use}}}\n')
def write(self, depth: int = 0, writer: TextIOWrapper = sys.stdout):
"""Write the Latex table to the writer given in the initializer.
"""
self._write_header(depth, writer)
for table in self.tables:
table.write(depth, writer)
@dataclass
class TableFileManager(object):
"""Reads the table definitions file and writes a Latex .sty file of the
generated tables from the CSV data.
"""
_FILE_NAME_REGEX: ClassVar[re.Pattern] = re.compile(r'(.+)\.yml')
_PACKAGE_FORMAT: ClassVar[str] = '{name}'
table_path: Path = field()
"""The path to the table YAML defintiions file."""
@property
@persisted('_package_name')
def package_name(self) -> str:
fname = self.table_path.name
m = self._FILE_NAME_REGEX.match(fname)
if m is None:
raise LatexTableError(f'does not appear to be a YAML file: {fname}')
return self._PACKAGE_FORMAT.format(**{'name': m.group(1)})
@property
def tables(self) -> List[Table]:
logger.info(f'reading table definitions file {self.table_path}')
tables: List[Table] = []
with open(self.table_path) as f:
content = f.read()
tdefs = yaml.load(content, yaml.FullLoader)
for name, td in tdefs.items():
class_name: str
if 'type' in td:
cls_name = td['type'].capitalize() + 'Table'
del td['type']
else:
cls_name = 'Table'
cls_name = f'zensols.datdesc.{cls_name}'
td['name'] = name
inst: Table = ClassImporter(cls_name, reload=False).instance(**td)
tables.append(inst)
return tables | zensols.datdesc | /zensols.datdesc-0.1.0-py3-none-any.whl/zensols/datdesc/mng.py | mng.py |
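# The block below is an illustrative sketch only (not part of the API).  It
# wires ``TableFileManager`` and ``CsvToLatexTable`` together the same way the
# command line application does; the CSV data and table definition are made
# up, and it assumes the installed ``zensols.datdesc`` package so the
# ``Table`` class can be resolved.  Run as a module so relative imports work.
if __name__ == '__main__':
    import tempfile
    _tmp = Path(tempfile.mkdtemp())
    _csv = _tmp / 'results.csv'
    _csv.write_text('name,f1\nmodel-a,0.91\nmodel-b,0.88\n')
    _yml = _tmp / 'results-table.yml'
    with open(_yml, 'w') as f:
        yaml.dump({'resultstab': {'path': str(_csv),
                                  'caption': 'Example results'}}, f)
    _mng = TableFileManager(_yml)
    # writes the generated .sty (package header and table) to standard output
    CsvToLatexTable(_mng.tables, _mng.package_name).write(writer=sys.stdout)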
__author__ = 'Paul Landes'
from typing import Tuple, Iterable, Any, Dict
from dataclasses import dataclass, field
from enum import Enum, auto
import logging
import re
from pathlib import Path
from zensols.util import stdout
from zensols.cli import ApplicationError
from zensols.config import Settings
from . import TableFileManager, CsvToLatexTable, Table, DataFrameDescriber
from .hyperparam import HyperparamModel, HyperparamSet, HyperparamSetLoader
logger = logging.getLogger(__name__)
class _OutputFormat(Enum):
"""The output format for hyperparameter data.
"""
short = auto()
full = auto()
json = auto()
yaml = auto()
sphinx = auto()
table = auto()
@dataclass
class Application(object):
"""Generate LaTeX tables files from CSV files and hyperparameter .sty files.
"""
data_file_regex: re.Pattern = field(default=re.compile(r'^.+-table\.yml$'))
"""Matches file names of tables process in the LaTeX output."""
hyperparam_file_regex: re.Pattern = field(
default=re.compile(r'^.+-hyperparam\.yml$'))
"""Matches file names of tables process in the LaTeX output."""
hyperparam_table_default: Settings = field(default=None)
"""Default settings for hyperparameter :class:`.Table` instances."""
def _process_data_file(self, data_file: Path, output_file: Path):
mng = TableFileManager(Path(data_file))
logger.info(f'{data_file} -> {output_file}, pkg={mng.package_name}')
with stdout(output_file, 'w') as f:
tab = CsvToLatexTable(mng.tables, mng.package_name)
tab.write(writer=f)
logger.info(f'wrote {output_file}')
def _write_hyper_table(self, hset: HyperparamSet, table_file: Path):
def map_table(dd: DataFrameDescriber, hp: HyperparamModel) -> Table:
hmtab: Dict[str, Any] = hp.table
params: Dict[str, Any] = dict(**table_defs, **hmtab) \
if hmtab is not None else table_defs
return dd.create_table(**params)
table_defs: Dict[str, Any] = self.hyperparam_table_default.asdict()
tables: Tuple[Table] = tuple(
map(lambda x: map_table(*x),
zip(hset.create_describer().describers, hset.models.values())))
with open(table_file, 'w') as f:
tab = CsvToLatexTable(tables, table_file.stem)
tab.write(writer=f)
logger.info(f'wrote: {table_file}')
def _process_hyper_file(self, hyper_file: Path, output_file: Path,
output_format: _OutputFormat):
loader = HyperparamSetLoader(hyper_file)
hset: HyperparamSet = loader.load()
with stdout(output_file, 'w') as f:
{_OutputFormat.short: lambda: hset.write(
writer=f, include_full=False),
_OutputFormat.full: lambda: hset.write(
writer=f, include_full=True),
_OutputFormat.json: lambda: hset.asjson(
writer=f, indent=4),
_OutputFormat.yaml: lambda: hset.asyaml(writer=f),
_OutputFormat.sphinx: lambda: hset.write_sphinx(writer=f),
_OutputFormat.table: lambda: self._write_hyper_table(
hset, output_file)
}[output_format]()
def _process_file(self, input_file: Path, output_file: Path,
file_type: str):
try:
if file_type == 'd':
return self._process_data_file(input_file, output_file)
else:
return self._process_hyper_file(
input_file, output_file, _OutputFormat.table)
except FileNotFoundError as e:
raise ApplicationError(str(e)) from e
def _get_paths(self, input_path: Path, output_path: Path) -> \
Iterable[Tuple[str, Path]]:
        if output_path is not None and input_path.is_dir() and \
                not output_path.exists():
            output_path.mkdir(parents=True)
if output_path is not None and \
((input_path.is_dir() and not output_path.is_dir()) or
(not input_path.is_dir() and output_path.is_dir())):
raise ApplicationError(
'Both parameters must both be either files or directories, ' +
f"got: '{input_path}', and '{output_path}'")
def _map_file_type(path: Path) -> Tuple[str, Path]:
t: str = None
if self.data_file_regex.match(path.name) is not None:
t = 'd'
elif self.hyperparam_file_regex.match(path.name) is not None:
t = 'h'
return (t, path)
        paths: Iterable[Tuple[str, Path]]
if input_path.is_dir():
paths = filter(lambda t: t[0] is not None,
map(_map_file_type, input_path.iterdir()))
elif input_path.exists():
paths = (_map_file_type(input_path),)
else:
raise ApplicationError(f'No such file for directory: {input_path}')
return paths
def generate_tables(self, input_path: Path, output_path: Path):
"""Create LaTeX tables.
:param input_path: definitions YAML path location or directory
:param output_path: output file or directory
"""
        paths: Iterable[Tuple[str, Path]] = self._get_paths(input_path, output_path)
file_type: str
path: Path
for file_type, path in paths:
if input_path.is_dir():
ofile: Path = output_path / f'{path.stem}.sty'
self._process_file(path, ofile, file_type)
else:
self._process_file(input_path, output_path, file_type)
def generate_hyperparam(self, input_path: Path, output_path: Path = None,
output_format: _OutputFormat = _OutputFormat.short):
"""Write hyperparameter formatted data
:param input_path: definitions YAML path location or directory
:param output_path: output file or directory
:param output_format: output format of the hyperparameter metadata
"""
        paths: Iterable[Tuple[str, Path]] = self._get_paths(input_path, output_path)
path: Path
for _, path in filter(lambda x: x[0] == 'h', paths):
self._process_hyper_file(path, output_path, output_format) | zensols.datdesc | /zensols.datdesc-0.1.0-py3-none-any.whl/zensols/datdesc/app.py | app.py |
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Tuple, Dict, Any, ClassVar, Type, List, Iterable
from dataclasses import dataclass, field
from abc import abstractmethod, ABCMeta
import logging
import sys
import itertools as it
import json
from json import JSONDecodeError
from pathlib import Path
from io import TextIOBase
import shutil
import pandas as pd
import hyperopt as ho
from hyperopt import hp
from zensols.persist import persisted, PersistedWork
from zensols.config import ConfigFactory, Dictable
from . import HyperparamModel, HyperparamSet, HyperparamSetLoader
from .hyperparam import HyperparamError, Hyperparam
logger = logging.getLogger(__name__)
@dataclass
class HyperparamResult(Dictable):
"""Results of an optimization and optionally the best fit.
"""
_DICTABLE_WRITABLE_DESCENDANTS: ClassVar[bool] = True
name: str = field()
"""The name of the of :class:`.HyperparameterOptimizer`, which is the
directory name.
"""
hyp: HyperparamModel = field()
"""The updated hyperparameters."""
scores: pd.DataFrame = field()
"""The last score results computed during the optimization."""
loss: float = field()
"""The last loss."""
eval_ix: int = field()
"""The index of the optimiation."""
@classmethod
def from_file(cls: Type, path: Path) -> HyperparamResult:
"""Restore a result from a file name.
:param path: the path from which to restore
"""
with open(path) as f:
data: Dict[str, Any] = json.load(f)
model_name: str = data['hyp']['name']
hyp_set: HyperparamSet = HyperparamSetLoader(
{model_name: data['hyp']}).load()
hyp_model: HyperparamModel = hyp_set[model_name]
return cls(
name=model_name,
hyp=hyp_model,
scores=pd.read_json(json.dumps(data['scores'])),
loss=data['loss'],
eval_ix=data['eval_ix'])
def _from_dictable(self, *args, **kwargs) -> Dict[str, Any]:
dct: Dict[str, Any] = super()._from_dictable(*args, **kwargs)
dct['scores'] = self.scores.to_dict()
return dct
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
dct: Dict[str, Any] = self.asdict()
del dct['scores']
del dct['hyp']
self._write_dict(dct, depth, writer)
self._write_line('hyp:', depth, writer)
self._write_object(self.hyp, depth + 1, writer)
self._write_line('scores:', depth, writer)
df = self.scores
if len(df) == 1:
df = df.T
with pd.option_context('display.max_colwidth', self.WRITABLE_MAX_COL):
self._write_block(repr(df), depth + 1, writer)
@dataclass
class HyperparamRun(Dictable):
"""A container for the entire optimization run. The best run contains the
best fit (:obj:`.HyperparamResult`) as predicted by the hyperparameter
optimization algorithm.
"""
_DICTABLE_WRITABLE_DESCENDANTS: ClassVar[bool] = True
_DICTABLE_ATTRIBUTES: ClassVar[List[str]] = [
'initial_loss', 'loss_stats', 'best']
runs: Tuple[Tuple[Path, HyperparamResult]] = field(repr=False)
"""The results from previous runs."""
@property
def final_path(self) -> Path:
"""The path of the final run."""
return self.runs[-1][0]
@property
def final(self) -> HyperparamResult:
"""The results of the final run, which as the best fit (see class
docs).
"""
return self.runs[-1][1]
@property
def initial_loss(self) -> float:
"""The loss from the first run."""
return self.runs[0][1].loss
@property
def losses(self) -> Tuple[float]:
"""The loss value for all runs"""
return tuple(map(lambda r: r[1].loss, self.runs))
@property
def loss_stats(self) -> Dict[str, float]:
"""The loss statistics (min, max, ave, etc)."""
df = pd.DataFrame(self.losses, columns=['loss'])
# skip initial row
df = df.iloc[1:]
stats = df.describe().to_dict()
return stats['loss']
@property
def best_result(self) -> HyperparamResult:
"""The result that had the lowest loss."""
runs: List[HyperparamRun] = list(map(lambda r: r[1], self.runs))
runs.sort(key=lambda r: r.loss)
return runs[0]
@classmethod
def from_dir(cls: Type, path: Path) -> HyperparamRun:
"""Return an instance with the runs stored in directory ``path``.
"""
def read_result(path: Path):
try:
return HyperparamResult.from_file(path)
except JSONDecodeError as e:
raise HyperparamError(f'Could not parse {path}: {e}') from e
files: List[Path] = sorted(path.iterdir(), key=lambda p: p.stem)
return cls(runs=tuple(map(lambda p: (p, read_result(p)), files)))
@dataclass
class CompareResult(Dictable):
"""Contains the loss and scores of an initial run and a run found on the
optimal hyperparameters.
"""
initial_param: Dict[str, Any] = field()
"""The initial hyperparameters."""
initial_loss: float = field()
"""The initial loss."""
initial_scores: pd.DataFrame = field()
"""The initial scores."""
best_eval_ix: int = field()
"""The optimized hyperparameters."""
best_param: Dict[str, Any] = field()
"""The optimized hyperparameters."""
best_loss: float = field()
"""The optimized loss."""
best_scores: pd.DataFrame = field()
"""The optimized scores."""
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line('initial:', depth, writer)
self._write_line('parameters:', depth + 1, writer)
self._write_dict(self.initial_param, depth + 2, writer)
self._write_line(f'loss: {self.initial_loss}', depth + 1, writer)
self._write_line('scores:', depth + 1, writer)
df = self.initial_scores
if len(df) == 1:
df = df.T
self._write_block(df.to_string(), depth + 2, writer)
self._write_line('best:', depth, writer)
self._write_line(f'eval_ix: {self.best_eval_ix}', depth + 1, writer)
self._write_line('parameters:', depth + 1, writer)
self._write_dict(self.best_param, depth + 2, writer)
self._write_line(f'loss: {self.best_loss}', depth + 1, writer)
self._write_line('scores:', depth + 1, writer)
df = self.best_scores
if len(df) == 1:
df = df.T
self._write_block(df.to_string(), depth + 2, writer)
@dataclass
class HyperparameterOptimizer(object, metaclass=ABCMeta):
"""Creates the files used to score optimizer output.
"""
name: str = field(default='default')
"""The name of the optimization experiment set. This has a bearing on where
files are stored (see :obj:`opt_intermediate_dir`).
"""
hyperparam_names: Tuple[str, ...] = field(default=())
"""The name of the hyperparameters to use to create the space.
:see: :meth:`_create_space`
"""
max_evals: int = field(default=1)
"""The maximum number of evaluations of the hyperparmater optimization
algorithm to execute.
"""
show_progressbar: bool = field(default=True)
"""Whether or not to show the progress bar while running the optimization.
"""
intermediate_dir: Path = field(default=Path('opthyper'))
"""The directory where the intermediate results are saved while the
algorithm works.
"""
baseline_path: Path = field(default=None)
"""A JSON file with hyperparameter settings to set on start. This file
contains the output portion of the ``final.json`` results, (which are the
results parsed and set in :obj:`HyperparamResult`).
"""
def __post_init__(self):
self._last_result: HyperparamResult = None
self._eval_ix = 0
self._config_factory = PersistedWork(
'_config_factory', self, cache_global=True)
@abstractmethod
def _create_config_factory(self) -> ConfigFactory:
"""Create the harness for the Zensols application."""
pass
@abstractmethod
def _get_hyperparams(self) -> HyperparamModel:
"""Return the hyperparameter instance for the application."""
pass
@abstractmethod
def _objective(self) -> Tuple[float, pd.DataFrame]:
"""The objective implementation used by this class.
        :return: a tuple of (``loss``, ``scores``), where the scores are a
                 dataframe of the evaluation's scores
"""
pass
def _create_space(self) -> Dict[str, float]:
"""Create the hyperparamter spacy used by the :mod:`hyperopt` optimizer.
:see: :obj:`hyperparam_names`
"""
if len(self.hyperparam_names) == 0:
raise HyperparamError(
                'No hyperparameter names given to create the optimizer space')
model: HyperparamModel = self.hyperparams
space: Dict[str, Any] = {}
name: str
for name in self.hyperparam_names:
param: Hyperparam = model[name]
if param.type == 'float':
if param.interval is None:
raise HyperparamError(f'No interval for parameter {param}')
space[name] = hp.uniform(name, *param.interval)
elif param.type == 'int':
if param.interval is None:
raise HyperparamError(f'No interval for parameter {param}')
space[name] = hp.uniformint(name, *param.interval)
elif param.type == 'choice':
space[name] = hp.choice(name, param.choices)
else:
raise HyperparamError(
f'Unsupported parameter type: {param.type}')
return space
def _compare(self) -> Tuple[float, pd.DataFrame]:
"""Like :meth:`_objective` but used when comparing the initial
hyperparameters with the optimized.
"""
return self._objective()
def _get_score_iterations(self) -> int:
"""Return the number of scored items (times to call :meth:`_optimize`)
called by :meth:`get_score_dataframe`.
"""
return 1
def _get_result_file_name(self, name: str = None) -> str:
"""Return a file name based ``name`` used for storing results."""
if name is None:
name = f'{self._eval_ix}.json' if name is None else f'{name}.json'
else:
name = f'{name}.json'
return name
@property
@persisted('_config_factory')
def config_factory(self) -> ConfigFactory:
"""The app config factory."""
return self._create_config_factory()
@property
def hyperparams(self) -> HyperparamModel:
"""The model hyperparameters to be updated by the optimizer."""
return self._get_hyperparams()
@property
def results_intermediate_dir(self) -> Path:
"""The directory that has all intermediate results by subdirectory
name.
"""
return self.intermediate_dir / 'tmp'
@property
def opt_intermediate_dir(self) -> Path:
"""The optimization result directory for the config/parser.
"""
return self.results_intermediate_dir / self.name
def remove_result(self):
"""Remove an entire run's previous optimization results."""
to_del: Path = self.opt_intermediate_dir
if to_del.is_dir():
logger.warning(f'deleting: {to_del}')
shutil.rmtree(to_del)
def _persist_result(self, name: str = None):
"""Write the last result to the file system in JSON format."""
name: str = self._get_result_file_name(name)
res_path: Path = self.opt_intermediate_dir / name
res_path.parent.mkdir(parents=True, exist_ok=True)
with open(res_path, 'w') as f:
self._last_result.asjson(writer=f, indent=4)
def _run_objective(self, space: Dict[str, Any] = None) -> float:
hp: HyperparamModel = self.hyperparams
if space is not None:
hp.update(space)
loss: float
scores: pd.DataFrame
loss, scores = self._objective()
self._last_result = HyperparamResult(
name=self.name,
hyp=hp,
scores=scores,
loss=loss,
eval_ix=self._eval_ix)
self._persist_result()
self._eval_ix += 1
return loss
def _create_uniform_space(self, params: Tuple[str, float, float],
integer: bool = False) -> \
Dict[str, float]:
"""Create a uniform space used by the optimizer.
:param params: a tuple of tuples with the form
``(<param name>, <start range>, <end range>)``
        :param integer: whether the uniform range is over integers
"""
def map_param(name: str, start: float, end: float) -> Tuple[Any, ...]:
if integer:
return (name, hp.uniformint(name, start, end))
else:
return (name, hp.uniform(name, start, end))
return dict(map(lambda x: map_param(*x), params))
def _create_choice(self, params: Tuple[str, Tuple[str, ...]]):
"""Create a choice space.
:param params: a tuple of tuples with the form
``(<param name>, (<choice 1>, <choice 2>...))``
"""
def map_param(name: str, choices: Tuple[Any, ...]) -> Tuple[Any, ...]:
return (name, hp.choice(name, choices))
return dict(map(lambda x: map_param(*x), params))
def _minimize_objective(self) -> Dict[str, float]:
"""Run the hyperparameter optimization process and return the results as
a dict of the optimized parameters.
"""
search_space: Dict[str, float] = self._create_space()
if logger.isEnabledFor(logging.INFO):
logger.info('starting hyperparameter minimization objective')
return ho.fmin(
fn=self._run_objective,
show_progressbar=self.show_progressbar,
space=search_space,
algo=ho.tpe.suggest,
max_evals=self.max_evals)
def _finish_optimize(self, best: Dict[str, float]):
"""Called by :meth:`optimize` when complete. Command line programs will
probably want to report the hyperparameters and last score values
computed during the hyperparameter optimization using
:meth:`write_best_result`.
"""
pass
def optimize(self):
"""Run the optimization algorithm.
"""
self.remove_result()
self._objective()
        if logger.isEnabledFor(logging.INFO):
logger.info(f'initial loss: {self._last_result.loss}')
best: Dict[str, float] = self._minimize_objective()
self._finish_optimize(best)
def get_run(self, result_dir: Path = None) -> HyperparamRun:
"""Get the best run from the file system.
:param result_dir: the result directory, which defaults to
:obj:`opt_intermediate_dir`
"""
if result_dir is None:
result_dir = self.opt_intermediate_dir
return HyperparamRun.from_dir(result_dir)
def get_best_result(self) -> HyperparamResult:
return self.get_run().best_result
def get_best_results(self) -> Dict[str, HyperparamResult]:
"""Return the best results across all hyperparameter optimization runs
with keys as run names.
"""
res_dirs: Iterable[Path] = self.results_intermediate_dir.iterdir()
res_dirs = filter(lambda p: p.is_dir(), res_dirs)
best_results: Dict[str, HyperparamResult] = {}
res_dir: Path
for res_dir in res_dirs:
run: HyperparamRun = self.get_run(res_dir)
res: HyperparamResult = run.best_result
best_results[res_dir.name] = res
return best_results
def _get_baseline(self, set_hyp: bool = True) -> HyperparamResult:
"""Get the baseline hyperparamters from a file if specified in
:obj:`baseline_path` if set, otherwise get it from the file system.
"""
res: HyperparamResult
if self.baseline_path is None:
logger.info('restoring from best run')
res: HyperparamResult = self.get_run().best_result
else:
logger.info(f'restoring from {self.baseline_path}')
if self.baseline_path.is_dir():
                # assume a previous run when given a directory
run = HyperparamRun.from_dir(self.baseline_path)
res = run.best_result
else:
try:
res = HyperparamResult.from_file(self.baseline_path)
except KeyError:
# when a flat parameter file, there will be no `hyp` key
logger.info(
f'baseline file {self.baseline_path} does not ' +
'look like previous results--trying as parameter file')
with open(self.baseline_path) as f:
params: Dict[str, Any] = json.load(f)
hyp = self.hyperparams.clone()
hyp.update(params)
# return a bogus result, which is alright since used
# only to copy parameters
                    res = HyperparamResult(
                        name=self.name,
                        hyp=hyp,
                        scores=None,
                        loss=-1,
                        eval_ix=-1)
if set_hyp:
self.hyperparams.update(res.hyp)
return res
def get_comparison(self) -> CompareResult:
"""Compare the scores of the default parameters with those predicted by
the optimizer of the best run.
"""
hyp: HyperparamModel = self.hyperparams
prev: Dict[str, Any] = hyp.flatten()
cmp_res: CompareResult = None
try:
initial_loss: float
initial_scores: pd.DataFrame
initial_loss, initial_scores = self._compare()
best_res: HyperparamResult = self._get_baseline()
best: Dict[str, Any] = best_res.hyp.flatten()
best_loss: float
best_scores: pd.DataFrame
hyp.update(best)
best_loss, best_scores = self._compare()
cmp_res = CompareResult(
initial_param=prev,
initial_loss=initial_loss,
initial_scores=initial_scores,
best_eval_ix=best_res.eval_ix,
best_param=best,
best_loss=best_loss,
best_scores=best_scores)
finally:
hyp.update(prev)
return cmp_res
def _write_result(self, res: HyperparamResult, writer: TextIOBase):
print(f'{self.name}:', file=writer)
res.write(writer=writer)
def write_best_result(self, writer: TextIOBase = sys.stdout,
include_param_json: bool = False):
"""Print the results from the best run.
:param include_param_json: whether to output the JSON formatted
hyperparameters
"""
best_res: HyperparamResult = self.get_best_result()
self._write_result(best_res, writer)
if include_param_json:
print('parameters:', file=writer)
print(json.dumps(best_res.hyp.flatten(), indent=4), file=writer)
def write_compare(self, writer: TextIOBase = sys.stdout):
"""Write the results of a compare of the initial hyperparameters against
the optimized.
"""
cmp_res: CompareResult = self.get_comparison()
if cmp_res is None:
print('no results or error', file=writer)
else:
cmp_res.write(writer=writer)
def get_score_dataframe(self, iterations: int = None) -> pd.DataFrame:
"""Create a dataframe from the results scored from the best
hyperparameters.
:param iterations: the number times the objective is called to produce
the results (the objective space is not altered)
"""
hyp: HyperparamModel = self.hyperparams
prev: Dict[str, Any] = hyp.flatten()
dfs: List[pd.DataFrame] = []
if iterations is None:
iterations = self._get_score_iterations()
logger.info(f'scoring {iterations} iterations using best settings')
try:
self._get_baseline()
for i in range(iterations):
self._run_objective(None)
df: pd.DataFrame = self._last_result.scores
df.insert(0, 'iteration',
tuple(it.islice(it.repeat(i), len(df))))
dfs.append(df)
finally:
hyp.update(prev)
return pd.concat(dfs)
def write_score(self, writer: TextIOBase = sys.stdout) -> HyperparamResult:
"""Restore the hyperparameter state, score the data and print the
        results.  Use the :obj:`baseline_path` parameters if available,
        otherwise use the parameters from the best run.
"""
hyp: HyperparamModel = self.hyperparams
prev: Dict[str, Any] = hyp.flatten()
try:
self._get_baseline()
            print('using hyperparameters:', file=writer)
            self.hyperparams.write(1, writer)
self._run_objective(None)
self._write_result(self._last_result, writer)
return self._last_result
finally:
hyp.update(prev)
def write_scores(self, output_file: Path = None, iterations: int = None):
"""Write a file of the results scored from the best hyperparameters.
:param output_file: where to write the CSV file; defaults to a file in
:obj:`opt_intermediate_dir`
:param iterations: the number times the objective is called to produce
the results (the objective space is not altered)
"""
if output_file is None:
output_file = self.intermediate_dir / 'scores' / f'{self.name}.csv'
df: pd.DataFrame = self.get_score_dataframe(iterations)
output_file.parent.mkdir(parents=True, exist_ok=True)
df.to_csv(output_file)
logger.info(f'wrote scores to {output_file}')
@property
def aggregate_score_dir(self) -> Path:
"""The output directory containing runs with the best parameters of the
top N results (see :meth:`aggregate_scores`).
"""
return self.intermediate_dir / 'agg'
def aggregate_scores(self):
"""Aggregate best score results as a separate CSV file for each data
point with :meth:`get_score_dataframe`. This is saved as a separate
file for each optmiziation run since this method can take a long time as
it will re-score the dataset. These results are then "stiched" together
with :meth:`gather_aggregate_scores`.
"""
results: Dict[str, HyperparamResult] = self.get_best_results()
res_tups: Tuple[str, HyperparamResult] = sorted(
results.items(), key=lambda t: t[1].loss)
logger.info('scoring top best results')
self.aggregate_score_dir.mkdir(parents=True, exist_ok=True)
logger.setLevel(logging.INFO)
name: str
best: HyperparamResult
for name, best in res_tups:
output_file: Path = self.aggregate_score_dir / f'{name}.csv'
self.name = name
            logger.info(f'scoring {name} with loss {best.loss}')
df: pd.DataFrame = self.get_score_dataframe(1)
df.insert(0, 'name', name)
df.to_csv(output_file, index=False)
logger.info(f'wrote: {output_file}')
def gather_aggregate_scores(self) -> pd.DataFrame:
"""Return a dataframe of all the aggregate scores written by
:meth:`aggregate_scores`.
"""
dfs: List[pd.DataFrame] = []
agg_score_file: Path
for agg_score_file in self.aggregate_score_dir.iterdir():
dfs.append(pd.read_csv(agg_score_file))
return pd.concat(dfs) | zensols.datdesc | /zensols.datdesc-0.1.0-py3-none-any.whl/zensols/datdesc/opt.py | opt.py |
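# The block below is an illustrative sketch only (not part of the API).  It is
# a tiny, self-contained use of the hyperopt primitives this module builds on
# (``hp.uniform`` for the search space and ``ho.fmin`` with the TPE algorithm)
# independent of any ``HyperparameterOptimizer`` subclass; run as a module so
# the relative imports resolve.
if __name__ == '__main__':
    _space = {'x': hp.uniform('x', -2.0, 2.0)}
    _best = ho.fmin(fn=lambda p: (p['x'] - 1.0) ** 2,
                    space=_space,
                    algo=ho.tpe.suggest,
                    max_evals=25,
                    show_progressbar=False)
    # prints a dict such as {'x': 0.99...}, close to the minimum at x = 1
    print(_best)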
__author__ = 'Paul Landes'
from typing import (
Tuple, Any, Dict, List, Set, ClassVar, Optional, Iterable, Union
)
from dataclasses import dataclass, field
import logging
import sys
from frozendict import frozendict
from collections import OrderedDict
from io import TextIOBase
from pathlib import Path
import yaml
import pandas as pd
from zensols.config import Dictable
from zensols.persist import PersistableContainer, persisted, FileTextUtil
from . import Table
logger = logging.getLogger(__name__)
@dataclass
class DataFrameDescriber(PersistableContainer, Dictable):
"""A class that contains a Pandas dataframe, a description of the data, and
descriptions of all the columns in that dataframe.
"""
_PERSITABLE_PROPERTIES: ClassVar[Set[str]] = {'_metadata_val'}
name: str = field()
"""The description of the data this describer holds."""
df: pd.DataFrame = field()
"""The dataframe to describe."""
desc: str = field()
"""The description of the data frame."""
meta_path: Optional[Path] = field(default=None)
"""A path to use to create :obj:`meta` metadata.
:see: :obj:`meta`
"""
meta: pd.DataFrame = field(default=None)
"""The column metadata for :obj:`dataframe`, which needs columns ``name``
and ``description``. If this is not provided, it is read from file
:obj:`meta_path`. If this is set to a tuple of tuples, a dataframe is
generated from the form::
((<column name 1>, <column description 1>),
(<column name 2>, <column description 2>) ...
If both this and :obj:`meta_path` are not provided, the following is used::
(('description', 'Description'),
('value', 'Value')))
"""
table_kwargs: Dict[str, Any] = field(default_factory=dict)
"""Additional key word arguments given when creating a table in
:meth:`create_table`.
"""
def __post_init__(self):
super().__init__()
def _meta_dict_to_dataframe(self, meta: Tuple[Tuple[str, str]]):
return pd.DataFrame(data=map(lambda t: t[1], meta),
index=map(lambda t: t[0], meta),
columns=['description'])
@property
def _meta(self) -> pd.DataFrame:
if self._meta_val is None:
self._meta_val = pd.read_csv(self.meta_path, index_col='name')
return self._meta_val
@_meta.setter
def _meta(self, meta: Union[pd.DataFrame, Tuple[Tuple[str, str], ...]]):
if meta is None:
meta = (('description', 'Description'),
('value', 'Value'))
if isinstance(meta, (list, tuple)):
self._meta_val = self._meta_dict_to_dataframe(meta)
else:
self._meta_val = meta
@property
@persisted('_csv_path', transient=True)
def csv_path(self) -> Path:
"""The CVS file that contains the data this instance describes."""
fname: str = FileTextUtil.normalize_text(self.name) + '.csv'
return Path(fname)
@property
@persisted('_tab_name', transient=True)
def tab_name(self) -> str:
"""The table derived from :obj:`name`."""
return self.csv_path.stem.replace('-', '')
def create_table(self, **kwargs) -> Table:
"""Create a table from the metadata using:
* :obj:`csv_path` as :obj:`.Table.path`
* :obj:`df` as :obj:`.Table.dataframe`
* :obj:`desc` as :obj:`.Table.caption`
* :meth:`~zensols.config.dictable.Dictable.asdict` as
:obj:`.Table.column_renames`
:param kwargs: key word arguments that override the default
parameterized data passed to :class:`.Table`
"""
params: Dict[str, Any] = dict(
path=self.csv_path,
name=f'{self.tab_name}tab',
caption=self.desc,
column_renames=dict(filter(lambda x: x[1] is not None,
self.asdict().items())))
params.update(kwargs)
table = Table(**params)
table.dataframe = self.df
return table
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
df_params: Dict[str, Any] = None):
"""
:param df_params: the formatting pandas options, which defaults to
``max_colwidth=80``
"""
if df_params is None:
df_params = dict(max_colwidth=self.WRITABLE_MAX_COL)
self._write_line(f'name: {self.name}', depth, writer, max_len=True)
self._write_line(f'desc: {self.desc}', depth, writer, max_len=True)
self._write_line('dataframe:', depth, writer)
dfs: str = self.df.to_string(**df_params)
self._write_block(dfs, depth + 1, writer)
self._write_line('columns:', depth, writer)
self._write_dict(self.asdict(), depth + 1, writer)
def _from_dictable(self, *args, **kwargs) -> Dict[str, str]:
dfm: pd.DataFrame = self.meta
descs: Dict[str, str] = OrderedDict()
col: str
for col in self.df.columns:
if col in dfm.index:
descs[col] = dfm.loc[col]['description']
else:
descs[col] = None
return descs
DataFrameDescriber.meta = DataFrameDescriber._meta
@dataclass
class DataDescriber(PersistableContainer, Dictable):
"""Container class for :class:`.DataFrameDescriber` instances. It also
saves their instances as CSV data files and YAML configuration files.
"""
describers: Tuple[DataFrameDescriber] = field()
"""The contained dataframe and metadata.
"""
name: str = field(default='default')
"""The name of the dataset."""
output_dir: Path = field(default=Path('results'))
"""The directory where to write the results."""
csv_dir: Path = field(default=Path('csv'))
"""The directory where to write the CSV files."""
yaml_dir: Path = field(default=Path('config'))
"""The directory where to write the CSV files."""
mangle_sheet_name: bool = field(default=False)
"""Whether to normalize the Excel sheet names when
:class:`xlsxwriter.exceptions.InvalidWorksheetName` is raised.
"""
def _create_path(self, fname: Union[Path, str]) -> Path:
return self.output_dir / fname
@property
def describers_by_name(self) -> Dict[str, DataFrameDescriber]:
"""Data frame describers keyed by the describer name."""
return frozendict(dict(map(lambda t: (t.name, t), self.describers)))
@staticmethod
def _get_col_widths(df: pd.DataFrame, min_col: int = 100):
# we concatenate this to the max of the lengths of column name and
# its values for each column, left to right
return [max([min(min_col, len(str(s))) for s in df[col].values] +
[len(col)]) for col in df.columns]
def save_excel(self, output_file: Path = None) -> Path:
"""Save all provided dataframe describers to an Excel file.
:param output_file: the Excel file to write, which needs an ``.xlsx``
extension; this defaults to a path created from
:obj:`output_dir` and :obj:`name`
"""
from xlsxwriter.worksheet import Worksheet
if output_file is None:
fname: str = FileTextUtil.normalize_text(self.name)
output_file = self._create_path(f'{fname}.xlsx')
# create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter(output_file, engine='xlsxwriter')
for desc in self.describers:
sheet_name: str = desc.name
if self.mangle_sheet_name:
sheet_name = FileTextUtil.normalize_text(sheet_name)
# convert the dataframe to an XlsxWriter Excel object.
desc.df.to_excel(writer, sheet_name=sheet_name, index=False)
# set comments of header cells to descriptions
worksheet: Worksheet = writer.sheets[sheet_name]
cdesc: Dict[str, str] = desc.asdict()
col: str
for cix, col in enumerate(desc.df.columns):
comment: str = cdesc[col]
if comment is None:
logger.warning(f'missing column {col} in {desc.name}')
continue
worksheet.write_comment(0, cix, comment)
# simulate column auto-fit
for i, width in enumerate(self._get_col_widths(desc.df)):
worksheet.set_column(i, i, width)
writer.save()
logger.info(f'wrote {output_file}')
return output_file
def save_csv(self, output_dir: Path = None) -> List[Path]:
"""Save all provided dataframe describers to an CSV files.
:param output_dir: the directory of where to save the data
"""
if output_dir is None:
output_dir = self._create_path(self.csv_dir)
paths: List[Path] = []
desc: DataFrameDescriber
for desc in self.describers:
out_file: Path = output_dir / desc.csv_path
out_file.parent.mkdir(parents=True, exist_ok=True)
desc.df.to_csv(out_file, index=False)
logger.info(f'saved csv file to: {out_file}')
paths.append(out_file)
logger.info(f'saved csv files to directory: {output_dir}')
return paths
def save_yaml(self, output_dir: Path = None,
yaml_dir: Path = None) -> List[Path]:
"""Save all provided dataframe describers YAML files used by the
``datdesc`` command.
:param output_dir: the directory of where to save the data
"""
if output_dir is None:
output_dir = self._create_path(self.csv_dir)
if yaml_dir is None:
yaml_dir = self._create_path(self.yaml_dir)
paths: List[Path] = []
desc: DataFrameDescriber
for desc in self.describers:
csv_file: Path = output_dir / desc.csv_path
out_file: Path = yaml_dir / f'{desc.tab_name}-table.yml'
tab: Table = desc.create_table()
tab.path = csv_file
tab_def: Dict[str, Any] = tab.serialize()
out_file.parent.mkdir(parents=True, exist_ok=True)
with open(out_file, 'w') as f:
yaml.dump(tab_def, f)
logger.info(f'saved yml file to: {out_file}')
paths.append(out_file)
return paths
def save(self, output_dir: Path = None,
yaml_dir: Path = None, include_excel: bool = False) -> List[Path]:
"""Save both the CSV and YAML configuration file.
:param include_excel: whether to also write the Excel file to its
default output file name
:see: :meth:`save_csv`
:see: :meth:`save_yaml`
"""
paths: List[Path] = self.save_csv(output_dir)
paths = paths + self.save_yaml(output_dir, yaml_dir)
if include_excel:
paths.append(self.save_excel())
return paths
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
df_params: Dict[str, Any] = None):
"""
:param df_params: the formatting pandas options, which defaults to
``max_colwidth=80``
"""
desc: DataFrameDescriber
for desc in self.describers:
self._write_line(f'{desc.name}:', depth, writer)
desc.write(depth + 1, writer, df_params=df_params)
def __len__(self) -> int:
return len(self.describers)
def __iter__(self) -> Iterable[DataFrameDescriber]:
return iter(self.describers)
def __getitem__(self, name: str) -> DataFrameDescriber:
return self.describers_by_name[name] | zensols.datdesc | /zensols.datdesc-0.1.0-py3-none-any.whl/zensols/datdesc/desc.py | desc.py |
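# A minimal usage sketch (not part of the API): the data, column names and
# describer name below are hypothetical and only illustrate how ``save``
# writes the CSV, YAML and (optionally) Excel artifacts.
def _example_data_describer_usage():
    import pandas as pd
    dfd = DataFrameDescriber(
        name='scores',
        df=pd.DataFrame({'run': [1, 2], 'f1': [0.81, 0.84]}),
        desc='hypothetical per-run F1 scores',
        meta=pd.DataFrame({'description': ['run identifier', 'macro F1']},
                          index=['run', 'f1']))
    describer = DataDescriber(describers=(dfd,), name='example')
    # writes CSVs to results/csv, YAML table configs to results/config and,
    # with include_excel=True, an Excel file to results/example.xlsx
    return describer.save(include_excel=True)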
__author__ = 'Paul Landes'
from typing import (
Dict, List, Sequence, Tuple, Any, ClassVar, Optional, Callable
)
from dataclasses import dataclass, field
import sys
import re
import string
from io import TextIOWrapper, StringIO
from tabulate import tabulate
import itertools as it
import pandas as pd
from zensols.persist import persisted, PersistedWork, PersistableContainer
from zensols.config import Dictable
from . import VariableParam
@dataclass
class Table(PersistableContainer, Dictable):
"""Generates a Zensols styled Latex table from a CSV file.
"""
_VARIABLE: ClassVar[str] = 'VAR'
_VARIABLE_ATTRIBUTES: ClassVar[Tuple[VariableParam]] = (
VariableParam('placement', value_format='{val}'),
VariableParam('size'))
path: str = field()
"""The path to the CSV file to make a latex table."""
name: str = field()
"""The name of the table, also used as the label."""
caption: str = field()
"""The human readable string used to the caption in the table."""
head: str = field(default=None)
"""The header to use for the table, which is used as the text in the list of
tables and made bold in the table."""
placement: str = field(default=None)
"""The placement of the table."""
size: str = field(default='normalsize')
"""The size of the table, and one of:
* Huge
* huge
* LARGE
* Large
* large
* normalsize (default)
* small
* footnotesize
* scriptsize
* tiny
"""
uses: Sequence[str] = field(default=('zentable',))
"""Comma separated list of packages to use."""
single_column: bool = field(default=True)
"""Makes the table one column wide in a two column. Setting this to false
generates a ``table*`` two column table, which won't work in beamer
(slides) document types.
"""
hlines: Sequence[int] = field(default_factory=set)
"""Indexes of rows to put horizontal line breaks."""
double_hlines: Sequence[int] = field(default_factory=set)
"""Indexes of rows to put double horizontal line breaks."""
column_keeps: Optional[List[str]] = field(default=None)
"""If provided, only keep the columns in the list"""
column_removes: List[str] = field(default_factory=list)
"""The name of the columns to remove from the table, if any."""
column_renames: Dict[str, str] = field(default_factory=dict)
"""Columns to rename, if any."""
column_aligns: str = field(default=None)
"""The alignment/justification (i.e. ``|l|l|`` for two columns). If not
provided, they are automatically generated based on the columns of the
table.
"""
percent_column_names: Sequence[str] = field(default=())
"""Column names that have a percent sign to be escaped."""
make_percent_column_names: Dict[str, int] = field(default_factory=dict)
"""Each columnn in the map will get rounded to the value * 100 of the name.
For example, ``{'ann_per': 3}`` will round column ``ann_per`` to 3 decimal
places.
"""
format_thousands_column_names: Dict[str, Optional[Dict[str, Any]]] = \
field(default_factory=dict)
"""Columns to format using thousands. The keys are the column names of the
table and the values are either ``None`` or the keyword arguments to
:meth:`format_thousand`.
"""
column_evals: Dict[str, str] = field(default_factory=dict)
"""Keys are column names with values as functions (i.e. lambda expressions)
evaluated with a single column value parameter. The return value replaces
the column identified by the key.
"""
read_kwargs: Dict[str, str] = field(default_factory=dict)
"""Keyword arguments used in the :meth:`~pandas.read_csv` call when reading the
CSV file.
"""
write_kwargs: Dict[str, str] = field(
default_factory=lambda: {'disable_numparse': True})
"""Keyword arguments used in the :meth:`~tabulate.tabulate` call when
writing the table. The default tells :mod:`tabulate` to not parse/format
numerical data.
"""
replace_nan: str = field(default=None)
"""Replace NaN values with a the value of this field as :meth:`tabulate` is
not using the missing value due to some bug I assume.
"""
blank_columns: List[int] = field(default_factory=list)
"""A list of column indexes to set to the empty string (i.e. 0th to fixed the
``Unnamed: 0`` issues).
"""
bold_cells: List[Tuple[int, int]] = field(default_factory=list)
"""A list of row/column cells to bold."""
bold_max_columns: List[str] = field(default_factory=list)
"""A list of column names that will have its max value bolded."""
capitalize_columns: Dict[str, bool] = field(default_factory=dict)
"""Capitalize either sentences (``False`` values) or every word (``True``
values). The keys are column names.
"""
index_col_name: str = field(default=None)
"""If set, add an index column with the given name."""
df_code: str = field(default=None)
"""Python code executed that manipulates the table's dataframe. The code
has a local ``df`` variable and the returned value is used as the
replacement. This is usually a one-liner used to subset the data etc. The
code is evaluated with :func:`eval`.
"""
df_code_pre: str = field(default=None)
"""Like :obj:`df_code` but right after the source data is read and before
any modifications. The code is evaluated with :func:`eval`.
"""
df_code_exec: str = field(default=None)
"""Like :obj:`df_code` but invoke with :func:`exec` instead of :func:`eval`.
"""
df_code_exec_pre: str = field(default=None)
"""Like :obj:`df_code_pre` but invoke with :func:`exec` instead of
:func:`eval`.
"""
def __post_init__(self):
super().__init__()
if isinstance(self.uses, str):
self.uses = re.split(r'\s*,\s*', self.uses)
if isinstance(self.hlines, (tuple, list)):
self.hlines = set(self.hlines)
if isinstance(self.double_hlines, (tuple, list)):
self.double_hlines = set(self.double_hlines)
self._formatted_dataframe = PersistedWork(
'_formatted_dataframe', self, transient=True)
@property
def latex_environment(self) -> str:
"""Return the latex environment for the table.
"""
tab: str
if self.single_column:
tab = 'zztable'
else:
if self.placement is None:
tab = 'zztabletcol'
else:
tab = 'zztabletcolplace'
if self.head is not None:
tab += 'head'
return tab
@property
def columns(self) -> str:
"""Return the columns field in the Latex environment header.
"""
cols: str = self.column_aligns
if cols is None:
df = self.formatted_dataframe
cols = 'l' * df.shape[1]
cols = '|' + '|'.join(cols) + '|'
return cols
def get_cmd_args(self, add_brackets: bool) -> Dict[str, str]:
args = {}
var: VariableParam
for i, var in enumerate(self._VARIABLE_ATTRIBUTES):
attr: str = var.name
val = getattr(self, attr)
if val is None:
val = ''
elif val == self._VARIABLE:
val = var.index_format.format(index=(i + 1), val=val, var=var)
else:
val = var.value_format.format(index=(i + 1), val=val, var=var)
if add_brackets and len(val) > 0:
val = f'[{val}]'
args[attr] = val
return args
@property
@persisted('_var_args')
def var_args(self) -> Tuple[str]:
var = tuple(map(lambda a: (a, getattr(self, a.name)),
self._VARIABLE_ATTRIBUTES))
return tuple(map(lambda x: x[0],
filter(lambda x: x[1] == self._VARIABLE, var)))
def get_params(self, add_brackets: bool) -> Dict[str, str]:
"""Return the parameters used for creating the table.
"""
params = {'tabname': self.name,
'latex_environment': self.latex_environment,
'caption': self.caption,
'columns': self.columns}
params.update(self.get_cmd_args(add_brackets))
return params
@staticmethod
def format_thousand(x: int, apply_k: bool = True,
add_comma: bool = True) -> str:
"""Format a number as a string with comma separating thousands.
:param x: the number to format
:param apply_k: add a ``K`` to the end of large numbers
:param add_comma: whether to add a comma
"""
add_k = False
if x > 10000:
if apply_k:
x = round(x / 1000)
add_k = True
if add_comma:
x = f'{x:,}'
else:
x = str(x)
if add_k:
x += 'K'
return x
@property
def header(self) -> str:
"""The Latex environment header.
"""
head: str = self._get_header()
if self.head is not None:
head += f'{{{self.head}}}'
return head
def _get_header(self) -> str:
"""Return the Latex environment header.
"""
params = self.get_params(False)
if len(params['placement']) == 0:
params['placement'] = 'h!'
return """\\begin{%(latex_environment)s}[%(placement)s]{%(tabname)s}%%
{%(caption)s}{%(size)s}{%(columns)s}""" % params
def _apply_df_eval_pre(self, df: pd.DataFrame) -> pd.DataFrame:
if self.df_code_exec_pre is not None:
_locs = locals()
exec(self.df_code_exec_pre)
df = _locs['df']
if self.df_code_pre is not None:
df = eval(self.df_code_pre)
return df
def _apply_df_number_format(self, df: pd.DataFrame) -> pd.DataFrame:
col: str
for col in self.percent_column_names:
df[col] = df[col].apply(lambda s: s.replace('%', '\\%'))
kwargs: Optional[Dict[str, Any]]
for col, kwargs in self.format_thousands_column_names.items():
kwargs = {} if kwargs is None else kwargs
df[col] = df[col].apply(lambda x: self.format_thousand(x, **kwargs))
for col, rnd in self.make_percent_column_names.items():
fmt = f'{{v:.{rnd}f}}\\%'
df[col] = df[col].apply(
lambda v: fmt.format(v=round(v * 100, rnd), rnd=rnd))
return df
def _apply_df_eval_post(self, df: pd.DataFrame) -> pd.DataFrame:
if self.df_code_exec is not None:
exec(self.df_code_exec)
for col, code, in self.column_evals.items():
func = eval(code)
df[col] = df[col].apply(func)
if self.df_code is not None:
df = eval(self.df_code)
return df
def _apply_df_add_indexes(self, df: pd.DataFrame) -> pd.DataFrame:
if self.index_col_name is not None:
df[self.index_col_name] = range(1, len(df) + 1)
cols = df.columns.to_list()
cols = [cols[-1]] + cols[:-1]
df = df[cols]
return df
def _apply_df_column_modifies(self, df: pd.DataFrame) -> pd.DataFrame:
df = df.drop(columns=self.column_removes)
if self.column_keeps is not None:
df = df[self.column_keeps]
df = df.rename(columns=self.column_renames)
return df
def _apply_df_font_format(self, df: pd.DataFrame) -> pd.DataFrame:
if self.replace_nan is not None:
df = df.fillna(self.replace_nan)
if len(self.blank_columns) > 0:
cols = df.columns.to_list()
for i in self.blank_columns:
cols[i] = ''
df.columns = cols
if len(self.bold_cells) > 0:
df = self._apply_df_bold_cells(df, self.bold_cells)
return df
def _apply_df_bold_cells(self, df: pd.DataFrame,
cells: Sequence[Tuple[int, int]]):
str_cols: bool = len(cells) > 0 and isinstance(cells[0][1], str)
cixs: Dict[str, int] = dict(zip(df.columns, it.count()))
r: int
c: int
for r, c in cells:
val: Any = df[c].iloc[r] if str_cols else df.iloc[r, c]
fmt: str = '\\textbf{' + str(val) + '}'
if str_cols:
c = cixs[c]
df.iloc[r, c] = fmt
return df
def _apply_df_capitalize(self, df: pd.DataFrame):
for col, capwords in self.capitalize_columns.items():
fn: Callable = string.capwords if capwords else str.capitalize
df[col] = df[col].apply(fn)
return df
def _get_bold_columns(self, df: pd.DataFrame) -> Tuple[Tuple[int, int]]:
if len(self.bold_max_columns) > 0:
cixs: List[str] = self.bold_max_columns
return tuple(zip(df[cixs].idxmax(), cixs))
else:
return ()
@property
def dataframe(self) -> pd.DataFrame:
"""The Pandas dataframe that holds the CSV data."""
if not hasattr(self, '_dataframe_val'):
self._dataframe_val = pd.read_csv(self.path, **self.read_kwargs)
return self._dataframe_val
@dataframe.setter
def dataframe(self, dataframe: pd.DataFrame):
"""The Pandas dataframe that holds the CSV data."""
self._dataframe_val = dataframe
self._formatted_dataframe.clear()
@property
@persisted('_formatted_dataframe')
def formatted_dataframe(self) -> pd.DataFrame:
"""The :obj:`dataframe` with the formatting applied to it used to create
the Latex table. Modifications such as string replacements for adding
percents is done.
"""
df: pd.DataFrame = self.dataframe
df = self._apply_df_eval_pre(df)
bold_cols: Tuple[Tuple[int, int]] = self._get_bold_columns(df)
df = self._apply_df_number_format(df)
df = self._apply_df_eval_post(df)
df = self._apply_df_bold_cells(df, bold_cols)
df = self._apply_df_capitalize(df)
df = self._apply_df_add_indexes(df)
df = self._apply_df_column_modifies(df)
df = self._apply_df_font_format(df)
return df
def _get_header_rows(self, df: pd.DataFrame) -> List[List[Any]]:
cols = [tuple(map(lambda c: f'\\textbf{{{c}}}', df.columns))]
return it.chain(cols, map(lambda x: x[1].tolist(), df.iterrows()))
def _get_tabulate_params(self) -> Dict[str, Any]:
params = dict(tablefmt='latex_raw', headers='firstrow')
params.update(self.write_kwargs)
return params
def write(self, depth: int = 0, writer: TextIOWrapper = sys.stdout):
df: pd.DataFrame = self.formatted_dataframe
data = self._get_header_rows(df)
params: Dict[str, Any] = self._get_tabulate_params()
lines = tabulate(data, **params).split('\n')
params = dict(self.get_params(True))
params['cvars'] = ''
n_var_args = len(self.var_args)
if n_var_args > 0:
params['cvars'] = f'[{n_var_args}]'
writer.write('\n\\newcommand{\\%(tabname)s}%(cvars)s{%%\n' % params)
writer.write(self.header)
writer.write('\n')
for lix, ln in enumerate(lines[1:-1]):
writer.write(ln + '\n')
if (lix - 2) in self.hlines:
writer.write('\\hline \n')
if (lix - 2) in self.double_hlines:
writer.write('\\hline \\hline \n')
writer.write('\\end{%s}}\n' % self.latex_environment)
def _from_dictable(self, *args, **kwargs) -> Dict[str, Any]:
dct = super()._from_dictable(*args, **kwargs)
dct['type'] = re.sub(r'Table$', '', self.__class__.__name__).lower()
def_inst = self.__class__(name=None, path=None, caption=None)
dels: List[str] = []
for k, v in dct.items():
if (not hasattr(def_inst, k) or v == getattr(def_inst, k)) or \
(isinstance(v, (list, set, tuple, dict)) and len(v) == 0):
dels.append(k)
for k in dels:
del dct[k]
return dct
def serialize(self) -> Dict[str, Any]:
"""Return a data structure usable for YAML or JSON output by flattening
Python objects.
"""
tab_name: str = self.name
# using json to recursively convert OrderedDict to dicts
tab_def: Dict[str, Any] = self.asflatdict()
del tab_def['name']
return {tab_name: tab_def}
def __str__(self):
return f'{self.name}: env={self.latex_environment}, size={self.size}'
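# A minimal rendering sketch (not part of the API); the CSV path, table name
# and caption are hypothetical.
def _example_table_usage():
    tab = Table(
        path='results/csv/scores.csv',
        name='scorestab',
        caption='Hypothetical per-run F1 scores',
        size='small',
        bold_max_columns=['f1'])
    # writes a \newcommand{\scorestab} Latex definition to standard out
    tab.write()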
@dataclass
class SlackTable(Table):
"""An instance of the table that fills up space based on the widest column.
"""
slack_col: int = field(default=0)
"""Which column elastically grows or shrinks to make the table fit."""
@property
def latex_environment(self):
return 'zzvarcoltable' if self.single_column else 'zzvarcoltabletcol'
def _get_header(self) -> str:
params = self.get_params(False)
width = '\\columnwidth' if self.single_column else '\\textwidth'
params['width'] = width
return """\\begin{%(latex_environment)s}[%(width)s]{%(placement)s}{%(tabname)s}{%(caption)s}%%
{%(size)s}{%(columns)s}""" % params
@property
def columns(self) -> str:
cols: str = self.column_aligns
if cols is None:
df = self.formatted_dataframe
i = self.slack_col
cols = ('l' * (df.shape[1] - 1))
cols = cols[:i] + 'X' + cols[i:]
cols = '|' + '|'.join(cols) + '|'
return cols
@dataclass
class LongTable(SlackTable):
@property
def latex_environment(self):
return 'zzvarcoltabletcollong'
def _get_header(self) -> str:
df = self.formatted_dataframe
hcols = ' & '.join(map(lambda c: f'\\textbf{{{c}}}', df.columns))
return f'{super()._get_header()}{{{hcols}}}{{{df.shape[1]}}}'
def _get_header_rows(self, df: pd.DataFrame) -> List[List[Any]]:
return map(lambda x: x[1].tolist(), df.iterrows())
def _get_tabulate_params(self) -> Dict[str, Any]:
params = super()._get_tabulate_params()
del params['headers']
return params
def write(self, depth: int = 0, writer: TextIOWrapper = sys.stdout):
sio = StringIO()
super().write(depth, sio)
sio.seek(0)
hlremove = 1
for line in map(str.strip, sio.readlines()):
if line == '\\hline' and hlremove > 0:
hlremove -= 1
continue
writer.write(line + '\n') | zensols.datdesc | /zensols.datdesc-0.1.0-py3-none-any.whl/zensols/datdesc/table.py | table.py |
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import (
Union, Dict, Any, List, Tuple, ClassVar, Optional,
Type, Set, Sequence, Iterable
)
from dataclasses import dataclass, field
from abc import ABCMeta, abstractmethod
import sys
import re
import textwrap as tw
import copy
from itertools import chain
from pathlib import Path
from io import TextIOBase
from frozendict import frozendict
import yaml
import pandas as pd
from zensols.util import APIError
from zensols.config import Dictable, Configurable
from zensols.persist import persisted
from . import DataDescriptionError, DataFrameDescriber, DataDescriber
class HyperparamError(DataDescriptionError):
"""Raised for any error related hyperparameter access.
"""
pass
class HyperparamValueError(HyperparamError):
"""Raised for bad values set on a hyperparameter.
"""
pass
@dataclass(eq=True)
class Hyperparam(Dictable):
"""A hyperparameter's metadata, documentation and value. The value is
accessed (retrieval and setting) at runtime. Do not use this class
explicitly. Instead use :class:`.HyperparamModel`.
The index access only applies when :obj:`type` is ``list`` or ``dict``.
Otherwise, the :obj:`value` member has the value of the hyperparameter.
"""
_DICTABLE_WRITE_EXCLUDES: ClassVar[Set[str]] = {'name'}
_NAME_REGEX = re.compile(r'^[a-zA-Z0-9_]+$')
_NODE: ClassVar[str] = 'node()'
CLASS_MAP: ClassVar[Dict[str, Type]] = frozendict({
'str': str,
'choice': str,
'float': float,
'int': int,
'bool': bool,
'list': list,
'dict': dict})
"""A mapping for values set in :obj:`type` to their Python class
equivalents.
"""
VALID_TYPES: ClassVar[str] = frozenset(CLASS_MAP.keys())
"""Valid settings for :obj:`type`."""
name: str = field()
"""The name of the hyperparameter (i.e. ``C`` or ``learning_rate``)."""
type: str = field()
"""The type of :obj:`value` (i.e. ``float``, or ``int``)."""
doc: str = field()
"""The human readable documentation for the hyperparameter. This is used in
documentation generation tasks.
"""
choices: Tuple[str, ...] = field(default=None)
"""When :obj:`type` is ``choice``, the value strings used in :obj:`value`.
"""
value: Optional[Union[str, float, int, bool, list, dict]] = \
field(default=None)
"""The value of the hyperparamer used in the application."""
interval: Union[Tuple[float, float], Tuple[int, int]] = field(default=None)
"""Valid intervals for :obj:`value` as an inclusive interval."""
def __post_init__(self):
if self.doc is None:
raise HyperparamError(f'Missing doc in {self}')
if self._NAME_REGEX.match(self.name) is None:
raise HyperparamError(
('Illegal name (only letters, numbers and underscores ' +
f'allowed): {self.name}'))
if self.type not in self.VALID_TYPES:
raise HyperparamError(
f'Unknown hyperparameter type: {self.type} in {self}')
if self.choices is None:
if self.type == 'choice':
raise HyperparamError(
f'No choice values given for choice: {self}')
@property
def _value(self) -> Optional[Union[str, float, int, bool, list, dict]]:
return self._value_val
@_value.setter
def _value(self, val: Optional[Union[str, float, int, bool, list, dict]]):
cls: Type = type(val)
tcls: Type = self.cls
if val is not None:
if cls != tcls:
raise HyperparamValueError(
f"Wrong type '{cls.__name__}', expecting " +
f"'{tcls.__name__}' for hyperparameter '{self.name}'")
if self.type == 'choice' and val not in self.choices:
choices: str = ', '.join(map(lambda s: f"'{s}'", self.choices))
raise HyperparamValueError(
f"Unknown choice '{val}', expecting one of: {choices}")
if self.interval is not None:
if val > self.interval[1] or val < self.interval[0]:
raise HyperparamValueError(
f"Out of range value '{val}' not in " +
f'[{self.interval[0]}, {self.interval[1]}]')
self._value_val = val
def _resolve(self, path: List[str], val: Any, set_val: bool = False) -> Any:
if len(path) == 0:
if set_val:
self.value = val
return self.value
if len(path) != 1:
raise HyperparamError(f'Wrong parameter path length: {path}')
path_item: str = path[0]
if path_item == self._NODE:
if set_val:
self.value = val
return self
if self.type == 'list':
try:
idx: int = int(path_item)
if set_val:
self.value[idx] = val
return self.value[idx]
except ValueError as e:
raise HyperparamError(
f"List indices must be integers, not '{path_item}'") from e
elif self.type == 'dict':
if set_val:
self.value[path_item] = val
return self.value[path_item]
else:
raise HyperparamError(
f"Trying to index '{self.type}' with path: {path}")
def write_sphinx(self, depth: int = 0, writer: TextIOBase = sys.stdout):
phead: str = f':param {self.name}: '
pdoc: str = self.doc.lower()
type_str: str = f':type {self.name}: {self.get_type_str(short=False)}'
if pdoc[-1] == '.':
pdoc = pdoc[:-1]
if self.interval is not None:
pdoc = f'{pdoc}, must be in the interval {self.interval_str}'
text_wrapper = tw.TextWrapper(
initial_indent=self._sp(depth),
subsequent_indent=(self._sp(depth) +
self._get_str_space(len(phead))))
self._write_block(text_wrapper.wrap(phead + pdoc), 0, writer)
self._write_block(type_str, depth, writer)
def __getitem__(self, index) -> Any:
if self.type == 'list':
return self.value[index]
elif self.type == 'dict':
return self.value[index]
else:
raise HyperparamError(f"'{self}' is not subscriptable")
@property
def cls(self) -> Type:
"""The Python equivalent class of :obj:`type`."""
return self.CLASS_MAP[self.type]
def get_type_str(self, short: bool = True) -> str:
if self.type == 'choice':
if short:
return 'str <' + '|'.join(self.choices) + '>'
else:
return 'str; one of: ' + ', '.join(self.choices)
else:
return self.type
@property
def interval_str(self) -> str:
return f'[{self.interval[0]}, {self.interval[1]}]'
def __str__(self) -> str:
s = f'({self.get_type_str()})'
if self.value is not None:
s = f'{self.value} {s}'
if self.interval is not None:
s = f'{s} in {self.interval_str}'
return self.name + ': ' + s
Hyperparam.value = Hyperparam._value
@dataclass
class HyperparamContainer(Dictable, metaclass=ABCMeta):
"""A container class for :class:`.Hyperparam` instances.
"""
_DICTABLE_WRITABLE_DESCENDANTS: ClassVar[bool] = True
@abstractmethod
def _resolve(self, path: List[str], val: Any, set_val: bool) -> Any:
"""Drill down to the hyperparameter navigating through
:class:`.HyperparamModel` and :class:`.HyperparamSet` using the dotted
path notation (see module docs).
"""
pass
@abstractmethod
def flatten(self, deep: bool = False) -> Dict[str, Any]:
"""Return a flattened directory with the dotted path notation (see
module docs).
:param deep: if ``True`` recurse in to :class:`dict` and :class:`list`
hyperparameter values
"""
pass
@abstractmethod
def write_sphinx(self, depth: int = 0, writer: TextIOBase = sys.stdout):
"""Write Sphinx autodoc used in a class as
:class:`dataclasses.dataclass` field.
"""
pass
def __call__(self, path: str) -> Any:
"""Return the value of the parameter.
:param path: a dotted path notation (see module docs)
"""
return self._resolve(path.split('.'), None, False)
def update(self, params: Union[Dict[str, Any], HyperparamContainer]):
"""Update parameter values.
:param params: a dict of dotted path notation keys
"""
if isinstance(params, HyperparamContainer):
self.update(params.flatten())
else:
for k, v in params.items():
self._resolve(k.split('.'), v, True)
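# A minimal sketch (not part of the API) of the dotted path notation used by
# containers; the model name ('svm') and parameter name ('C') are
# hypothetical.
def _example_dotted_path_usage(hset: HyperparamSet):
    c_value: float = hset('svm.C')        # read a single hyperparameter value
    hset.update({'svm.C': c_value * 2})   # set it using the same notation
    return hset['svm'].C                  # attribute access on the model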
@dataclass(eq=True)
class HyperparamModel(HyperparamContainer):
"""The model level class that contains the parameters. This class
represents a machine learning model such as a SVM with hyperparameters such
as ``C`` and ``maximum iterations``.
"""
_DICTABLE_WRITABLE_DESCENDANTS: ClassVar[bool] = True
name: str = field()
"""The name of the model (i.e. ``svm``). This name can have only
alpha-numeric and underscore characters.
"""
doc: str = field()
"""The human readable documentation for the model."""
desc: str = field(default=None)
"""The description the model used in the documentation when obj:`name` is
not sufficient. Since :obj:`name` has naming constraints, this can be used
as in place during documentation generation.
"""
params: Dict[str, Hyperparam] = field(default_factory=dict)
"""The hyperparameters keyed by their names."""
table: Optional[Dict[str, Any]] = field(default=None, repr=False)
"""Overriding data used when creating a :class:`.Table` from
:meth:`.DataFrameDescriber.create_table`.
"""
def __post_init__(self):
if self.doc is None:
raise HyperparamError(f'Missing doc in {self}')
if self.desc is None:
self.desc = self.name
def _resolve(self, path: List[str], val: Any, set_val: bool) -> Any:
if len(path) == 0:
return self
if len(path) == 1 and path[0] == Hyperparam._NODE:
return self
param: Hyperparam = self.params[path[0]]
return param._resolve(path[1:], val, set_val)
@classmethod
def _flatten(cls, n: Any, p: Tuple[str], col: Dict[str, Any]):
if isinstance(n, Hyperparam):
np = n.name if p is None else f'{p}.{n.name}'
cls._flatten(n.value, n.name, col)
elif isinstance(n, Dict):
for k, v in n.items():
np = k if p is None else f'{p}.{k}'
cls._flatten(v, np, col)
elif isinstance(n, (tuple, list)):
for i, v in enumerate(n):
np = str(i) if p is None else f'{p}.{i}'
col[np] = v
else:
col[p] = n
def flatten(self, deep: bool = False) -> Dict[str, Any]:
def map_param(p: Hyperparam) -> Dict[str, Any]:
val: Any = p.value
if p.type == 'dict' or p.type == 'list':
val = copy.deepcopy(val)
return (p.name, val)
if deep:
col: Dict[str, Any] = {}
self._flatten(self.params, None, col)
return col
else:
return dict(map(map_param, self.params.values()))
@property
def values_dataframe(self) -> pd.DataFrame:
"""A dataframe with parameter data. This includes the name, type, value
and documentation.
"""
def map_row(p: Hyperparam) -> Dict[str, Any]:
dct: Dict[str, Any] = p.asdict()
dct['type'] = p.get_type_str()
del dct['choices']
del dct['interval']
return dct
return pd.DataFrame(tuple(map(map_row, self.params.values())))
@property
def metadata_dataframe(self) -> pd.DataFrame:
"""A dataframe describing the :obj:`values_dataframe`."""
df = pd.DataFrame(
['name', 'data type', 'documentation', 'settings'],
index='name type doc value'.split(),
columns='description'.split())
df.index.name = 'name'
return df
def clone(self) -> HyperparamModel:
"""Make a copy of this instance."""
params = copy.deepcopy(self.__dict__)
return self.__class__(**params)
def create_dataframe_describer(self) -> DataFrameDescriber:
"""Return an object with metadata fully describing the hyperparameters
of this model.
"""
return DataFrameDescriber(
name=self.desc,
df=self.values_dataframe,
desc=self.doc,
meta=self.metadata_dataframe)
def write_sphinx(self, depth: int = 0, writer: TextIOBase = sys.stdout):
doc: str = self.doc
doc = '"""' + self.doc[0].capitalize() + self.doc[1:]
if doc[-1] != '.':
doc += '.'
self._write_line(f'{self.name}: {self.__class__.__name__} = field()',
depth, writer)
self._write_wrap(doc, depth, writer)
self._write_empty(writer)
self._write_line('Hyperparameters::', depth, writer)
self._write_empty(writer)
param: Hyperparam
for i, param in enumerate(self.params.values()):
if i > 0:
self._write_empty(writer)
param.write_sphinx(depth + 1, writer)
self._write_line('"""', depth, writer)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_doc: bool = False):
self._write_line(f'{self.desc}:', depth, writer)
if include_doc:
self._write_line(f'doc: {self.doc}', depth + 1, writer)
self._write_line('params:', depth + 1, writer)
for name, param in sorted(self.params.items(), key=lambda x: x[0]):
if include_doc:
self._write_line(f'{name}:', depth + 2, writer)
self._write_object(param, depth + 3, writer)
else:
if isinstance(param.value, dict):
self._write_line(param.name, depth + 1, writer)
self._write_object(param.value, depth + 2, writer)
else:
self._write_line(str(param), depth + 1, writer)
def __getitem__(self, name: str):
return self.params[name]
def get(self, name: str) -> HyperparamModel:
return self.params.get(name)
def __contains__(self, name: str) -> bool:
return name in self.params
def __getattr__(self, attr: str, default: Any = None) -> Any:
val: Optional[Hyperparam] = self.params.get(attr)
if val is not None:
return val.value
return super().__getattribute__(attr)
def __setattr__(self, attr: str, value: Any = None):
if attr in {'params', 'name', 'doc', 'desc', 'table'}:
super().__setattr__(attr, value)
else:
val: Optional[Hyperparam] = self.params.get(attr)
if val is not None:
val.value = value
else:
super().__setattr__(attr, value)
def __len__(self) -> int:
return len(self.params)
def __str__(self) -> str:
s: str = self.name
if s != self.desc:
s += f' ({self.desc})'
return s
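# A minimal access sketch (not part of the API); the parameter name
# ``learning_rate`` is hypothetical and assumed to be defined in the model.
def _example_model_access(model: HyperparamModel):
    lr: float = model.learning_rate      # read a parameter value by attribute
    model.learning_rate = lr / 10        # type/interval checked on assignment
    return model.create_dataframe_describer()  # metadata as a describer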
@dataclass(eq=True)
class HyperparamSet(HyperparamContainer):
"""The top level in the object graph hierarchy (see module docs). This
contains a set of models and typically where calls by packages such as
:mod:`hyperopt` are used to update the hyperparameters of the model(s).
"""
_DICTABLE_WRITABLE_DESCENDANTS: ClassVar[bool] = True
models: Dict[str, HyperparamModel] = field(default_factory=dict)
"""The models containing hyperparameters for this set."""
name: Optional[str] = field(default=None)
"""The name fo the hyperparameter set."""
def _resolve(self, path: List[str], val: Any, set_val: bool) -> Any:
if len(path) == 0:
raise HyperparamError(f'Missing model in path: {path}')
model: HyperparamModel = self.models[path[0]]
return model._resolve(path[1:], val, set_val)
def flatten(self, deep: bool = False) -> Dict[str, Any]:
def map_model(m: HyperparamModel):
return map(lambda mt: (f'{m.name}.{mt[0]}', mt[1]),
m.flatten(deep).items())
return dict(chain.from_iterable(map(map_model, self.models.values())))
def create_describer(self, meta_path: Path = None) -> DataDescriber:
"""Return an object with metadata fully describing the hyperparameters
of this model.
:param meta_path: if provided, set the path on the returned instance
"""
def map_model(m: HyperparamModel) -> DataFrameDescriber:
dd = m.create_dataframe_describer()
if meta_path is not None:
dd.meta_path = meta_path / f'{self.name}.csv'
return dd
return DataDescriber(
describers=tuple(map(map_model, self.models.values())))
def write_sphinx(self, depth: int = 0, writer: TextIOBase = sys.stdout):
if self.name is not None:
cname: str = self.name.capitalize() + 'Hyperparams'
models: str = ', '.join(
map(lambda m: f'``{m.desc}``', self.models.values()))
doc: str = f'"""Hyperparaeters for models {models}.'
mod: str = re.sub(r'^(.+)\..+$', '\\1', __name__)
self._write_line('from dataclasses import dataclass, field',
depth, writer)
self._write_line(f'from {mod} import HyperparamModel',
depth, writer)
self._write_empty(writer, 2)
self._write_line('@dataclass', depth, writer)
self._write_line(f'class {cname}(object):', depth, writer)
self._write_line(doc, depth + 1, writer)
self._write_empty(writer)
self._write_line('"""', depth + 1, writer)
model: HyperparamModel
for i, model in enumerate(self.models.values()):
if i > 0:
self._write_empty(writer)
model.write_sphinx(depth + 1, writer)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_doc: bool = False):
self._write_line('models:', depth, writer)
for name, model in self.models.items():
model.write(depth + 1, writer, include_doc=include_doc)
def __getitem__(self, name: str):
return self.models[name]
def get(self, name: str) -> HyperparamModel:
return self.models.get(name)
def __contains__(self, name: str) -> bool:
return name in self.models
def __getattr__(self, attr: str, default: Any = None) -> Any:
val: Any = self.get(attr)
if val is not None:
return val
return super().__getattribute__(attr)
def __len__(self) -> int:
return len(self.models)
@dataclass
class HyperparamSetLoader(object):
"""Loads a set of hyperparameters from a YAML :class:`pathlib.Path`,
:class:`dict` or stream :class:`io.TextIOBase`.
"""
_DICTABLE_WRITABLE_DESCENDANTS: ClassVar[bool] = True
_FILE_NAME_REGEX: ClassVar[re.Pattern] = re.compile(r'^(.+)-hyperparam$')
"""The regular expression used to match hyperparameter YAML definition
files.
"""
data: Union[Dict[str, Any], Path, TextIOBase] = field()
"""The source of data to load, which is a YAML :class:`pathlib.Path`,
:class:`dict` or stream :class:`io.TextIOBase`.
:see: :obj:`updates`
"""
config: Configurable = field(default=None)
"""The application configuration used to update the hyperparameters from
other sections.
"""
updates: Sequence[Dict[str, Any]] = field(default=())
"""A sequence of dictionaries with keys as :class:`.HyperparamModel` names
and values as sections with values to set after loading using :obj:`data`.
"""
def _from_param(self, name: str, param: Dict[str, Any]):
if 'name' not in param:
param['name'] = name
return Hyperparam(**param)
def _from_model(self, name: str, model: Dict[str, Any]):
params: Dict[str, Any] = model['params']
return HyperparamModel(
name=name,
desc=model.get('desc'),
doc=model.get('doc'),
params=dict(map(lambda p: (p[0], self._from_param(*p)),
params.items())),
table=model.get('table'))
def _from_dict(self, data: Dict[str, Any],
name: str = None) -> HyperparamSet:
return HyperparamSet(
name=name,
models=dict(map(lambda m: (m[0], self._from_model(*m)),
data.items())))
def _from_stream(self, stream: TextIOBase,
name: str = None) -> HyperparamSet:
return self._from_dict(yaml.load(stream, yaml.FullLoader), name=name)
def _from_path(self, path: Path) -> HyperparamSet:
name: str = path.stem
m: re.Match = self._FILE_NAME_REGEX.match(name)
if m is not None:
name = m.group(1)
with open(path) as f:
return self._from_stream(f, name=name)
def _get_updates(self) -> Iterable[Dict[str, Any]]:
param_update: Dict[str, Any]
for param_update in self.updates:
for k, v in param_update.items():
settings: Dict[str, Any] = dict(self.config[v])
yield dict(map(lambda t: (f'{k}.{t[0]}', t[1]),
settings.items()))
@persisted('_load')
def load(self) -> HyperparamSet:
"""Load and return the hyperparameter object graph from :obj:`data`.
"""
hs: HyperparamSet
if isinstance(self.data, Path):
hs = self._from_path(self.data)
elif isinstance(self.data, Dict):
hs = self._from_dict(self.data)
elif isinstance(self.data, TextIOBase):
hs = self._from_stream(self.data)
else:
raise APIError(f'Unknown input type: {type(self.data)}')
if self.config is not None:
update: Dict[str, Any]
for update in self._get_updates():
hs.update(update)
return hs
def __call__(self, path: str = None) -> \
Union[HyperparamSet, HyperparamModel, Hyperparam]:
"""Calls "meth:`load`.
:param path: if provided, use as the dot separated path into
:class:`.HyperparamSet`; the first level will be an
instance of :class:`.HyperparamModel`
"""
hset: HyperparamSet = self.load()
if path is not None:
return hset(path)
else:
return hset
def __getitem__(self, path: str) -> \
Union[HyperparamSet, HyperparamModel, Hyperparam]:
return self(path) | zensols.datdesc | /zensols.datdesc-0.1.0-py3-none-any.whl/zensols/datdesc/hyperparam.py | hyperparam.py |
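# A minimal loading sketch (not part of the API); the model, parameter names
# and values are hypothetical but follow the schema parsed by
# :class:`.HyperparamSetLoader`.
def _example_loader_usage() -> HyperparamSet:
    defn = {'svm': {'doc': 'a hypothetical support vector machine',
                    'params': {'C': {'type': 'float',
                                     'doc': 'regularization strength',
                                     'value': 1.0,
                                     'interval': (0.0, 10.0)}}}}
    loader = HyperparamSetLoader(defn)
    hset: HyperparamSet = loader.load()
    assert hset('svm.C') == 1.0          # dotted path read
    return hset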
__author__ = 'Paul Landes'
from typing import Tuple, Dict, List, Iterable, ClassVar
import logging
import re
import itertools as it
from pathlib import Path
from zensols.persist import persisted
logger = logging.getLogger(__name__)
class DynamicDataParser(object):
"""Parse a DDL/DML file meant also for prototyping.
For example the file::
-- meta=init_sections=create_tables,create_idx
-- name=create_idx
create index person_name on person(name);
-- name=create_tables
create table person (id int, name text, age int);
Would have ``create_idx`` and ``create_tables`` as sections and meta data::
{'init_sections':
'create_tables,create_idx'}
"""
COMMENT_PAT: ClassVar[re.Pattern] = re.compile(r'^--.*')
SEC_START_PAT: ClassVar[re.Pattern] = re.compile(
r'^-- name=([a-zA-Z0-9_]+)')
META_PAT: ClassVar[re.Pattern] = re.compile(
r'^-- meta=([a-zA-Z0-9_]+)=(.+)$')
def __init__(self, dd_path: Path):
"""Initialize.
:param dd_path: the path of the file to parse
"""
self.dd_path = dd_path
def _map_section_content(self, lines: List[str]) -> str:
return '\n'.join(lines)
@persisted('__parse')
def _parse(self) -> Tuple[Dict[str, str], Dict[str, str]]:
if logger.isEnabledFor(logging.INFO):
logger.info(f'parsing {self.dd_path}')
secs: List[Tuple[str, List[str]]] = []
sec_content: List[str] = []
meta: Dict[str, str] = {}
with open(self.dd_path) as f:
line: str
for line in f.readlines():
line = line.rstrip()
if len(line) == 0:
continue
if re.match(self.COMMENT_PAT, line):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'matched comment: {line}')
sec_start = re.match(self.SEC_START_PAT, line)
meta_match = re.match(self.META_PAT, line)
sec_content = []
if sec_start is not None:
name = sec_start.group(1)
secs.append((name, sec_content))
elif meta_match is not None:
meta[meta_match.group(1)] = meta_match.group(2)
else:
sec_content.append(line)
sections = {x[0]: self._map_section_content(x[1]) for x in secs}
return sections, meta
@property
def sections(self) -> Dict[str, str]:
"""Return the sections of the file.
"""
return self._parse()[0]
@property
def metadata(self) -> Dict[str, str]:
"""Return the meta data found int he parse object.
"""
return self._parse()[1]
def get_init_db_sqls(self) -> Iterable[str]:
"""Return the set of statements that create all DB objects needed to fully
CRUD.
"""
init_secs = self.metadata['init_sections']
secs = init_secs.split(',')
entries = map(lambda x: self.sections[x], secs)
sts = map(lambda x: re.split(';[ \t\n]*', x, flags=re.MULTILINE),
entries)
return filter(lambda x: len(x) > 0, it.chain(*sts)) | zensols.db | /zensols.db-1.1.0-py3-none-any.whl/zensols/db/parse.py | parse.py |
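# A minimal usage sketch (not part of the API); the SQL file path is
# hypothetical and assumed to follow the comment conventions documented in
# the class docstring above.
def _example_parser_usage():
    parser = DynamicDataParser(Path('resources/person.sql'))
    create_sql: str = parser.sections['create_tables']   # a named section
    init_order: str = parser.metadata['init_sections']   # '-- meta=' entries
    return create_sql, init_order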
__author__ = 'Paul Landes'
from typing import Any, Iterable, Tuple, Optional
from dataclasses import dataclass, field
from zensols.persist import Stash
from . import DBError, BeanDbPersister
@dataclass
class BeanStash(Stash):
"""A stash that uses a backing DB-API backed :class:`BeanDbPersister`.
"""
persister: BeanDbPersister = field()
"""The delegate bean persister."""
def load(self, name: str) -> Any:
return self.persister.get_by_id(int(name))
def exists(self, name: str) -> bool:
try:
name = int(name)
except ValueError:
# assume only number IDs
return False
return self.persister.exists(name)
def dump(self, name: str, inst: Any):
"""Since this implementation can let the database auto-increment the
unique/primary key, beware of "changing" keys.
:raises DBError: if the key changes after insertion; for this reason,
it's best to pass ``None`` as ``name``
"""
if name is not None:
id = int(name)
inst.id = id
else:
id = inst.id
if id is not None and self.exists(id):
self.persister.update(inst)
else:
self.persister.insert(inst)
if id is not None and inst.id != id:
raise DBError(f'unexpected key change: {inst.id} != {id}')
return inst
def delete(self, name: str):
self.persister.delete(int(name))
def keys(self) -> Iterable[str]:
return map(str, self.persister.get_keys())
def __len__(self) -> int:
return self.persister.get_count()
@dataclass
class AlternateKeyBeanStash(BeanStash):
"""A stash that uses another key rather than some unique primary key
(i.e. rowid for SQLite). It does this by looking up the alternate key in
some other column and resolves to the unique primary key.
The domain and range of the function (:meth:`_key_to_id`) that maps
alternate keys to unique primary keys are strings.
.. document private functions
.. automethod:: _key_to_id
"""
key_to_id_name: str = field()
"""The select method SQL name that selects the unique priamry to the
alterante key.
"""
keys_name: str = field()
"""The select method SQL name that selects the alternate in :meth:`keys`."""
def _key_to_id(self, name: str) -> Optional[str]:
"""Maps alternate keys to unique primary keys.
:param name: the alternate key, which is usually a more client friendly
string
:return: the unique primary key in the database (usually an
:class:`int`)
"""
row: Tuple = self.persister.execute_singleton_by_name(
self.key_to_id_name, params=(name,),
row_factory='identity')
if row is not None:
return str(row[0])
def load(self, name: str) -> Any:
return super().load(self._key_to_id(name))
def exists(self, name: str) -> bool:
id: Optional[Any] = self._key_to_id(name)
return id is not None
def dump(self, name: str, inst: Any):
return super().dump(self._key_to_id(name), inst)
def delete(self, name: str):
return super().delete(self._key_to_id(name))
def keys(self) -> Iterable[str]:
return set(self.persister.execute_by_name(
self.keys_name, row_factory='identity',
map_fn=lambda r: r[0])) | zensols.db | /zensols.db-1.1.0-py3-none-any.whl/zensols/db/stash.py | stash.py |
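# A minimal usage sketch (not part of the API); it assumes a
# :class:`.BeanDbPersister` configured elsewhere (e.g. via the application
# configuration) whose beans have an ``id`` attribute.
def _example_stash_usage(persister: BeanDbPersister):
    stash = BeanStash(persister)
    person = stash.load('1')     # fetch the bean with primary key 1
    stash.dump(None, person)     # insert and let the database assign the key
    return len(stash)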
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Dict, Any, Tuple, Union, Callable, Iterable, Type, Optional
from dataclasses import dataclass, field, fields
from abc import abstractmethod, ABC
import logging
import traceback
from pathlib import Path
import pandas as pd
from zensols.persist import resource
from zensols.db import DynamicDataParser
logger = logging.getLogger(__name__)
class DBError(Exception):
""""Raised for all :mod:`zensols.db`` related errors.
"""
pass
class connection(resource):
"""Annotation used to create and dispose of DB-API connections.
"""
def __init__(self):
super().__init__('_create_connection', '_dispose_connection')
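# A minimal sketch (not part of the API) of how the decorator is used: the
# wrapped method receives a live connection after ``self`` and the connection
# is disposed when the method returns. The class below is hypothetical and
# only mirrors how :class:`DbPersister` methods are written.
class _ExampleConnectionUse(object):
    def _create_connection(self):
        import sqlite3
        return sqlite3.connect(':memory:')

    def _dispose_connection(self, conn):
        conn.close()

    @connection()
    def sqlite_version(self, conn) -> str:
        # ``conn`` is injected by the decorator
        return conn.execute('select sqlite_version()').fetchone()[0]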
class _CursorIterator(object):
"""Iterates throw the rows of the database using a cursor.
"""
def __init__(self, mng: ConnectionManager, conn: Any, cursor: Any):
"""
:param mng: the connection manager to regulate database resources
:param conn: the connection to the database
:param cursor: the cursor to the database
"""
self._mng = mng
self._conn = conn
self._cursor = cursor
def __iter__(self) -> _CursorIterator:
return self
def __next__(self):
if self._cursor is None:
raise StopIteration
try:
return next(self._cursor)
except StopIteration:
try:
self.dispose()
finally:
raise StopIteration
def dispose(self):
if self._mng is not None:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('closing cursor iterable')
self._mng._do_dispose_connection = True
self._cursor.close()
self._mng.dispose(self._conn)
self._mng = None
self._conn = None
self._cursor = None
class cursor(object):
"""Iterate through rows of a database. The connection is automatically
closed once out of scope.
Example::
config_factory: ConfigFactory = ...
persister: DbPersister = config_factory.instance('person_db_persister')
with cursor(persister, name='select_people') as c:
for row in c:
print(row)
"""
def __init__(self, persister: DbPersister, sql: str = None,
name: str = None, params: Tuple[Any, ...] = ()):
"""Initialize with either ``name`` or ``sql`` (only one should be
``None``).
:param persister: used to execute the SQL and obtain the cursor
:param sql: the string SQL to execute
:param name: the named SQL query in the :obj:`.DbPersister.sql_file`
:param params: the parameters given to the SQL statement (populated
with ``?``) in the statement
"""
self._curiter = persister._execute_iterate(
sql=sql,
name=name,
params=params)
def __enter__(self) -> Iterable[Any]:
return self._curiter
def __exit__(self, cls: Type[Exception], value: Optional[Exception],
trace: traceback):
self._curiter.dispose()
@dataclass
class ConnectionManager(ABC):
"""Instance DB-API connection lifecycle.
"""
def __post_init__(self):
self._do_dispose_connection = True
def register_persister(self, persister: DbPersister):
"""Register the persister used for this connection manager.
:param persister: the persister used for connection management
"""
self.persister = persister
@abstractmethod
def create(self) -> Any:
"""Create a connection to the database.
"""
pass
def dispose(self, conn):
"""Close the connection to the database.
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'connection manager: closing {conn}')
if self._do_dispose_connection:
conn.close()
@abstractmethod
def drop(self):
"""Remove all objects from the database or the database itself.
For SQLite, this deletes the file. In other database implementations, this
might drop all objects from the database. Regardless, it is expected
that ``create`` is able to recreate the database after this action.
"""
pass
def _to_dataframe(self, res: Iterable[Any], cursor: Any) -> pd.DataFrame:
"""Return a Pandas dataframe from the results given by the database.
:param res: the database results row by row
:param cursor: the database cursor object, which has a ``description``
attribute
"""
cols = tuple(map(lambda d: d[0], cursor.description))
return pd.DataFrame(res, columns=cols)
def execute(self, conn: Any, sql: str, params: Tuple[Any, ...],
row_factory: Union[str, Callable],
map_fn: Callable) -> Tuple[Union[dict, tuple, pd.DataFrame]]:
"""Execute SQL on a database connection.
The ``row_factory`` tells the method how to interpret the row data in
to an object that's returned. It can be one of:
* ``tuple``: tuples (the default)
* ``identity``: return the unmodified form from the database
* ``dict``: for dictionaries
* ``pandas``: for a :class:`pandas.DataFrame`
* otherwise: a function or class
Compare this with ``map_fn``, which transforms the data that's given to
the ``row_factory``.
:param conn: the connection object with the database
:param sql: the string SQL to execute
:param params: the parameters given to the SQL statement (populated
with ``?``) in the statement
:param row_factory: ``tuple``, ``dict``, ``pandas`` or a function
:param map_fn: a function that transforms row data given to the
``row_factory``
:see: :meth:`.DbPersister.execute`.
"""
def dict_row_factory(cursor: Any, row: Tuple[Any, ...]):
return dict(map(lambda x: (x[1][0], row[x[0]]),
enumerate(cursor.description)))
conn.row_factory = {
'dict': dict_row_factory,
'tuple': lambda cursor, row: row,
'identity': lambda cursor, row: row,
'pandas': None,
}.get(
row_factory,
lambda cursor, row: row_factory(*row)
)
cur: Any = conn.cursor()
try:
res = cur.execute(sql, params)
if map_fn is not None:
res = map(map_fn, res)
if row_factory == 'pandas':
res = self._to_dataframe(res, cur)
if conn.row_factory is not None:
res = tuple(res)
return res
finally:
cur.close()
def _create_cursor(self, conn: Any, sql: str,
params: Tuple[Any, ...]) -> Any:
"""Create a cursor object from connection ``conn``."""
cur: Any = conn.cursor()
cur.execute(sql, params)
return cur
def execute_no_read(self, conn: Any, sql: str,
params: Tuple[Any, ...]) -> int:
"""Return database level information such as row IDs rather than the
results of a query. Use this when inserting data to get a row ID.
:param conn: the connection object with the database
:param sql: the SQL statement used on the connection's cursor
:param params: the parameters given to the SQL statement (populated
with ``?``) in the statement
:see: :meth:`.DbPersister.execute_no_read`.
"""
cur = conn.cursor()
try:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'sql: {sql}, params: {params}')
cur.execute(sql, params)
conn.commit()
return cur.lastrowid
finally:
cur.close()
def insert_rows(self, conn: Any, sql: str, rows: Iterable[Any],
errors: str, set_id_fn: Callable,
map_fn: Callable) -> int:
"""Insert a tuple of rows in the database and return the current row ID.
:param rows: a sequence of tuples of data (or an object to be
transformed, see ``map_fn``) in column order of the SQL
provided by the entry :obj:`insert_name`
:param errors: if this is the string ``raise`` then raise an error on
any exception when invoking the database execute
:param map_fn: if not ``None``, used to transform the given row in to a
tuple that is used for the insertion
See :meth:`.InsertableBeanDbPersister.insert_rows`.
"""
cur = conn.cursor()
try:
for row in rows:
    org_row = row
    if map_fn is not None:
        row = map_fn(row)
if errors == 'raise':
cur.execute(sql, row)
elif errors == 'ignore':
try:
cur.execute(sql, row)
except Exception as e:
logger.error(f'could not insert row ({len(row)}): {e}')
else:
raise DBError(f'unknown errors value: {errors}')
if set_id_fn is not None:
set_id_fn(org_row, cur.lastrowid)
finally:
conn.commit()
cur.close()
return cur.lastrowid
@dataclass
class DbPersister(object):
"""CRUDs data to/from a DB-API connection.
"""
conn_manager: ConnectionManager = field()
"""Used to create DB-API connections."""
sql_file: Path = field(default=None)
"""The text file containing the SQL statements (see
:class:`DynamicDataParser`).
"""
row_factory: Union[str, Type] = field(default='tuple')
"""The default method by which data is returned from ``execute_*`` methods.
:see: :meth:`execute`.
"""
def __post_init__(self):
self.parser = self._create_parser(self.sql_file)
self.conn_manager.register_persister(self)
def _create_parser(self, sql_file: Path) -> DynamicDataParser:
return DynamicDataParser(sql_file)
@property
def sql_entries(self) -> Dict[str, str]:
"""Return a dictionary of names -> SQL statements from the SQL file.
"""
return self.parser.sections
@property
def metadata(self) -> Dict[str, str]:
"""Return the metadata associated with the SQL file.
"""
return self.parser.metadata
def _create_connection(self):
"""Create a connection to the database.
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug('creating connection')
return self.conn_manager.create()
def _dispose_connection(self, conn: Any):
"""Close the connection to the database.
:param conn: the connection to release
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'closing connection {conn}')
self.conn_manager.dispose(conn)
def _check_entry(self, name: str):
if name is None:
raise DBError('no defined SQL entry for persist function')
if len(name) == 0:
raise DBError('non-optional entry not provided')
if name not in self.sql_entries:
raise DBError(f"no entry '{name}' found in SQL configuration")
@connection()
def execute(self, conn: Any, sql: str, params: Tuple[Any, ...] = (),
row_factory: Union[str, Callable] = None,
map_fn: Callable = None) -> \
Tuple[Union[dict, tuple, pd.DataFrame]]:
"""Execute SQL on a database connection.
The ``row_factory`` tells the method how to interpret the row data in
to an object that's returned. It can be one of:
* ``tuple``: tuples (the default)
* ``dict``: for dictionaries
* ``pandas``: for a :class:`pandas.DataFrame`
* otherwise: a function or class
Compare this with ``map_fn``, which transforms the data that's given to
the ``row_factory``.
:param sql: the string SQL to execute
:param params: the parameters given to the SQL statement (populated
with ``?``) in the statement
:param row_factory: ``tuple``, ``dict``, ``pandas`` or a function
:param map_fn: a function that transforms row data given to the
``row_factory``
"""
row_factory = self.row_factory if row_factory is None else row_factory
return self.conn_manager.execute(
conn, sql, params, row_factory, map_fn)
def execute_by_name(self, name: str, params: Tuple[Any] = (),
row_factory: Union[str, Callable] = None,
map_fn: Callable = None):
"""Just like :meth:`execute` but look up the SQL statement to execute on
the database connection.
The ``row_factory`` tells the method how to interpret the row data in
to an object that's returned. It can be one of:
* ``tuple``: tuples (the default)
* ``dict``: for dictionaries
* ``pandas``: for a :class:`pandas.DataFrame`
* otherwise: a function or class
Compare this with ``map_fn``, which transforms the data that's given to
the ``row_factory``.
:param name: the named SQL query in the :obj:`sql_file`
:param params: the parameters given to the SQL statement (populated
with ``?``) in the statement
:param row_factory: ``tuple``, ``dict``, ``pandas`` or a function
:param map_fn: a function that transforms row data given to the
``row_factory``
:see: :meth:`execute`
"""
self._check_entry(name)
sql = self.sql_entries[name]
return self.execute(sql, params, row_factory, map_fn)
@connection()
def _execute_iterate(self, conn: Any, sql: str, name: str,
params: Tuple[Any, ...]):
if sql is None and name is None:
raise DBError('Both sql string and name can not be None')
if sql is None:
self._check_entry(name)
sql = self.sql_entries[name]
cur = self.conn_manager._create_cursor(conn, sql, params)
self.conn_manager._do_dispose_connection = False
return _CursorIterator(self.conn_manager, conn, cur)
def execute_singleton_by_name(self, *args, **kwargs):
"""Just like :meth:`execute_by_name` except return only the first item or
``None`` if no results.
"""
res = self.execute_by_name(*args, **kwargs)
if len(res) > 0:
return res[0]
@connection()
def execute_sql_no_read(self, conn: Any, sql: str,
params: Tuple[Any] = ()) -> int:
"""Execute SQL and return the database level information such as row IDs
rather than the results of a query. Use this when inserting data to get
a row ID.
"""
return self.conn_manager.execute_no_read(conn, sql, params)
@connection()
def execute_no_read(self, conn: Any, entry_name: str,
params: Tuple[Any] = ()) -> int:
"""Just like :meth:`execute_by_name`, but return database level
information such as row IDs rather than the results of a query. Use
this when inserting data to get a row ID.
:param entry_name: the key in the SQL file whose value is used as the
statement
:param params: the parameters given to the SQL statement (populated
with ``?``) in the statement
:see: :meth:`execute_sql_no_read`
"""
self._check_entry(entry_name)
sql = self.sql_entries[entry_name]
return self.conn_manager.execute_no_read(conn, sql, params)
@dataclass
class Bean(ABC):
"""A container class like a Java *bean*.
"""
def get_attr_names(self) -> Tuple[str]:
"""Return a list of string attribute names.
"""
return tuple(map(lambda f: f.name, fields(self)))
def get_attrs(self) -> Dict[str, Any]:
"""Return a dict of attributes that are meant to be persisted.
"""
return {n: getattr(self, n) for n in self.get_attr_names()}
def get_row(self) -> Tuple[Any]:
"""Return a row of data meant to be printed. This includes the unique ID of
the bean (see :meth:`get_insert_row`).
"""
return tuple(map(lambda x: getattr(self, x), self.get_attr_names()))
def get_insert_row(self) -> Tuple[Any]:
"""Return a row of data meant to be inserted into the database. This method
implementation leaves off the first attribute, assuming it contains the
unique ID (i.e. row ID) of the object. See :meth:`get_row`.
"""
names = self.get_attr_names()
return tuple(map(lambda x: getattr(self, x), names[1:]))
def __eq__(self, other):
if other is None:
return False
if self is other:
return True
if self.__class__ != other.__class__:
return False
for n in self.get_attr_names():
if getattr(self, n) != getattr(other, n):
return False
return True
def __hash__(self):
vals = tuple(map(lambda n: getattr(self, n), self.get_attr_names()))
return hash(vals)
def __str__(self):
return ', '.join(map(lambda x: f'{x}: {getattr(self, x)}',
self.get_attr_names()))
def __repr__(self):
return self.__str__()
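# A minimal sketch (not part of the API) of a bean; the class and columns are
# hypothetical. The first field is the unique ID, which ``get_insert_row``
# omits so the database can assign it on insert.
@dataclass
class _ExamplePerson(Bean):
    id: int = field(default=None)
    name: str = field(default=None)
    age: int = field(default=None)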
@dataclass
class ReadOnlyBeanDbPersister(DbPersister):
"""A read-only persister that CRUDs data based on predefined SQL given in the
configuration. The class optionally works with instances of :class:`.Bean`
when :obj:`row_factory` is set to the target bean class.
"""
select_name: str = field(default=None)
"""The name of the SQL entry used to select data/class."""
select_by_id_name: str = field(default=None)
"""The name of the SQL entry used to select a single row by unique ID."""
select_exists_name: str = field(default=None)
"""The name of the SQL entry used to determine if a row exists by unique
ID.
"""
def get(self) -> list:
"""Return using the SQL provided by the entry identified by :obj:`select_name`.
"""
return self.execute_by_name(
self.select_name, row_factory=self.row_factory)
def get_by_id(self, id: int):
"""Return an object using it's unique ID, which is could be the row ID in
SQLite.
"""
rows = self.execute_by_name(
self.select_by_id_name, params=(id,), row_factory=self.row_factory)
if len(rows) > 0:
return rows[0]
def exists(self, id: int) -> bool:
"""Return ``True`` if there is a object with unique ID (or row ID) in the
database. Otherwise return ``False``.
"""
if self.select_exists_name is None:
return self.get_by_id(id) is not None
else:
cnt = self.execute_by_name(
self.select_exists_name, params=(id,), row_factory='tuple')
return cnt[0][0] == 1
@dataclass
class InsertableBeanDbPersister(ReadOnlyBeanDbPersister):
"""A class that contains insert funtionality.
"""
insert_name: str = field(default=None)
"""The name of the SQL entry used to insert data/class instance."""
def insert_row(self, *row) -> int:
"""Insert a row in the database and return the current row ID.
:param row: a sequence of data in column order of the SQL provided by
the entry :obj:`insert_name`
"""
return self.execute_no_read(self.insert_name, params=row)
@connection()
def insert_rows(self, conn: Any, rows: Iterable[Any], errors='raise',
set_id_fn: Callable = None,
map_fn: Callable = None) -> int:
"""Insert a tuple of rows in the database and return the current row ID.
:param rows: a sequence of tuples of data (or an object to be
transformed, see ``map_fn`` in column order of the SQL
provided by the entry :obj:`insert_name`
:param errors: if this is the string ``raise`` then raise an error on
any exception when invoking the database execute
:param map_fn: if not ``None``, used to transform the given row in to a
tuple that is used for the insertion
"""
entry_name = self.insert_name
self._check_entry(entry_name)
sql = self.sql_entries[entry_name]
return self.conn_manager.insert_rows(
conn, sql, rows, errors, set_id_fn, map_fn)
def _get_insert_row(self, bean: Bean) -> Tuple[Any]:
"""Factory method to return the bean's insert row parameters."""
return bean.get_insert_row()
def insert(self, bean: Bean) -> int:
"""Insert a bean using the order of the values given in
:meth:`Bean.get_insert_row` as that of the SQL defined with entry
:obj:`insert_name` given in the initializer.
"""
row = self._get_insert_row(bean)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'inserting row: {row}')
curid = self.insert_row(*row)
bean.id = curid
return curid
def insert_beans(self, beans: Iterable[Any], errors: str = 'raise') -> int:
"""Insert a bean using the order of the values given in
:meth:`Bean.get_insert_row` as that of the SQL defined with entry
:obj:`insert_name` given in the initializer.
"""
def map_fn(bean):
return self._get_insert_row(bean)
def set_id_fn(bean, id):
pass
return self.insert_rows(beans, errors, set_id_fn, map_fn)
@dataclass
class UpdatableBeanDbPersister(InsertableBeanDbPersister):
"""A class that contains the remaining CRUD funtionality the super class
doesn't have.
"""
update_name: str = field(default=None)
"""The name of the SQL entry used to update data/class instance(s)."""
delete_name: str = field(default=None)
"""The name of the SQL entry used to delete data/class instance(s)."""
def update_row(self, *row: Tuple[Any]) -> int:
"""Update a row using the values of the row with the current unique ID as the
first element in ``*rows``.
"""
where_row = (*row[1:], row[0])
return self.execute_no_read(self.update_name, params=where_row)
def update(self, bean: Bean) -> int:
"""Update a a bean that using the ``id`` attribute and its attributes as
values.
"""
return self.update_row(*bean.get_row())
def delete(self, id) -> int:
"""Delete a row by ID.
"""
return self.execute_no_read(self.delete_name, params=(id,))
@dataclass
class BeanDbPersister(UpdatableBeanDbPersister):
"""A class that contains the remaining CRUD funtionality the super class
doesn't have.
"""
keys_name: str = field(default=None)
"""The name of the SQL entry used to fetch all keys."""
count_name: str = field(default=None)
"""The name of the SQL entry used to get a row count."""
def get_keys(self) -> Iterable[Any]:
"""Return the unique keys from the bean table.
"""
keys = self.execute_by_name(self.keys_name, row_factory='tuple')
return map(lambda x: x[0], keys)
def get_count(self) -> int:
"""Return the number of rows in the bean table.
"""
if self.count_name is not None:
cnt = self.execute_by_name(self.count_name, row_factory='tuple')
return cnt[0][0]
else:
# SQLite has a bug that returns one row with all null values
return sum(1 for _ in self.get_keys()) | zensols.db | /zensols.db-1.1.0-py3-none-any.whl/zensols/db/bean.py | bean.py |
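# Illustrative sketch (not part of the original API): a minimal ``Bean``
# subclass showing how its row methods relate to the persisters above.  The
# ``Person`` class and its fields are hypothetical.
def _bean_usage_sketch() -> Bean:
    """A minimal, hedged usage example of :class:`.Bean`."""
    from dataclasses import dataclass, field

    @dataclass
    class Person(Bean):
        # by convention the first attribute is the unique (row) ID
        id: int = field(default=None)
        name: str = field(default=None)

    person = Person(None, 'Jane')
    # attribute names come from the dataclass fields in declaration order
    assert person.get_attr_names() == ('id', 'name')
    # the insert row drops the leading unique ID
    assert person.get_insert_row() == ('Jane',)
    # a BeanDbPersister configured with an insert SQL entry would then set
    # ``person.id`` to the database row ID on ``persister.insert(person)``
    return person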
__author__ = 'Paul Landes'
from typing import Type, List, Tuple, Any, ClassVar
from dataclasses import dataclass, field, fields
import dataclasses
import logging
from string import Template
from pathlib import Path
from . import DynamicDataParser, BeanDbPersister, Bean
logger = logging.getLogger(__name__)
class DataClassDynamicDataParser(DynamicDataParser):
"""An SQL data parser that replaces ``${cols}`` in the SQL file with the
:class:`dataclasses.dataclass` fields.
:see: :class:`.DataClassDbPersister`
"""
ID_FIELD: ClassVar[str] = 'id'
"""The name of the column that has the unique identifier of the row/object.
"""
def __init__(self, dd_path: Path, bean_class: Type):
super().__init__(dd_path)
if not dataclasses.is_dataclass(bean_class):
raise ValueError(f'not a dataclass: {bean_class}')
cols = map(lambda f: f.name, dataclasses.fields(bean_class))
cols = ', '.join(filter(lambda c: c != self.ID_FIELD, cols))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'cols: {cols}')
self.context = {'cols': cols}
def _map_section_content(self, lines: List[str]) -> str:
content: str = super()._map_section_content(lines)
templ = Template(content)
return templ.substitute(self.context)
@dataclass
class DataClassDbPersister(BeanDbPersister):
"""Persists instances of :class:`dataclasses.dataclass` by narrowing the
columns from select statements. Instead of ``select *``, use ``select
${cols}`` in the SQL resource file.
:see: :class:`.DataClassDynamicDataParser`
"""
bean_class: Type[dataclass] = field(default=None)
"""The data class that is CRUD'd for DB operations."""
def __post_init__(self):
self.row_factory = self.bean_class
super().__post_init__()
def _create_parser(self, sql_file: Path) -> DynamicDataParser:
return DataClassDynamicDataParser(sql_file, self.bean_class)
def _get_insert_row(self, bean: Bean) -> Tuple[Any]:
idf = DataClassDynamicDataParser.ID_FIELD
return tuple(map(lambda f: getattr(bean, f.name),
filter(lambda f: f.name != idf, fields(bean)))) | zensols.db | /zensols.db-1.1.0-py3-none-any.whl/zensols/db/dataclass.py | dataclass.py |
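# Illustrative sketch (not part of the original API): what the ``${cols}``
# substitution performed by DataClassDynamicDataParser amounts to for a
# hypothetical dataclass; the ``Person`` class and table name are assumptions.
def _cols_substitution_sketch() -> str:
    @dataclasses.dataclass
    class Person:
        id: int
        name: str
        age: int

    # mirror the parser: drop the ID column and join the remaining fields
    cols: str = ', '.join(
        f.name for f in dataclasses.fields(Person)
        if f.name != DataClassDynamicDataParser.ID_FIELD)
    # an SQL resource entry such as ``select ${cols} from person`` becomes
    # 'select name, age from person'
    return Template('select ${cols} from person').substitute({'cols': cols})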
__author__ = 'Paul Landes'
import logging
from dataclasses import dataclass, field
from pathlib import Path
import sqlite3
from zensols.db import DBError, ConnectionManager
logger = logging.getLogger(__name__)
@dataclass
class SqliteConnectionManager(ConnectionManager):
"""An SQLite connection factory.
"""
db_file: Path = field()
"""The SQLite database file to read or create."""
create_db: bool = field(default=True)
"""If ``True``, create the database if it does not already exist. Otherwise,
:class:`.DBError` is raised (see :meth:`create`).
"""
def create(self) -> sqlite3.Connection:
"""Create a connection by accessing the SQLite file.
:raise DBError: if the SQLite file does not exist (caveat: see
:obj:`create_db`)
"""
db_file = self.db_file
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating connection to {db_file}')
created = False
if not db_file.exists():
if not self.create_db:
raise DBError(f'database file {db_file} does not exist')
if not db_file.parent.exists():
if logger.isEnabledFor(logging.INFO):
logger.info(f'creating sql db directory {db_file.parent}')
db_file.parent.mkdir(parents=True)
if logger.isEnabledFor(logging.INFO):
logger.info(f'creating sqlite db file: {db_file}')
created = True
types = sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
conn = sqlite3.connect(str(db_file.absolute()), detect_types=types)
if created:
logger.info('initializing database...')
for sql in self.persister.parser.get_init_db_sqls():
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'invoking sql: {sql}')
conn.execute(sql)
conn.commit()
return conn
def drop(self):
"""Delete the SQLite database file from the file system.
"""
logger.info(f'deleting: {self.db_file}')
if self.db_file.exists():
self.db_file.unlink()
return True
return False | zensols.db | /zensols.db-1.1.0-py3-none-any.whl/zensols/db/sqlite.py | sqlite.py |
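# Illustrative sketch (not part of the original API): constructing a
# connection manager for a temporary SQLite file.  In normal use the manager
# is wired to a persister by the application configuration, which sets the
# ``persister`` back reference used by :meth:`create` to run the init DB SQL;
# only construction and file clean up are shown here, and the base
# ``ConnectionManager`` is assumed to need no extra constructor parameters.
def _sqlite_manager_sketch(tmp_dir: Path) -> SqliteConnectionManager:
    mng = SqliteConnectionManager(db_file=tmp_dir / 'example.sqlite3')
    # ``drop`` simply deletes the database file if it exists
    mng.drop()
    return mng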
__author__ = 'Paul Landes'
from typing import Any, Tuple, Union, Callable
from dataclasses import dataclass, field
import logging
import psycopg2
from psycopg2.extras import RealDictCursor
from psycopg2 import ProgrammingError
import pandas as pd
from zensols.db import DBError, ConnectionManager
logger = logging.getLogger(__name__)
@dataclass
class PostgresConnectionManager(ConnectionManager):
"""An Postgres connection factory.
"""
EXISTS_SQL = 'select count(*) from information_schema.tables where table_schema = \'public\''
DROP_SQL = 'drop owned by {user}'
db_name: str = field()
"""Database name on the server."""
host: str = field()
"""The host name of the database."""
port: str = field()
"""The host port of the database."""
user: str = field()
"""The user (if any) to log in with."""
password: str = field()
"""The login password."""
create_db: bool = field(default=True)
"""If ``True`` create the database if it does not already exist."""
capture_lastrowid: bool = field(default=False)
"""If ``True``, select the last row for each query."""
fast_insert: bool = field(default=False)
"""If ``True`` use `insertmany` on the cursor for fast insert in to the
database.
"""
def _init_db(self, conn, cur):
if logger.isEnabledFor(logging.INFO):
logger.info('initializing database...')
for sql in self.persister.parser.get_init_db_sqls():
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'invoking sql: {sql}')
cur.execute(sql)
conn.commit()
def create(self):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating connection to {self.host}:{self.port} ' +
f'with {self.user} on database: {self.db_name}')
conn = psycopg2.connect(
host=self.host, database=self.db_name, port=self.port,
user=self.user, password=self.password)
try:
cur = conn.cursor()
cur.execute(self.EXISTS_SQL, ())
if cur.fetchone()[0] == 0:
self._init_db(conn, cur)
finally:
cur.close()
return conn
def drop(self):
conn = self.create()
cur = conn.cursor()
try:
cur.execute(self.DROP_SQL.format(**self.__dict__))
conn.commit()
finally:
cur.close()
conn.close()
def execute(self, conn: Any, sql: str, params: Tuple[Any],
row_factory: Union[str, Callable],
map_fn: Callable) -> Tuple[Union[dict, tuple, pd.DataFrame]]:
"""See :meth:`~zensols.db.bean.ConnectionManager.execute`.
"""
def other_rf_fn(row):
return row_factory(*row)
def identity_rf_fn(row):
return row
create_fn = None
if row_factory == 'dict':
cur = conn.cursor(cursor_factory=RealDictCursor)
elif row_factory == 'tuple' or row_factory == 'pandas':
cur = conn.cursor()
elif row_factory == 'identity':
create_fn = identity_rf_fn
cur = conn.cursor()
else:
create_fn = other_rf_fn
cur = conn.cursor()
try:
tupify = True
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'pg exec sql=<{sql}>, params=<{params}>')
cur.execute(sql, params)
res = cur.fetchall()
if create_fn is not None:
res = map(create_fn, res)
if map_fn is not None:
res = map(map_fn, res)
if row_factory == 'pandas':
res = self._to_dataframe(res, cur)
tupify = False
if tupify:
res = tuple(res)
return res
finally:
cur.close()
def execute_no_read(self, conn, sql, params=()) -> int:
cur = conn.cursor()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'execute no read: {sql}')
try:
cur.execute(sql, params)
conn.commit()
if self.capture_lastrowid:
try:
return cur.fetchone()[0]
except ProgrammingError:
# actions like dropping a table will not return a rowid
pass
finally:
cur.close()
def _insert_row(self, conn, cur, sql, row):
cur.execute(sql, row)
conn.commit()
if self.capture_lastrowid:
return cur.fetchall()[0][0]
def _insert_rows_slow(self, conn, sql, rows: list, errors: str,
set_id_fn, map_fn) -> int:
rowid = None
cur = conn.cursor()
try:
for row in rows:
if map_fn is not None:
org_row = row
row = map_fn(row)
if errors == 'raise':
rowid = self._insert_row(conn, cur, sql, row)
elif errors == 'ignore':
try:
rowid = self._insert_row(conn, cur, sql, row)
except Exception as e:
logger.error(f'could not insert row ({len(row)}): {e}')
else:
raise DBError(f'Unknown errors value: {errors}')
if set_id_fn is not None:
set_id_fn(org_row, cur.lastrowid)
finally:
cur.close()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'inserted with rowid: {rowid}')
return rowid
def _insert_rows_fast(self, conn, sql, rows: list, map_fn) -> int:
cur = conn.cursor()
if logger.isEnabledFor(logging.DEBUG):
logger.debug('inserting rows fast')
try:
if map_fn is not None:
rows = map(map_fn, rows)
cur.executemany(sql, rows)
conn.commit()
finally:
cur.close()
def insert_rows(self, conn, sql, rows: list, errors: str,
set_id_fn, map_fn) -> int:
if self.fast_insert:
return self._insert_rows_fast(conn, sql, rows, map_fn)
else:
return self._insert_rows_slow(
conn, sql, rows, errors, set_id_fn, map_fn) | zensols.dbpg | /zensols.dbpg-1.1.0-py3-none-any.whl/zensols/dbpg/postgres.py | postgres.py |
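# Illustrative sketch (not part of the original API): constructing a Postgres
# connection manager; the host, port, credentials and database name are
# placeholders and the base ``ConnectionManager`` is assumed to need no
# additional constructor parameters.
def _postgres_manager_sketch() -> PostgresConnectionManager:
    return PostgresConnectionManager(
        db_name='exampledb', host='localhost', port='5432',
        user='example', password='example',
        # uses cursor.executemany for bulk inserts in ``insert_rows``
        fast_insert=True)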
__author__ = 'Paul Landes'
from typing import Dict, List, Tuple, Union, Any
from dataclasses import dataclass, field
import logging
import numpy as np
from sklearn.preprocessing import normalize, StandardScaler
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.manifold import TSNE
from zensols.util import APIError
from zensols.config import Dictable
from zensols.persist import persisted
logger = logging.getLogger(__name__)
@dataclass
class DimensionReducer(Dictable):
"""Reduce the dimensionality of a dataset.
"""
_DICTABLE_ATTRIBUTES = {'n_points'}
data: np.ndarray = field(repr=False)
"""The data that will be dimensionally reduced."""
dim: int = field()
"""The lowered dimension spaace."""
reduction_meth: str = field(default='pca')
"""One of ``pca``, ``svd``, or ``tsne``."""
normalize: str = field(default='unit')
"""One of:
* ``unit``: normalize to unit vectors
* ``standardize``: standardize by removing the mean and scaling to unit
variance
* ``None``: make no modifications to the data
"""
model_args: Dict[str, Any] = field(default_factory=dict)
"""Additional kwargs to pass to the model initializer."""
def _normalize(self, data: np.ndarray) -> np.ndarray:
if self.normalize == 'standardize':
x = StandardScaler().fit_transform(data)
elif self.normalize == 'unit':
x = normalize(data)
else:
x = data
return x
@persisted('_dim_reduced')
def _dim_reduce(self) -> np.ndarray:
model = None
data = self.data
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'using {self.reduction_meth} ({self.dim}) ' +
f'on {data.shape}')
if self.normalize:
if self.normalize == 'standardize':
data = StandardScaler().fit_transform(data)
elif self.normalize == 'unit':
data = normalize(data)
else:
raise APIError(
f'Unknown normalization method: {self.normalize}')
if self.reduction_meth == 'pca':
model = PCA(self.dim, **self.model_args)
data = model.fit_transform(data)
elif self.reduction_meth == 'svd':
model = TruncatedSVD(self.dim, **self.model_args)
data = model.fit_transform(data)
elif self.reduction_meth == 'tsne':
if data.shape[-1] > 50:
data = PCA(50).fit_transform(data)
params = dict(init='pca', learning_rate='auto')
params.update(self.model_args)
model = TSNE(self.dim, **params)
data = model.fit_transform(data)
else:
raise APIError('Unknown dimension reduction method: ' +
self.reduction_meth)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'reduced shape: {data.shape}')
return data, model
@property
def n_points(self) -> Tuple[int]:
return self.data.shape[0]
@property
@persisted('_reduced')
def reduced(self) -> np.ndarray:
return self._dim_reduce()[0]
@property
def model(self) -> Union[PCA, TruncatedSVD, TSNE]:
return self._dim_reduce()[1]
def _get_reduced_data(self, data: np.ndarray) -> np.ndarray:
data: np.ndarray = self.reduced if data is None else data
if data.shape[-1] != self.data.shape[-1]:
X = self.model.inverse_transform(data)
else:
X: np.ndarray = data
return X
@dataclass
class DecomposeDimensionReducer(DimensionReducer):
"""A dimensionality reducer that uses eigenvector decomposition such as PCA or
SVD.
"""
_DICTABLE_ATTRIBUTES = DimensionReducer._DICTABLE_ATTRIBUTES | \
{'description'}
def __post_init__(self):
assert self.is_decompose_method(self.reduction_meth)
@staticmethod
def is_decompose_method(reduction_meth: str) -> bool:
"""Return whether the reduction is a decomposition method.
:see: :obj:`reduction_meth`
"""
return reduction_meth == 'pca' or reduction_meth == 'svd'
def get_components(self, data: np.ndarray = None,
one_dir: bool = True) -> Tuple[np.ndarray, np.ndarray]:
"""Create a start and end points that make the PCA component, which is useful
for rendering lines for visualization.
:param: use in place of the :obj:`data` for component calculation using
the (already) trained model
:param one_dir: whether or not to create components one way from the
mean, or two way (forward and backward) from the mean
:return: a tuple of numpy arrays, each as a start and end stacked for
each component
"""
comps: List[np.ndarray] = []
X = self._get_reduced_data(data)
# fit a covariance matrix on the data
cov_matrix: np.ndarray = np.cov(X.T)
# find the center from where the PCA component starts
trans_mean: np.ndarray = data.mean(axis=0)
# the components of the model are the eigenvectors of the covariance
# matrix
evecs: np.ndarray = self.model.components_
# the eigenvalues of the covariance matrix
evs: np.ndarray = self.model.explained_variance_
for n_comp, (eigenvector, eigenvalue) in enumerate(zip(evecs, evs)):
# map a data point as a component back to the original data space
end: np.ndarray = np.dot(cov_matrix, eigenvector) / eigenvalue
# map to the reduced dimensional space
end = self.model.transform([end])[0]
start = trans_mean
if not one_dir:
# make the component "double sided"
start = start - end
comps.append(np.stack((start, end)))
return comps
@property
def description(self) -> Dict[str, Any]:
"""A object graph of data that describes the results of the model."""
tot_ev = 0
model = self.model
evs = []
for i, ev in enumerate(model.explained_variance_ratio_):
evs.append(ev)
tot_ev += ev
noise: float = None
if hasattr(model, 'noise_variance_'):
noise = model.noise_variance_
return {'components': len(model.components_),
'noise': noise,
'total_variance': tot_ev,
'explained_varainces': evs} | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/dataset/dimreduce.py | dimreduce.py |
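# Illustrative sketch (not part of the original API): reducing a random
# dataset to two dimensions with PCA and inspecting the explained variance.
def _dim_reduce_sketch() -> np.ndarray:
    data: np.ndarray = np.random.rand(100, 10)
    reducer = DecomposeDimensionReducer(data=data, dim=2)
    # shape: (100, 2)
    reduced: np.ndarray = reducer.reduced
    # a dict with the component count, noise and explained variances
    desc: Dict[str, Any] = reducer.description
    assert desc['components'] == 2
    return reduced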
__author__ = 'Paul Landes'
import sys
import logging
from typing import Iterable, Dict, Set, Callable, Tuple, Any, List
from dataclasses import dataclass, field
from itertools import chain
from collections import OrderedDict
from io import TextIOBase
from zensols.util import time
from zensols.config import Writable
from zensols.persist import (
PersistedWork,
PersistableContainer,
persisted,
Stash,
DelegateStash,
PreemptiveStash,
)
from zensols.dataset import SplitStashContainer, SplitKeyContainer
from . import DatasetError
logger = logging.getLogger(__name__)
@dataclass
class DatasetSplitStash(DelegateStash, SplitStashContainer,
PersistableContainer, Writable):
"""A default implementation of :class:`.SplitStashContainer`. However, it
needs an instance of a :class:`.SplitKeyContainer`. This implementation
generates a separate stash instance for each data set split (i.e. ``train``
vs ``test``). Each split instance holds the data (keys and values) for
each split.
Stash instances by split are obtained with ``splits``, and will have
a ``split`` attribute that give the name of the split.
To maintain reproducibility, key ordering must be considered (see
:class:`.SortedDatasetSplitStash`).
:see: :meth:`.SplitStashContainer.splits`
"""
split_container: SplitKeyContainer = field()
"""The instance that provides the splits in the dataset."""
def __post_init__(self):
super().__post_init__()
PersistableContainer.__init__(self)
if not isinstance(self.split_container, SplitKeyContainer):
raise DatasetError('Expecting type SplitKeyContainer but ' +
f'got: {type(self.split_container)}')
self._inst_split_name = None
self._keys_by_split = PersistedWork('_keys_by_split', self)
self._splits = PersistedWork('_splits', self)
def _add_keys(self, split_name: str, to_populate: Dict[str, str],
keys: List[str]):
to_populate[split_name] = tuple(keys)
@persisted('_keys_by_split')
def _get_keys_by_split(self) -> Dict[str, Tuple[str]]:
"""Return keys by split type (i.e. ``train`` vs ``test``) for only those keys
available by the delegate backing stash.
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug('creating in memory available keys data structure')
with time('created key data structures', logging.DEBUG):
delegate_keys = set(self.delegate.keys())
avail_kbs = OrderedDict()
for split, keys in self.split_container.keys_by_split.items():
ks = list()
for k in keys:
if k in delegate_keys:
ks.append(k)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'{split} has {len(ks)} keys')
self._add_keys(split, avail_kbs, ks)
return avail_kbs
def _get_counts_by_key(self) -> Dict[str, int]:
return dict(map(lambda i: (i[0], len(i[1])),
self.keys_by_split.items()))
def check_key_consistent(self) -> bool:
"""Return if the :obj:`split_container` have the same key count divisiion as
this stash's split counts.
"""
return self.counts_by_key == self.split_container.counts_by_key
def keys(self) -> Iterable[str]:
self.prime()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'keys for {self.split_name}')
kbs = self.keys_by_split
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'obtained keys for {self.split_name}')
if self.split_name is None:
return chain.from_iterable(kbs.values())
else:
return kbs[self.split_name]
def exists(self, name: str) -> bool:
if self.split_name is None:
return super().exists(name)
else:
return name in self.keys_by_split[self.split_name]
def load(self, name: str) -> Any:
if self.split_name is None or \
name in self.keys_by_split[self.split_name]:
return super().load(name)
def get(self, name: str, default: Any = None) -> Any:
if self.split_name is None or \
name in self.keys_by_split[self.split_name]:
return super().get(name)
return default
def prime(self):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('priming ds split stash')
super().prime()
self.keys_by_split
def _delegate_has_data(self):
return not isinstance(self.delegate, PreemptiveStash) or \
self.delegate.has_data
def deallocate(self):
if id(self.delegate) != id(self.split_container):
self._try_deallocate(self.delegate)
self._try_deallocate(self.split_container)
self._keys_by_split.deallocate()
if self._splits.is_set():
splits = tuple(self._splits().values())
self._splits.clear()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'deallocating: {len(splits)} stash data splits')
for v in splits:
self._try_deallocate(v, recursive=True)
self._splits.deallocate()
super().deallocate()
def clear_keys(self):
"""Clear any cache state for keys, and keys by split. It does this by clearing
the key state for stash, and then the :meth:`clear` of the
:obj:`split_container`.
"""
self.split_container.clear()
self._keys_by_split.clear()
def clear(self):
"""Clear and destory key and delegate data.
"""
del_has_data = self._delegate_has_data()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'clearing: {del_has_data}')
if del_has_data:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('clearing delegate and split container')
super().clear()
self.clear_keys()
def _get_split_names(self) -> Set[str]:
return self.split_container.split_names
def _get_split_name(self) -> str:
return self._inst_split_name
@persisted('_splits')
def _get_splits(self) -> Dict[str, Stash]:
"""Return an instance of ta stash that contains only the data for a split.
:param split: the name of the split of the instance to get
(i.e. ``train``, ``test``).
"""
self.prime()
stashes = OrderedDict()
for split_name in self.split_names:
clone = self.__class__(
delegate=self.delegate, split_container=self.split_container)
clone._keys_by_split.deallocate()
clone._splits.deallocate()
clone.__dict__.update(self.__dict__)
clone._inst_split_name = split_name
stashes[split_name] = clone
return stashes
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_delegate: bool = False):
self._write_line('split stash splits:', depth, writer)
t = 0
for ks in self.split_container.keys_by_split.values():
t += len(ks)
for k, ks in self.split_container.keys_by_split.items():
ln = len(ks)
self._write_line(f'{k}: {ln} ({ln/t*100:.1f}%)',
depth + 1, writer)
self._write_line(f'total: {t}', depth + 1, writer)
ckc = self.check_key_consistent()
self._write_line(f'total this instance: {len(self)}', depth, writer)
self._write_line(f'keys consistent: {ckc}', depth, writer)
if include_delegate and isinstance(self.delegate, Writable):
self._write_line('delegate:', depth, writer)
self.delegate.write(depth + 1, writer)
@dataclass
class SortedDatasetSplitStash(DatasetSplitStash):
"""A sorted version of a :class:`DatasetSplitStash`, where keys, values, items
and iterations are sorted by key. This is important for reproducibility of
results.
An alternative is to use :class:`.DatasetSplitStash` with an instance of
:class:`.StashSplitKeyContainer` set as the :obj:`delegate` since the key
container keeps key ordering consistent.
Any shuffling of the dataset, for the sake of training on non-uniform data,
needs to come *before* using this class. This class also sorts the keys in
each split given in :obj:`splits`.
"""
ATTR_EXP_META = ('sort_function',)
sort_function: Callable = field(default=None)
"""A function, such as ``int``, used to sort keys per data set split."""
def __iter__(self):
return map(lambda x: (x, self.__getitem__(x),), self.keys())
def values(self) -> Iterable[Any]:
return map(lambda k: self.__getitem__(k), self.keys())
def items(self) -> Tuple[str, Any]:
return map(lambda k: (k, self.__getitem__(k)), self.keys())
def _add_keys(self, split_name: str, to_populate: Dict[str, str],
keys: List[str]):
to_populate[split_name] = tuple(sorted(keys, key=self.sort_function))
def keys(self) -> Iterable[str]:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'sort function: {self.sort_function}')
keys = super().keys()
if self.sort_function is None:
keys = sorted(keys)
else:
keys = sorted(keys, key=self.sort_function)
return keys | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/dataset/stash.py | stash.py |
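# Illustrative sketch (not part of the original API): accessing per split
# stashes from an already configured DatasetSplitStash; ``stash`` is assumed
# to be created elsewhere (typically by the application configuration) and to
# have a ``train`` split.
def _split_stash_sketch(stash: DatasetSplitStash) -> int:
    # report split sizes and key consistency
    stash.write()
    # each split is itself a stash restricted to that split's keys
    train: Stash = stash.splits['train']
    return sum(1 for _ in train.keys())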
__author__ = 'Paul Landes'
from typing import List, Union, Iterable
from dataclasses import dataclass, field
import itertools as it
import pandas as pd
import numpy as np
from sklearn.covariance import MinCovDet
from scipy import stats
from zensols.persist import persisted
from . import DatasetError
@dataclass
class OutlierDetector(object):
"""Simple outlier detection utility that provides a few differnt methods of
calculation. These include :meth:`z-score`, :meth:`mahalanobis` and
:meth:`robust_mahalanobis`.
This class removes either using a method specific :obj:`threshold` or by a
:obj:`proportion` of the data set.
"""
DETECTION_METHODS = frozenset({
'z_score', 'mahalanobis', 'robust_mahalanobis'})
data: Union[np.ndarray, pd.DataFrame] = field()
"""The dataframe on which to find outliers given the data. Data points are
rows and the feature vectors are columns.
"""
default_method: str = field(default='mahalanobis')
"""The method used when invoking as a :class:`.Callable` with the
:meth:`__call__` method. This must be one of :obj:`DETECTION_METHODS`.
"""
threshold: float = field(default=None)
"""The outlier threshold, which is method dependent. This is ignored if
:obj:`proportion` is set.
"""
proportion: float = field(default=None)
"""The proportion of the dataset to use for outliers. The higher the number
the more outliers.
:see: :obj:`threshold`
"""
return_indicators: bool = field(default=None)
"""Whether to return a list of ``False`` (not outlier) or ``True`` (outlier)
instead of indexes in to the input matrix/dataframe (:obj:`data`).
"""
def __post_init__(self):
if self.default_method not in self.DETECTION_METHODS:
raise DatasetError(
f'No such detection method: {self.default_method}')
@property
@persisted('_numpy')
def numpy(self) -> np.ndarray:
"""The numpy form of :obj:`data`. If :obj:`data` is a dataframe, it is
converted to a numpy array.
"""
return self._get_arr()
def _get_arr(self) -> np.ndarray:
data = self.data
if isinstance(data, pd.DataFrame):
data = self.data.to_numpy()
return data
def _to_indicators(self, indicies: np.ndarray) -> np.ndarray:
"""Convert row indexes in to a mask usable in :meth:`numpy.where`.
:param indicies: row indexes in to :obj:`numpy`
"""
# shape: (R, C)
arr: np.ndarray = self.numpy
mask: np.ndarray = np.repeat(False, arr.shape[0])
for oix in indicies:
mask[oix] = True
return mask
def _select_indicies(self, dists: Iterable[Union[int, float]],
threshold: Union[int, float]) -> np.ndarray:
"""Find outliers."""
if self.proportion is None:
threshold = threshold if self.threshold is None else self.threshold
outliers: List[int] = []
for i, v in enumerate(dists):
if v > threshold:
outliers.append(i)
else:
drs = sorted(zip(dists, it.count()), key=lambda x: x[0])
take = len(drs) - int(self.proportion * len(drs))
outliers = sorted(map(lambda x: x[1], drs[take:]))
if self.return_indicators:
outliers = self._to_indicators(outliers)
return outliers
def z_score(self, column: Union[int, str]) -> np.ndarray:
"""Use a Z-score to detect anomolies.
:param column: the column to use for the z-score analysis.
:param threshold: the threshold above which a data point is considered
an outlier
:return: indexes in to :obj:`data` rows (indexes of a dataframe) of the
outliers
"""
if isinstance(column, str):
if not isinstance(self.data, pd.DataFrame):
raise DatasetError(
f'Can not index numpy arrays as string column: {column}')
column = self.data.columns.get_loc(column)
# shape: (R, C)
arr: np.ndarray = self.numpy
z = np.abs(stats.zscore(arr[:, column]))
return self._select_indicies(z, 3.)
def _set_chi_threshold(self, sig: float) -> float:
# shape: (R, C)
arr: np.ndarray = self.numpy
# degrees of freedom (df parameter) are number of variables
C = np.sqrt(stats.chi2.ppf((1. - sig), df=arr.shape[1]))
return C
def mahalanobis(self, significance: float = 0.001) -> np.ndarray:
"""Detect outliers using the Mahalanbis distince in high dimension.
Assuming a multivariate normal distribution of the data with K
variables, the Mahalanobis distance follows a chi-squared distribution
with K degrees of freedom. For this reason, the cut-off is defined by
the square root of the Chi^2 percent pointwise function.
:param significance: 1 - the Chi^2 percent point function (inverse of
cdf / percentiles) outlier threshold; reasonable
values include 2.5%, 1%, 0.01%; if ``None`` use
:obj:`threshold` or :obj:`proportion`
:return: indexes in to :obj:`data` rows (indexes of a dataframe) of the
outliers
"""
# shape: (R, C)
arr: np.ndarray = self.numpy
# center the data for the M-distance, shape: (R, C)
x_minus_mu: np.ndarray = arr - np.mean(arr, axis=0)
# covariance, shape: (C, C)
cov: np.ndarray = np.cov(arr.T)
# inverse covariance, shape: (C, C)
inv_cov: np.ndarray = np.linalg.inv(cov)
# shape: (R, C)
left_term: np.ndarray = np.dot(x_minus_mu, inv_cov)
# shape: (R, R)
dist: np.ndarray = np.dot(left_term, x_minus_mu.T)
# shape (R,)
md: np.ndarray = np.sqrt(dist.diagonal())
C = self._set_chi_threshold(significance)
return self._select_indicies(md, C)
def robust_mahalanobis(self, significance: float = 0.001,
random_state: int = 0) -> np.ndarray:
"""Like :meth:`mahalanobis` but use a robust mean and covarance matrix by
sampling the dataset.
:param significance: 1 - the Chi^2 percent point function (inverse of
cdf / percentiles) outlier threshold; reasonable
values include 2.5%, 1%, 0.01%; if ``None`` use
:obj:`threshold` or :obj:`proportion`
:return: indexes in to :obj:`data` rows (indexes of a dataframe) of the
outliers
"""
arr: np.ndarray = self.numpy
# minimum covariance determinant
rng = np.random.RandomState(random_state)
# random sample of data, shape: (R, C)
X: np.ndarray = rng.multivariate_normal(
mean=np.mean(arr, axis=0),
cov=np.cov(arr.T),
size=arr.shape[0])
# get robust estimates for the mean and covariance
cov = MinCovDet(random_state=random_state).fit(X)
# robust covariance metric; shape: (C, C)
mcd: np.ndarray = cov.covariance_
# robust mean, shape: (C,)
rmean: np.ndarray = cov.location_
# inverse covariance metric, shape: (C, C)
inv_cov: np.ndarray = np.linalg.inv(mcd)
# robust M-Distance, shape: (R, C)
x_minus_mu: np.ndarray = arr - rmean
# shape: (R, C)
left_term: np.ndarray = np.dot(x_minus_mu, inv_cov)
# m distance: shape: (R, R)
dist: np.ndarray = np.dot(left_term, x_minus_mu.T)
# distances: shape: (R,)
md: np.ndarray = np.sqrt(dist.diagonal())
C = self._set_chi_threshold(significance)
return self._select_indicies(md, C)
def __call__(self, *args, **kwargs) -> np.ndarray:
"""Return the output of the method provided by :obj:`default_method`. All
(keyword) arguments are passed on to the respective method.
:return: indexes in to :obj:`data` rows (indexes of a dataframe) of the
outliers
"""
meth = getattr(self, self.default_method)
return meth(*args, **kwargs) | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/dataset/outlier.py | outlier.py |
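# Illustrative sketch (not part of the original API): flagging the most
# extreme 5% of rows of a random dataframe with the (default) Mahalanobis
# distance method; the column names are placeholders.
def _outlier_sketch() -> np.ndarray:
    df = pd.DataFrame(np.random.rand(200, 4), columns=list('abcd'))
    detector = OutlierDetector(data=df, proportion=0.05)
    # calling the detector dispatches to ``default_method`` and returns the
    # row indexes of the outliers (``return_indicators`` gives a mask instead)
    return detector()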
__author__ = 'Paul Landes'
from typing import Dict, Tuple, Sequence, Set, List
from dataclasses import dataclass, field
from abc import abstractmethod, ABCMeta
import sys
import logging
import collections
from functools import reduce
import math
from io import TextIOBase
from pathlib import Path
import shutil
import parse
import random
import pandas as pd
from zensols.config import Writable
from zensols.persist import (
Primeable, persisted, PersistedWork, Stash, PersistableContainer
)
from zensols.dataset import SplitKeyContainer
from . import DatasetError
logger = logging.getLogger(__name__)
@dataclass
class AbstractSplitKeyContainer(PersistableContainer, SplitKeyContainer,
Primeable, Writable, metaclass=ABCMeta):
"""A default implementation of a :class:`.SplitKeyContainer`. This
implementation keeps the order of the keys consistent as well, which is
stored at the path given in :obj:`key_path`. Once the keys are generated
for the first time, they will persist on the file system.
This abstract class requires an implementation of :meth:`_create_splits`.
.. document private functions
.. automethod:: _create_splits
"""
key_path: Path = field()
"""The directory to store the split keys."""
pattern: str = field()
"""The file name pattern to use for the keys file :obj:`key_path` on the
file system, each file is named after the key split. For example, if
``{name}.dat`` is used, ``train.dat`` will be a file with the ordered keys.
"""
def __post_init__(self):
super().__init__()
def prime(self):
self._get_keys_by_split()
@abstractmethod
def _create_splits(self) -> Dict[str, Tuple[str]]:
"""Create the key splits using keys as the split name (i.e. ``train``)
and the values as a list of the keys for the corresponding split.
"""
pass
def _create_splits_and_write(self):
"""Write the keys in order to the file system.
"""
self.key_path.mkdir(parents=True, exist_ok=True)
for name, keys in self._create_splits().items():
fname = self.pattern.format(**{'name': name})
key_path = self.key_path / fname
with open(key_path, 'w') as f:
for k in keys:
f.write(k + '\n')
def _read_splits(self):
"""Read the keys in order from the file system.
"""
by_name = {}
for path in self.key_path.iterdir():
p = parse.parse(self.pattern, path.name)
if p is not None:
p = p.named
if 'name' in p:
with open(path) as f:
by_name[p['name']] = tuple(
map(lambda ln: ln.strip(), f.readlines()))
return by_name
@persisted('_get_keys_by_split_pw')
def _get_keys_by_split(self) -> Dict[str, Tuple[str]]:
if not self.key_path.exists():
if logger.isEnabledFor(logging.INFO):
logger.info(f'creating key splits in {self.key_path}')
self._create_splits_and_write()
return self._read_splits()
def clear(self):
logger.debug('clearing split stash')
if self.key_path.is_dir():
logger.debug(f'removing key path: {self.key_path}')
shutil.rmtree(self.key_path)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
by_name = self.counts_by_key
total = sum(by_name.values())
self._write_line('key splits:', depth, writer)
for name, cnt in by_name.items():
self._write_line(f'{name}: {cnt} ({cnt/total*100:.1f}%)',
depth + 1, writer)
self._write_line(f'total: {total}', depth, writer)
@dataclass
class StashSplitKeyContainer(AbstractSplitKeyContainer):
"""A default implementation of :class:`.AbstractSplitKeyContainer` that uses
a delegate stash as the source of the keys.
"""
stash: Stash = field()
"""The delegate stash from where to get the keys to store."""
distribution: Dict[str, float] = field(
default_factory=lambda: {'train': 0.8, 'validate': 0.1, 'test': 0.1})
"""The distribution as a percent across all key splits. The distribution
values must add to 1.
"""
shuffle: bool = field(default=True)
"""If ``True``, shuffle the keys when creating the key splits.
"""
def __post_init__(self):
super().__post_init__()
sm = float(sum(self.distribution.values()))
err, errm = (1. - sm), 1e-1
if sm < 0 or sm > 1 or err > errm:
raise DatasetError('Distribution must add to 1: ' +
f'{self.distribution} (err={err} > {errm})')
def prime(self):
super().prime()
if isinstance(self.stash, Primeable):
self.stash.prime()
@persisted('_split_names_pw')
def _get_split_names(self) -> Tuple[str]:
return frozenset(self.distribution.keys())
def _create_splits(self) -> Dict[str, Tuple[str]]:
if self.distribution is None:
raise DatasetError('Must either provide `distribution` or ' +
'implement `_create_splits`')
by_name = {}
keys = list(self.stash.keys())
if self.shuffle:
random.shuffle(keys)
klen = len(keys)
dists = tuple(self.distribution.items())
if len(dists) > 1:
dists, last = dists[:-1], dists[-1]
else:
dists, last = (), dists[0]
start = 0
end = len(dists)
for name, dist in dists:
end = start + int((klen * dist))
by_name[name] = tuple(keys[start:end])
start = end
by_name[last[0]] = keys[start:]
if logger.isEnabledFor(logging.DEBUG):
for k, v in by_name.items():
logger.debug(f'{k}: {len(v)} ({len(v)/klen:.3f})')
assert sum(map(len, by_name.values())) == klen
return by_name
@dataclass
class StratifiedStashSplitKeyContainer(StashSplitKeyContainer):
"""Like :class:`.StashSplitKeyContainer` but data is stratified by a label
(:obj:`partition_attr`) across each split.
"""
partition_attr: str = field(default=None)
"""The label used to partition the strata across each split"""
stratified_write: bool = field(default=True)
"""Whether or not to include the stratified counts when writing with
:meth:`write`.
"""
split_labels_path: Path = field(default=None)
"""If provided, the path is a pickled cache of
:obj:`stratified_count_dataframe`.
"""
def __post_init__(self):
super().__post_init__()
if self.partition_attr is None:
raise DatasetError("Missing 'partition_attr' field")
dfpath = self.split_labels_path
if dfpath is None:
dfpath = '_strat_split_labels'
self._strat_split_labels = PersistedWork(dfpath, self, mkdir=True)
def _create_splits(self) -> Dict[str, Tuple[str]]:
dist_keys: Sequence[str] = self.distribution.keys()
dist_last: str = next(iter(dist_keys))
dists: Set[str] = set(dist_keys) - {dist_last}
rows = []
for k, v in self.stash.items():
rows.append((k, getattr(v, self.partition_attr)))
df = pd.DataFrame(rows, columns=['key', self.partition_attr])
lab_splits: Dict[str, Set[str]] = collections.defaultdict(set)
for lab, dfg in df.groupby(self.partition_attr):
splits = {}
keys: List[str] = dfg['key'].to_list()
if self.shuffle:
random.shuffle(keys)
count = len(keys)
for dist in dists:
prop = self.distribution[dist]
n_samples = math.ceil(float(count) * prop)
samp = set(keys[:n_samples])
splits[dist] = samp
lab_splits[dist].update(samp)
keys = keys[n_samples:]
samp = set(keys)
splits[dist_last] = samp
lab_splits[dist_last].update(samp)
assert sum(map(len, lab_splits.values())) == len(df)
assert reduce(lambda a, b: a | b, lab_splits.values()) == \
set(df['key'].tolist())
shuf_splits = {}
for lab, keys in lab_splits.items():
if self.shuffle:
keys = list(keys)
random.shuffle(keys)
shuf_splits[lab] = tuple(keys)
return shuf_splits
def _count_proportions_by_split(self) -> Dict[str, Dict[str, str]]:
lab_counts = {}
kbs = self.keys_by_split
for split_name in sorted(kbs.keys()):
keys = kbs[split_name]
counts = collections.defaultdict(lambda: 0)
for k in keys:
item = self.stash[k]
lab = getattr(item, self.partition_attr)
counts[lab] += 1
lab_counts[split_name] = counts
return lab_counts
@property
@persisted('_strat_split_labels')
def stratified_split_labels(self) -> pd.DataFrame:
"""A dataframe with all keys, their respective labels and split.
"""
kbs = self.keys_by_split
rows = []
for split_name in sorted(kbs.keys()):
keys = kbs[split_name]
for k in keys:
item = self.stash[k]
lab = getattr(item, self.partition_attr)
rows.append((split_name, k, lab))
return pd.DataFrame(rows, columns='split_name id label'.split())
def clear(self):
super().clear()
self._strat_split_labels.clear()
@property
def stratified_count_dataframe(self) -> pd.DataFrame:
"""A count summarization of :obj:`stratified_split_labels`.
"""
df = self.stratified_split_labels
df = df.groupby('split_name label'.split()).size().\
reset_index(name='count')
df['proportion'] = df['count'] / df['count'].sum()
df = df.sort_values('split_name label'.split()).reset_index(drop=True)
return df
def _fmt_prop_by_split(self) -> Dict[str, Dict[str, str]]:
df = self.stratified_count_dataframe
tot = df['count'].sum()
dsets: Dict[str, Dict[str, str]] = collections.OrderedDict()
for split_name, dfg in df.groupby('split_name'):
dfg['fmt'] = df['count'].apply(lambda x: f'{x/tot*100:.2f}%')
dsets[split_name] = dict(dfg[['label', 'fmt']].values)
return dsets
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
if self.stratified_write:
lab_counts: Dict[str, Dict[str, str]] = self._fmt_prop_by_split()
self._write_dict(lab_counts, depth, writer)
self._write_line(f'Total: {len(self.stash)}', depth, writer)
else:
super().write(depth, writer) | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/dataset/split.py | split.py |
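# Illustrative sketch (not part of the original API): splitting the keys of an
# existing stash 80/10/10 and persisting the split assignment; ``stash`` is
# assumed to be any keyed Stash instance created elsewhere.
def _split_key_sketch(stash: Stash, key_dir: Path) -> Dict[str, Tuple[str]]:
    container = StashSplitKeyContainer(
        key_path=key_dir, pattern='{name}.dat', stash=stash,
        distribution={'train': 0.8, 'validate': 0.1, 'test': 0.1})
    # the keys are created once, written under ``key_path`` (one file per
    # split) and reused on subsequent calls
    return container.keys_by_split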
__author__ = 'Paul Landes'
from typing import Dict, Tuple, Set
from dataclasses import dataclass, field
import sys
import random as rand
import itertools as it
from collections import deque
from pathlib import Path
from zensols.persist import persisted, PersistedWork, Stash
from . import DatasetError, SplitKeyContainer
@dataclass
class LeaveNOutSplitKeyContainer(SplitKeyContainer):
"""A split key container that leaves one out of the dataset. By default, this
creates a dataset that has one data point for validation, another for test,
and the rest of the data for training.
"""
delegate: Stash = field()
"""The source for keys to generate the splits."""
distribution: Dict[str, int] = field(
default_factory=lambda: {'train': -1, 'validation': 1, 'test': 1})
"""The number of data points by each split type. If the value is an integer,
that number of data points are used. Otherwise, if it is a float, then
that percentage of the entire key set is used.
"""
shuffle: bool = field(default=True)
"""If ``True``, shuffle the keys obtained from :obj:`delegate` before creating
the splits.
"""
path: Path = field(default=None)
"""If not ``None``, persist the keys after shuffling (if enabled) to the path
specified, for reproducibility of key partitions.
"""
def __post_init__(self):
path = '_key_queue' if self.path is None else self.path
self._key_queue = PersistedWork(path, self, mkdir=True)
self._iter = 0
@persisted('_key_queue')
def _get_key_queue(self) -> deque:
keys = list(self.delegate.keys())
if self.shuffle:
rand.shuffle(keys)
return deque(keys)
def next_split(self) -> bool:
"""Create the next split so that the next access to properties such as
:obj:`keys_by_split` provide the next key split permutation.
"""
key_queue = self._get_key_queue()
key_queue.rotate(-1)
self._iter += 1
return (self._iter % len(key_queue)) == 0
@persisted('_split_names')
def _get_split_names(self) -> Set[str]:
return frozenset(self.distribution.keys())
def _get_keys_by_split(self) -> Dict[str, Tuple[str]]:
kq = self._get_key_queue()
keys = iter(kq)
klen = len(kq)
ds = self.distribution.items()
filled = False
by_split = {}
for name, n in sorted(ds, key=lambda x: x[1], reverse=True):
if n < 0:
if filled:
raise DatasetError("Distribution has more than one " +
f"'fill' (-1) value: {ds}")
filled = True
n = sys.maxsize
elif isinstance(n, float):
n = int(n * klen)
by_split[name] = tuple(it.islice(keys, n))
total = sum(map(lambda x: len(x), by_split.values()))
if total != klen:
raise DatasetError(
f'Number of allocated keys to the distribution ({total}) ' +
f'does not equal total keys ({klen})')
return by_split | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/dataset/leaveout.py | leaveout.py |
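# Illustrative sketch (not part of the original API): cycling through
# leave-one-out permutations; ``stash`` is assumed to be any Stash with at
# least three keys.
def _leave_n_out_sketch(stash: Stash) -> int:
    container = LeaveNOutSplitKeyContainer(delegate=stash)
    n_permutations = 0
    done = False
    while not done:
        splits: Dict[str, Tuple[str]] = container.keys_by_split
        # by default 'validation' and 'test' each hold exactly one key and
        # 'train' holds the rest
        n_permutations += 1
        # rotate the key queue; True is returned after a full cycle
        done = container.next_split()
    return n_permutations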
__author__ = 'Paul Landes'
from typing import Dict, Set, Tuple
from dataclasses import dataclass
from abc import abstractmethod, ABCMeta
import logging
import sys
from io import TextIOBase
from zensols.util import APIError
from zensols.config import Writable
from zensols.persist import Stash, PrimeableStash
logger = logging.getLogger(__name__)
class DatasetError(APIError):
"""Thrown when any dataset related is raised."""
@dataclass
class SplitKeyContainer(Writable, metaclass=ABCMeta):
"""An interface defining a container that partitions data sets
(i.e. ``train`` vs ``test``). For instances of this class, the data consists
of the unique keys that point at the data items.
"""
def _get_split_names(self) -> Set[str]:
return self._get_keys_by_split().keys()
def _get_counts_by_key(self) -> Dict[str, int]:
ks = self._get_keys_by_split()
return {k: len(ks[k]) for k in ks.keys()}
@abstractmethod
def _get_keys_by_split(self) -> Dict[str, Tuple[str]]:
pass
@property
def split_names(self) -> Set[str]:
"""Return the names of each split in the dataset.
"""
return self._get_split_names()
@property
def counts_by_key(self) -> Dict[str, int]:
"""Return data set splits name to count for that respective split.
"""
return self._get_counts_by_key()
@property
def keys_by_split(self) -> Dict[str, Tuple[str]]:
"""Generate a dictionary of split name to keys for that split. It is
expected this method will be very expensive.
"""
return self._get_keys_by_split()
def clear(self):
"""Clear any cached state."""
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_delegate: bool = False):
self._write_line('split stash splits:', depth, writer)
t = 0
for ks in self.keys_by_split.values():
t += len(ks)
for k, ks in self.keys_by_split.items():
ln = len(ks)
self._write_line(f'{k}: {ln} ({ln/t*100:.1f}%)',
depth + 1, writer)
self._write_line(f'total: {t}', depth + 1, writer)
@dataclass
class SplitStashContainer(PrimeableStash, SplitKeyContainer,
metaclass=ABCMeta):
"""An interface like ``SplitKeyContainer``, but whose implementations are of
``Stash`` containing the instance data.
For a default implementation, see :class:`.DatasetSplitStash`.
"""
@abstractmethod
def _get_split_name(self) -> str:
pass
@abstractmethod
def _get_splits(self) -> Dict[str, Stash]:
pass
@property
def split_name(self) -> str:
"""Return the name of the split this stash contains. Thus, all
data/items returned by this stash are in the data set given by this name
(i.e. ``train``).
"""
return self._get_split_name()
@property
def splits(self) -> Dict[str, Stash]:
"""Return a dictionary with keys as split names and values as the
stashes represented by that split.
:see: :meth:`split_name`
"""
return self._get_splits() | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/dataset/interface.py | interface.py |
__author__ = 'Paul Landes'
from typing import Dict, Any
from pathlib import Path
import pandas as pd
from zensols.config import DictionaryConfig
class DataframeConfig(DictionaryConfig):
"""A :class:`~zensols.config.Configurable` that dataframes as sources. This
is useful for providing labels to nominial label vectorizers.
"""
def __init__(self, csv_path: Path, default_section: str,
columns: Dict[str, str] = None, column_eval: str = None,
counts: Dict[str, str] = None):
"""Initialize the configuration from a dataframe (see parameters).
:param csv_path: the path to the CSV file to create the dataframe
:param default_section: the singleton section name, which has as options
a list of the columns of the dataframe
:param columns: the columns to add to the configuration from the
dataframe with ``key, values`` as ``column names, option
names``
:param column_eval: Python code to evaluate and apply to each column if
provided
:param counts: additional option entries in the section to add as counts
of respective columns with ``key, values`` as ``column
option names, new entry option names; where the ``column
option names`` are those given as values from the
``columns`` :class:`dict`
"""
df: pd.DataFrame = pd.read_csv(csv_path)
sec: Dict[str, Any] = {}
if columns is None:
columns = dict(map(lambda x: (x, x), df.columns))
col_name: str
for df_col, sec_name in columns.items():
col: pd.Series = df[df_col]
if column_eval is not None:
col = eval(column_eval)
if isinstance(col, pd.Series):
col = col.tolist()
sec[sec_name] = col
if counts is not None:
for col_name, sec_name in counts.items():
sec[sec_name] = len(sec[col_name])
super().__init__(config={default_section: sec},
default_section=default_section) | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/dataframe/config.py | config.py |
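# Illustrative sketch (not part of the original API): exposing a CSV's
# ``label`` column as configuration options; the file path, section and
# option names are hypothetical.
def _dataframe_config_sketch(csv_path: Path) -> DataframeConfig:
    conf = DataframeConfig(
        csv_path=csv_path,
        default_section='label_vals',
        # map the dataframe column 'label' to the option name 'labels'
        columns={'label': 'labels'},
        # also add an option 'label_count' with the number of labels
        counts={'labels': 'label_count'})
    # the 'label_vals' section then has options 'labels' and 'label_count'
    return conf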
__author__ = 'Paul Landes'
from typing import Iterable, Dict, Set, Tuple
from dataclasses import dataclass, field
import logging
import sys
from io import TextIOBase
from abc import abstractmethod, ABCMeta
from collections import OrderedDict
from pathlib import Path
import pandas as pd
from sklearn.model_selection import train_test_split
from zensols.util import APIError
from zensols.config import Writable
from zensols.persist import (
Deallocatable,
PersistedWork,
persisted,
ReadOnlyStash,
PrimeableStash,
)
from zensols.install import Installer, Resource
from zensols.dataset import SplitKeyContainer
logger = logging.getLogger(__name__)
class DataframeError(APIError):
"""Thrown for dataframe stash issues."""
@dataclass
class DataframeStash(ReadOnlyStash, Deallocatable, Writable,
PrimeableStash, metaclass=ABCMeta):
"""A factory stash that uses a Pandas data frame from which to load. It uses
the data frame index as the keys and :class:`pandas.Series` as values. The
dataframe is usually constructed by reading a file (i.e.CSV) and doing some
transformation before using it in an implementation of this stash.
The dataframe created by :meth:`_get_dataframe` must have a string or
integer index since keys for all stashes are of type :class:`str`. The
index will be mapped to a string if it is an int automatically.
"""
dataframe_path: Path = field()
"""The path to store the pickeled version of the generated dataframe
created with :meth:`_get_dataframe`.
"""
def __post_init__(self):
super().__post_init__()
Deallocatable.__init__(self)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'split stash post init: {self.dataframe_path}')
self._dataframe = PersistedWork(self.dataframe_path, self, mkdir=True)
def deallocate(self):
super().deallocate()
self._dataframe.deallocate()
@abstractmethod
def _get_dataframe(self) -> pd.DataFrame:
"""Get or create the dataframe
"""
pass
def _prepare_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:
dt = df.index.dtype
if dt != object:
if dt != int:
s = f'Data frame index must be a string or int, but got: {dt}'
raise DataframeError(s)
else:
df.index = df.index.map(str)
return df
@property
@persisted('_dataframe')
def dataframe(self):
df = self._get_dataframe()
df = self._prepare_dataframe(df)
return df
def prime(self):
super().prime()
self.dataframe
def clear(self):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('clearing dataframe stash')
self._dataframe.clear()
def load(self, name: str) -> pd.Series:
return self.dataframe.loc[name]
def exists(self, name: str) -> bool:
return name in self.dataframe.index
def keys(self) -> Iterable[str]:
return map(str, self.dataframe.index)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
df = self.dataframe
self._write_line(f'rows: {df.shape[0]}', depth, writer)
self._write_line(f'cols: {", ".join(df.columns)}', depth, writer)
@dataclass
class SplitKeyDataframeStash(DataframeStash, SplitKeyContainer):
"""A stash and split key container that reads from a dataframe.
"""
key_path: Path = field()
"""The path where the key splits (as a ``dict``) is pickled."""
split_col: str = field()
"""The column name in the dataframe used to indicate the split
(i.e. ``train`` vs ``test``).
"""
def __post_init__(self):
super().__post_init__()
self._keys_by_split = PersistedWork(self.key_path, self, mkdir=True)
def deallocate(self):
super().deallocate()
self._keys_by_split.deallocate()
def _create_keys_for_split(self, split_name: str, df: pd.DataFrame) -> \
Iterable[str]:
"""Generate an iterable of string keys. It is expected this method to be
potentially very expensive, so the results are cached to disk. This
implementation returns the dataframe index.
:param split_name: the name of the split (i.e. ``train`` vs ``test``)
:param df: the data frame for the grouping of keys from CSV of data
"""
return df.index
def _get_counts_by_key(self) -> Dict[str, int]:
sc = self.split_col
return dict(self.dataframe.groupby([sc])[sc].count().items())
@persisted('_split_names')
def _get_split_names(self) -> Set[str]:
return set(self.dataframe[self.split_col].unique())
@persisted('_keys_by_split')
def _get_keys_by_split(self) -> Dict[str, Tuple[str]]:
keys_by_split = OrderedDict()
split_col = self.split_col
for split, df in self.dataframe.groupby([split_col]):
logger.info(f'parsing keys for {split}')
keys = self._create_keys_for_split(split, df)
keys_by_split[split] = tuple(keys)
return keys_by_split
def clear(self):
super().clear()
self.clear_keys()
def clear_keys(self):
"""Clear only the cache of keys generated from the group by.
"""
self._keys_by_split.clear()
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
total = self.dataframe.shape[0]
self._write_line('data frame splits:', depth, writer)
for split, cnt in self.counts_by_key.items():
self._write_line(f'{split}: {cnt} ({cnt/total*100:.1f}%)',
depth, writer)
self._write_line(f'total: {total}', depth, writer)
@dataclass
class AutoSplitDataframeStash(SplitKeyDataframeStash):
"""Automatically a dataframe in to train, test and validation datasets by
adding a :obj:`split_col` with the split name.
"""
distribution: Dict[str, float] = field()
"""The distribution as a percent across all key splits. The distribution
values must add to 1. The keys must have ``train``, ``test`` and
``validate``.
"""
def __post_init__(self):
super().__post_init__()
sm = float(sum(self.distribution.values()))
err, errm = abs(1. - sm), 1e-1
if err > errm:
raise APIError('distribution must add to 1: ' +
f'{self.distribution} (err={err} > {errm})')
def _prepare_dataframe(self, df: pd.DataFrame) -> pd.DataFrame:
n_train = self.distribution['train']
n_test = self.distribution['test']
n_val = self.distribution['validate']
n_test_val = n_test + n_val
n_test = n_test / n_test_val
train, test_val = train_test_split(df, test_size=1-n_train)
test, val = train_test_split(test_val, test_size=n_test)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'split dataframe: train: {train.size}, ' +
f'test: {test.size}, validation: {val.size}')
# pandas complains about modifying a slice
train = train.copy()
test = test.copy()
val = val.copy()
train[self.split_col] = 'train'
test[self.split_col] = 'test'
val[self.split_col] = 'validation'
df = pd.concat([train, test, val], ignore_index=False)
df = super()._prepare_dataframe(df)
return df
@dataclass
class DefaultDataframeStash(SplitKeyDataframeStash):
"""A default implementation of :class:`.DataframeSplitStash` that creates the
Pandas dataframe by simply reading it from a specificed CSV file. The
index is a string type appropriate for a stash.
"""
input_csv_path: Path = field()
"""A path to the CSV of the source data."""
def _get_dataframe(self) -> pd.DataFrame:
return pd.read_csv(self.input_csv_path)
@dataclass
class ResourceFeatureDataframeStash(DataframeStash):
"""Create the dataframe by reading the newline delimited set of clickbate
headlines from the corpus files.
"""
installer: Installer = field()
"""The installer used to download and uncompress dataset."""
resource: Resource = field()
"""Use to resolve the corpus file."""
def _get_dataframe(self) -> pd.DataFrame:
self.installer()
path: Path = self.installer[self.resource]
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'loading CSV from resource path: {path}')
df = pd.read_csv(path)
df = df.rename(columns=dict(
zip(df.columns, map(str.lower, df.columns))))
return df | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/dataframe/stash.py | stash.py |
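# Illustrative sketch (not part of the original API): a CSV backed stash whose
# splits come from a ``split`` column in the file; the paths and column name
# are placeholders.
def _dataframe_stash_sketch(csv_path: Path, work_dir: Path) -> Dict[str, int]:
    stash = DefaultDataframeStash(
        dataframe_path=work_dir / 'df.dat',
        key_path=work_dir / 'keys.dat',
        split_col='split',
        input_csv_path=csv_path)
    # keys are the (stringified) dataframe index; counts are grouped by the
    # values found in the ``split`` column
    return stash.counts_by_key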
__author__ = 'Paul Landes'
from typing import Dict, Iterable, Any, Tuple, Union, Type, List
import sys
import logging
import gc
from io import TextIOBase
import random
import torch
import torch.cuda as cuda
from torch import Tensor
from torch import nn
import torch.multiprocessing as mp
import numpy as np
from zensols.config import Writable
from zensols.persist import persisted, PersistableContainer, PersistedWork
from . import TorchTypes
logger = logging.getLogger(__name__)
class CudaInfo(Writable):
"""A utility class that provides information about the CUDA configuration for
the current (hardware) environment.
"""
@property
@persisted('_gpu_available', cache_global=True)
def gpu_available(self) -> bool:
return cuda.is_available()
@property
@persisted('_num_devices', cache_global=True)
def num_devices(self) -> int:
"""Return number of devices connected.
"""
return cuda.device_count()
def get_devices(self, format: bool = False) -> Dict[int, Dict[str, Any]]:
devs = {}
for i in range(self.num_devices):
memory = dict(
reserved=cuda.memory_reserved(i),
allocated=cuda.memory_allocated(i),
total=cuda.get_device_properties(i).total_memory,
)
if format:
for k, v in memory.items():
memory[k] = f'{memory[k]/1e9:.2f} GB'
devs[i] = dict(name=cuda.get_device_name(i), memory=memory)
return devs
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
"""Class representation as number of devices connected and about them.
:see: cuda
"""
num = self.num_devices
self._write_line(f'GPU available: {self.gpu_available}', depth, writer)
if self.gpu_available:
self._write_line(f'devices: ({num})', depth, writer)
self._write_object(self.get_devices(True), depth + 1, writer)
def __str__(self):
return f'CUDA devices: {self.num_devices}'
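# Usage sketch (not part of the original API): dump the device information
# collected by `CudaInfo` above.  It is safe to run on CPU-only machines since
# `gpu_available` guards the per-device details.
def _example_cuda_info(writer: TextIOBase = sys.stdout) -> None:
    info = CudaInfo()
    # writes "GPU available: ..." and, when a GPU is present, each device's
    # name along with reserved/allocated/total memory
    info.write(writer=writer)
    print(str(info), file=writer)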
class TorchConfig(PersistableContainer, Writable):
"""A utility class that provides access to CUDA APIs. It provides information
on the current CUDA configuration and convenience methods to create, copy
and modify tensors. These are handy for any given CUDA configuration and
can back off to the CPU when CUDA isn't available.
"""
_CPU_DEVICE: str = 'cpu'
_RANDOM_SEED: dict = None
_CPU_WARN: bool = False
def __init__(self, use_gpu: bool = True, data_type: type = torch.float32,
cuda_device_index: int = None, device_name: str = None):
"""Initialize this configuration.
:param use_gpu: whether or not to use CUDA/GPU
:param data_type: the default data type to use when creating new
tensors in this configuration
:param cuda_device_index: the CUDA device to use, which defaults to 0
if ``use_gpu`` is ``True`` and CUDA is available
:param device_name: the string name of the device to use (i.e. ``cpu``
or ``mps``); if provided, overrides
``cuda_device_index``
"""
super().__init__()
logger.debug(f'use_gpu: {use_gpu}')
self.use_gpu = use_gpu
self.data_type = data_type
if device_name is not None:
self._device = torch.device(device_name)
# we can't globally cache this in case there are multiple instances of
# this class that have different values of ``use_gpu``
self._init_device_pw = PersistedWork('_init_device_pw', self)
self._cpu_device_pw = PersistedWork(
'_cpu_device_pw', self, cache_global=True)
self._cpu_device_pw._mark_deallocated()
self._cuda_device_index = cuda_device_index
@persisted('_init_device_pw')
def _init_device(self) -> torch.device:
"""Attempt to initialize CUDA, and if successful, return the CUDA device.
"""
is_avail = torch.cuda.is_available()
use_gpu = self.use_gpu and is_avail
logger.debug(f'use cuda: {self.use_gpu}, is avail: {is_avail}')
if use_gpu:
if logger.isEnabledFor(logging.INFO):
    logger.info('successfully initialized CUDA')
cuda_dev = torch.cuda.current_device()
device = torch.device('cuda', cuda_dev)
self.cuda_device_index = cuda_dev
else:
device = torch.device(self._CPU_DEVICE)
if self.use_gpu and not is_avail:
if not self.__class__._CPU_WARN:
logger.info('requested GPU but not available--using CPU')
self.__class__._CPU_WARN = True
self.use_gpu = False
self._cuda_device_index = None
return device
@property
@persisted('_cpu_device_pw')
def cpu_device(self) -> torch.device:
"""Return the CPU CUDA device, which is the device type configured to utilize
the CPU (rather than the GPU).
"""
return torch.device(self._CPU_DEVICE)
@classmethod
def cpu_device_name(cls) -> str:
"""The string name of the torch CPU device."""
return cls._CPU_DEVICE
@property
def device(self) -> torch.device:
"""Return the torch device configured.
"""
if not hasattr(self, '_device'):
if self.use_gpu:
self._device = self._init_device()
if self._cuda_device_index is not None:
self._device = torch.device(
'cuda', self._cuda_device_index)
else:
self._device = self.cpu_device
return self._device
@device.setter
def device(self, device: torch.device):
"""Set (force) the device to be used in this configuration."""
self._device = device
torch.cuda.set_device(device)
logger.info(f'using device: {device}')
@property
def using_cpu(self) -> bool:
"""Return ``True`` if this configuration is using the CPU device."""
return self.device.type == self._CPU_DEVICE
@classmethod
def is_on_cpu(cls, arr: Tensor) -> bool:
"""Return ``True`` if the passed tensor is on the CPU."""
return arr.device.type == cls._CPU_DEVICE
@property
def gpu_available(self) -> bool:
"""Return whether or not CUDA GPU access is available."""
return self._init_device().type != self._CPU_DEVICE
@property
def cuda_devices(self) -> Tuple[torch.device]:
"""Return all cuda devices.
"""
return tuple(map(lambda n: torch.device('cuda', n),
range(torch.cuda.device_count())))
@property
def cuda_device_index(self) -> Union[int, None]:
"""Return the CUDA device index if CUDA is being used for this configuration.
Otherwise return ``None``.
"""
device = self.device
if device.type == 'cuda':
return device.index
@cuda_device_index.setter
def cuda_device_index(self, device: int):
"""Set the CUDA device index for this configuration."""
self.device = torch.device('cuda', device)
def same_device(self, tensor_or_model) -> bool:
"""Return whether or not a tensor or model is in the same memory space as this
configuration instance.
"""
device = self.device
return hasattr(tensor_or_model, 'device') and \
tensor_or_model.device == device
@staticmethod
def in_memory_tensors() -> List[Tensor]:
"""Returns all in-memory tensors and parameters.
:see: :meth:`~zensols.deeplearn.cli.app.show_leaks`
"""
arrs: List[Tensor] = []
for obj in gc.get_objects():
try:
if torch.is_tensor(obj) or \
(hasattr(obj, 'data') and torch.is_tensor(obj.data)):
arrs.append(obj)
except Exception:
pass
return arrs
@classmethod
def write_in_memory_tensors(cls: Type, writer: TextIOBase = sys.stdout,
filter_device: torch.device = None):
"""Prints in-memory tensors and parameters.
:param filter_device: if given, write only tensors matching this device
:see: :class:`~zensols.deeplearn.torchconfig.TorchConfig`
"""
objs: List[Tensor] = cls.in_memory_tensors()
for obj in objs:
if filter_device is None or filter_device == obj.device:
writer.write(
f'{type(obj)}: {tuple(obj.shape)} on {obj.device}\n')
def write_device_tensors(self, writer: TextIOBase = sys.stdout):
"""Like :meth:`write_in_memory_tensors`, but filter on this instance's device.
:param filter_device: if given, write only tensors matching this device
:see: :class:`~zensols.deeplearn.torchconfig.TorchConfig`
"""
self.write_in_memory_tensors(writer=writer, filter_device=self.device)
@staticmethod
def empty_cache():
"""Empty the CUDA torch cache. This releases memory in the GPU and should not
be necessary to call for normal use cases.
"""
torch.cuda.empty_cache()
@property
def info(self) -> CudaInfo:
"""Return the CUDA information, which include specs of the device.
"""
self._init_device()
return CudaInfo()
@property
def tensor_class(self) -> Type[torch.dtype]:
"""Return the class type based on the current configuration of this instance.
For example, if using ``torch.float32`` on the GPU,
``torch.cuda.FloatTensor`` is returned.
"""
return TorchTypes.get_tensor_class(self.data_type, self.using_cpu)
@property
def numpy_data_type(self) -> Type[torch.dtype]:
"""Return the numpy type that corresponds to this instance's configured
``data_type``.
"""
return TorchTypes.get_numpy_type(self.data_type)
def to(self, tensor_or_model: Union[nn.Module, Tensor]) -> \
Union[nn.Module, Tensor]:
"""Copy the tensor or model to the device this to that of this configuration.
"""
if not self.same_device(tensor_or_model):
tensor_or_model = tensor_or_model.to(self.device)
if isinstance(tensor_or_model, nn.Module) and \
hasattr(tensor_or_model, 'dtype') and \
tensor_or_model.dtype != self.data_type:
tensor_or_model.type(self.data_type)
return tensor_or_model
@classmethod
def to_cpu_deallocate(cls, *arrs: Tuple[Tensor]) -> \
Union[Tuple[Tensor], Tensor]:
"""Safely copy detached memory to the CPU and delete local instance (possibly
GPU) memory to speed up resource deallocation. If the tensor is
already on the CPU, it's simply passed back. Otherwise the tensor is
deleted.
This method is robust with ``None``, which are skipped and substituted
as ``None`` in the output.
:param arrs: the tensors to copy to the CPU (if not already there)
:return: the singleton tensor if only one ``arrs`` is passed;
otherwise, the CPU copied tensors from the input
"""
cpus = []
for arr in arrs:
if arr is None or (cls.is_on_cpu(arr) and not arr.requires_grad):
cpu_arr = arr
else:
cpu_arr = arr.detach().clone().cpu()
# suggest to interpreter to mark for garbage collection
# immediately
del arr
cpus.append(cpu_arr)
return cpus[0] if len(cpus) == 1 else tuple(cpus)
def clone(self, tensor: Tensor, requires_grad: bool = True) -> Tensor:
"""Clone a tensor.
"""
return tensor.detach().clone().requires_grad_(requires_grad)
def _populate_defaults(self, kwargs):
"""Add keyword arguments to typical torch tensor creation functions.
"""
if 'dtype' not in kwargs:
kwargs['dtype'] = self.data_type
kwargs['device'] = self.device
def from_iterable(self, array: Iterable[Any]) -> Tensor:
"""Return a one dimenstional tensor created from ``array`` using the type and
device in the current instance configuration.
"""
cls = self.tensor_class
if not isinstance(array, tuple) and not isinstance(array, list):
array = tuple(array)
return cls(array)
def singleton(self, *args, **kwargs) -> Tensor:
"""Return a new tensor using ``torch.tensor``.
"""
self._populate_defaults(kwargs)
return torch.tensor(*args, **kwargs)
def float(self, *args, **kwargs) -> Tensor:
"""Return a new tensor using ``torch.tensor`` as a float type.
"""
kwargs['dtype'] = self.float_type
self._populate_defaults(kwargs)
return torch.tensor(*args, **kwargs)
def int(self, *args, **kwargs) -> Tensor:
"""Return a new tensor using ``torch.tensor`` as a int type.
"""
kwargs['dtype'] = self.int_type
self._populate_defaults(kwargs)
return torch.tensor(*args, **kwargs)
def sparse(self, indicies: Tuple[int], values: Tuple[float],
shape: Tuple[int, int]):
"""Create a sparce tensor from indexes and values.
"""
i = torch.LongTensor(indicies)
v = torch.FloatTensor(values)
cls = TorchTypes.get_sparse_class(self.data_type)
return cls(i, v, shape, device=self.device)
def is_sparse(self, arr: Tensor) -> bool:
"""Return whether or not a tensor a sparse.
"""
return arr.layout == torch.sparse_coo
def empty(self, *args, **kwargs) -> Tensor:
"""Return a new tesor using ``torch.empty``.
"""
self._populate_defaults(kwargs)
return torch.empty(*args, **kwargs)
def zeros(self, *args, **kwargs) -> Tensor:
"""Return a new tensor of zeros using ``torch.zeros``.
"""
self._populate_defaults(kwargs)
return torch.zeros(*args, **kwargs)
def ones(self, *args, **kwargs) -> Tensor:
"""Return a new tensor of zeros using ``torch.ones``.
"""
self._populate_defaults(kwargs)
return torch.ones(*args, **kwargs)
def from_numpy(self, arr: np.ndarray) -> Tensor:
"""Return a new tensor generated from a numpy aray using ``torch.from_numpy``.
The array type is converted if necessary.
"""
tarr = torch.from_numpy(arr)
if arr.dtype != self.numpy_data_type:
tarr = tarr.type(self.data_type)
return self.to(tarr)
def cat(self, *args, **kwargs) -> Tensor:
"""Concatenate tensors in to one tensor using ``torch.cat``.
"""
return self.to(torch.cat(*args, **kwargs))
def to_type(self, arr: Tensor) -> Tensor:
"""Convert the type of the given array to the type of this instance.
"""
if self.data_type != arr.dtype:
arr = arr.type(self.data_type)
return arr
@property
def float_type(self) -> Type:
"""Return the float type that represents this configuration, converting to the
corresponding precision from integer if necessary.
:return: the float that represents this data, or ``None`` if neither
float nor int
"""
dtype = self.data_type
if TorchTypes.is_int(dtype):
return TorchTypes.int_to_float(dtype)
elif TorchTypes.is_float(dtype):
return dtype
@property
def int_type(self) -> Type:
"""Return the int type that represents this configuration, converting to the
corresponding precision from integer if necessary.
:return: the int that represents this data, or ``None`` if neither
int nor float
"""
dtype = self.data_type
if TorchTypes.is_float(dtype):
return TorchTypes.float_to_int(dtype)
elif TorchTypes.is_int(dtype):
return dtype
@staticmethod
def equal(a: Tensor, b: Tensor) -> bool:
"""Return whether or not two tensors are equal. This does an exact cell
comparison.
"""
return torch.all(a.eq(b)).item()
@staticmethod
def close(a: Tensor, b: Tensor) -> bool:
"""Return whether or not two tensors are equal. This does an exact cell
comparison.
"""
return torch.allclose(a, b)
@persisted('_cross_entropy_pad_pw')
def _cross_entropy_pad(self) -> Tensor:
ix = nn.CrossEntropyLoss().ignore_index
return torch.tensor([ix], device=self.device, dtype=self.data_type)
def cross_entropy_pad(self, size: Tuple[int]) -> Tensor:
"""Create a padded tensor of size ``size`` using the repeated pad
:obj:`~torch.nn.CrossEntropyLoss.ignore_index`.
"""
pad = self._cross_entropy_pad()
return pad.repeat(size)
@classmethod
def get_random_seed(cls: Type) -> int:
"""Get the cross system random seed, meaning the seed applied to CUDA and the
Python *random* library.
"""
if cls._RANDOM_SEED is not None:
return cls._RANDOM_SEED['seed']
@classmethod
def get_random_seed_context(cls: Type) -> Dict[str, Any]:
"""Return the random seed context given to :py:meth:`set_random_seed` to
restore across models for consistent results.
"""
return cls._RANDOM_SEED
@classmethod
def set_random_seed(cls: Type, seed: int = 0, disable_cudnn: bool = True,
rng_state: bool = True):
"""Set the random number generator for PyTorch.
:param seed: the random seed to be set
:param disable_cudnn: if ``True`` disable NVidia's backend cuDNN
hardware acceleration, which might have
non-deterministic features
:param rng_state: set the CUDA random state array to zeros
:see: `Torch Random Seed <https://discuss.pytorch.org/t/random-seed-initialization/7854>`_
:see: `Reproducibility <https://discuss.pytorch.org/t/non-reproducible-result-with-gpu/1831>`_
"""
cls._RANDOM_SEED = {'seed': seed,
'disable_cudnn': disable_cudnn,
'rng_state': rng_state}
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
if rng_state:
new_states = []
for state in torch.cuda.get_rng_state_all():
zeros = torch.zeros(state.shape, dtype=state.dtype)
new_states.append(zeros)
torch.cuda.set_rng_state_all(new_states)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if disable_cudnn:
torch.backends.cudnn.enabled = False
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
@classmethod
def init(cls: Type, spawn_multiproc: str = 'spawn',
seed_kwargs: Dict[str, Any] = {}):
"""Initialize the PyTorch framework. This includes:
* Configuration of PyTorch multiprocessing so subprocesses can access
the GPU, and
* Setting the random seed state.
This needs to be initialized at the very beginning of your program.
Example::
def main():
from zensols.deeplearn import TorchConfig
TorchConfig.init()
**Note**: this method is separate from :meth:`set_random_seed` because
that method is called by the framework to reset the seed after a model
is unpickled.
:see: :mod:`torch.multiprocessing`
:see: :meth:`set_random_seed`
"""
if cls._RANDOM_SEED is None:
cls.set_random_seed(**seed_kwargs)
try:
cur = mp.get_sharing_strategy()
if logger.isEnabledFor(logging.INFO):
logger.info('invoking pool with torch spawn ' +
f'method: {spawn_multiproc}, current: {cur}')
if spawn_multiproc:
mp.set_start_method('spawn')
else:
mp.set_start_method('forkserver', force=True)
except RuntimeError as e:
msg = str(e)
if msg != 'context has already been set':
logger.warning(f'could not invoke spawn on pool: {e}')
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
if self.gpu_available:
self.info.write(depth, writer)
else:
self._write_line('CUDA is not available', depth, writer)
self._write_line(f'selected device: {self.device}', depth, writer)
def __str__(self):
return f'use cuda: {self.use_gpu}, device: {self.device}'
def __repr__(self):
return self.__str__()
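# Usage sketch (not part of the original API): create a CPU-only `TorchConfig`,
# allocate tensors with its dtype/device defaults, and compare them with the
# `equal`/`close` helpers defined above.
def _example_torch_config() -> None:
    tc = TorchConfig(use_gpu=False, data_type=torch.float32)
    ones = tc.ones(2, 3)                      # float32 tensor on the CPU
    from_np = tc.from_numpy(np.ones((2, 3)))  # converted from float64 numpy
    assert TorchConfig.equal(ones, from_np)
    assert TorchConfig.close(ones, from_np)
    # copy (or no-op, since both are already on the CPU) to the configured
    # device and confirm the memory space matches
    moved = tc.to(ones)
    assert tc.same_device(moved)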
class printopts(object):
"""Object used with a ``with`` scope that sets options, then sets them back.
Example::
with printopts(profile='full', linewidth=120):
print(tensor)
:see: `PyTorch Documentation <https://pytorch.org/docs/master/generated/torch.set_printoptions.html>`_
"""
DEFAULTS = {'precision': 4,
'threshold': 1000,
'edgeitems': 3,
'linewidth': 80,
'profile': 'default',
'sci_mode': None}
def __init__(self, **kwargs):
if len(kwargs) > 0:
torch.set_printoptions(**kwargs)
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
torch.set_printoptions(**self.DEFAULTS)
# source file: zensols/deeplearn/torchconfig.py (package: zensols.deeplearn)
__author__ = 'Paul Landes'
from typing import List, Any, Set, Tuple, Dict, Union, Type
from dataclasses import dataclass, field
from abc import ABC, abstractmethod
import logging
from pathlib import Path
from datetime import datetime
import pandas as pd
from zensols.introspect import ClassImporter
from zensols.config import ConfigurationError
mod_logger = logging.getLogger(__name__ + '.status')
"""Logger for this module."""
event_logger = logging.getLogger(__name__ + '.event')
"""Event logger for the :class:`.LogModelObserver."""
class ModelObserver(ABC):
"""Recipient of notifications by the model framework.
"""
@abstractmethod
def notify(self, event: str, caller: Any, context: Any = None):
"""Notify all registered observers of an event.
:param event: the unique identifier of the event using underscore
spacing and prefixed by a unique identifier per caller
:param caller: the object calling this method
:param context: any object specific to the call and understood by the
client on a per client basis
"""
pass
@dataclass
class FilterModelObserver(ModelObserver):
"""Filters messages from the client to a delegate observer.
"""
delegate: ModelObserver = field()
"""The delegate observer to notify on notifications from this observer."""
include_events: Set[str] = field(default_factory=set)
"""A set of events used to indicate to notify :obj:`delegate`."""
def notify(self, event: str, caller: Any, context: Any = None):
if event in self.include_events:
self.delegate.notify(event, caller, context)
@dataclass
class LogModelObserver(ModelObserver):
"""Logs notifications to :mod:`logging` system.
"""
logger: logging.Logger = field(default=event_logger)
"""The logger that receives notifications."""
level: int = field(default=logging.INFO)
"""The level used for logging."""
add_context_format: str = field(default='{event}: {context}')
"""If not ``None``, use the string to format the log message."""
def notify(self, event: str, caller: Any, context: Any = None):
if self.logger.isEnabledFor(self.level):
if self.add_context_format is not None and context is not None:
event = self.add_context_format.format(
**{'event': event, 'context': context})
self.logger.log(self.level, event)
@dataclass
class RecorderObserver(ModelObserver):
"""Records notifications and provides them as output.
"""
events: List[Tuple[datetime, str, Any, Any]] = field(default_factory=list)
"""All events received by this observer thus far."""
flatten: bool = field(default=True)
"""Whether or not make the caller and context in to a strings before storing
them in :obj:`events`.
"""
flatten_short_classes: bool = field(default=True)
"""If ``True``, then only use the class name sans module. Otherwise, use the
fully qualified class name.
"""
def _flatten(self, event: str, caller: Any, context: Any = None):
if self.flatten:
if self.flatten_short_classes:
caller = caller.__class__.__name__
else:
caller = ClassImporter.full_classname(caller.__class__)
if not isinstance(context, (str, bool, int, float)):
context = str(context)
return event, caller, context
def notify(self, event: str, caller: Any, context: Any = None):
now = datetime.now()
event, caller, context = self._flatten(event, caller, context)
self.events.append((now, event, caller, context))
def events_as_df(self) -> pd.DataFrame:
return pd.DataFrame(
self.events, columns='time event caller context'.split())
@dataclass
class DumperObserver(RecorderObserver):
"""A class that dumps all data when certain events are received as a CSV to the
file sytsem.
"""
_EVENT_IX_COL = 'index'
output_file: Path = field(default=Path('dumper-observer.csv'))
"""The path to where the (flattened data) is written."""
file_mode: str = field(default='append')
"""If ``append``, then append data to the output .CSV file. Otherwise, if
``overwrite`` then overwrite the data.
"""
trigger_events: Set[str] = field(default_factory=set)
"""A set of all events received that trigger a dump."""
trigger_callers: Set[Union[str, Type]] = field(default=None)
"""A set of all callers' *fully qualified* class names. If set to ``None`` the
caller is not a constraint that precludes the dump.
"""
mkdir: bool = field(default=True)
"""If ``True`` then create the parent directories if they don't exist."""
add_columns: Dict[str, Any] = field(default=None)
"""Additional columns to add to the data frame across all rows if given."""
def __post_init__(self):
fms = {'append', 'overwrite'}
if self.file_mode not in fms:
raise ConfigurationError(
f'Expecting one of {fms}, but got: {self.file_mode}')
if self.trigger_callers is not None:
self.trigger_callers = set(
map(lambda t: ClassImporter(t).get_class(),
self.trigger_callers))
if mod_logger.isEnabledFor(logging.DEBUG):
mod_logger.debug(f'trigger callers: {self.trigger_callers}')
def _tc_inst_of(self, caller: Any) -> bool:
for tc in self.trigger_callers:
if isinstance(caller, tc):
if mod_logger.isEnabledFor(logging.DEBUG):
mod_logger.debug(f'triggered callers {caller.__class__} type of {tc}')
return True
return False
def _should_dump(self, event: str, caller: Any, context: Any) -> bool:
if event in self.trigger_events:
dump = True
if self.trigger_callers is not None:
if mod_logger.isEnabledFor(logging.DEBUG):
mod_logger.debug(f'filtering on {self.trigger_callers}')
dump = False
ctype = caller.__class__
if ctype in self.trigger_callers or self._tc_inst_of(caller):
if mod_logger.isEnabledFor(logging.DEBUG):
mod_logger.debug(f'triggered callers: {caller}')
dump = True
else:
dump = False
return dump
def _dump(self, event, caller, context):
df: pd.DataFrame = self.events_as_df()
if self.mkdir:
self.output_file.parent.mkdir(parents=True, exist_ok=True)
if self.add_columns is not None:
for k in sorted(self.add_columns.keys()):
df[k] = self.add_columns[k]
if self.file_mode == 'append' and self.output_file.exists():
df_old = pd.read_csv(
self.output_file, index_col=self._EVENT_IX_COL)
df = pd.concat((df_old, df))
df.to_csv(self.output_file, index_label=self._EVENT_IX_COL)
if mod_logger.isEnabledFor(logging.INFO):
mod_logger.info(f'wrote events: {self.output_file}')
def notify(self, event: str, caller: Any, context: Any = None):
super().notify(event, caller, context)
if self._should_dump(event, caller, context):
self._dump(event, caller, context)
@dataclass
class ModelObserverManager(object):
observers: List[ModelObserver] = field(default_factory=list)
"""A list of observers that get notified of all model lifecycle and process
events.
"""
def add(self, observer: ModelObserver):
"""Add an observer to be notified of event changes.
"""
self.observers.append(observer)
def notify(self, event: str, caller: Any, context: Any = None):
"""Notify all registered observers of an event.
:param event: the unique identifier of the event using underscore
spacing and prefixed by a unique identifier per caller
:param caller: the object calling this method
:param context: any object specific to the call and understood by the
client on a per client basis
"""
for obs in self.observers:
obs.notify(event, caller, context)
# source file: zensols/deeplearn/observer.py (package: zensols.deeplearn)
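# Usage sketch (not part of the original API): wire a couple of the observers
# defined above into a `ModelObserverManager` and fire a notification.  The
# event name 'example_epoch_end' and its context are made up for illustration.
def _example_observers() -> pd.DataFrame:
    recorder = RecorderObserver()
    manager = ModelObserverManager()
    manager.add(LogModelObserver())
    manager.add(recorder)
    manager.notify('example_epoch_end', caller=manager, context={'loss': 0.1})
    # each notification is recorded as a (time, event, caller, context) row
    return recorder.events_as_df()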
__author__ = 'Paul Landes'
from typing import Any, Dict, Union, Callable
from dataclasses import dataclass, field, InitVar
from abc import ABCMeta, abstractmethod
from enum import Enum, auto
import sys
import logging
from pathlib import Path
import torch.nn.functional as F
from torch import nn
from zensols.util import APIError
from zensols.config import Writeback, ConfigFactory
from zensols.persist import persisted, PersistableContainer, FileTextUtil
from . import ModelObserverManager
logger = logging.getLogger(__name__)
class DeepLearnError(APIError):
"""Raised for any frame originated error."""
pass
class ModelError(DeepLearnError):
"""Raised for any model related error."""
pass
class EarlyBailError(DeepLearnError):
"""Convenience used for helping debug the network.
"""
def __init__(self):
super().__init__('early bail to debug the network')
class DatasetSplitType(Enum):
"""Indicates an action on the model, which is first trained, validated, then
tested.
*Implementation note:* for now :obj:`test` is used for both testing the
model and ad-hoc prediction
"""
train = auto()
validation = auto()
test = auto()
@dataclass
class NetworkSettings(Writeback, PersistableContainer, metaclass=ABCMeta):
"""A container settings class for network models. This abstract class must
return the fully qualified (with module name) PyTorch model
(:class:`torch.nn.Module`) that goes along with these settings. An instance of
this class is saved in the model file and given back to it when later
restored.
**Note**: Instances of this class are pickled as parts of the results in
:class:`zensols.deeplearn.result.domain.ModelResult`, so they must be able
to serialize. However, they are not used to restore the executor or model,
which are instead, recreated from the configuration for each (re)load (see
the package documentation for more information).
:see: :class:`.ModelSettings`
"""
config_factory: ConfigFactory = field()
"""The configuration factory used to create the module."""
def __post_init__(self):
PersistableContainer.__init__(self)
def _allow_config_adds(self) -> bool:
return True
def create_module(self, *args, **kwargs) -> nn.Module:
"""Create a new instance of the network model.
"""
cls_name = self.get_module_class_name()
resolver = self.config_factory.class_resolver
cls = resolver.find_class(cls_name)
model = cls(self, *args, **kwargs)
# force the model on the CPU to let the executor manage, otherwise, the
# model could be on the GPU but only certain parameters on the CPU
# after load in `load_model_optim_weights'
model = model.cpu()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'created model {cls} on device: {model.device}')
return model
@abstractmethod
def get_module_class_name(self) -> str:
"""Returns the fully qualified class name of the module to create by
:class:`~zensols.deeplearn.model.ModelManager`. This module takes as
the first parameter an instance of this class.
**Important**: This method is not used for nested modules. You must
declare specific class names in the configuration for those nested
class names.
"""
pass
@dataclass
class ActivationNetworkSettings(NetworkSettings):
"""A network settings that contains a activation setting and creates a
activation layer.
"""
activation: Union[Callable, nn.Module, str] = field()
"""The function between all layers, or ``None`` for no activation.
"""
def _set_option(self, name: str, value: Any):
super()._set_option(name, value)
if name == 'activation' and hasattr(self, '_activation_function'):
self._activation_function.clear()
@property
@persisted('_activation_function', transient=True)
def activation_function(self) -> Callable:
if isinstance(self.activation, str):
return self.get_activation_function(self.activation)
else:
return self.activation
@staticmethod
def get_activation_function(activation: str):
if activation == 'relu':
activation = F.relu
elif activation == 'leaky_relu':
activation = F.leaky_relu
elif activation == 'softmax':
activation = F.softmax
elif activation == 'sigmoid':
activation = nn.Sigmoid()
elif activation is None:
activation = None
else:
raise ModelError(f'Unknown activation function: {activation}')
return activation
def __str__(self):
return f'{super().__str__()}, activation={self.activation}'
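# Usage sketch (not part of the original API): resolve activation functions by
# name with the static helper above; unknown names raise a `ModelError`.
def _example_activation_lookup() -> None:
    assert ActivationNetworkSettings.get_activation_function('relu') is F.relu
    sigmoid = ActivationNetworkSettings.get_activation_function('sigmoid')
    assert isinstance(sigmoid, nn.Sigmoid)
    assert ActivationNetworkSettings.get_activation_function(None) is None
    try:
        ActivationNetworkSettings.get_activation_function('tanh')
    except ModelError:
        # 'tanh' is not one of the supported names in the mapping above
        pass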
@dataclass
class DropoutNetworkSettings(NetworkSettings):
"""A network settings that contains a dropout setting and creates a dropout
layer.
"""
dropout: float = field()
"""The droput used in all layers or ``None`` to disable."""
def _set_option(self, name: str, value: Any):
super()._set_option(name, value)
if name == 'dropout' and hasattr(self, '_dropout_layer'):
self._dropout_layer().p = value
@property
@persisted('_dropout_layer', transient=True)
def dropout_layer(self):
if self.dropout is not None:
return nn.Dropout(self.dropout)
@dataclass
class BatchNormNetworkSettings(NetworkSettings):
"""A network settings that contains a batchnorm setting and creates a batchnorm
layer.
"""
batch_norm_d: int = field()
"""The dimension of the batch norm or ``None`` to disable. Based on this one
of the following is used as a layer:
* :class:`torch.nn.BatchNorm1d`
* :class:`torch.nn.BatchNorm2d`
* :class:`torch.nn.BatchNorm3d`
"""
batch_norm_features: int = field()
"""The number of features to use in the batch norm layer."""
@staticmethod
def create_batch_norm_layer(batch_norm_d: int, batch_norm_features: int):
cls = {None: None,
1: nn.BatchNorm1d,
2: nn.BatchNorm2d,
3: nn.BatchNorm3d}[batch_norm_d]
if cls is not None:
if batch_norm_features is None:
raise ModelError('Missing batch norm features')
return cls(batch_norm_features)
def create_new_batch_norm_layer(self, batch_norm_d: int = None,
batch_norm_features: int = None):
if batch_norm_d is None:
batch_norm_d = self.batch_norm_d
if batch_norm_features is None:
batch_norm_features = self.batch_norm_features
return self.create_batch_norm_layer(batch_norm_d, batch_norm_features)
@property
@persisted('_batch_norm_layer', transient=True)
def batch_norm_layer(self):
return self.create_new_batch_norm_layer()
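# Usage sketch (not part of the original API): the static factory above maps a
# dimension to the matching `torch.nn.BatchNorm*d` class.
def _example_batch_norm_factory() -> None:
    layer = BatchNormNetworkSettings.create_batch_norm_layer(
        batch_norm_d=1, batch_norm_features=16)
    assert isinstance(layer, nn.BatchNorm1d)
    # a dimension of ``None`` disables batch normalization
    assert BatchNormNetworkSettings.create_batch_norm_layer(None, None) is None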
@dataclass
class ModelSettings(Writeback, PersistableContainer):
"""This configures and instance of :class:`.ModelExecutor`. This differes from
:class:`.NetworkSettings` in that it configures the model parameters, and
not the neural network parameters.
Another reason for these two separate classes is data in this class is not
needed to rehydrate an instance of :class:`torch.nn.Module`.
The loss function strategy across parameters ``nominal_labels``,
``criterion_class`` and ``optimizer_class``, must be consistent. The
defaults uses nominal labels, which means a single integer, rather than one
hot encoding, is used for the labels. Most loss functions, including the
default :class:`torch.nn.CrossEntropyLoss`` uses nominal labels. The
optimizer defaults to :class:`torch.optim.Adam`.
However, if ``nominal_labels`` is set to ``False``, it is expected that the
label output is a ``Long`` one hot encoding of the class label that must be
decoded with :meth:`.BatchIterator._decode_outcomes` and uses a loss
function such as :class:`torch.nn.BCEWithLogitsLoss`, which applies a
softmax over the output to narrow to a nominal.
If the ``criterion_class`` is left as the default, the class the
corresponding class across these two is selected based on
``nominal_labels``.
**Note**: Instances of this class are pickled as parts of the results in
:class:`zensols.deeplearn.result.domain.ModelResult`, so they must be able
to serialize. However, they are not used to restore the executor or model,
which are instead, recreated from the configuration for each (re)load (see
the package documentation for more information).
:see: :class:`.NetworkSettings`
"""
model_name: str = field()
"""A human readable name for the model."""
path: Path = field()
"""The path to save and load the model."""
learning_rate: float = field()
"""Learning_rate used for the gradient descent step (done in the optimzer).
"""
epochs: int = field()
"""The number of epochs to train the network."""
append_model_path: str = field(default=None)
"""Whether and how to append the model's name to the end of :obj:`path`. If
this value is ``verbatim``, append the model name as provided with
:obj:`model_name`, if ``normalize`` use :meth:`normalize_name` to normalize
it, and if ``None`` do not append anything.
"""
max_consecutive_increased_count: int = field(default=sys.maxsize)
"""The maximum number of times the validation loss can increase per epoch
before the executor "gives up" and early stops training.
"""
nominal_labels: bool = field(default=True)
"""``True`` if using numbers to identify the class as an enumeration rather
than a one hot encoded array.
"""
batch_iteration_class_name: InitVar[str] = field(default=None)
"""A string fully qualified class name of type :class:`.BatchIterator`. This
must be set to a class such as :class:`.ScoredBatchIterator` to handle
discrete states in the output layer such as terminating CRF states. The
default is :class:`.BatchIterator`, which expects continuous output layers.
"""
criterion_class_name: InitVar[str] = field(default=None)
"""The loss function class name (see class doc)."""
optimizer_class_name: InitVar[str] = field(default=None)
"""The optimization algorithm class name (see class doc)."""
optimizer_params: Dict[str, Any] = field(default=None)
"""The parameters given as ``**kwargs`` when creating the optimizer. Do
**not** add the learning rate, instead see :obj:`learning_rate`."""
clip_gradient_threshold: float = field(default=None)
"""Parameters passed to :func:`torch.nn.utils.clip_grad_value_` to clip
gradients above this threshold.
"""
scale_gradient_params: Dict[str, Union[float, bool]] = field(default=None)
"""Parameters passed to :func:`torch.nn.utils.clip_grad_norm_` to scale
the gradient norm.
"""
scheduler_class_name: str = field(default=None)
"""The fully qualified class name of the learning rate scheduler used for the
optimizer (if not ``None``) such as:
* :class:`torch.optim.lr_scheduler.StepLR` or,
* :class:`torch.optim.lr_scheduler.ReduceLROnPlateau`.
:see: :obj:`scheduler_params`
"""
scheduler_params: Dict[str, Any] = field(default=None)
"""The parameters given as ``**kwargs`` when creating the scheduler (if any).
:see: :obj:`scheduler_class_name`
"""
reduce_outcomes: str = field(default='argmax')
"""The method by which the labels and output is reduced. The output is
optionally reduced, which is one of the following:
* ``argmax``: uses the index of the largest value,
which is used for classification models and the
default
* ``softmax``: just like ``argmax`` but applies a
softmax
* ``none``: return the identity.
"""
shuffle_training: bool = field(default=False)
"""If ``True`` shuffle the training data set split before the training process
starts. The shuffling only happens once for all epochs.
"""
batch_limit: Union[int, float] = field(default=sys.maxsize)
"""The max number of batches to train, validate and test on, which is useful
for limiting while debugging; defaults to ``sys.maxsize``. If this value is a
float, it is assumed to be a number between [0, 1] and the number of
batches is multiplied by the value.
"""
batch_iteration: str = field(default='cpu')
"""How the batches are buffered, which is one of:
* ``gpu``, buffers all data in the GPU
* ``cpu``, which means keep all batches in CPU memory (the default)
* ``buffered`` which means to buffer only one batch at a time (only
for *very* large data).
"""
prediction_mapper_name: str = field(default=None)
"""Creates data points from a client for the purposes of prediction. This
value is the string class name of an instance of :class:`.PredictionMapper`
used to create predictions. While optional, if not set, ad-hoc predictions
(i.e. from the command line) can not be created.
Instances of :class:`.PredictionMapper` are created and managed in the
:class:`~zensols.deeplearn.model.ModelFacade`.
"""
cache_batches: bool = field(default=True)
"""If ``True`` cache unthawed/processed batches when possible."""
gc_level: int = field(default=0)
"""The frequency by with the garbage collector is invoked. The *higher* the
value, the more often it will be run during training, testing and
validation.
"""
observer_manager: ModelObserverManager = field(
default_factory=ModelObserverManager)
"""The model observer used by the entire train, test, validation process.
"""
def __post_init__(self,
batch_iteration_class_name: str,
criterion_class_name: str,
optimizer_class_name: str):
if batch_iteration_class_name is None:
self.batch_iteration_class_name = 'zensols.deeplearn.model.BatchIterator'
else:
self.batch_iteration_class_name = batch_iteration_class_name
if criterion_class_name is None:
if self.nominal_labels:
self.criterion_class_name = 'torch.nn.CrossEntropyLoss'
else:
self.criterion_class_name = 'torch.nn.BCEWithLogitsLoss'
else:
self.criterion_class_name = criterion_class_name
if optimizer_class_name is None:
self.optimizer_class_name = 'torch.optim.Adam'
else:
self.optimizer_class_name = optimizer_class_name
if self.append_model_path is not None:
if self.append_model_path == 'verbatim':
self.path = self.path / self.model_name
elif self.append_model_path == 'normalize':
self.path = self.path / self.normalize_name(self.model_name)
else:
raise ModelError("Unknown 'append_model_path' " +
f"value: '{self.append_model_path}'")
@staticmethod
def normalize_name(name: str) -> str:
"""Normalize the name in to a string that is more file system friendly. This
is used for the :obj:`model_name` by API components that write data to
the file system about the model this class configures such as
:class:`~zensols.deeplearn.result.ModelResultManager`.
:return: the normalized name
"""
return FileTextUtil.normalize_text(name)
@property
def normal_model_name(self) -> str:
"""Return the normalized :obj:`model_name` using :meth:`normalize_name`.
"""
return self.normalize_name(self.model_name)
def _allow_config_adds(self) -> bool:
return True
# source file: zensols/deeplearn/domain.py (package: zensols.deeplearn)
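# Usage sketch (not part of the original API): `ModelSettings.normalize_name`
# turns a human readable model name into a file system friendly token; the
# example name is made up.  The default loss/optimizer selection described in
# the class docstring happens in `__post_init__` when no class names are
# configured.
def _example_model_settings_names() -> None:
    norm = ModelSettings.normalize_name('My Fancy Model (v2)')
    # the exact normalization comes from FileTextUtil.normalize_text and is
    # typically a lower case, dash separated token
    assert isinstance(norm, str)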
__author__ = 'Paul Landes'
from typing import List, Dict, Type
import torch
import numpy as np
class TorchTypes(object):
"""A utility class to convert betwen numpy and torch classes. It also provides
metadata for types that make other conversions, such as same precision
cross types (i.e. int64 -> float64).
"""
TYPES = [{'desc': '32-bit floating point',
'name': 'float32',
'types': set([torch.float32, torch.float]),
'numpy': np.float32,
'sparse': torch.sparse.FloatTensor,
'cpu': torch.FloatTensor,
'gpu': torch.cuda.FloatTensor},
{'desc': '64-bit floating point',
'name': 'float64',
'types': set([torch.float64, torch.double]),
'numpy': np.float64,
'sparse': torch.sparse.DoubleTensor,
'cpu': torch.DoubleTensor,
'gpu': torch.cuda.DoubleTensor},
{'desc': '16-bit floating point',
'name': 'float16',
'types': set([torch.float16, torch.half]),
'numpy': np.float16,
'sparse': torch.sparse.HalfTensor,
'cpu': torch.HalfTensor,
'gpu': torch.cuda.HalfTensor},
{'desc': '8-bit integer (unsigned)',
'name': 'uint8',
'types': set([torch.uint8]),
'numpy': np.uint8,
'sparse': torch.sparse.ByteTensor,
'cpu': torch.ByteTensor,
'gpu': torch.cuda.ByteTensor},
{'desc': '8-bit integer (signed)',
'name': 'int8',
'types': set([torch.int8]),
'numpy': np.int8,
'sparse': torch.sparse.CharTensor,
'cpu': torch.CharTensor,
'gpu': torch.cuda.CharTensor},
{'desc': '16-bit integer (signed)',
'name': 'int16',
'types': set([torch.int16, torch.short]),
'numpy': np.int16,
'sparse': torch.sparse.ShortTensor,
'cpu': torch.ShortTensor,
'gpu': torch.cuda.ShortTensor},
{'desc': '32-bit integer (signed)',
'name': 'int32',
'types': set([torch.int32, torch.int]),
'numpy': np.int32,
'sparse': torch.sparse.IntTensor,
'cpu': torch.IntTensor,
'gpu': torch.cuda.IntTensor},
{'desc': '64-bit integer (signed)',
'name': 'int64',
'types': set([torch.int64, torch.long]),
'numpy': np.int64,
'sparse': torch.sparse.LongTensor,
'cpu': torch.LongTensor,
'gpu': torch.cuda.LongTensor},
{'desc': 'Boolean',
'name': 'bool',
'types': set([torch.bool]),
'numpy': bool,
'cpu': torch.BoolTensor,
'gpu': torch.cuda.BoolTensor}]
"""A list of dicts containig conversions between types."""
NAME_TO_TYPE = {t['name']: t for t in TYPES}
"""A map of type to metadata."""
FLOAT_TO_INT = {torch.float16: torch.int16,
torch.float32: torch.int32,
torch.float64: torch.int64}
INT_TO_FLOAT = {torch.int16: torch.float16,
torch.int32: torch.float32,
torch.int64: torch.float64}
FLOAT_TYPES = frozenset(FLOAT_TO_INT.keys())
INT_TYPES = frozenset(INT_TO_FLOAT.keys())
@classmethod
def all_types(self) -> List[dict]:
return self.TYPES
@classmethod
def types(self) -> Dict[str, List[dict]]:
if not hasattr(self, '_types'):
types = {}
for d in self.all_types():
for t in d['types']:
types[t] = d
self._types = types
return self._types
@classmethod
def type_from_string(self, type_name: str) -> torch.dtype:
types = self.NAME_TO_TYPE[type_name]['types']
return next(iter(types))
@classmethod
def get_tensor_class(self, torch_type: torch.dtype, cpu_type: bool) -> Type:
types = self.types()
entry = types[torch_type]
key = 'cpu' if cpu_type else 'gpu'
return entry[key]
@classmethod
def get_sparse_class(self, torch_type: torch.dtype) -> Type:
types = self.types()
entry = types[torch_type]
return entry['sparse']
@classmethod
def get_numpy_type(self, torch_type: torch.dtype) -> Type:
types = self.types()
entry = types[torch_type]
return entry['numpy']
@classmethod
def float_to_int(self, torch_type: torch.dtype) -> Type:
return self.FLOAT_TO_INT[torch_type]
@classmethod
def int_to_float(self, torch_type: torch.dtype) -> Type:
return self.INT_TO_FLOAT[torch_type]
@classmethod
def is_float(self, torch_type: torch.dtype) -> bool:
return torch_type in self.FLOAT_TYPES
@classmethod
def is_int(self, torch_type: Type) -> bool:
return torch_type in self.INT_TYPES
# source file: zensols/deeplearn/torchtype.py (package: zensols.deeplearn)
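# Usage sketch (not part of the original API): a few of the conversions the
# lookup tables above provide, all runnable on the CPU.
def _example_torch_types() -> None:
    assert TorchTypes.get_numpy_type(torch.float32) is np.float32
    assert TorchTypes.float_to_int(torch.float32) is torch.int32
    assert TorchTypes.int_to_float(torch.int64) is torch.float64
    assert TorchTypes.is_float(torch.half) and TorchTypes.is_int(torch.long)
    assert TorchTypes.type_from_string('bool') is torch.bool
    # the CPU tensor class that corresponds to 64-bit floats
    assert TorchTypes.get_tensor_class(torch.double, cpu_type=True) \
        is torch.DoubleTensor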
__author__ = 'plandes'
from typing import Dict, Any, List, Type, Union
from dataclasses import dataclass, field
from enum import Enum, auto
import logging
import gc
import sys
import itertools as it
import copy as cp
from io import TextIOBase
from pathlib import Path
import pandas as pd
from zensols.persist import dealloc, Deallocatable, PersistedWork, persisted
from zensols.config import (
Writable, Configurable, ImportConfigFactory, DictionaryConfig
)
from zensols.cli import (
ApplicationError, Application, ApplicationFactory,
ActionCliManager, Invokable, CliHarness,
)
from zensols.datdesc import DataDescriber
from zensols.dataset import (
SplitStashContainer, StratifiedStashSplitKeyContainer
)
from zensols.deeplearn import DeepLearnError, TorchConfig, ModelSettings
from zensols.deeplearn.model import ModelFacade, ModelError, ModelPacker
from zensols.deeplearn.result import (
ModelResultManager, ModelResultReporter, PredictionsDataFrameFactory,
ModelResultComparer
)
logger = logging.getLogger(__name__)
class InfoItem(Enum):
"""Indicates what information to dump in
:meth:`.FacadeInfoApplication.print_information`.
"""
meta = auto()
param = auto()
model = auto()
config = auto()
batch = auto()
class ClearType(Enum):
"""Indicates what type of data to delete (clear).
"""
none = auto()
batch = auto()
source = auto()
@dataclass
class FacadeApplication(Deallocatable):
"""Base class for applications that use :class:`.ModelFacade`.
"""
CLI_META = {'mnemonic_excludes': {'get_cached_facade', 'create_facade',
'deallocate', 'clear_cached_facade'},
'option_overrides': {'model_path': {'long_name': 'model',
'short_name': None}}}
"""Tell the command line app API to igonore subclass and client specific use
case methods.
"""
config: Configurable = field()
"""The config used to create facade instances."""
facade_name: str = field(default='facade')
"""The client facade."""
# simply copy this field and documentation to the implementation class to
# add model path location (for those subclasses that don't have the
# ``CLASS_INSPECTOR`` class level attribute set (see
# :obj:`~zensols.util.introspect.inspect.ClassInspector.INSPECT_META`);
# this can also be set as a parameter such as with
# :meth:`.FacadeModelApplication.test`
model_path: Path = field(default=None)
"""The path to the model or use the last trained model if not provided.
"""
config_factory_args: Dict[str, Any] = field(default_factory=dict)
"""The arguments given to the :class:`~zensols.config.ImportConfigFactory`,
which could be useful for reloading all classes while debugging.
"""
config_overwrites: Configurable = field(default=None)
"""A configurable that clobbers any configuration in :obj:`config` for those
sections/options set.
"""
cache_global_facade: bool = field(default=True)
"""Whether to globally cache the facade returned by
:meth:`get_cached_facade`.
"""
model_config_overwrites: Configurable = field(default=None)
"""Configuration that is injected into the model loaded by the
:class:`..model.ModelManager`.
"""
def __post_init__(self):
self.dealloc_resources = []
self._cached_facade = PersistedWork(
'_cached_facade', self,
cache_global=self.cache_global_facade)
def _enable_cli_logging(self, facade: ModelFacade = None):
if facade is None:
with dealloc(self.create_facade()) as facade:
self._enable_cli_logging(facade)
else:
facade.progress_bar = False
facade.configure_cli_logging()
def _get_model_path(self) -> Path:
"""Return the path to the model, which defaults to :obj:`model_path`."""
return self.model_path
def create_facade(self) -> ModelFacade:
"""Create a new instance of the facade."""
# we must create a new (non-shared) instance of the facade since it
# will get deallocated after completion.
config = self.config
model_path = self._get_model_path()
if self.config_overwrites is not None:
config = cp.deepcopy(config)
config.merge(self.config_overwrites)
if model_path is None:
cf = ImportConfigFactory(config, **self.config_factory_args)
facade: ModelFacade = cf.instance(self.facade_name)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'created facade: {facade}')
self.dealloc_resources.extend((cf, facade))
else:
if logger.isEnabledFor(logging.INFO):
logger.info(f'loading model from {model_path}')
mconf = ImportConfigFactory(config, **self.config_factory_args)
with dealloc(mconf) as cf:
cls: Type[ModelFacade] = cf.get_class(self.facade_name)
facade: ModelFacade = cls.load_from_path(
path=model_path,
model_config_overwrites=self.model_config_overwrites)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'created facade: {type(facade)} ' +
f'from path: {model_path}')
self.dealloc_resources.append(facade)
return facade
@persisted('_cached_facade')
def get_cached_facade(self) -> ModelFacade:
"""Return a created facade that is cached in this application instance.
"""
return self.create_facade()
def clear_cached_facade(self):
"""Clear any cached facade this application instance.
"""
if self._cached_facade.is_set():
self._cached_facade().deallocate()
self._cached_facade.clear()
def deallocate(self):
super().deallocate()
self._try_deallocate(self.dealloc_resources, recursive=True)
self._cached_facade.deallocate()
@dataclass
class FacadeInfoApplication(FacadeApplication):
"""Contains methods that provide information about the model via the facade.
"""
CLI_META = ActionCliManager.combine_meta(
FacadeApplication,
{'mnemonic_overrides': {'print_information': 'info'},
'option_overrides': {'info_item': {'long_name': 'item',
'short_name': 'i'},
'debug_value': {'long_name': 'execlevel',
'short_name': None}}})
def print_information(self, info_item: InfoItem = None,
model_path: Path = None):
"""Output facade data set, vectorizer and other configuration
information.
:param info_item: what to print
:param model_path: the path to the model or use the last trained model
if not provided
"""
# see :class:`.FacadeApplicationFactory'
def write_batch():
for batch in it.islice(facade.batch_stash.values(), 2):
batch.write()
def write_model_config():
if self.model_path is not None:
# if the model path is given, a facade model was created from it
facade.model_config.write()
else:
# otherwise, use whatever configuration was used in this app
facade.config.write()
# inspect a model specified by a path
self.model_path = model_path
if not hasattr(self, '_no_op'):
with dealloc(self.create_facade()) as facade:
print(f'{facade.model_settings.model_name}:')
fn_map = \
{None: facade.write,
InfoItem.meta: facade.batch_metadata.write,
InfoItem.param: facade.executor.write_settings,
InfoItem.model: facade.executor.write_model,
InfoItem.config: write_model_config,
InfoItem.batch: write_batch}
fn = fn_map.get(info_item)
if fn is None:
raise DeepLearnError(f'No such info item: {info_item}')
fn()
def debug(self, debug_value: int = None):
"""Debug the model.
:param debug_value: the executor debugging level
"""
debug_value = True if debug_value is None else debug_value
with dealloc(self.create_facade()) as facade:
facade.debug(debug_value)
@dataclass
class FacadeResultApplication(FacadeApplication):
"""Contains methods that dump previous results.
"""
CLI_META = ActionCliManager.combine_meta(
FacadeApplication,
{'mnemonic_overrides': {'result_summary': 'summary',
'result_ids': 'resids',
'metrics': 'results',
#'save_results': 'save',
'majority_label_metrics': 'majlab',
'compare_results': 'cmpres'},
'option_overrides': {'include_validation': {'long_name': 'validation',
'short_name': None},
'describe': {'short_name': None},
'out_file': {'long_name': 'outfile',
'short_name': 'o'}}})
def result_summary(self, out_file: Path = None,
include_validation: bool = False):
"""Create a summary of all archived results.
:param out_file: the output path
:param include_validation: whether to include validation results
"""
if out_file is None:
out_file = Path('result-summary.csv')
with dealloc(self.create_facade()) as facade:
rm: ModelResultManager = facade.result_manager
self._enable_cli_logging(facade)
reporter = ModelResultReporter(rm)
reporter.include_validation = include_validation
return reporter.dump(out_file)
def metrics(self, sort: str = 'wF1', res_id: str = None,
out_file: Path = None, describe: bool = False):
"""Write a spreadhseet of label performance metrics for a previously
trained and tested model.
:param sort_col: the column to sort results
:param res_id: the result ID or use the last if not given
:param out_file: the output path
:param describe: whether to create Zensols LaTeX ready results
"""
if describe:
if out_file is None:
out_file = Path('model-results')
with dealloc(self.create_facade()) as facade:
dd: DataDescriber = facade.get_described_results(res_id)
dd.output_dir = out_file
dd.save(include_excel=True)
else:
if out_file is None:
out_file = Path('metrics.csv')
with dealloc(self.create_facade()) as facade:
df: pd.DataFrame = facade.get_predictions_factory(name=res_id).\
metrics_dataframe.sort_values(sort, ascending=False).\
reset_index(drop=True)
df.to_csv(out_file)
self._enable_cli_logging(facade)
logger.info(f'wrote: {out_file}')
def result_ids(self):
"""Show all archived result IDs."""
with dealloc(self.create_facade()) as facade:
rm: ModelResultManager = facade.result_manager
print('\n'.join(rm.results_stash.keys()))
def result(self, res_id: str = None):
"""Show the last results.
:param res_id: the result ID or use the last if not given
"""
with dealloc(self.create_facade()) as facade:
df_fac: PredictionsDataFrameFactory = \
facade.get_predictions_factory(name=res_id)
df_fac.result.write()
def majority_label_metrics(self, res_id: str = None):
"""Show majority label metrics of the test dataset using a previous
result set.
:param res_id: the result ID or use the last if not given
"""
with dealloc(self.create_facade()) as facade:
pred_factory: PredictionsDataFrameFactory = \
facade.get_predictions_factory(name=res_id)
pred_factory.majority_label_metrics.write()
def compare_results(self, res_id_a: str, res_id_b: str):
"""Compare two previous archived result sets.
:param res_id_a: the first result ID to compare
:param res_id_b: the second result ID to compare
"""
with dealloc(self.create_facade()) as facade:
rm: ModelResultManager = facade.result_manager
diff = ModelResultComparer(rm, res_id_a, res_id_b)
diff.write()
@dataclass
class FacadePackageApplication(FacadeApplication):
"""Contains methods that package models.
"""
CLI_META = ActionCliManager.combine_meta(
FacadeApplication,
{'mnemonic_overrides': {'pack_model': 'pack'},
'option_overrides': {'output_model_dir': {'long_name': 'modeldir'}},
'option_excludes': {'packer'}})
packer: ModelPacker = field(default=None)
"""The model packer used to create the model distributions from this app."""
def pack_model(self, res_id: str = None,
output_model_dir: Path = Path('.')):
"""Package a distribution model.
:param res_id: the result ID or use the last if not given
:param output_model_dir: the directory where the packaged model is
written
"""
if res_id is None:
with dealloc(self.create_facade()) as facade:
self._enable_cli_logging(facade)
res_id: str = facade.result_manager.get_last_id()
self._enable_cli_logging()
self.packer.pack(res_id, output_model_dir)
@dataclass
class FacadeBatchApplication(FacadeApplication):
"""Test, train and validate models.
"""
CLI_META = ActionCliManager.combine_meta(
FacadeApplication,
{'option_overrides':
{'clear_type': {'long_name': 'ctype',
'short_name': None},
'clear': {'short_name': None},
'split': {'short_name': None},
'limit': {'short_name': None}},
'mnemonic_overrides':
{'batch': {'option_includes': {'limit', 'clear_type', 'split'}}}})
def _write_batch_splits(self, facade: ModelFacade):
scont: SplitStashContainer = facade.batch_stash.split_stash_container
if hasattr(scont, 'split_container') and \
isinstance(scont.split_container, StratifiedStashSplitKeyContainer):
stash: StratifiedStashSplitKeyContainer = scont.split_container
stash.stratified_write = True
stash.write()
def batch(self, limit: int = None, clear_type: ClearType = ClearType.none,
split: bool = False):
"""Create batches if not already, print statistics on the dataset.
:param clear_type: what to delete to force recreate
:param limit: the number of batches to create
:param split: also write the stratified splits if available
"""
with dealloc(self.create_facade()) as facade:
self._enable_cli_logging(facade)
if clear_type == ClearType.batch:
logger.info('clearing batches')
facade.batch_stash.clear()
elif clear_type == ClearType.source:
facade.batch_stash.clear_all()
facade.batch_stash.clear()
facade.dataset_stash.write()
if split:
self._write_batch_splits(facade)
@dataclass
class FacadeModelApplication(FacadeApplication):
"""Test, train and validate models.
"""
CLI_META = ActionCliManager.combine_meta(
FacadeApplication,
{'option_overrides': {'use_progress_bar': {'long_name': 'progress',
'short_name': 'p'}},
'mnemonic_overrides': {'train_production': 'trainprod',
'early_stop': {'option_includes': {},
'name': 'stop'}}})
use_progress_bar: bool = field(default=False)
"""Display the progress bar."""
def create_facade(self) -> ModelFacade:
"""Create a new instance of the facade."""
facade = super().create_facade()
facade.progress_bar = self.use_progress_bar
facade.configure_cli_logging()
return facade
def train(self):
"""Train the model and dump the results, including a graph of the
train/validation loss.
"""
with dealloc(self.create_facade()) as facade:
facade.train()
facade.persist_result()
def test(self, model_path: Path = None):
"""Test an existing model the model and dump the results of the test.
:param model_path: the path to the model or use the last trained model
if not provided
"""
self.model_path = self._get_model_path()
with dealloc(self.create_facade()) as facade:
facade.test()
def train_test(self):
"""Train, test the model, then dump the results with a graph.
"""
with dealloc(self.create_facade()) as facade:
facade.train()
facade.test()
facade.persist_result()
def train_production(self):
"""Train, test the model on train and test datasets, then dump the
results with a graph.
"""
with dealloc(self.create_facade()) as facade:
facade.train_production()
facade.test()
facade.persist_result()
def early_stop(self):
"""Stops the execution of training the model.
"""
with dealloc(self.create_facade()) as facade:
facade.stop_training()
class FacadePredictApplication(FacadeApplication):
"""An applicaiton that provides prediction funtionality.
"""
CLI_META = ActionCliManager.combine_meta(
FacadeApplication, {'mnemonic_overrides':
{'predictions': {'name': 'preds'}}})
def predictions(self, res_id: str = None, out_file: Path = None):
"""Write predictions to a CSV file.
:param res_id: the result ID or use the last if not given
:param out_file: the output path
"""
with dealloc(self.create_facade()) as facade:
if out_file is None:
model_settings: ModelSettings = facade.executor.model_settings
model_name = model_settings.normal_model_name
out_file = Path(f'{model_name}.csv')
try:
df = facade.get_predictions(name=res_id)
except ModelError as e:
raise ApplicationError(
'Could not predict, probably need to train a model ' +
f'first: {e}') from e
df.to_csv(out_file)
self._enable_cli_logging(facade)
if logger.isEnabledFor(logging.INFO):
logger.info(f'wrote predictions: {out_file}')
@dataclass
class FacadeApplicationFactory(ApplicationFactory):
"""This is a utility class that creates instances of
    :class:`.FacadeApplication`.  It is only needed to create a facade without
    invoking the command line attached to the applications.
It does this by only invoking the first pass applications so all the
correct initialization happens before returning factory artifacts.
    There must be a :obj:`.FacadeApplication.facade_name` entry in the
configuration tied to an instance of :class:`.FacadeApplication`.
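
    Example (an illustrative sketch; the constructor arguments and the
    attributes set through ``app_args`` are application specific assumptions)::

        factory = FacadeApplicationFactory(package_resource='myproj')
        facade = factory.create_facade(app_args={'model_path': Path('model')})
        facade.train()
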
:see: :meth:`create_facade`
"""
def create_facade(self, args: List[str] = None,
app_args: Dict[str, Any] = None) -> ModelFacade:
"""Create the facade tied to the application without invoking the
command line.
:param args: the (would be) command line arguments used to create the
application
        :param app_args: the arguments to set on the facade application
after it is created and before it creates the facade
"""
create_args = ['info']
if args is not None:
create_args.extend(args)
app: Application = self.create(create_args)
inv: Invokable = app.invoke_but_second_pass()[1]
fac_app: FacadeApplication = inv.instance
if app_args is not None:
for k, v in app_args.items():
setattr(fac_app, k, v)
return fac_app.create_facade()
@dataclass
class FacadeApplicationManager(Writable):
"""A very high level client interface making it easy to configure and run
models from an interactive environment such as a Python REPL or a Jupyter
    notebook (see :class:`.JupyterManager`).
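
    Example (a minimal sketch; ``harness`` is an application-specific
    :class:`~zensols.cli.CliHarness` and the configuration section/option
    names are assumptions)::

        mng = FacadeApplicationManager(cli_harness=harness)
        mng.config('model_settings', epochs=5)
        facade = mng.create_facade()
        mng.run()
        mng.cleanup()
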
"""
cli_harness: CliHarness = field()
"""The CLI harness used to create the facade application."""
cli_args_fn: List[str] = field(default=lambda: [])
"""Creates the arguments used to create the facade from the application
factory.
"""
reset_torch: bool = field(default=True)
"""Reset random state for consistency for each new created facade."""
allocation_tracking: Union[bool, str] = field(default=False)
"""Whether or not to track resource/memory leaks. If set to ``stack``, the
stack traces of the unallocated objects will be printed. If set to
``counts`` only the counts will be printed. If set to ``True`` only the
unallocated objects without the stack will be printed.
"""
logger_name: str = field(default='notebook')
"""The name of the logger to use for logging in the notebook itself."""
default_logging_level: str = field(default='WARNING')
"""If set, then initialize the logging system using this as the default
logging level. This is the upper case logging name such as ``WARNING``.
"""
progress_bar_cols: int = field(default=120)
"""The number of columns to use for the progress bar."""
config_overwrites: Dict[str, Dict[str, str]] = field(default_factory=dict)
"""Clobbers any configuration set by :meth:`config` for those
sections/options set.
"""
def __post_init__(self):
if self.allocation_tracking:
Deallocatable.ALLOCATION_TRACKING = True
if self.logger_name is not None:
self.logger = logging.getLogger(self.logger_name)
else:
self.logger = logger
self._facade = None
def _create_facade(self, args: List[str] = None,
app_args: Dict[str, Any] = None) -> ModelFacade:
"""Create the facade tied to the application without invoking the
command line.
:param args: the (would be) command line arguments used to create the
application
        :param app_args: the arguments to set on the facade application
after it is created and before it creates the facade
"""
create_args = ['info']
if args is not None:
create_args.extend(args)
fac_app: FacadeApplication = self.cli_harness.get_instance(create_args)
assert isinstance(fac_app, FacadeApplication)
if app_args is not None:
for k, v in app_args.items():
setattr(fac_app, k, v)
return fac_app.create_facade()
def cleanup(self, include_cuda: bool = True, quiet: bool = False):
"""Report memory leaks, run the Python garbage collector and optionally
empty the CUDA cache.
:param include_cuda: if ``True`` clear the GPU cache
:param quiet: do not report unallocated objects, regardless of the
setting of :obj:`allocation_tracking`
"""
if self.allocation_tracking and not quiet:
            include_stack, only_counts = False, False
            if self.allocation_tracking == 'stack':
                include_stack, only_counts = True, False
            elif self.allocation_tracking == 'counts':
                include_stack, only_counts = False, True
Deallocatable._print_undeallocated(include_stack, only_counts)
self.deallocate()
Deallocatable._deallocate_all()
gc.collect()
if include_cuda:
# free up memory in the GPU
TorchConfig.empty_cache()
def deallocate(self):
"""Deallocate all resources in the CLI factory if it exists."""
if self._facade is not None:
if self.logger.isEnabledFor(logging.INFO):
self.logger.info('deallocating old factory')
self._facade.deallocate()
self._facade = None
def config(self, section: str, **kwargs):
"""Add overwriting configuration used when creating the facade.
:param section: the section to be overwritten (or added)
:param kwargs: the key/value pairs used as the section data to
overwrite
:see: :meth:`create_facade`
"""
if section not in self.config_overwrites:
self.config_overwrites[section] = {}
self.config_overwrites[section].update(kwargs)
def clear(self):
"""Clear all post create configuration set with :meth:`config`."""
self.config_overwrites.clear()
def create_facade(self, *args, **kwargs) -> ModelFacade:
"""Create and return a facade. This deallocates and cleans up state
from any previous facade creation as a side effect.
:param args: given to the :obj:`cli_args_fn` function to create
arguments passed to the CLI
"""
if len(self.config_overwrites) > 0:
dconf = DictionaryConfig(self.config_overwrites)
app_args = {'config_overwrites': dconf}
else:
app_args = None
self.deallocate()
# reclaim memory running GC and GPU cache clear
self.cleanup()
try:
# reset random state for consistency of each new test
if self.reset_torch:
TorchConfig.init()
# create a factory that instantiates Python objects
cli_args_fn = self.cli_args_fn(*args, **kwargs)
# create the facade used for this instance
self._facade: ModelFacade = self._create_facade(
cli_args_fn, app_args)
return self._facade
except Exception as e:
try:
# recover the best we can
self.cleanup(quiet=True)
self._facade = None
except Exception:
pass
raise DeepLearnError(f'Could not create facade: {e}') from e
@property
def facade(self) -> ModelFacade:
"""The current facade for this notebook instance.
:return: the existing facade, or that created by :meth:`create_facade`
if it doesn't already exist
"""
if self._facade is None:
self.create_facade()
self._facade.writer = None
return self._facade
def run(self, display_results: bool = True):
"""Train, test and optionally show results.
:param display_results: if ``True``, write and plot the results
"""
try:
facade = self.facade
facade.train()
facade.test()
if display_results:
facade.write_result()
facade.plot_result()
except Exception as e:
try:
facade = None
# recover the best we can
self.cleanup(quiet=True)
except Exception:
pass
raise DeepLearnError('Could not run the model') from e
def show_leaks(self, output: str = 'counts', fail: bool = True):
"""Show all resources/memory leaks in the current facade. First, this
deallocates the facade, then prints any lingering objects using
:class:`~zensols.persist.Deallocatable`.
**Important**: :obj:`allocation_tracking` must be set to ``True`` for
this to work.
:param output: one of ``stack``, ``counts``, or ``tensors``
:param fail: if ``True``, raise an exception if there are any
unallocated references found
"""
if self._facade is None:
raise DeepLearnError('No facade created yet')
if self.allocation_tracking:
self._facade.deallocate()
if output == 'counts':
Deallocatable._print_undeallocated(only_counts=True, fail=fail)
elif output == 'stack':
Deallocatable._print_undeallocated(include_stack=True, fail=fail)
elif output == 'tensors':
TorchConfig.write_in_memory_tensors()
else:
raise DeepLearnError(f'Unknown output type: {output}')
self._facade = None
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_model=False, include_metadata=False,
include_settings=False, **kwargs):
self.facade.write(
depth, writer,
include_model=include_model,
include_metadata=include_metadata,
include_settings=include_settings,
**kwargs)
@dataclass
class JupyterManager(FacadeApplicationManager):
"""A facade application manager that provides additional convenience
functionality.
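
    Example in a notebook cell (illustrative; the harness and the command line
    arguments are application specific)::

        mng = JupyterManager(harness, cli_args_fn=lambda: ['-c', 'etc/my.conf'])
        facade = mng.create_facade()
        mng.run()
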
"""
reduce_logging: bool = field(default=False)
"""Whether to disable most information logging so the progress bar is more
prevalent.
"""
browser_width: int = field(default=95)
"""The width of the browser windows as a percentage."""
def __post_init__(self):
super().__post_init__()
if self.browser_width is not None:
self.set_browser_width(self.browser_width)
@staticmethod
def set_browser_width(width: int = 95):
"""Use the entire width of the browser to create more real estate.
:param width: the width as a percent (``[0, 100]``) to use as the width
in the notebook
"""
from IPython.core.display import display, HTML
html = f'<style>.container {{ width:{width}% !important; }}</style>'
display(HTML(html))
def _init_jupyter(self):
"""Initialize the a Jupyter notebook by configuring the logging system
and setting the progress bar.
"""
if self.reduce_logging:
logging.getLogger('zensols.deeplearn.model.executor.status').\
setLevel(logging.WARNING)
else:
log_level = None
if self.default_logging_level is not None:
log_level = getattr(logging, self.default_logging_level)
# set console based logging
self.facade.configure_jupyter(
log_level=log_level,
progress_bar_cols=self.progress_bar_cols)
def create_facade(self, *args, **kwargs) -> ModelFacade:
facade = super().create_facade(*args, **kwargs)
# initialize jupyter
self._init_jupyter()
        return facade
# --- end of file: zensols/deeplearn/cli/app.py (zensols.deeplearn-1.8.1) ---
__author__ = 'Paul Landes'
import logging
from typing import Tuple
from dataclasses import dataclass, InitVar
import pandas as pd
import torch
from zensols.deeplearn.batch import (
BatchError,
BatchFeatureMapping,
BatchStash,
DataPoint,
Batch,
)
from zensols.deeplearn.dataframe import DataframeFeatureVectorizerManager
logger = logging.getLogger(__name__)
@dataclass
class DataframeBatchStash(BatchStash):
"""A stash used for batches of data using :class:`.DataframeBatch` instances.
This stash uses an instance of :class:`.DataframeFeatureVectorizerManager`
to vectorize the data in the batches.
"""
@property
def feature_vectorizer_manager(self) -> DataframeFeatureVectorizerManager:
managers = tuple(self.vectorizer_manager_set.values())
if len(managers) != 1:
            raise BatchError('Expected only one vectorizer manager but got: ' +
                             str(tuple(self.vectorizer_manager_set.keys())))
vec_mng = managers[0]
if not isinstance(vec_mng, DataframeFeatureVectorizerManager):
raise BatchError(
'Expected class of type DataframeFeatureVectorizerManager ' +
f'but got {vec_mng.__class__}')
return vec_mng
@property
def label_shape(self) -> Tuple[int]:
return self.feature_vectorizer_manager.label_shape
@property
def flattened_features_shape(self) -> Tuple[int]:
vec_mng = self.feature_vectorizer_manager
return vec_mng.get_flattened_features_shape(self.decoded_attributes)
@dataclass
class DataframeDataPoint(DataPoint):
"""A data point used in a batch, which contains a single row of data in the
    Pandas dataframe.  When created, each column of the row is saved as an
    attribute on the instance.
"""
row: InitVar[pd.Series]
def __post_init__(self, row: pd.Series):
        for name, val in row.items():
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'setting attrib: {name}={val}')
setattr(self, name, val)
@dataclass
class DataframeBatch(Batch):
"""A batch of data that contains instances of :class:`.DataframeDataPoint`,
each of which has the row data from the dataframe.
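
    Example (a sketch of typical client access; ``batch_stash`` refers to a
    configured :class:`.DataframeBatchStash`)::

        batch: DataframeBatch = next(iter(batch_stash.values()))
        feats = batch.get_features()   # shape: (batch size, feature size)
        labels = batch.get_labels()    # inherited from Batch
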
"""
def _get_batch_feature_mappings(self) -> BatchFeatureMapping:
"""Use the dataframe based vectorizer manager
"""
df_vec_mng: DataframeFeatureVectorizerManager = \
self.batch_stash.feature_vectorizer_manager
return df_vec_mng.batch_feature_mapping
def get_features(self) -> torch.Tensor:
"""A utility method to a tensor of all features of all columns in the
datapoints.
:return: a tensor of shape (batch size, feature size), where the
*feaure size* is the number of all features vectorized; that
is, a data instance for each row in the batch, is a flattened
set of features that represent the respective row from the
dataframe
"""
def magic_shape(name: str) -> torch.Tensor:
"""Return a tensor that has two dimenions of the data (the first always with
size 1 since it is a row of data).
"""
arr = attrs[name]
if len(arr.shape) == 1:
arr = arr.unsqueeze(dim=1)
return arr
attrs = self.attributes
label_attr = self._get_batch_feature_mappings().label_attribute_name
attr_names = filter(lambda k: k != label_attr, attrs.keys())
feats = tuple(map(magic_shape, attr_names))
        return torch.cat(feats, dim=1)
# --- end of file: zensols/deeplearn/dataframe/batch.py (zensols.deeplearn-1.8.1) ---
__author__ = 'Paul Landes'
import logging
from typing import Tuple, Dict, List, Iterable, Set
from dataclasses import dataclass, field
import sys
from io import TextIOBase
from itertools import chain
from functools import reduce
import operator
import numpy as np
from zensols.persist import persisted
from zensols.config import Writable
from zensols.deeplearn.vectorize import (
VectorizerError,
OneHotEncodedEncodableFeatureVectorizer,
AttributeEncodableFeatureVectorizer,
FeatureVectorizer,
FeatureVectorizerManager,
)
from zensols.deeplearn.batch import (
BatchFeatureMapping,
ManagerFeatureMapping,
FieldFeatureMapping,
)
from zensols.dataframe import DataframeStash
logger = logging.getLogger(__name__)
@dataclass
class DataframeMetadata(Writable):
"""Metadata for a Pandas dataframe.
"""
prefix: str = field()
"""The prefix to use for all vectorizers in the dataframe (i.e. ``adl_``
for the Adult dataset test case example).
"""
label_col: str = field()
"""The column that contains the label/class."""
label_values: Tuple[str] = field()
"""All classes (unique across ``label_col``)."""
continuous: Tuple[str] = field()
"""The list of data columns that are continuous."""
descrete: Dict[str, Tuple[str]] = field()
"""A mapping of label to nominals the column takes for descrete mappings.
"""
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
sp = self._sp(depth)
sp2 = self._sp(depth + 1)
sp3 = self._sp(depth + 2)
writer.write(f'{sp}label: {self.label_col} => ' +
f'{", ".join(self.label_values)}\n')
writer.write(f'{sp}continuous:\n')
for c in self.continuous:
writer.write(f'{sp2}{c}\n')
writer.write(f'{sp}discrete:\n')
for name, labels in self.descrete.items():
writer.write(f'{sp2}{name}:\n')
for label in labels:
writer.write(f'{sp3}{label}\n')
@dataclass
class DataframeFeatureVectorizerManager(FeatureVectorizerManager, Writable):
"""A pure instance based feature vectorizer manager for a Pandas dataframe.
All vectorizers used in this vectorizer manager are dynamically allocated
and attached.
This class not only acts as the feature manager itself to be used in a
:class:`~zensols.deeplearn.vectorize.FeatureVectorizerManager`, but also
provides a batch mapping to be used in a
:class:`~zensols.deeplearn.batch.BatchStash`.
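
    Example (illustrative; ``stash`` refers to a configured
    :class:`.DataframeBatchStash`)::

        vec_mng = stash.feature_vectorizer_manager
        vec_mng.label_shape                          # e.g. (<number of classes>,)
        vec_mng.get_flattened_features_shape(None)   # include all columns
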
"""
prefix: str = field()
"""The prefix to use for all vectorizers in the dataframe (i.e. ``adl_``
for the Adult dataset test case example).
"""
label_col: str = field()
"""The column that contains the label/class."""
stash: DataframeStash = field()
"""The stash that contains the dataframe."""
include_columns: Tuple[str] = field(default=None)
"""The columns to be included, or if ``None`` (the default), all columns
are used as features.
"""
exclude_columns: Tuple[str] = field(default=None)
"""The columns to be excluded, or if ``None`` (the default), no columns are
excluded as features.
"""
@property
@persisted('_dataset_metadata')
def dataset_metadata(self) -> DataframeMetadata:
"""Create a metadata from the data in the dataframe.
"""
logger.debug('constructing metadata')
df = self.stash.dataframe
skip = set([self.stash.split_col, self.label_col])
labels = tuple(df[self.label_col].unique())
cont = set()
desc = {}
        for name, dtype in df.dtypes.items():
if name in skip:
continue
            if dtype == object:
desc[name] = tuple(df[name].unique())
else:
cont.add(name)
return DataframeMetadata(
self.prefix, self.label_col, labels, cont, desc)
@property
def label_attribute_name(self) -> str:
"""Return the label attribute.
"""
return f'{self.prefix}label'
def column_to_feature_id(self, col: str) -> str:
"""Generate a feature id from the column name. This just attaches the prefix
to the column name.
"""
return f'{self.prefix}{col}'
def _filter_columns(self, cols: Tuple[str]) -> Iterable[str]:
"""Return an interable of the columns to use as features based on
``include_columns`` and ``exclude_columns``.
"""
def inc_vec(col: str):
inc = incs is None or col in incs
exc = excs is not None and col in excs
return inc and not exc
incs = self.include_columns
excs = self.exclude_columns
return filter(inc_vec, cols)
def _create_label_vectorizer(self) -> FeatureVectorizer:
"""Create a vectorizer for the label/class of the dataframe.
"""
label_col = self.label_attribute_name
label_values = self.dataset_metadata.label_values
logger.debug(f'creating label {label_col} => {label_values}')
return OneHotEncodedEncodableFeatureVectorizer(
name=str(self.__class__),
config_factory=self.config_factory,
manager=self,
feature_id=label_col,
categories=label_values,
optimize_bools=False)
def _create_feature_vectorizers(self) -> List[FeatureVectorizer]:
"""Create a vectorizer, one for each column/feature, included as a feature
type based on :meth:`_filter_columns`.
"""
vecs = []
meta = self.dataset_metadata
for col in meta.continuous:
vec = AttributeEncodableFeatureVectorizer(
manager=self,
name=str(self.__class__),
config_factory=self.config_factory,
feature_id=self.column_to_feature_id(col))
vecs.append(vec)
for col in meta.descrete.keys():
vec = OneHotEncodedEncodableFeatureVectorizer(
manager=self,
name=str(self.__class__),
config_factory=self.config_factory,
feature_id=self.column_to_feature_id(col),
categories=meta.descrete[col],
optimize_bools=True)
vecs.append(vec)
return vecs
def _create_vectorizers(self) -> Dict[str, FeatureVectorizer]:
"""Create a mapping of feature id to vectorizer used across all dataframe
        columns.
"""
logger.debug('create vectorizers')
vectorizers = super()._create_vectorizers()
vecs = [self._create_label_vectorizer()]
vecs.extend(self._create_feature_vectorizers())
for vec in vecs:
logger.debug(f'adding vectorizer: {vec.feature_id}')
vectorizers[vec.feature_id] = vec
return vectorizers
@property
@persisted('_batch_feature_mapping')
def batch_feature_mapping(self) -> BatchFeatureMapping:
"""Return the mapping for :class:`zensols.deeplearn.batch.Batch` instances.
"""
        def create_field_mapping(col: str) -> FieldFeatureMapping:
feature_id = self.column_to_feature_id(col)
return FieldFeatureMapping(col, feature_id, True)
meta = self.dataset_metadata
cols = (meta.continuous, meta.descrete.keys())
        fields = list(map(create_field_mapping,
chain.from_iterable(
map(self._filter_columns, cols))))
fields.append(FieldFeatureMapping(
self.label_col, self.label_attribute_name, True))
return BatchFeatureMapping(
self.label_col,
[ManagerFeatureMapping(self.name, fields)])
@property
def label_shape(self) -> Tuple[int]:
"""Return the shape if all vectorizers were used.
"""
label_attr = self.batch_feature_mapping.label_feature_id
for k, v in self.items():
if k == label_attr:
return (sum(filter(lambda n: n > 0, v.shape)),)
def get_flattened_features_shape(self, attribs: Set[str]) -> Tuple[int]:
"""Return the shape if all vectorizers were used.
"""
bmapping = self.batch_feature_mapping
label_feature_id = bmapping.label_feature_id
n_flat_neurons = 0
for feature_id, v in self.items():
_, field_map = bmapping.get_field_map_by_feature_id(feature_id)
if field_map is None:
s = f'no feature: {feature_id} in vectorizer {self.name}'
raise VectorizerError(s)
attr = field_map.attr
if feature_id != label_feature_id and \
(attribs is None or attr in attribs):
n = reduce(operator.mul, filter(lambda n: n > 0, v.shape))
n_flat_neurons += n
return (n_flat_neurons,)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
sp = self._sp(depth)
sp2 = self._sp(depth + 1)
writer.write(f'{sp}{self.name}:\n')
writer.write(f'{sp2}included: {self.include_columns}\n')
writer.write(f'{sp2}excluded: {self.exclude_columns}\n')
writer.write(f'{sp2}batch feature metadata:\n')
        self.batch_feature_mapping.write(depth + 2, writer)
# --- end of file: zensols/deeplearn/dataframe/vectorize.py (zensols.deeplearn-1.8.1) ---
__author__ = 'Paul Landes'
from typing import List, Tuple
from dataclasses import dataclass, field
import logging
from pathlib import Path
import math
import matplotlib.pyplot as plt
import numpy as np
from zensols.deeplearn import DatasetSplitType
from . import ModelResult
logger = logging.getLogger(__name__)
@dataclass
class ModelResultGrapher(object):
"""Graphs the an instance of ``ModelResult``. This creates subfigures,
one for each of the results given as input to ``plot``.
:param name: the name that goes in the title of the graph
:param figsize: the size of the top level figure (not the panes)
:param split_types: the splits to graph (list of size 2); defaults to
``[DatasetSplitType.train, DatasetSplitType.validation]``
:param title: the title format used to create each sub pane graph.
    :see: :meth:`plot`
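
    Example (illustrative; ``model_result`` is a previously obtained
    :class:`.ModelResult` with training and validation results)::

        grapher = ModelResultGrapher(name='run 1', save_path=Path('loss.png'))
        grapher.plot([model_result])
        grapher.save()
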
"""
name: str = field(default=None)
figsize: Tuple[int, int] = (15, 5)
split_types: List[DatasetSplitType] = None
title: str = None
save_path: Path = field(default=None)
def __post_init__(self):
if self.split_types is None:
self.split_types = [DatasetSplitType.train,
DatasetSplitType.validation]
if self.title is None:
self.title = ('Figure {r.name} ' +
'(lr={learning_rate:e}, ' +
'{r.last_test.converged_epoch.metrics})')
def _render_title(self, cont: ModelResult) -> str:
lr = cont.model_settings['learning_rate']
return self.title.format(**{'r': cont, 'learning_rate': lr})
def plot(self, containers: List[ModelResult]):
name = containers[0].name if self.name is None else self.name
ncols = min(2, len(containers))
nrows = math.ceil(len(containers) / ncols)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'plot grid: {nrows} X {ncols}')
fig, axs = plt.subplots(
ncols=ncols, nrows=nrows, sharex=True, figsize=self.figsize)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'ax type: {type(axs)}')
if not isinstance(axs, np.ndarray):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('adding axs dim')
axs = np.array([[axs]])
if axs.shape == (ncols,):
axs = np.expand_dims(axs, axis=0)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'ax shape: {axs.shape}')
fig.suptitle(f'Training and Validation Learning Rates: {name}')
handles = []
row = 0
col = 0
for i, cont in enumerate(containers):
logger.debug(f'plotting {cont}')
es = tuple(
map(lambda n: (n.name.capitalize(), cont.dataset_result[n]),
self.split_types))
x = range(len(es[0][1].losses))
ax = axs[row][col]
ax.plot(x, es[0][1].losses, color='r', label=es[0][0])
ax.plot(x, es[1][1].losses, color='b', label=es[1][0])
ax.set_title(self._render_title(cont))
handles.append(ax)
ax.set(xlabel='Epochs', ylabel='Loss')
col += 1
if col == ncols:
col = 0
row += 1
plt.legend(tuple(map(lambda e: e[0], es)))
def show(self):
plt.show()
def save(self):
if logger.isEnabledFor(logging.INFO):
logger.info(f'saving results graph to {self.save_path}')
plt.savefig(self.save_path)
        plt.close()
# --- end of file: zensols/deeplearn/result/plot.py (zensols.deeplearn-1.8.1) ---
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import (
List, Dict, Set, Iterable, Any, Type, Tuple, Callable, ClassVar
)
from dataclasses import dataclass, field, InitVar
from enum import Enum
from abc import ABCMeta, abstractmethod
import logging
import sys
import copy as cp
from collections import OrderedDict
from itertools import chain
from datetime import datetime
from io import TextIOBase
import math
import sklearn.metrics as mt
import numpy as np
from torch import Tensor
from zensols.config import Configurable, Dictable
from zensols.deeplearn import (
DeepLearnError, DatasetSplitType, ModelSettings, NetworkSettings
)
from zensols.deeplearn.batch import Batch
logger = logging.getLogger(__name__)
class ModelResultError(DeepLearnError):
""""Thrown when results can not be compiled or computed."""
pass
class NoResultError(ModelResultError):
"""Convenience used for helping debug the network.
"""
def __init__(self, cls: Type):
super().__init__(f'{cls}: no results available')
class ModelType(Enum):
"""The type of model give by the type of its output.
"""
PREDICTION = 0
CLASSIFICTION = 1
RANKING = 2
@dataclass
class Metrics(Dictable):
"""A container class that provides results for data stored in a
:class:`.ResultsContainer`.
"""
labels: np.ndarray = field(repr=False)
"""The labels or ``None`` if none were provided (i.e. during
test/evaluation).
"""
predictions: np.ndarray = field(repr=False)
"""The predictions from the model. This also flattens the predictions in to
a 1D array for the purpose of computing metrics.
"""
@property
def contains_results(self) -> bool:
"""Return ``True`` if this container has results.
"""
return len(self) > 0
def _protect(self, fn: Callable):
if self.contains_results:
return fn()
else:
return math.nan
def __len__(self) -> int:
shape = self.predictions.shape
assert len(shape) == 1
return shape[0]
@dataclass
class PredictionMetrics(Metrics):
"""Real valued prediction results for :obj:`.ModelType.PREDICTION` result.
"""
@property
def root_mean_squared_error(self) -> float:
"""Return the root mean squared error metric.
"""
return self._protect(lambda: math.sqrt(
mt.mean_squared_error(self.labels, self.predictions)))
@property
def mean_absolute_error(self) -> float:
"""Return the mean absolute error metric.
"""
return self._protect(
lambda: mt.mean_absolute_error(self.labels, self.predictions))
@property
def r2_score(self) -> float:
"""Return the R^2 score metric.
"""
return self._protect(
lambda: mt.r2_score(self.labels, self.predictions))
@property
def correlation(self) -> float:
"""Return the correlation metric.
"""
return self._protect(
lambda: np.corrcoef(self.labels, self.predictions)[0][1])
def _get_dictable_attributes(self) -> Iterable[Tuple[str, str]]:
return (('rmse', 'root_mean_squared_error'),
('mae', 'mean_absolute_error'),
('r2', 'r2_score'),
('correlation', 'correlation'))
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line(f'RMSE: {self.root_mean_squared_error:.3f}',
depth, writer)
self._write_line(f'MAE: {self.mean_absolute_error:.3f}', depth, writer)
self._write_line(f'R^2: {self.r2_score:.3f}', depth, writer)
self._write_line(f"correlation: {self.correlation:.3f}", depth, writer)
def __str__(self):
return (f'rmse: {self.root_mean_squared_error:.3f}, ' +
f'mae: {self.mean_absolute_error:.3f}, ' +
f'r2: {self.r2_score:.3f}, ' +
f'corr: {self.correlation:.3f}')
@dataclass
class ScoreMetrics(Metrics):
"""Classification metrics having an f1, precision and recall for a
configured weighted, micro or macro :obj:`average`.
"""
average: str = field()
"""The type of average to apply to metrics produced by this class, which is
one of ``macro`` or ``micro``.
"""
@property
def f1(self) -> float:
"""Return the F1 metric as either the micro or macro based on the
:obj:`average` attribute.
"""
return self._protect(lambda: mt.f1_score(
self.labels, self.predictions, average=self.average))
@property
def precision(self) -> float:
"""Return the precision metric as either the micro or macro based on the
:obj:`average` attribute.
"""
return self._protect(
lambda: mt.precision_score(
self.labels, self.predictions, average=self.average,
# clean up warning for tests: sklearn complains with
# UndefinedMetricWarning even though the data looks good
zero_division=0))
@property
def recall(self) -> float:
"""Return the recall metric as either the micro or macro based on the
:obj:`average` attribute.
"""
return self._protect(lambda: mt.recall_score(
self.labels, self.predictions, average=self.average))
@property
def long_f1_name(self) -> str:
return f'{self.average}-F1'
@property
def short_f1_name(self) -> str:
name = 'm' if self.average == 'micro' else 'M'
return f'{name}F1'
def _get_dictable_attributes(self) -> Iterable[Tuple[str, str]]:
return self._split_str_to_attributes('f1 precision recall')
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line(f'{self.average}: ' +
f'F1: {self.f1:.3f}, ' +
f'precision: {self.precision:.3f}, ' +
f'recall: {self.recall:.3f}', depth, writer)
def __str__(self):
return f'{self.short_f1_name}: {self.f1:.3f}'
@dataclass
class ClassificationMetrics(Metrics):
"""Real valued prediction results for :obj:`.ModelType.CLASSIFICATION`
result.
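
    Example (illustrative)::

        cm = ClassificationMetrics(labels=np.array([0, 1, 1]),
                                   predictions=np.array([0, 1, 0]),
                                   n_outcomes=3)
        cm.accuracy    # 2 of 3 correct
        cm.micro.f1    # micro averaged F1
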
"""
n_outcomes: int = field()
"""The number of outcomes given for this metrics set."""
def _predictions_empty(self):
if self.__len__() == 0:
            return np.nan
@property
def accuracy(self) -> float:
"""Return the accuracy metric (num correct / total).
"""
return self._protect(
lambda: mt.accuracy_score(self.labels, self.predictions))
@property
def n_correct(self) -> int:
"""The number or correct predictions for the classification.
"""
is_eq = np.equal(self.labels, self.predictions)
        return self._protect(lambda: np.count_nonzero(is_eq))
def create_metrics(self, average: str) -> ScoreMetrics:
"""Create a score metrics with the given average.
"""
return ScoreMetrics(self.labels, self.predictions, average)
@property
def micro(self) -> ScoreMetrics:
"""Compute micro F1, precision and recall.
"""
return self.create_metrics('micro')
@property
def macro(self) -> ScoreMetrics:
"""Compute macro F1, precision and recall.
"""
return self.create_metrics('macro')
@property
def weighted(self) -> ScoreMetrics:
"""Compute weighted F1, precision and recall.
"""
return self.create_metrics('weighted')
def _get_dictable_attributes(self) -> Iterable[Tuple[str, str]]:
return self._split_str_to_attributes(
'accuracy n_correct micro macro')
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
if self.n_outcomes == 0:
self._write_line('no results', depth, writer)
else:
self._write_line(f'accuracy: {self.accuracy:.3f} ' +
f'({self.n_correct}/{self.n_outcomes})',
depth, writer)
self.micro.write(depth, writer)
self.macro.write(depth, writer)
self.weighted.write(depth, writer)
def __str__(self):
return str(self.micro)
@dataclass
class ResultsContainer(Dictable, metaclass=ABCMeta):
"""The base class for all metrics containers. It helps in calculating loss,
finding labels, predictions and other utility helpers.
Every container has a start and stop time, which demarcates the duration
the for which the populated metrics were being calculated.
"""
FLOAT_TYPES = [np.float32, np.float64, float]
"""Used to determin the :obj:`model_type`."""
def __post_init__(self):
super().__init__()
self.start_time = None
self.end_time = None
@property
def is_started(self) -> bool:
"""The time at which processing started for the metrics populated in
this container.
:see: meth:`start`
"""
return self.start_time is not None
@property
def is_ended(self) -> bool:
"""The time at which processing ended for the metrics populated in this
container.
:see: meth:`end`
"""
return self.end_time is not None
def start(self) -> datetime:
"""Record the time at which processing started for the metrics populated
in this container.
        :see: :obj:`is_started`
"""
if self.start_time is not None:
            raise ModelResultError(
                f'Container has already started: {self}')
if self.contains_results:
raise ModelResultError(f'Container {self} already has results')
self.start_time = datetime.now()
return self.start_time
def end(self) -> datetime:
"""Record the time at which processing started for the metrics populated
in this container.
:see: obj:`is_ended`
"""
if self.start_time is None:
raise ModelResultError(f'Container has not yet started: {self}')
self._assert_finished(False)
self.end_time = datetime.now()
return self.end_time
def _assert_finished(self, should_be: bool):
"""Make sure we've either finished or not based on ``should_be``."""
if should_be:
if not self.is_ended:
raise ModelResultError(f'Container is not finished: {self}')
else:
if self.is_ended:
raise ModelResultError(
f'Container has finished: {self}')
def clone(self) -> ResultsContainer:
"""Return a clone of the current container. Sub containers (lists) are
deep copied in sub classes, but everything is shallow copied.
This is needed to create a temporary container to persist whose
:meth:`end` gets called by the
:class:`~zensols.deeplearn.model.ModelExecutor`.
"""
return cp.copy(self)
@property
def contains_results(self):
"""``True`` if this container has results.
"""
return len(self) > 0
@property
def min_loss(self) -> float:
"""The lowest loss recorded in this container.
"""
self._assert_finished(True)
return min(self.losses)
@property
def max_loss(self) -> float:
"""The highest loss recorded in this container.
"""
self._assert_finished(True)
return max(self.losses)
@property
def ave_loss(self) -> float:
"""The average loss of this result set.
"""
self._assert_finished(True)
losses = self.losses
d = len(losses)
return (sum(losses) / d) if d > 0 else 0
@property
def n_outcomes(self) -> int:
"""The number of outcomes.
"""
return self.predictions.shape[0]
@property
def n_iterations(self) -> int:
"""The number of iterations, which is different from the
:obj:`n_outcomes` since a single (say training) iteration can produce
multiple outcomes (for example sequence classification).
"""
return self._get_iterations()
@property
def model_type(self) -> ModelType:
"""The type of the model based on what whether the outcome data is a
float or integer.
"""
arr = self.predictions
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'outcomes type: {arr.dtype}')
if arr.dtype in self.FLOAT_TYPES:
return ModelType.PREDICTION
else:
return ModelType.CLASSIFICTION
@abstractmethod
def _get_labels(self) -> np.ndarray:
pass
@abstractmethod
def _get_predictions(self) -> np.ndarray:
pass
@abstractmethod
def _get_iterations(self) -> int:
pass
@property
def labels(self) -> np.ndarray:
"""The labels or ``None`` if none were provided (i.e. during
test/evaluation).
"""
self._assert_finished(True)
return self._get_labels()
@property
def predictions(self) -> np.ndarray:
"""The predictions from the model. This also flattens the predictions
in to a 1D array for the purpose of computing metrics.
:return: the flattened predictions
"""
self._assert_finished(True)
return self._get_predictions()
@property
def prediction_metrics(self) -> PredictionMetrics:
"""Return prediction based metrics.
"""
return PredictionMetrics(self.labels, self.predictions)
@property
def classification_metrics(self) -> ClassificationMetrics:
"""Return classification based metrics.
"""
return ClassificationMetrics(
self.labels, self.predictions, self.n_outcomes)
@property
def metrics(self) -> Metrics:
"""Return the metrics based on the :obj:`model_type`.
"""
mtype = self.model_type
if mtype == ModelType.CLASSIFICTION:
metrics = self.classification_metrics
elif mtype == ModelType.PREDICTION:
metrics = self.prediction_metrics
else:
            raise ModelResultError(f'Unknown or unsupported type: {mtype}')
return metrics
def _get_dictable_attributes(self) -> Iterable[Tuple[str, str]]:
return self._split_str_to_attributes('n_outcomes metrics')
def __str__(self):
return (f'{self.__class__.__name__}: ' +
f'start: {self.start_time}, end: {self.end_time}')
def __repr__(self):
return self.__str__()
@dataclass
class EpochResult(ResultsContainer):
"""Contains results recorded from an epoch of a neural network model. This
is during a training/validation or test cycle.
Note that there is a terminology difference between what the model and the
result set call outcomes. For the model, outcomes are the mapped/refined
results, which are usually the argmax of the softmax of the logits. For
results, these are the predictions of the given data to be compared against
the gold labels.
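
    A minimal sketch of how an executor populates an epoch; ``batch``,
    ``loss``, ``labels``, ``preds`` and ``outputs`` come from the training
    loop and are assumptions here::

        epoch = EpochResult(index=0, split_type=DatasetSplitType.train)
        epoch.start()
        epoch.update(batch, loss, labels, preds, outputs)
        epoch.end()
        epoch.metrics
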
"""
_RES_ARR_NAMES = 'label pred'.split()
index: int = field()
"""The Nth epoch of the run (across training, validation, test)."""
split_type: DatasetSplitType = field()
"""The name of the split type (i.e. ``train`` vs ``test``)."""
batch_losses: List[float] = field(default_factory=list)
"""The losses generated from each iteration of the epoch."""
batch_ids: List[int] = field(default_factory=list)
"""The ID of the batch from each iteration of the epoch."""
n_data_points: List[int] = field(default_factory=list)
"""The number of data points for each batch for the epoch."""
def __post_init__(self):
super().__post_init__()
self._predictions = []
self._labels = []
self._outputs = []
def update(self, batch: Batch, loss: Tensor, labels: Tensor, preds: Tensor,
outputs: Tensor):
"""Add another set of statistics, predictions and gold labels to
:obj:`prediction_updates`.
:param batch: the batch on which the stats/data where trained, tested
or validated; used to update the loss as a multiplier on
its size
:param loss: the loss returned by the loss function
:param labels: the gold labels, or ``None`` if this is a prediction run
:param preds: the predictions, or ``None`` for scored models (see
:obj:`prediction_updates`)
"""
self._assert_finished(False)
shape = preds.shape if labels is None else labels.shape
assert shape is not None
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'{self.index}:{self.split_type}: ' +
f'update batch: {batch.id}, ' +
f'label_shape: {shape}')
# object function loss; 'mean' is the default 'reduction' parameter for
# loss functions; we can either muliply it back out or use 'sum' in the
# criterion initialize
if loss is None:
self.batch_losses.append(-1)
else:
self.batch_losses.append(loss.item() * float(batch.size()))
# batches are always the first dimension
self.n_data_points.append(shape[0])
# add predictions that exist
if preds is not None:
self._predictions.append(preds.numpy())
# see end() comments: without predictions, labels are useless
if labels is not None:
self._labels.append(labels.numpy())
if outputs is not None:
self._outputs.append(outputs.numpy())
self.batch_ids.append(batch.id)
def end(self):
super().end()
labs = preds = None
# if there are no predictions (the case from the training phase), don't
# include any data since labels by themselves are useless for all use
# cases (metrics, scoring, certainty assessment, and any analysis etc)
if len(self._predictions) > 0:
if len(self._labels) > 0:
labs = tuple(map(lambda arr: arr.flatten(), self._labels))
labs = np.concatenate(labs, axis=0)
preds = tuple(map(lambda arr: arr.flatten(), self._predictions))
preds = np.concatenate(preds, axis=0)
if labs is None:
labs = np.array([], dtype=np.int64)
if preds is None:
preds = np.array([], dtype=np.int64)
self._all_labels = labs
self._all_predictions = preds
def clone(self) -> ResultsContainer:
cl = cp.copy(self)
for attr in 'batch_losses batch_ids n_data_points'.split():
setattr(cl, attr, list(getattr(self, attr)))
return cl
@property
def batch_predictions(self) -> List[np.ndarray]:
"""The batch predictions given in the shape as output from the model.
"""
return self._predictions
@property
def batch_labels(self) -> List[np.ndarray]:
"""The batch labels given in the shape as output from the model.
"""
return self._labels
@property
def batch_outputs(self) -> List[np.ndarray]:
return self._outputs
def _get_labels(self) -> np.ndarray:
return self._all_labels
def _get_predictions(self) -> np.ndarray:
return self._all_predictions
def _get_iterations(self) -> int:
        return len(self.batch_losses)
@property
def losses(self) -> List[float]:
"""Return the loss for each epoch of the run. If used on a
``EpocResult`` it is the Nth iteration.
"""
return self.batch_losses
def _get_dictable_attributes(self) -> Iterable[Tuple[str, str]]:
return chain.from_iterable(
(super()._get_dictable_attributes(),
self._split_str_to_attributes('index')))
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_metrics: bool = False):
        bids = ','.join(map(str, self.batch_ids))
dps = ','.join(map(str, self.n_data_points))
self._write_line(f'index: {self.index}', depth, writer)
self._write_line(f'batch IDs: {bids}', depth, writer, True)
self._write_line(f'data point count per batch: {dps}',
depth, writer, True)
if include_metrics:
self._write_line('metrics:', depth, writer)
self._write_dict(self.asdict()['metrics'], depth + 1, writer)
def __len__(self):
return len(self.batch_ids)
def __str__(self):
s = super().__str__()
return f'{s}, type: {self.split_type}'
@dataclass
class DatasetResult(ResultsContainer):
"""Contains results for a dataset, such as training, validating and test.
"""
def __post_init__(self):
super().__post_init__()
self._results: List[EpochResult] = []
def append(self, epoch_result: EpochResult):
self._assert_finished(False)
self._results.append(epoch_result)
@property
def results(self) -> List[EpochResult]:
return self._results
@property
def contains_results(self):
return any(map(lambda r: r.contains_results, self.results))
def end(self):
super().end()
if self.contains_results:
self.start_time = self.results[0].start_time
self.end_time = self.results[-1].end_time
def clone(self) -> ResultsContainer:
cl = cp.copy(self)
cl._results = []
for er in self.results:
cl._results.append(er.clone())
return cl
@property
def losses(self) -> List[float]:
"""Return the loss for each epoch of the run. If used on a
``EpocResult`` it is the Nth iteration.
"""
return tuple(map(lambda r: r.ave_loss, self.results))
def _cat_arrs(self, attr: str) -> np.ndarray:
arrs = tuple(map(lambda r: getattr(r, attr), self.results))
return np.concatenate(arrs, axis=0)
def _get_labels(self) -> np.ndarray:
arrs = tuple(map(lambda r: r.labels, self.results))
return np.concatenate(arrs, axis=0)
def _get_predictions(self) -> np.ndarray:
arrs = tuple(map(lambda r: r.predictions, self.results))
return np.concatenate(arrs, axis=0)
def _get_iterations(self) -> int:
return sum(map(lambda er: len(er.losses), self._results))
@property
def convergence(self) -> int:
"""Return the Nth epoch index this result set convergened. If used on a
``EpocResult`` it is the Nth iteration.
"""
losses = self.losses
lowest = min(losses)
return losses.index(lowest)
@property
def converged_epoch(self) -> EpochResult:
"""Return the last epoch that arrived at the lowest loss.
"""
idx = self.convergence
return self.results[idx]
def _format_time(self, attr: str):
if hasattr(self, attr):
val: datetime = getattr(self, attr)
if val is not None:
return val.strftime("%m/%d/%Y %H:%M:%S:%f")
def _get_dictable_attributes(self) -> Iterable[Tuple[str, str]]:
return chain.from_iterable(
(super()._get_dictable_attributes(),
self._split_str_to_attributes(
('start_time end_time ave_loss min_loss converged_epoch ' +
'statistics'))))
@property
def statistics(self) -> Dict[str, Any]:
"""Return the statistics of the data set result.
:return:
a dictionary with the following:
* ``n_epochs``: the number of epoch results
* ``n_epoch_converged``: the 0 based index for which epoch
converged (lowest validation loss before it went back up)
* ``n_batches``: the number of batches on which were trained,
tested or validated
* ``ave_data_points``: the average number of data pointes on
which were trained, tested or validated
per batch
* ``n_total_data_points``: the number of data pointes on which
were trained, tested or validated
"""
epochs = self.results
n_data_points = 0
n_batches = 0
if len(epochs) > 0:
epoch: EpochResult = epochs[0]
n_data_points = epoch.n_data_points
n_batches = len(epoch.batch_ids)
for epoch in epochs:
assert n_data_points == epoch.n_data_points
n_total_points = sum(n_data_points)
ave_data_points = n_total_points / len(n_data_points)
return {'n_epochs': len(epochs),
'n_epoch_converged': self.converged_epoch.index + 1,
'n_batches': n_batches,
'ave_data_points': ave_data_points,
'n_total_data_points': n_total_points}
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_details: bool = False, converged_epoch: bool = True,
include_metrics: bool = True, include_all_metrics: bool = False):
"""Write the results data.
:param depth: the number of indentation levels
:param writer: the data sink
        :param include_details: whether or not to include the epoch details
        :param converged_epoch: if ``True``, use the converged epoch's metrics
                                rather than those of the whole dataset result
        :param include_metrics: whether or not to write the performance metrics
        :param include_all_metrics: whether or not to write both classification
                                    and prediction metrics
"""
er: EpochResult = self.converged_epoch
res = er if converged_epoch else self
self._write_line(
f'min/ave/max loss: {res.min_loss:.5f}/{res.ave_loss:.5f}/' +
f'{er.max_loss:.5f}',
depth, writer)
if include_all_metrics:
self._write_line('classification:', depth, writer)
res.classification_metrics.write(depth + 1, writer)
self._write_line('prediction:', depth, writer)
res.prediction_metrics.write(depth + 1, writer)
elif include_metrics:
res.metrics.write(depth, writer)
if include_details:
self._write_line('epoch details:', depth, writer)
self.results[0].write(depth + 1, writer)
@dataclass
class ModelResult(Dictable):
"""A container class used to capture the training, validation and test
results. The data captured is used to report and plot curves.
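
    Example (illustrative; ``arch_res`` is an :class:`.ArchivedResult` obtained
    from a :class:`.ModelResultManager`)::

        res: ModelResult = arch_res.model_result
        res.validation.converged_epoch.metrics
        res.write(include_all_metrics=True)
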
"""
RUNS: ClassVar[int] = 1
config: Configurable = field()
"""Useful for retrieving hyperparameter settings later after unpersisting
from disk.
"""
name: str = field()
"""The name of this result set."""
model_settings: InitVar[Dict[str, Any]] = field()
"""The setttings used to configure the model."""
net_settings: InitVar[Dict[str, Any]] = field()
"""The network settings used by the model for this result set."""
decoded_attributes: Set[str] = field()
"""The attributes that were coded and used in this model."""
dataset_result: Dict[DatasetSplitType, DatasetResult] = \
field(default_factory=dict)
"""The dataset (i.e. ``validation``, ``test``) level results."""
def __post_init__(self, model_settings: ModelSettings,
net_settings: NetworkSettings):
self.RUNS += 1
self.index = self.RUNS
splits = tuple(DatasetSplitType)
self.dataset_result = {k: DatasetResult() for k in splits}
self.model_settings = model_settings.asdict('class_name')
self.net_settings = net_settings.asdict('class_name')
self.net_settings['module_class_name'] = \
net_settings.get_module_class_name()
@classmethod
    def reset_runs(cls):
        """Reset the run counter.
        """
        cls.RUNS = 1
    @classmethod
    def get_num_runs(cls):
        return cls.RUNS
def clone(self) -> ModelResult:
cl = cp.copy(self)
cl.dataset_result = {}
for k, v in self.dataset_result.items():
cl.dataset_result[k] = v.clone()
return cl
def get_intermediate(self) -> ModelResult:
cl = self.clone()
for ds in cl.dataset_result.values():
if not ds.is_started:
ds.start()
if not ds.is_ended:
ds.end()
return cl
@property
def train(self) -> DatasetResult:
"""Return the training run results.
"""
return self.dataset_result[DatasetSplitType.train]
@property
def validation(self) -> DatasetResult:
"""Return the validation run results.
"""
return self.dataset_result[DatasetSplitType.validation]
@property
def test(self) -> DatasetResult:
"""Return the testing run results.
"""
return self.dataset_result[DatasetSplitType.test]
def reset(self, name: DatasetSplitType):
"""Clear all results for data set ``name``.
"""
if logger.isEnabledFor(logging.DEBUG):
            logger.debug(f'resetting dataset result \'{name}\'')
self.dataset_result[name] = DatasetResult()
@property
def contains_results(self) -> bool:
return len(self.test) > 0 or len(self.validation) > 0
@property
def non_empty_dataset_result(self) -> Dict[str, DatasetResult]:
dct = OrderedDict()
for split_name in 'train validation test'.split():
ds = getattr(self, split_name)
if ds.contains_results:
dct[split_name] = ds
return dct
@property
def last_test_name(self) -> str:
"""Return the anem of the dataset that exists in the container, and
thus, the last to be populated. In order, this is test and then
validation.
"""
if self.test.contains_results:
return DatasetSplitType.test
if self.validation.contains_results:
return DatasetSplitType.validation
raise NoResultError(self.__class__)
@property
def last_test(self) -> DatasetResult:
"""Return either the test or validation results depending on what is
available.
"""
return self.dataset_result[self.last_test_name]
def write_result_statistics(self, split_type: DatasetSplitType,
depth: int = 0, writer=sys.stdout):
ds: DatasetResult = self.dataset_result[split_type]
stats = ds.statistics
ave_dps = stats['ave_data_points']
n_dps = stats['n_total_data_points']
self._write_line(f"batches: {stats['n_batches']}",
depth, writer)
self._write_line(f"ave data points per batch/total: {ave_dps:.1f}/" +
f'{n_dps}', depth, writer)
if split_type == DatasetSplitType.validation:
self._write_line('converged/epochs: ' +
f"{stats['n_epoch_converged']}/" +
f"{stats['n_epochs']}", depth, writer)
def _get_dictable_attributes(self) -> Iterable[Tuple[str, str]]:
return chain.from_iterable(
(self._split_str_to_attributes(
'name index model_settings net_settings'),
(('dataset_result', 'non_empty_dataset_result'),)))
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_settings: bool = False, include_converged: bool = False,
include_config: bool = False, include_all_metrics: bool = False):
"""Generate a human readable format of the results.
"""
lr = self.model_settings["learning_rate"]
self._write_line(f'Name: {self.name}', depth, writer)
self._write_line(f'Run index: {self.index}', depth, writer)
self._write_line(f'Learning rate: {lr}', depth, writer)
ds_res: DatasetResult
for split_type, ds_res in self.dataset_result.items():
self._write_line(f'{split_type.name}:', depth + 1, writer)
if ds_res.contains_results:
start_time = ds_res._format_time('start_time')
end_time = ds_res._format_time('end_time')
if start_time is not None:
self._write_line(f'started: {start_time}',
depth + 2, writer)
self._write_line(f'ended: {end_time}',
depth + 2, writer)
self.write_result_statistics(split_type, depth + 2, writer)
                multi_epoch = len(self.dataset_result[split_type].results) > 1
                if include_converged and multi_epoch:
self._write_line('average over epoch:', depth + 2, writer)
ds_res.write(depth + 3, writer, include_details=True,
converged_epoch=False)
self._write_line('converged epoch:', depth + 2, writer)
ds_res.write(depth + 3, writer, include_details=False,
converged_epoch=True)
else:
all_metrics = (include_all_metrics and
split_type == DatasetSplitType.test)
# don't write useless training metrics since training
# doesn't produce predictions
metrics = (split_type != DatasetSplitType.train)
ds_res.write(
depth + 2, writer,
include_metrics=metrics,
include_all_metrics=all_metrics)
else:
self._write_line('no results', depth + 2, writer)
if include_settings:
if self.decoded_attributes is None:
dattribs = None
else:
dattribs = sorted(self.decoded_attributes)
self._write_line('settings:', depth, writer)
self._write_line(f'attributes: {dattribs}', depth + 1, writer)
self._write_line('model:', depth + 1, writer)
self._write_dict(self.model_settings, depth + 2, writer)
self._write_line('network:', depth + 1, writer)
self._write_dict(self.net_settings, depth + 2, writer)
if include_config:
self._write_line('configuration:', depth, writer)
self.config.write(depth + 1, writer)
def __str__(self):
model_name = self.net_settings['module_class_name']
return f'{model_name} ({self.index})'
def __repr__(self):
        return self.__str__()
# --- end of file: zensols/deeplearn/result/domain.py (zensols.deeplearn-1.8.1) ---
__author__ = 'Paul Landes'
from typing import Dict, Tuple, ClassVar
from dataclasses import dataclass, field
from pathlib import Path
import logging
import pandas as pd
from zensols.util.time import time
from zensols.datdesc import DataFrameDescriber
from zensols.deeplearn import DatasetSplitType
from . import (
ModelResult, EpochResult, DatasetResult, ModelResultManager, ArchivedResult,
Metrics, PredictionsDataFrameFactory,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelResultReporter(object):
"""Summarize all results in a directory from the output of model execution
    from :class:`~zensols.deeplearn.model.ModelExecutor`.
The class iterates through the pickled binary output files from the run and
summarizes in a Pandas dataframe, which is handy for reporting in papers.
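
    Example (illustrative; ``facade`` is a trained ``ModelFacade``)::

        reporter = ModelResultReporter(result_manager=facade.result_manager)
        df = reporter.dataframe
        reporter.dump(Path('model-results.csv'))
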
"""
METRIC_DESCRIPTIONS: ClassVar[Dict[str, str]] = \
PredictionsDataFrameFactory.METRIC_DESCRIPTIONS
"""Dictionary of performance metrics column names to human readable
descriptions.
"""
result_manager: ModelResultManager = field()
"""Contains the results to report on--and specifically the path to directory
where the results were persisted.
"""
include_validation: bool = field(default=True)
"""Whether or not to include validation performance metrics."""
@property
def dataframe(self) -> pd.DataFrame:
"""Return the summarized results (see class docs).
:return: the Pandas dataframe of the results
"""
rows = []
cols = 'name file start train_duration converged features'.split()
cols.extend('wF1t wPt wRt mF1t mPt mRt MF1t MPt MRt acct'.split())
if self.include_validation:
cols.extend('wF1v wPv wRv mF1v mPv mRv MF1v MPv MRv accv'.split())
cols.extend('train_occurs validation_occurs test_occurs'.split())
dpt_key = 'n_total_data_points'
arch_res: ArchivedResult
for fname, arch_res in self.result_manager.results_stash.items():
res: ModelResult = arch_res.model_result
train: DatasetResult = res.dataset_result.get(DatasetSplitType.train)
validate: DatasetResult = res.dataset_result.get(DatasetSplitType.validation)
test: DatasetResult = res.dataset_result.get(DatasetSplitType.test)
if train is not None:
dur = train.end_time - train.start_time
hours, remainder = divmod(dur.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
dur = f'{hours:02}:{minutes:02}:{seconds:02}'
if validate is not None:
conv_epoch: int = validate.statistics['n_epoch_converged']
ver: EpochResult = validate.converged_epoch
else:
conv_epoch = None
ver: EpochResult = None
if test is not None:
vm: Metrics = ver.metrics
tm: Metrics = test.metrics
features = ', '.join(res.decoded_attributes)
row = [res.name, fname, train.start_time, dur,
conv_epoch, features]
row.extend([
tm.weighted.f1, tm.weighted.precision, tm.weighted.recall,
tm.micro.f1, tm.micro.precision, tm.micro.recall,
tm.macro.f1, tm.macro.precision, tm.macro.recall,
tm.accuracy])
if self.include_validation:
row.extend([
vm.weighted.f1, vm.weighted.precision, vm.weighted.recall,
vm.micro.f1, vm.micro.precision, vm.micro.recall,
vm.macro.f1, vm.macro.precision, vm.macro.recall,
vm.accuracy])
row.extend([
train.statistics[dpt_key], validate.statistics[dpt_key],
test.statistics[dpt_key]])
rows.append(row)
if logger.isEnabledFor(logging.INFO):
logger.info('result calculation complete for ' +
f'{res.name} ({fname})')
return pd.DataFrame(rows, columns=cols)
@property
def dataframe_describer(self) -> DataFrameDescriber:
"""Get a dataframe describer of metrics (see :obj:`metrics_dataframe`).
"""
df: pd.DataFrame = self.dataframe
meta: Tuple[Tuple[str, str], ...] = \
tuple(map(lambda c: (c, self.METRIC_DESCRIPTIONS[c]), df.columns))
name: str = (self.result_manager.name.capitalize() +
' Summarized Model Results')
return DataFrameDescriber(
name='Summarized Model Results',
df=df,
desc=name,
meta=meta)
def dump(self, path: Path) -> pd.DataFrame:
"""Create the summarized results and write them to the file system.
"""
with time(f'wrote results summary: {path}'):
df: pd.DataFrame = self.dataframe
df.to_csv(path)
            return df
# --- end of file: zensols/deeplearn/result/report.py (zensols.deeplearn-1.8.1) ---
"""A class that persists results in various formats.
"""
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Tuple, Iterable, Set
from dataclasses import dataclass, field
import logging
import re
import pickle
import shutil
from pathlib import Path
from tkinter import TclError
from zensols.persist import (
persisted,
DirectoryStash, Stash, ReadOnlyStash, IncrementKeyDirectoryStash,
)
from zensols.config import Dictable
from .. import ModelError, ModelSettings
from . import ModelResult, ModelResultGrapher
logger = logging.getLogger(__name__)
@dataclass
class ArchivedResult(Dictable):
"""An archived result that provides access to the outcomes the training,
validation and optionally test phases of a model execution
:see: :class:`.ModelResultManager`
"""
_DICTABLE_ATTRIBUTES = {'model_result'}
_DICTABLE_WRITE_EXCLUDES = _DICTABLE_ATTRIBUTES
_EXTENSIONS = frozenset('txt model png json'.split())
id: int = field()
"""The result incremented identitifer."""
name: str = field()
"""The result's unique name, which includes :obj:`id`."""
txt_path: Path = field()
"""The path results as a text file."""
result_path: Path = field()
"""The path to pickled results file."""
model_path: Path = field()
"""The path to the directory with the PyTorch model and state files."""
png_path: Path = field()
"""The path to the training/validation loss results."""
json_path: Path = field()
"""The path to the results as a parsable JSON file."""
@property
@persisted('_result')
def model_result(self) -> ModelResult:
"""The results container of the run."""
with open(self.result_path, 'rb') as f:
return pickle.load(f)
def get_paths(self, excludes: Set[str] = frozenset()) -> Iterable[Path]:
"""Get all paths in the result as an iterable.
:param excludes: the extensions to exclude from the returned paths
"""
exts: Set[str] = set(self._EXTENSIONS) - excludes
return map(lambda at: getattr(self, f'{at}_path'), exts)
@dataclass
class _ArchivedResultStash(ReadOnlyStash):
"""Creates instances of :class:`.ArchivedResult` using a delegate
:class:`~zensols.persist.stash.DirectoryStash` for getting path values.
"""
manager: ModelResultManager = field()
"""The manager containing the results."""
stash: DirectoryStash = field()
"""The stash that reads the results persisted by
:class:`.ModelResultManager`.
"""
def load(self, name: str) -> ArchivedResult:
path: Path = self.stash.key_to_path(name)
m: re.Match = self.manager.file_regex.match(path.name)
if m is None:
raise ModelError(f'Unknown model results name: {name}')
name, id, ext = m.groups()
params = dict(id=int(id), name=name, result_path=path)
for ext in self.manager._EXTENSIONS:
k = f'{ext}_path'
params[k] = self.manager._get_next_path(ext=ext, key=id)
return ArchivedResult(**params)
def exists(self, name: str) -> bool:
return self.stash.exists(name)
def keys(self) -> Iterable[str]:
return self.stash.keys()
@dataclass
class ModelResultManager(IncrementKeyDirectoryStash):
"""Saves and loads results from runs (:class:`.ModelResult`) of the
:class:`~zensols.deeplearn.model.executor.ModelExecutor`. Keys incrementing
integers, one for each save, which usually corresponds to the run of the
model executor.
The stash's :obj:`path` points to where results are persisted with all file
format versions.
"""
_EXTENSIONS = ArchivedResult._EXTENSIONS
name: str = field(default=None)
"""The name of the manager in the configuration."""
model_path: Path = field(default=True)
"""The path to where the results are stored."""
save_text: bool = field(default=True)
"""If ``True`` save the results as a text file."""
save_plot: bool = field(default=True)
"""If ``True`` save the plot to the file system."""
save_json: bool = field(default=True)
"""If ``True`` save the results as a JSON file."""
file_pattern: str = field(default='{prefix}-{key}.{ext}')
"""The pattern used to store the model and results files."""
file_regex: re.Pattern = field(
default=re.compile(r'^(.+)-(.+?)\.([^.]+)$'))
"""An regular expression analogue to :obj:`file_pattern`."""
def __post_init__(self):
self.prefix = self.to_file_name(self.name)
super().__post_init__(self.prefix)
@property
@persisted('_read_stash')
def results_stash(self) -> Stash:
"""Return a stash that provides access to previous results (not just the last
results). The stash iterates over the model results directory with
:class:`.ArchivedResult` values.
"""
return _ArchivedResultStash(self, DirectoryStash(path=self.path))
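    # Example (a sketch; ``mng`` stands for a configured instance of this
    # manager): iterate over previously persisted runs and lazily unpickle
    # their results via :class:`.ArchivedResult`.
    #
    #   for name, arch in mng.results_stash.items():
    #       res = arch.model_result          # unpickled ModelResult
    #       print(name, arch.txt_path, res.name)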
@staticmethod
def to_file_name(name: str) -> str:
"""Return a file name string from human readable ``name``."""
return ModelSettings.normalize_name(name)
def _get_next_path(self, ext: str, key: str = None) -> Path:
if key is None:
key = self.get_last_key(False)
params = {'prefix': self.prefix, 'key': key, 'ext': ext}
fname = self.file_pattern.format(**params)
path = self.path / fname
return path
def get_last_id(self) -> str:
"""Get the last result ID."""
key: str = self.get_last_key(False)
return self.key_to_path(key).stem
def get_next_text_path(self) -> Path:
"""Return a path to the available text file to be written."""
return self._get_next_path('txt')
def get_next_model_path(self) -> Path:
"""Return a path to the available model file to be written."""
return self._get_next_path('model')
def get_next_graph_path(self) -> Path:
"""Return a path to the available graph file to be written."""
return self._get_next_path('png')
def get_next_json_path(self) -> Path:
"""Return a path to the available JSON file to be written."""
return self._get_next_path('json')
def dump(self, result: ModelResult):
super().dump(result)
if self.model_path is not None:
src = self.model_path
dst = self.get_next_model_path()
if logger.isEnabledFor(logging.INFO):
logger.info(f'copying model {src} -> {dst}')
if dst.exists():
logger.warning(f'already exists--deleting: {dst}')
shutil.rmtree(dst)
if not src.is_dir():
raise ModelError(
f'No such directory: {src}--' +
'possibly because the model never learned')
shutil.copytree(src, dst)
if self.save_text:
self.save_text_result(result)
if self.save_json:
self.save_json_result(result)
if self.save_plot:
self.save_plot_result(result)
def get_grapher(self, figsize: Tuple[int, int] = (15, 5),
title: str = None) -> ModelResultGrapher:
"""Return an instance of a model grapher. This class can plot results of
``res`` using ``matplotlib``.
:see: :class:`.ModelResultGrapher`
"""
title = self.name if title is None else title
path = self.get_next_graph_path()
return ModelResultGrapher(title, figsize, save_path=path)
def save_plot_result(self, result: ModelResult):
"""Plot and save results of the validation and training loss.
"""
try:
grapher = self.get_grapher()
grapher.plot([result])
grapher.save()
except TclError as e:
# _tkinter.TclError: couldn't connect to display <IP>
logger.warning('could not render plot, probably because ' +
f'disconnected from display: {e}')
def save_text_result(self, result: ModelResult):
"""Save the text results of the model.
"""
path = self.get_next_text_path()
if logger.isEnabledFor(logging.INFO):
logger.info(f'saving text results to {path}')
path.parent.mkdir(parents=True, exist_ok=True)
with open(path, 'w') as f:
result.write(writer=f, include_settings=True,
include_config=True, include_converged=True)
def save_json_result(self, result: ModelResult):
"""Save the results of the model in JSON format.
"""
path = self.get_next_json_path()
if logger.isEnabledFor(logging.INFO):
logger.info(f'saving json results to {path}')
path.parent.mkdir(parents=True, exist_ok=True)
with open(path, 'w') as f:
result.asjson(writer=f, indent=4) | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/result/manager.py | manager.py |
__author__ = 'Paul Landes'
from typing import Callable, List, Iterable, Any, ClassVar, Dict, Tuple
from dataclasses import dataclass, field
import logging
import sys
import itertools as it
from pathlib import Path
from frozendict import frozendict
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from zensols.persist import persisted
from zensols.datdesc import DataFrameDescriber
from zensols.deeplearn.vectorize import (
CategoryEncodableFeatureVectorizer,
FeatureVectorizerManagerSet,
)
from zensols.deeplearn.batch import Batch, BatchStash, DataPoint
from . import (
ModelResultError, ModelResult, EpochResult, ClassificationMetrics
)
logger = logging.getLogger(__name__)
@dataclass
class PredictionsDataFrameFactory(object):
"""Create a Pandas data frame containing results from a result as output from a
``ModelExecutor``. The data frame contains the feature IDs, labels,
predictions mapped back to their original value from the feature data item.
Currently only classification models are supported.
"""
METRIC_DESCRIPTIONS: ClassVar[Dict[str, str]] = frozendict({
'wF1': 'weighted F1',
'wP': 'weighted precision',
'wR': 'weighted recall',
'mF1': 'micro F1',
'mP': 'micro precision',
'mR': 'micro recall',
'MF1': 'macro F1',
'MP': 'macro precision',
'MR': 'macro recall',
'correct': 'the number of correct classifications',
'count': 'the number of data points in the test set',
'acc': 'accuracy',
'wF1t': 'weighted F1 on the test set',
'wPt': 'weighted precision on the test set',
'wRt': 'weighted recall on the test set',
'mF1t': 'micro F1 on the test set',
'mPt': 'micro precision on the test set',
'mRt': 'micro recall on the test set',
'MF1t': 'macro F1 on the test set',
'MPt': 'macro precision on the test set',
'MRt': 'macro recall on the test set',
'acct': 'accuracy on the test set',
'wF1v': 'weighted F1 on the validation set',
'wPv': 'weighted precision on the validation set',
'wRv': 'weighted recall on the validation set',
'mF1v': 'micro F1 on the validation set',
'mPv': 'micro precision on the validation set',
'mRv': 'micro recall on the validation set',
'MF1v': 'macro F1 on the validation set',
'MPv': 'macro precision on the validation set',
'MRv': 'macro recall on the validation set',
'accv': 'accuracy on the validation set',
'train_occurs': 'the number of data points used to train the model',
'test_occurs': 'the number of data points used to test the model',
'validation_occurs': 'the number of data points used to validate the model',
'label': 'the model class',
'name': 'the model or result set name',
'file': 'the directory name of the results',
'start': 'when the test started',
'train_duration': 'the time it took to train the model in HH:MM:SS',
'converged': 'the last epoch with the lowest loss',
'features': 'the features used in the model'})
"""Dictionary of performance metrics column names to human readable
descriptions.
"""
ID_COL: ClassVar[str] = 'id'
"""The data point ID in the generated dataframe in :obj:`dataframe` and
:obj:`metrics_dataframe`.
"""
LABEL_COL: ClassVar[str] = 'label'
"""The gold label column in the generated dataframe in :obj:`dataframe` and
:obj:`metrics_dataframe`.
"""
PREDICTION_COL: ClassVar[str] = 'pred'
"""The prediction column in the generated dataframe in :obj:`dataframe` and
:obj:`metrics_dataframe`.
"""
CORRECT_COL: ClassVar[str] = 'correct'
"""The correct/incorrect indication column in the generated dataframe in
:obj:`dataframe` and :obj:`metrics_dataframe`.
"""
METRICS_DF_WEIGHTED_COLUMNS: ClassVar[Tuple[str, ...]] = tuple(
'wF1 wP wR'.split())
"""Weighed performance metrics columns."""
METRICS_DF_MICRO_COLUMNS: ClassVar[Tuple[str, ...]] = tuple(
'mF1 mP mR'.split())
"""Micro performance metrics columns."""
METRICS_DF_MACRO_COLUMNS: ClassVar[Tuple[str, ...]] = tuple(
'MF1 MP MR'.split())
"""Macro performance metrics columns."""
METRICS_DF_COLUMNS: ClassVar[Tuple[str, ...]] = tuple(
'label wF1 wP wR mF1 mP mR MF1 MP MR correct acc count'.split())
"""
:see: :obj:`metrics_dataframe`
"""
source: Path = field()
"""The source file from where the results were unpickled."""
result: ModelResult = field()
"""The epoch containing the results."""
stash: BatchStash = field()
"""The batch stash used to generate the results from the
:class:`~zensols.deeplearn.model.ModelExecutor`. This is used to get the
vectorizer to reverse map the labels.
"""
column_names: List[str] = field(default=None)
"""The list of string column names for each data item the list returned from
``data_point_transform`` to be added to the results for each
label/prediction
"""
data_point_transform: Callable[[DataPoint], tuple] = field(default=None)
"""A function that returns a tuple, each with an element respective of
``column_names`` to be added to the results for each label/prediction; if
    ``None`` (the default), ``str`` is used (see the `Iris Jupyter Notebook
<https://github.com/plandes/deeplearn/blob/master/notebook/iris.ipynb>`_
example)
"""
batch_limit: int = sys.maxsize
"""The max number of batche of results to output."""
epoch_result: EpochResult = field(default=None)
"""The epoch containing the results. If none given, take it from the test
results..
"""
label_vectorizer_name: str = field(default=None)
"""The name of the vectorizer that encodes the labels, which is used to reverse
map from integers to their original string nominal values.
"""
def __post_init__(self):
if self.column_names is None:
self.column_names = ('data',)
if self.data_point_transform is None:
self.data_point_transform = lambda dp: (str(dp),)
if self.epoch_result is None:
self.epoch_result = self.result.test.results[0]
@property
def name(self) -> str:
"""The name of the results taken from :class:`.ModelResult`."""
return self.result.name
def _transform_dataframe(self, batch: Batch, labs: List[str],
preds: List[str]):
transform: Callable = self.data_point_transform
rows = []
for dp, lab, pred in zip(batch.data_points, labs, preds):
row = [dp.id, lab, pred, lab == pred]
row.extend(transform(dp))
rows.append(row)
cols = [self.ID_COL, self.LABEL_COL, self.PREDICTION_COL,
self.CORRECT_COL]
cols = cols + list(self.column_names)
return pd.DataFrame(rows, columns=cols)
def _calc_len(self, batch: Batch) -> int:
return len(batch)
def _narrow_encoder(self, batch: Batch) -> LabelEncoder:
vec: CategoryEncodableFeatureVectorizer = None
if self.label_vectorizer_name is None:
vec = batch.get_label_feature_vectorizer()
while True:
if not isinstance(vec, CategoryEncodableFeatureVectorizer) \
and hasattr(vec, 'delegate'):
vec = vec.delegate
else:
break
else:
vms: FeatureVectorizerManagerSet = \
batch.batch_stash.vectorizer_manager_set
vec = vms.get_vectorizer(self.label_vectorizer_name)
if not isinstance(vec, CategoryEncodableFeatureVectorizer):
raise ModelResultError(
'Expecting a category feature vectorizer but got: ' +
f'{vec} ({vec.name if vec else "none"})')
return vec.label_encoder
def _batch_dataframe(self, inv_trans: bool) -> Iterable[pd.DataFrame]:
"""Return a data from for each batch.
"""
epoch_labs: List[np.ndarray] = self.epoch_result.labels
epoch_preds: List[np.ndarray] = self.epoch_result.predictions
start = 0
for bid in it.islice(self.epoch_result.batch_ids, self.batch_limit):
batch: Batch = self.stash[bid]
end = start + self._calc_len(batch)
preds: List[int] = epoch_preds[start:end]
labs: List[int] = epoch_labs[start:end]
if inv_trans:
le: LabelEncoder = self._narrow_encoder(batch)
inv_trans: Callable = le.inverse_transform
preds: List[str] = inv_trans(preds)
labs: List[str] = inv_trans(labs)
df = self._transform_dataframe(batch, labs, preds)
df['batch_id'] = bid
assert len(df) == len(labs)
start = end
yield df
def _create_dataframe(self, inv_trans: bool) -> pd.DataFrame:
return pd.concat(self._batch_dataframe(inv_trans), ignore_index=True)
@property
@persisted('_dataframe')
def dataframe(self) -> pd.DataFrame:
"""The predictions and labels as a dataframe. The first columns are generated
from ``data_point_tranform``, and the remaining columns are:
- id: the ID of the feature (not batch) data item
- label: the label given by the feature data item
- pred: the prediction
- correct: whether or not the prediction was correct
"""
return self._create_dataframe(True)
def _to_metric_row(self, lab: str, mets: ClassificationMetrics) -> \
List[Any]:
return [lab, mets.weighted.f1, mets.weighted.precision,
mets.weighted.recall,
mets.micro.f1, mets.micro.precision, mets.micro.recall,
mets.macro.f1, mets.macro.precision, mets.macro.recall,
mets.n_correct, mets.accuracy, mets.n_outcomes]
def _add_metric_row(self, le: LabelEncoder, df: pd.DataFrame, ann_id: str,
rows: List[Any]):
lab: str = le.inverse_transform([ann_id])[0]
data = df[self.LABEL_COL], df[self.PREDICTION_COL]
mets = ClassificationMetrics(*data, len(data[0]))
row = self._to_metric_row(lab, mets)
rows.append(row)
def metrics_to_series(self, lab: str, mets: ClassificationMetrics) -> \
pd.Series:
"""Create a single row dataframe from classification metrics."""
row = self._to_metric_row(lab, mets)
return pd.Series(row, index=self.METRICS_DF_COLUMNS)
@property
def metrics_dataframe(self) -> pd.DataFrame:
"""Performance metrics by comparing the gold label to the predictions.
"""
rows: List[Any] = []
df = self._create_dataframe(False)
dfg = df.groupby(self.LABEL_COL).agg({self.LABEL_COL: 'count'}).\
rename(columns={self.LABEL_COL: 'count'})
bids = self.epoch_result.batch_ids
batch: Batch = self.stash[bids[0]]
le: LabelEncoder = self._narrow_encoder(batch)
for ann_id, dfg in df.groupby(self.LABEL_COL):
try:
self._add_metric_row(le, dfg, ann_id, rows)
except ValueError as e:
logger.error(f'Could not create metrics for {ann_id}: {e}')
dfr = pd.DataFrame(rows, columns=self.METRICS_DF_COLUMNS)
dfr = dfr.sort_values(self.LABEL_COL).reset_index(drop=True)
return dfr
@property
def majority_label_metrics(self) -> ClassificationMetrics:
"""Compute metrics of the majority label of the test dataset.
"""
df: pd.DataFrame = self.dataframe
le = LabelEncoder()
        # baseline: predict the most frequent gold label for every data point
        gold: np.ndarray = le.fit_transform(df[self.LABEL_COL].to_list())
        max_lab: str = df.groupby(self.LABEL_COL)[self.LABEL_COL].\
            agg('count').idxmax()
        majlab: np.ndarray = np.repeat(le.transform([max_lab])[0],
                                       gold.shape[0])
return ClassificationMetrics(gold, majlab, gold.shape[0])
@property
def metrics_dataframe_describer(self) -> DataFrameDescriber:
"""Get a dataframe describer of metrics (see :obj:`metrics_dataframe`).
"""
df: pd.DataFrame = self.metrics_dataframe
meta: Tuple[Tuple[str, str], ...] = \
tuple(map(lambda c: (c, self.METRIC_DESCRIPTIONS[c]), df.columns))
return DataFrameDescriber(
name=self.name,
df=df,
desc=f'{self.name.capitalize()} Model Results',
meta=meta)
@dataclass
class SequencePredictionsDataFrameFactory(PredictionsDataFrameFactory):
"""Like the super class but create predictions for sequence based models.
:see: :class:`~zensols.deeplearn.model.sequence.SequenceNetworkModule`
"""
def _calc_len(self, batch: Batch) -> int:
return sum(map(len, batch.data_points))
def _transform_dataframe(self, batch: Batch, labs: List[str],
preds: List[str]):
dfs: List[pd.DataFrame] = []
start: int = 0
transform: Callable = self.data_point_transform
for dp, lab, pred in zip(batch.data_points, labs, preds):
end = start + len(dp)
df = pd.DataFrame({
self.ID_COL: dp.id,
self.LABEL_COL: labs[start:end],
self.PREDICTION_COL: preds[start:end]})
df[list(self.column_names)] = transform(dp)
dfs.append(df)
start = end
return pd.concat(dfs) | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/result/pred.py | pred.py |
__author__ = 'Paul Landes'
from typing import Tuple, List, Sequence
from dataclasses import dataclass
from itertools import chain
from functools import reduce
import torch
from torch import Tensor
from . import TorchConfig
@dataclass
class NonUniformDimensionEncoder(object):
"""Encode a sequence of tensors, each of arbitrary dimensionality, as a 1-D
array. Then decode the 1-D array back to the original.
"""
torch_config: TorchConfig
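    # Encoded 1-D layout (derived from ``encode`` below): a header of
    # ``[n_tensors, ndim_1, *shape_1, ndim_2, *shape_2, ...]`` followed by the
    # flattened data of each tensor. For example, two tensors of shapes
    # (2, 3) and (4,) encode to the header [2, 2, 2, 3, 1, 4] followed by
    # their 6 + 4 flattened values.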
def encode(self, arrs: Sequence[Tensor]) -> Tensor:
"""Encode a sequence of tensors, each of arbitrary dimensionality, as a 1-D
array.
"""
def map_tensor_meta(arr: Tensor) -> Tuple[int]:
sz = arr.shape
tm = [len(sz)]
tm.extend(sz)
return tm
tmeta = [len(arrs)]
tmeta.extend(chain.from_iterable(map(map_tensor_meta, arrs)))
tmeta = self.torch_config.singleton(tmeta, dtype=arrs[0].dtype)
arrs = [tmeta] + list(map(lambda t: t.flatten(), arrs))
enc = torch.cat(arrs)
return enc
def decode(self, arr: Tensor) -> Tuple[Tensor]:
"""Decode the 1-D array back to the original.
"""
ix_type = torch.long
shapes_len = arr[0].type(ix_type)
one = torch.tensor([1], dtype=ix_type, device=arr.device)
one.autograd = False
start = one.clone()
shapes: List[int] = []
for i in range(shapes_len):
sz_len = arr[start].type(ix_type)
start += one
end = (start + sz_len).type(ix_type)
sz = arr[start:end]
shapes.append(sz)
start = end
arrs = []
for shape in shapes:
ln = reduce(lambda x, y: x.type(ix_type) * y.type(ix_type), shape)
end = (start + ln).type(ix_type)
shape = tuple(map(int, shape))
x = arr[start:end]
x = x.view(shape)
arrs.append(x)
start = end
return tuple(arrs) | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/vectorize/util.py | util.py |
__author__ = 'Paul Landes'
from typing import Set, List, Iterable, Union, Any, Tuple, Dict
from dataclasses import dataclass, field
import logging
import pandas as pd
import numpy as np
import itertools as it
from sklearn.preprocessing import LabelEncoder
import torch
from torch import Tensor
from torch import nn
from zensols.persist import persisted
from zensols.deeplearn import TorchTypes, TorchConfig
from . import (
VectorizerError,
FeatureVectorizer,
EncodableFeatureVectorizer,
TensorFeatureContext,
FeatureContext,
MultiFeatureContext,
)
logger = logging.getLogger(__name__)
@dataclass
class IdentityEncodableFeatureVectorizer(EncodableFeatureVectorizer):
"""An identity vectorizer, which encodes tensors verbatim, or concatenates a
list of tensors in to one tensor of the same dimension.
"""
DESCRIPTION = 'identity function encoder'
def _get_shape(self) -> Tuple[int]:
return -1,
    def _encode(self, obj: Union[list, Tensor]) -> FeatureContext:
if isinstance(obj, Tensor):
arr = obj
else:
tc = self.torch_config
if len(obj[0].shape) == 0:
arr = tc.singleton(obj, dtype=obj[0].dtype)
else:
arr = torch.cat(obj)
return TensorFeatureContext(self.feature_id, arr)
@dataclass
class CategoryEncodableFeatureVectorizer(EncodableFeatureVectorizer):
"""A base class that vectorizies nominal categories in to integer indexes.
"""
categories: Set[str] = field()
"""A list of string enumerated values."""
def __post_init__(self):
super().__post_init__()
if len(self.categories) == 0:
raise VectorizerError(f'No categories given: <{self.categories}>')
self.label_encoder = LabelEncoder()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'encoding categories: <{self.categories}>')
self.label_encoder.fit(self.categories)
@property
@persisted('_by_label')
def by_label(self) -> Dict[str, int]:
le = self.label_encoder
return dict(zip(le.classes_, le.transform(le.classes_)))
def get_classes(self, nominals: Iterable[int]) -> List[str]:
"""Return the label string values for indexes ``nominals``.
:param nominals: the integers that map to the respective string class
"""
return self.label_encoder.inverse_transform(nominals)
@dataclass
class NominalEncodedEncodableFeatureVectorizer(CategoryEncodableFeatureVectorizer):
"""Map each label to a nominal, which is useful for class labels.
:shape: (1, 1)
"""
DESCRIPTION = 'nominal encoder'
data_type: Union[str, None, torch.dtype] = field(default=None)
"""The type to use for encoding, which if a string, must be a key in of
:obj:`.TorchTypes.NAME_TO_TYPE`.
"""
decode_one_hot: bool = field(default=False)
"""If ``True``, during decoding create a one-hot encoded tensor of shape
``(N, |labels|)``.
"""
def __post_init__(self):
super().__post_init__()
self.data_type = self._str_to_dtype(self.data_type, self.torch_config)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'init categories: {self.categories}')
def _get_shape(self) -> Tuple[int]:
return (1, 1)
def _str_to_dtype(self, data_type: str,
torch_config: TorchConfig) -> torch.dtype:
if data_type is None:
data_type = torch.int64
else:
data_type = TorchTypes.type_from_string(data_type)
return data_type
def _encode(self, category_instances: List[str]) -> FeatureContext:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'encode categories: {category_instances} ' +
f'(one of {self.categories})')
if not isinstance(category_instances, (tuple, list)):
raise VectorizerError(
f'expecting list but got: {type(category_instances)}')
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'instances: {category_instances}')
indicies = self.label_encoder.transform(category_instances)
singleton = self.torch_config.singleton
arr = singleton(indicies, dtype=self.data_type)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'encoding cat arr: {arr.dtype}')
return TensorFeatureContext(self.feature_id, arr)
def _decode(self, context: FeatureContext) -> Tensor:
arr = super()._decode(context)
if self.decode_one_hot:
batches = arr.shape[0]
he = self.torch_config.zeros((batches, len(self.categories)),
dtype=torch.long)
for row in range(batches):
idx = arr[row]
he[row][idx] = 1
del arr
arr = he
return arr
@dataclass
class OneHotEncodedEncodableFeatureVectorizer(CategoryEncodableFeatureVectorizer):
"""Vectorize from a list of nominals. This is useful for encoding labels for
the categorization machine learning task.
:shape: (1,) when optimizing bools and classes = 2, else (1, |categories|)
"""
DESCRIPTION = 'category encoder'
optimize_bools: bool = field()
"""If ``True``, more efficiently represent boolean encodings."""
def __post_init__(self):
super().__post_init__()
le = self.label_encoder
llen = len(le.classes_)
if not self.optimize_bools or llen != 2:
arr = self.torch_config.zeros((llen, llen))
for i in range(llen):
arr[i][i] = 1
self.identity = arr
def _get_shape(self) -> Tuple[int]:
n_classes = len(self.label_encoder.classes_)
if self.optimize_bools and n_classes == 2:
return (1,)
else:
return (-1, n_classes)
def _encode_cats(self, category_instances: List[str], arr: Tensor) -> \
Tuple[int, FeatureContext]:
if logger.isEnabledFor(logging.DEBUG):
            logger.debug(f'encoding: {category_instances}')
tc = self.torch_config
indicies = self.label_encoder.transform(category_instances)
is_one_row = self.shape[0] == 1
if is_one_row:
if arr is None:
arr = tc.singleton(indicies)
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating: {self.identity.shape}')
if arr is None:
arr = tc.empty(
(len(category_instances), self.identity.shape[0]))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'created: {arr.dtype}')
for i, idx in enumerate(it.islice(indicies, arr.size(0))):
arr[i] = self.identity[idx]
return is_one_row, arr
def _encode(self, category_instances: List[str]) -> FeatureContext:
is_one_row, arr = self._encode_cats(category_instances, None)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'encoding cat arr: {arr.dtype}')
return TensorFeatureContext(self.feature_id, arr)
@dataclass
class AggregateEncodableFeatureVectorizer(EncodableFeatureVectorizer):
"""Use another vectorizer to vectorize each instance in an iterable. Each
    iterable is then concatenated into a single tensor on decode.
**Important**: you must add the delegate vectorizer to the same vectorizer
manager set as this instance since it uses the manager to find it.
    :shape: (-1, *delegate.shape[1:])
"""
DESCRIPTION = 'aggregate vectorizer'
DEFAULT_PAD_LABEL = nn.CrossEntropyLoss().ignore_index
"""The default value used for :obj:`pad_label`, which is used since this
vectorizer is most often used to encode labels.
"""
delegate_feature_id: str = field()
"""The feature ID of the delegate vectorizer to use (configured in same
vectorizer manager).
"""
size: int = field(default=-1)
"""The second dimension size of the tensor to create when decoding."""
pad_label: int = field(default=DEFAULT_PAD_LABEL)
"""The numeric label to use for padded elements. This defaults to
    :obj:`~torch.nn.CrossEntropyLoss.ignore_index`."""
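    # Behavior sketch (derived from ``_decode`` below): each element of the
    # batch is vectorized with the delegate, then stacked into a single tensor
    # padded with :obj:`pad_label` out to the longest (or configured) length.
    # For label sequences of lengths 3 and 1 with size=-1, the decoded shape is
    # (2, 3) with the second row padded after its first element.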
def _get_shape(self):
return -1, *self.delegate.shape[1:]
@property
def delegate(self) -> EncodableFeatureVectorizer:
return self.manager[self.delegate_feature_id]
def _encode(self, datas: Iterable[Iterable[Any]]) -> MultiFeatureContext:
vec = self.delegate
ctxs = tuple(map(lambda d: vec.encode(d), datas))
return MultiFeatureContext(self.feature_id, ctxs)
@persisted('_pad_tensor_pw')
def _pad_tensor(self, data_type: torch.dtype,
device: torch.device) -> Tensor:
return torch.tensor([self.pad_label], device=device, dtype=data_type)
def create_padded_tensor(self, size: torch.Size,
data_type: torch.dtype = None,
device: torch.device = None):
"""Create a tensor with all elements set to :obj:`pad_label`.
:param size: the dimensions of the created tensor
:param data_type: the data type of the new tensor
"""
data_type = self.delegate.data_type if data_type is None else data_type
device = self.torch_config.device if device is None else device
pad = self._pad_tensor(data_type, device)
if pad.dtype != data_type or pad.device != device:
pad = torch.tensor(
[self.pad_label], device=device, dtype=data_type)
return pad.repeat(size)
def _decode(self, context: MultiFeatureContext) -> Tensor:
vec: FeatureVectorizer = self.delegate
srcs: Tuple[Tensor] = tuple(
map(lambda c: vec.decode(c), context.contexts))
clen: int = len(srcs)
first: Tensor = srcs[0]
dtype: torch.dtype = first.dtype
        mid_dims: torch.Size = first.shape[1:]
sz: int
if self.size > 0:
sz = self.size
else:
sz = max(map(lambda t: t.size(0), srcs))
arr = self.create_padded_tensor((clen, sz, *mid_dims), dtype)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'num contexts: {clen}, dtype={dtype}, ' +
f'src={first.shape}, dst={arr.shape}, ' +
f'mid_dims={mid_dims}')
rowix = 0
ctx: TensorFeatureContext
for carr in srcs:
lsz = min(carr.size(0), sz)
if carr.dim() == 1:
arr[rowix, :lsz] = carr[:lsz]
elif carr.dim() == 2:
arr[rowix, :lsz, :] = carr[:lsz, :]
elif carr.dim() == 3:
arr[rowix, :lsz, :, :] = carr[:lsz, :, :]
rowix += 1
return arr
@dataclass
class MaskFeatureContext(FeatureContext):
"""A feature context used for the :class:`.MaskFeatureVectorizer` vectorizer.
:param sequence_lengths: the lengths of all each row to mask
"""
sequence_lengths: Tuple[int]
@dataclass
class MaskFeatureVectorizer(EncodableFeatureVectorizer):
"""Creates masks where the first N elements of a vector are 1's with the rest
0's.
:shape: (-1, size)
"""
DESCRIPTION = 'mask'
size: int = field(default=-1)
"""The length of all mask vectors or ``-1`` make the length the max size of the
sequence in the batch.
"""
data_type: Union[str, None, torch.dtype] = field(default='bool')
"""The mask tensor type. To use the int type that matches the resolution of
the manager's :obj:`torch_config`, use ``DEFAULT_INT``.
"""
def __post_init__(self):
super().__post_init__()
self.data_type = self.str_to_dtype(self.data_type, self.torch_config)
if self.size > 0:
tc = self.torch_config
self.ones = tc.ones((self.size,), dtype=self.data_type)
else:
self.ones = None
@staticmethod
def str_to_dtype(data_type: str, torch_config: TorchConfig) -> torch.dtype:
if data_type == 'DEFAULT_INT':
data_type = torch_config.int_type
else:
data_type = TorchTypes.type_from_string(data_type)
return data_type
def _get_shape(self):
return -1, self.size,
def _encode(self, datas: Iterable[Iterable[Any]]) -> FeatureContext:
lens = tuple(map(lambda d: sum(1 for _ in d), datas))
return MaskFeatureContext(self.feature_id, lens)
def _decode(self, context: MaskFeatureContext) -> Tensor:
tc = self.torch_config
batch_size = len(context.sequence_lengths)
lens = context.sequence_lengths
if self.ones is None:
# when no configured size is given, recreate for each batch
sz = max(lens)
ones = self.torch_config.ones((sz,), dtype=self.data_type)
else:
# otherwise, the mask was already created in the initializer
sz = self.size
ones = self.ones
arr = tc.zeros((batch_size, sz), dtype=self.data_type)
for bix, slen in enumerate(lens):
arr[bix, :slen] = ones[:slen]
return arr
@dataclass
class SeriesEncodableFeatureVectorizer(EncodableFeatureVectorizer):
"""Vectorize a Pandas series, such as a list of rows. This vectorizer has an
    undefined shape since neither the number of columns nor the number of rows
    is known until runtime.
:shape: (-1, 1)
"""
DESCRIPTION = 'pandas series'
def _get_shape(self):
return -1, -1
def _encode(self, rows: Iterable[pd.Series]) -> FeatureContext:
narrs = []
tc = self.torch_config
nptype = tc.numpy_data_type
for row in rows:
narrs.append(row.to_numpy(dtype=nptype))
arr = np.stack(narrs)
arr = tc.from_numpy(arr)
return TensorFeatureContext(self.feature_id, arr)
@dataclass
class AttributeEncodableFeatureVectorizer(EncodableFeatureVectorizer):
"""Vectorize a iterable of floats. This vectorizer has an undefined shape
since both the number of columns and rows are not specified at runtime.
:shape: (1,)
"""
DESCRIPTION = 'single attribute'
def _get_shape(self):
return 1,
def _encode(self, data: Iterable[float]) -> FeatureContext:
arr = self.torch_config.from_iterable(data)
return TensorFeatureContext(self.feature_id, arr) | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/vectorize/vectorizers.py | vectorizers.py |
__author__ = 'Paul Landes'
from typing import Tuple, Any, Union
from dataclasses import dataclass, field
from abc import abstractmethod, ABCMeta
import logging
import sys
from io import TextIOBase
from scipy import sparse
from scipy.sparse import csr_matrix
import torch
from torch import Tensor
from zensols.persist import PersistableContainer
from zensols.config import ConfigFactory, Writable
from zensols.deeplearn import DeepLearnError, TorchConfig
logger = logging.getLogger(__name__)
class VectorizerError(DeepLearnError):
"""Thrown by instances of :class:`.FeatureVectorizer` during encoding or
decoding operations.
"""
pass
@dataclass
class ConfigurableVectorization(PersistableContainer, Writable):
name: str = field()
"""The name of the section given in the configuration.
"""
config_factory: ConfigFactory = field(repr=False)
"""The configuration factory that created this instance and used for
serialization functions.
"""
def __post_init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@dataclass
class FeatureVectorizer(ConfigurableVectorization, metaclass=ABCMeta):
"""An asbstrct base class that transforms a Python object in to a PyTorch
tensor.
"""
feature_id: str = field()
"""Uniquely identifies this vectorizer."""
def _allow_config_adds(self) -> bool:
return True
@abstractmethod
def _get_shape(self) -> Tuple[int, int]:
pass
@abstractmethod
def transform(self, data: Any) -> Tensor:
"""Transform ``data`` to a tensor data format.
"""
pass
@property
def shape(self) -> Tuple[int, int]:
"""Return the shape of the tensor created by ``transform``.
"""
return self._get_shape()
@property
def description(self) -> str:
"""A short human readable name.
    :see: :obj:`feature_id`
"""
return self.DESCRIPTION
def __str__(self):
return (f'{self.feature_id} ({type(self)}), ' +
f'desc={self.description}, shape: {self.shape}')
def __repr__(self):
return f'{self.__class__}: {self.__str__()}'
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line(str(self), depth, writer)
@dataclass
class FeatureContext(PersistableContainer):
"""Data created by coding and meant to be pickled on the file system.
:see EncodableFeatureVectorizer.encode:
"""
feature_id: str = field()
"""The feature id of the :class:`.FeatureVectorizer` that created this context.
"""
def __str__(self):
return f'{self.__class__.__name__} ({self.feature_id})'
@dataclass
class NullFeatureContext(FeatureContext):
"""A no-op feature context used for cases such as prediction batches with data
points that have no labels.
:see: :meth:`~zensols.deeplearn.batch.BatchStash.create_prediction`
:see: :class:`~zensols.deeplearn.batch.Batch`
"""
pass
@dataclass
class TensorFeatureContext(FeatureContext):
"""A context that encodes data directly to a tensor. This tensor could be a
sparse matrix becomes dense during the decoding process.
"""
tensor: Tensor = field()
"""The output tensor of the encoding phase."""
def deallocate(self):
super().deallocate()
if hasattr(self, 'tensor'):
del self.tensor
def __str__(self):
tstr = f'{self.tensor.shape}' if self.tensor is not None else '<none>'
return f'{super().__str__()}: {tstr}'
def __repr__(self):
return self.__str__()
@dataclass
class SparseTensorFeatureContext(FeatureContext):
"""Contains data that was encded from a dense matrix as a sparse matrix and
back. Using torch sparse matrices currently lead to deadlocking in child
proceesses, so use scipy :class:``csr_matrix`` is used instead.
"""
USE_SPARSE = True
sparse_data: Union[Tuple[Tuple[csr_matrix, int]], Tensor] = field()
"""The sparse array data."""
@property
def sparse_arr(self) -> Tuple[csr_matrix]:
assert isinstance(self.sparse_data[0], tuple)
return self.sparse_data[0]
@classmethod
def to_sparse(cls, arr: Tensor) -> Tuple[csr_matrix]:
narr = arr.numpy()
tdim = len(arr.shape)
if tdim == 3:
narrs = tuple(map(lambda i: narr[i], range(narr.shape[0])))
elif tdim == 2 or tdim == 1:
narrs = (narr,)
else:
raise VectorizerError('Tensors of dimensions higher than ' +
f'3 not supported: {arr.shape}')
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating sparse matrix: {arr.shape}')
mats = tuple(map(lambda m: sparse.csr_matrix(m), narrs))
return (mats, tdim)
@classmethod
def instance(cls, feature_id: str, arr: Tensor,
torch_config: TorchConfig):
arr = arr.cpu()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'encoding in to sparse tensor: {arr.shape}')
if cls.USE_SPARSE:
sarr = cls.to_sparse(arr)
else:
sarr = arr
return cls(feature_id, sarr)
def to_tensor(self, torch_config: TorchConfig) -> Tensor:
if isinstance(self.sparse_arr, Tensor):
tarr = self.sparse_arr
else:
narr, tdim = self.sparse_data
narrs = tuple(map(lambda sm: torch.from_numpy(sm.todense()), narr))
if len(narrs) == 1:
tarr = narrs[0]
else:
tarr = torch.stack(narrs)
dim_diff = len(tarr.shape) - tdim
if dim_diff > 0:
for _ in range(dim_diff):
tarr = tarr.squeeze(0)
elif dim_diff < 0:
for _ in range(-dim_diff):
tarr = tarr.unsqueeze(0)
if logger.isEnabledFor(logging.DEBUG):
            logger.debug(f'decoded sparse matrix to: {tarr.shape}')
return tarr
@dataclass
class MultiFeatureContext(FeatureContext):
"""A composite context that contains a tuple of other contexts.
"""
contexts: Tuple[FeatureContext] = field()
"""The subordinate contexts."""
@property
def is_empty(self) -> bool:
cnt = sum(1 for _ in filter(
lambda c: not isinstance(c, NullFeatureContext), self.contexts))
return cnt == 0
def deallocate(self):
super().deallocate()
if hasattr(self, 'contexts'):
self._try_deallocate(self.contexts)
del self.contexts | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/vectorize/domain.py | domain.py |
from __future__ import annotations
"""Vectorization base classes and basic functionality.
"""
__author__ = 'Paul Landes'
from typing import Tuple, Any, Set, Dict, List, Iterable
from dataclasses import dataclass, field
from abc import abstractmethod, ABCMeta
import logging
import sys
from itertools import chain
import collections
from io import TextIOBase
from torch import Tensor
from zensols.persist import persisted, PersistedWork
from zensols.deeplearn import TorchConfig
from . import (
VectorizerError, ConfigurableVectorization, FeatureVectorizer,
FeatureContext, TensorFeatureContext, SparseTensorFeatureContext,
NullFeatureContext, MultiFeatureContext,
)
logger = logging.getLogger(__name__)
@dataclass
class EncodableFeatureVectorizer(FeatureVectorizer, metaclass=ABCMeta):
"""This vectorizer splits transformation up in to encoding and decoding. The
encoded state as a ``FeatureContext``, in cases where encoding is
prohibitively expensive, is computed once and pickled to the file system.
It is then loaded and finally decoded into a tensor.
Examples include computing an encoding as indexes of a word embedding
during the encoding phase. Then generating the full embedding layer during
decoding. Note that this decoding is done with a ``TorchConfig`` so the
output tensor goes directly to the GPU.
This abstract base class only needs the ``_encode`` method overridden. The
``_decode`` must be overridden if the context is not of type
``TensorFeatureContext``.
"""
manager: FeatureVectorizerManager = field()
"""The manager used to create this vectorizer that has resources needed to
encode and decode.
"""
def transform(self, data: Any) -> Tensor:
"""Use the output of the encoding as input to the decoding to directly produce
the output tensor ready to be used in testing, training, validation
etc.
"""
context = self.encode(data)
return self.decode(context)
def encode(self, data: Any) -> FeatureContext:
"""Encode data to a context ready to (potentially) be pickled.
"""
return self._encode(data)
def decode(self, context: FeatureContext) -> Tensor:
"""Decode a (potentially) unpickled context and return a tensor using the
manager's :obj:`torch_config`.
"""
arr: Tensor = None
self._validate_context(context)
if isinstance(context, NullFeatureContext):
pass
elif isinstance(context, MultiFeatureContext) and context.is_empty:
arr = NullFeatureContext(context.feature_id)
else:
arr = self._decode(context)
return arr
@property
def torch_config(self) -> TorchConfig:
"""The torch configuration used to create encoded/decoded tensors.
"""
return self.manager.torch_config
@abstractmethod
def _encode(self, data: Any) -> FeatureContext:
pass
def _decode(self, context: FeatureContext) -> Tensor:
arr: Tensor
if isinstance(context, NullFeatureContext):
arr = None
elif isinstance(context, TensorFeatureContext):
arr = context.tensor
elif isinstance(context, SparseTensorFeatureContext):
arr = context.to_tensor(self.manager.torch_config)
else:
cstr = str(context) if context is None else context.__class__
raise VectorizerError(f'Unknown context: {cstr}')
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'decoded {type(context)} to {arr.shape}')
return arr
def _validate_context(self, context: FeatureContext):
if context.feature_id != self.feature_id:
raise VectorizerError(f'Context meant for {context.feature_id} ' +
f'routed to {self.feature_id}')
@dataclass
class TransformableFeatureVectorizer(EncodableFeatureVectorizer,
metaclass=ABCMeta):
"""Instances of this class use the output of
:meth:`.EncodableFeatureVectorizer.transform` (chain encode and decode) as
    the output of :meth:`EncodableFeatureVectorizer.encode`, then pass it
    through the decode step.
This is useful if the decoding phase is very expensive and you'd rather
take that hit when creating batches written to the file system.
"""
encode_transformed: bool = field()
"""If ``True``, enable the transformed output of the encoding step as the
decode step (see class docs).
"""
def encode(self, data: Any) -> FeatureContext:
if logger.isEnabledFor(logging.DEBUG):
            logger.debug(f'encoding {type(data)}, also decode after encode: ' +
                         f'{self.encode_transformed}')
if self.encode_transformed:
ctx: FeatureContext = self._encode(data)
arr: Tensor = self._decode(ctx)
ctx = TensorFeatureContext(ctx.feature_id, arr)
else:
ctx = super().encode(data)
return ctx
def decode(self, context: FeatureContext) -> Tensor:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'decoding {type(context)}, already decoded: ' +
f'{self.encode_transformed}')
if self.encode_transformed:
ctx: TensorFeatureContext = context
arr = ctx.tensor
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'already decoded: {arr.shape}')
else:
arr = super().decode(context)
return arr
# manager
@dataclass
class FeatureVectorizerManager(ConfigurableVectorization):
"""Creates and manages instances of :class:`.EncodableFeatureVectorizer` and
    parses text into feature based documents.
This handles encoding data into a context, which is data ready to be
pickled on the file system with the idea this intermediate state is
    expensive to create. At training time, the context is brought back into
    memory and efficiently decoded into a tensor.
This class keeps track of two kinds of vectorizers:
* module: registered with ``register_vectorizer`` in Python modules
* configured: registered at instance create time in
``configured_vectorizers``
Instances of this class act like a :class:`dict` of all registered
vectorizers. This includes both module and configured vectorizers. The
keys are the ``feature_id``s and values are the contained vectorizers.
:see: :class:`.EncodableFeatureVectorizer`
"""
ATTR_EXP_META = ('torch_config', 'configured_vectorizers')
MANAGER_SEP = '.'
torch_config: TorchConfig = field()
"""The torch configuration used to encode and decode tensors."""
configured_vectorizers: Set[str] = field()
"""Configuration names of vectorizors to use by this manager."""
def __post_init__(self):
super().__post_init__()
self.manager_set = None
self._vectorizers_pw = PersistedWork('_vectorizers_pw', self)
def transform(self, data: Any) -> \
Tuple[Tensor, EncodableFeatureVectorizer]:
"""Return a tuple of duples with the output tensor of a vectorizer and the
vectorizer that created the output. Every vectorizer listed in
``feature_ids`` is used.
"""
return tuple(map(lambda vec: (vec.transform(data), vec),
self._vectorizers.values()))
@property
@persisted('_vectorizers_pw')
def _vectorizers(self) -> Dict[str, FeatureVectorizer]:
"""Return a dictionary of all registered vectorizers. This includes both
module and configured vectorizers. The keys are the ``feature_id``s
and values are the contained vectorizers.
"""
return self._create_vectorizers()
def _create_vectorizers(self) -> Dict[str, FeatureVectorizer]:
vectorizers = collections.OrderedDict()
feature_ids = set()
conf_instances = {}
if self.configured_vectorizers is not None:
for sec in self.configured_vectorizers:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating vectorizer {sec}')
if sec.find(self.MANAGER_SEP) >= 0:
raise VectorizerError(
f'Separator {self.MANAGER_SEP} not ' +
f'allowed in names: {sec}')
vec = self.config_factory(sec, manager=self)
conf_instances[vec.feature_id] = vec
feature_ids.add(vec.feature_id)
for feature_id in sorted(feature_ids):
inst = conf_instances.get(feature_id)
vectorizers[feature_id] = inst
return vectorizers
@property
@persisted('_feature_ids')
def feature_ids(self) -> Set[str]:
"""Get the feature ids supported by this manager, which are the keys of the
vectorizer.
:see: :class:`.FeatureVectorizerManager`
"""
return frozenset(self._vectorizers.keys())
def get(self, name: str) -> FeatureVectorizer:
"""Return the feature vectorizer named ``name``."""
fv = self._vectorizers.get(name)
# if we can't find the vectorizer, try using dot syntax to find it in
# the parent manager set
if name is not None and fv is None:
idx = name.find(self.MANAGER_SEP)
if self.manager_set is not None and idx > 0:
mng_name, vec = name[:idx], name[idx+1:]
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'looking up {mng_name}:{vec}')
mng = self.manager_set.get(mng_name)
if mng is not None:
fv = mng._vectorizers.get(vec)
return fv
def keys(self) -> Iterable[str]:
return self._vectorizers.keys()
def values(self) -> Iterable[FeatureVectorizer]:
return self._vectorizers.values()
def items(self) -> Iterable[Tuple[str, FeatureVectorizer]]:
return self._vectorizers.items()
def __len__(self) -> int:
return len(self._vectorizers)
def __getitem__(self, name: str) -> FeatureVectorizer:
fv = self.get(name)
if fv is None:
raise VectorizerError(
f"Manager '{self.name}' has no vectorizer: '{name}'")
return fv
def deallocate(self):
if self._vectorizers_pw.is_set():
vecs = self._vectorizers
for vec in vecs.values():
vec.deallocate()
vecs.clear()
super().deallocate()
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line(str(self), depth, writer)
for vec in self._vectorizers.values():
vec.write(depth + 1, writer)
@dataclass
class FeatureVectorizerManagerSet(ConfigurableVectorization):
"""A set of managers used collectively to encode and decode a series of
features across many different kinds of data (i.e. labels, language
features, numeric).
In the same way a :class:`.FeatureVectorizerManager` acts like a
:class:`dict`, this class is a ``dict`` for
:class:`.FeatureVectorizerManager` instances.
"""
ATTR_EXP_META = ('_managers',)
names: List[str] = field()
"""The sections defining :class:`.FeatureVectorizerManager` instances."""
def __post_init__(self):
super().__post_init__()
self._managers_pw = PersistedWork('_managers_pw', self)
@property
@persisted('_managers_pw')
def _managers(self) -> Dict[str, FeatureVectorizerManager]:
"""All registered vectorizer managers of the manager."""
mngs = {}
for n in self.names:
f: FeatureVectorizerManager = self.config_factory(n)
if not isinstance(f, FeatureVectorizerManager):
raise VectorizerError(
f"Config section '{n}' does not define a " +
                    f'FeatureVectorizerManager: {f}')
f.manager_set = self
mngs[n] = f
return mngs
def get_vectorizer_names(self) -> Iterable[str]:
"""Return the names of vectorizers across all vectorizer managers."""
return map(lambda vec: vec.name,
chain.from_iterable(
map(lambda vm: vm.values(), self.values())))
def get_vectorizer(self, name: str) -> FeatureVectorizer:
"""Find vectorizer with ``name`` in all vectorizer managers.
"""
for vm in self.values():
for vec in vm.values():
if name == vec.name:
return vec
@property
@persisted('_feature_ids')
def feature_ids(self) -> Set[str]:
"""Return all feature IDs supported across all manager registered with the
manager set.
"""
return set(chain.from_iterable(
map(lambda m: m.feature_ids, self.values())))
def __getitem__(self, name: str) -> FeatureVectorizerManager:
mng = self._managers.get(name)
if mng is None:
raise VectorizerError(
f"No such manager '{name}' in manager set '{self.name}'")
return mng
def get(self, name: str) -> FeatureVectorizerManager:
return self._managers.get(name)
def values(self) -> List[FeatureVectorizerManager]:
return self._managers.values()
def keys(self) -> Set[str]:
return set(self._managers.keys())
def deallocate(self):
if self._managers_pw.is_set():
mngs = self._managers
for mng in mngs.values():
mng.deallocate()
mngs.clear()
super().deallocate()
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line(f'{self.name}', depth, writer)
for mng in self._managers.values():
mng.write(depth + 1, writer) | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/vectorize/manager.py | manager.py |
__author__ = 'Paul Landes'
from typing import Tuple, Any
from dataclasses import dataclass, field
from abc import abstractmethod, ABCMeta
import logging
import copy as cp
from functools import reduce
import math
from torch import nn
from . import LayerError
logger = logging.getLogger(__name__)
class Flattenable(object):
"""A class with a :obj:`flatten_dim` and :obj:`out_shape` properties.
"""
@property
def out_shape(self) -> Tuple[int]:
"""Return the shape of the layer after flattened in to one dimension.
"""
pass
@property
def flatten_dim(self) -> int:
"""Return the number or neurons of the layer after flattening in to one
dimension.
"""
return reduce(lambda x, y: x * y, self.out_shape)
def __str__(self):
sup = super().__str__()
return f'{sup}, out: {self.out_shape}'
class Im2DimCalculator(Flattenable):
"""Convolution matrix dimension calculation utility.
Implementation as Matrix Multiplication section.
Example (im2col)::
W_in = H_in = 227
Ch_in = D_in = 3
Ch_out = D_out = 3
K = 96
F = (11, 11)
S = 4
P = 0
        W_out = H_out = ((227 - 11 + (2 * 0)) / 4) + 1 = 55 output locations
X_col = Fw^2 * D_out x W_out * H_out = 11^2 * 3 x 55 * 55 = 363 x 3025
Example (im2row)::
W_row = 96 filters of size 11 x 11 x 3 => K x 11 * 11 * 3 = 96 x 363
Result of convolution: transpose(W_row) dot X_col. Must reshape back to 55
x 55 x 96
:see: `Stanford <http://cs231n.github.io/convolutional-networks/#conv>`_
"""
def __init__(self, W: int, H: int, D: int = 1, K: int = 1,
F: Tuple[int, int] = (2, 2), S: int = 1, P: int = 0):
"""Initialize.
:param W: width
:param H: height
:param D: depth [of volume] (usually same as K)
:param K: number of filters
:param F: tuple of kernel/filter (width, height)
:param S: stride
:param P: padding
"""
self.W = W
self.H = H
self.D = D
self.K = K
self.F = F
self.S = S
self.P = P
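    # Usage sketch reproducing the im2col example from the class docstring:
    #
    #   calc = Im2DimCalculator(W=227, H=227, D=3, K=96, F=(11, 11), S=4, P=0)
    #   calc.out_shape   # -> (96, 55, 55)
    #   calc.X_col       # -> (363, 3025)
    #   calc.W_row       # -> (96, 363)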
def validate(self):
W, H, F, P, S = self.W, self.H, self.F, self.P, self.S
if ((W - F[0] + (2 * P)) % S):
raise LayerError('Incongruous convolution width layer parameters')
if ((H - F[1] + (2 * P)) % S):
raise LayerError('Incongruous convolution height layer parameters')
if (F[0] > (W + (2 * P))):
raise LayerError(f'Kernel/filter {F} must be <= width {W} + 2 * padding {P}')
if (F[1] > (H + (2 * P))):
raise LayerError(f'Kernel/filter {F} must be <= height {H} + 2 * padding {P}')
if self.W_row[1] != self.X_col[0]:
raise LayerError(f'Columns of W_row {self.W_row} do not match ' +
f'rows of X_col {self.X_col}')
@property
def W_out(self):
return int(((self.W - self.F[0] + (2 * self.P)) / self.S) + 1)
@property
def H_out(self):
return int(((self.H - self.F[1] + (2 * self.P)) / self.S) + 1)
@property
def X_col(self):
# TODO: not supported for non-square filters
return (self.F[0] ** 2 * self.D, self.W_out * self.H_out)
@property
def W_row(self):
# TODO: not supported for non-square filters
return (self.K, (self.F[0] ** 2) * self.D)
@property
def out_shape(self):
return (self.K, self.W_out, self.H_out)
def flatten(self, axis: int = 1):
fd = self.flatten_dim
W, H = (1, fd) if axis else (fd, 1)
return self.__class__(W, H, F=(1, 1), D=1, K=1)
def __str__(self):
attrs = 'W H D K F S P W_out H_out W_row X_col out_shape'.split()
return ', '.join(map(lambda x: f'{x}={getattr(self, x)}', attrs))
def __repr__(self):
return self.__str__()
@dataclass
class ConvolutionLayerFactory(object):
"""Create convolution layers. Each attribute maps a corresponding attribuate
variable in :class:`.Im2DimCalculator`, which documented in the parenthesis
in the parameter documentation below.
:param width: the width of the image/data (``W``)
:param height: the height of the image/data (``H``)
:param depth: the volume, which is usually same as ``n_filters`` (``D``)
:param n_filters: the number of filters, aka the filter depth/volume
(``K``)
:param kernel_filter: the kernel filter dimension in width X height (``F``)
:param stride: the stride, which is the number of cells to skip for each
convolution (``S``)
:param padding: the zero'd number of cells on the ends of the image/data
(``P``)
:see: `Stanford <http://cs231n.github.io/convolutional-networks/#conv>`_
"""
width: int = field(default=1)
height: int = field(default=1)
depth: int = field(default=1)
n_filters: int = field(default=1)
kernel_filter: Tuple[int, int] = field(default=(2, 2))
stride: int = field(default=1)
padding: int = field(default=0)
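    # Usage sketch (illustrative values): create a 2D convolution layer and a
    # matching max pool, then inspect the resulting output shape.
    #
    #   fac = ConvolutionLayerFactory(width=28, height=28, depth=1,
    #                                 n_filters=8, kernel_filter=(3, 3),
    #                                 stride=1, padding=1)
    #   conv = fac.conv2d()              # torch.nn.Conv2d(1, 8, (3, 3), ...)
    #   pool = MaxPool2dFactory(layer_factory=fac, stride=2)
    #   pool.out_shape                   # -> (8, 14, 14)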
@property
def calc(self) -> Im2DimCalculator:
return Im2DimCalculator(**{
'W': self.width,
'H': self.height,
'D': self.depth,
'K': self.n_filters,
'F': self.kernel_filter,
'S': self.stride,
'P': self.padding})
def copy_calc(self, calc: Im2DimCalculator):
self.width = calc.W
self.height = calc.H
self.depth = calc.D
self.n_filters = calc.K
self.kernel_filter = calc.F
self.stride = calc.S
self.padding = calc.P
def flatten(self) -> Any:
"""Return a new flattened instance of this class.
"""
clone = self.clone()
calc = clone.calc.flatten()
clone.copy_calc(calc)
return clone
@property
def flatten_dim(self) -> int:
"""Return the dimension of a flattened array of the convolution layer
represented by this instance.
"""
return self.calc.flatten_dim
def clone(self, **kwargs) -> Any:
"""Return a clone of this factory instance.
"""
clone = cp.deepcopy(self)
clone.__dict__.update(kwargs)
return clone
def conv1d(self) -> nn.Conv1d:
"""Return a convolution layer in one dimension.
"""
c = self.calc
return nn.Conv1d(c.D, c.K, c.F, padding=c.P, stride=c.S)
def conv2d(self) -> nn.Conv2d:
"""Return a convolution layer in two dimensions.
"""
c = self.calc
return nn.Conv2d(c.D, c.K, c.F, padding=c.P, stride=c.S)
def batch_norm2d(self) -> nn.BatchNorm2d:
"""Return a 2D batch normalization layer.
"""
return nn.BatchNorm2d(self.calc.K)
def __str__(self):
return str(self.calc)
@dataclass
class PoolFactory(Flattenable, metaclass=ABCMeta):
"""Create a 2D max pool and output it's shape.
:see: `Stanford <https://cs231n.github.io/convolutional-networks/#pool>`_
"""
layer_factory: ConvolutionLayerFactory = field(repr=False, default=None)
stride: int = field(default=1)
padding: int = field(default=0)
@abstractmethod
def _calc_out_shape(self) -> Tuple[int]:
pass
@abstractmethod
def create_pool(self) -> nn.Module:
pass
@property
def out_shape(self) -> Tuple[int]:
"""Calculates the dimensions for a max pooling filter and creates a layer.
:param F: the spacial extent (kernel filter)
:param S: the stride
"""
return self._calc_out_shape()
def __call__(self) -> nn.Module:
"""Return the pooling layer.
"""
return self.create_pool()
@dataclass
class MaxPool1dFactory(PoolFactory):
"""Create a 1D max pool and output it's shape.
"""
kernel_filter: Tuple[int] = field(default=2)
"""The filter used for max pooling."""
def _calc_out_shape(self) -> Tuple[int]:
calc = self.layer_factory.calc
L = calc.flatten_dim
F = self.kernel_filter
S = self.stride
P = self.padding
Lo = math.floor((((L + (2 * P) - (F - 1) - 1)) / S) + 1)
return (1, Lo)
def create_pool(self) -> nn.Module:
return nn.MaxPool1d(
self.kernel_filter, stride=self.stride, padding=self.padding)
@dataclass
class MaxPool2dFactory(PoolFactory):
"""Create a 2D max pool and output it's shape.
"""
kernel_filter: Tuple[int, int] = field(default=(2, 2))
"""The filter used for max pooling."""
def _calc_out_shape(self) -> Tuple[int]:
calc = self.layer_factory.calc
K, W, H = calc.out_shape
F = self.kernel_filter
S = self.stride
P = self.padding
W_2 = ((W - F[0] + (2 * P)) / S) + 1
H_2 = ((H - F[1] + (2 * P)) / S) + 1
return (K, int(W_2), int(H_2))
def create_pool(self) -> nn.Module:
return nn.MaxPool2d(
self.kernel_filter, stride=self.stride, padding=self.padding) | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/layer/conv.py | conv.py |
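# An illustrative example of the 2D pooling output shape computed above:
# given a convolution calculator output shape (K, W, H) = (8, 24, 24), a
# kernel_filter of (2, 2), stride 2 and padding 0, then
# W_2 = ((24 - 2 + 0) / 2) + 1 = 12 and H_2 = 12, so ``out_shape`` is
# (8, 12, 12).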
__author__ = 'Paul Landes'
from typing import Tuple, Union
from dataclasses import dataclass, field
import logging
import torch
from torch import nn
from torch import Tensor
from zensols.persist import Deallocatable
from zensols.deeplearn import (
ActivationNetworkSettings,
DropoutNetworkSettings,
BatchNormNetworkSettings,
)
from zensols.deeplearn.model import BaseNetworkModule
from . import (
RecurrentAggregation,
RecurrentAggregationNetworkSettings,
DeepLinearNetworkSettings,
)
from . import CRF, DeepLinear
logger = logging.getLogger(__name__)
@dataclass
class RecurrentCRFNetworkSettings(ActivationNetworkSettings,
DropoutNetworkSettings,
BatchNormNetworkSettings):
"""Settings for a recurrent neural network using :class:`.RecurrentCRF`.
"""
network_type: str = field()
"""One of ``rnn``, ``lstm`` or ``gru`` (usually ``lstm``)."""
bidirectional: bool = field()
"""Whether or not the network is bidirectional (usually ``True``)."""
input_size: int = field()
"""The input size to the layer."""
hidden_size: int = field()
"""The size of the hidden states of the network."""
num_layers: int = field()
"""The number of *"stacked"* layers."""
num_labels: int = field()
"""The number of output labels from the CRF."""
decoder_settings: DeepLinearNetworkSettings = field()
"""The decoder feed forward network."""
score_reduction: str = field()
"""Reduces how the score output over batches.
:see: :class:`.CRF`
"""
def to_recurrent_aggregation(self) -> RecurrentAggregationNetworkSettings:
attrs = ('name config_factory dropout network_type bidirectional ' +
'input_size hidden_size num_layers')
params = {k: getattr(self, k) for k in attrs.split()}
params['aggregation'] = 'none'
return RecurrentAggregationNetworkSettings(**params)
def get_module_class_name(self) -> str:
return __name__ + '.RecurrentCRF'
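# The module below composes its layers roughly as follows (see
# ``forward_recur_decode`` and ``_forward``):
#
#   input -> RNN/LSTM/GRU -> batch norm / activation / dropout (those
#   configured) -> linear (or DeepLinear) decoder -> CRF, which yields the
#   negative log likelihood at training time and Viterbi decoded sequences
#   (via ``decode``) at prediction time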
class RecurrentCRF(BaseNetworkModule):
"""Adapt the :class:`.CRF` module using the framework based
:class:`.BaseNetworkModule` class. This provides methods
:meth:`forward_recur_decode` and :meth:`decode`, which decodes the input.
This adds a recurrent neural network and a fully connected feed forward
decoder layer before the CRF layer.
"""
MODULE_NAME = 'recur crf'
def __init__(self, net_settings: RecurrentCRFNetworkSettings,
sub_logger: logging.Logger = None,
use_crf: bool = True):
"""Initialize the reccurent CRF layer.
:param net_settings: the recurrent layer configuration
:param sub_logger: the logger to use for the forward process in this
layer
:param use_crf: whether to add the CRF layer; if ``False``, :obj:`crf` is
set to ``None``
"""
super().__init__(net_settings, sub_logger)
ns = self.net_settings
self.recur_settings = rs = ns.to_recurrent_aggregation()
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug(f'recur settings: {rs}')
self.hidden_dim: int = rs.hidden_size
self.recur: RecurrentAggregation = self._create_recurrent_aggregation()
self.decoder: DeepLinear = self._create_decoder()
self.crf: CRF
if use_crf:
self.crf = self._create_crf()
self.crf.reset_parameters()
else:
self.crf = None
self.hidden = None
self._zero = None
def _create_recurrent_aggregation(self):
rs = self.recur_settings
return RecurrentAggregation(rs, self.logger)
def _create_decoder(self) -> Union[nn.Linear, DeepLinear]:
ns = self.net_settings
rs = self.recur_settings
if ns.decoder_settings is None:
layer = nn.Linear(rs.hidden_size, ns.num_labels)
else:
ln: DeepLinearNetworkSettings = ns.decoder_settings
ln.in_features = rs.hidden_size
ln.out_features = ns.num_labels
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug(f'linear: {ln}')
layer: DeepLinear = ln.create_module()
return layer
def _create_crf(self) -> CRF:
ns = self.net_settings
return CRF(ns.num_labels, batch_first=True,
score_reduction=ns.score_reduction)
def deallocate(self):
super().deallocate()
Deallocatable._try_deallocate(self.decoder)
self.recur.deallocate()
self.recur_settings.deallocate()
def _forward_decoder(self, x: Tensor) -> Tensor:
return self.decoder(x)
def forward_recur_decode(self, x: Tensor) -> Tensor:
"""Forward the input through the recurrent network (i.e. LSTM), batch
normalization and activation (if configured), and decoder output.
**Note**: this layer forwards batch normalization, activation and dropout
(for those configured) after the recurrent layer is forwarded.
However, the subordinate recurrent layer can also be configured with a
dropout when having more than one stacked layer.
:param x: the network input
:return: the fully connected linear feed forward decoded output
"""
self._shape_debug('recur in', x)
x = self.recur(x)[0]
# need to apply dropout even after the RNN/LSTM/GRU since dropout isn't
# applied for single (stacked) layers; see method docs
x = self._forward_batch_act_drop(x)
self._shape_debug('batch, act, drop', x)
x = self._forward_decoder(x)
self._shape_debug('decode', x)
return x
def to(self, *args, **kwargs):
self._zero = None
return super().to(*args, **kwargs)
def _forward(self, x: Tensor, mask: Tensor, labels: Tensor) -> Tensor:
self._shape_debug('mask', mask)
self._shape_debug('labels', labels)
if self._zero is None:
self._zero = torch.tensor(
[0], dtype=labels.dtype, device=labels.device)
x = self.forward_recur_decode(x)
# zero out negative values, since otherwise invalid transitions are
# indexed with negative values, which come from the default cross
# entropy loss functions `ignore_index`
labels = torch.max(labels, self._zero)
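# for example (assuming -100 as the loss function's default
# ``ignore_index``), labels of [[2, 1, -100]] become [[2, 1, 0]] after the
# element-wise max with zero above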
x = -self.crf(x, labels, mask=mask)
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug(f'training loss: {x}')
return x
def decode(self, x: Tensor, mask: Tensor) -> Tuple[Tensor, Tensor]:
"""Forward the input though the recurrent network, decoder, and then the CRF.
:param x: the input
:param mask: the mask used to block the last N states not provided
:return: the CRF sequence output and the score provided by the CRF's
Viterbi algorithm as a tuple
"""
self._shape_debug('mask', mask)
x = self.forward_recur_decode(x)
seq, score = self.crf.decode(x, mask=mask)
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug(f'decoded: {len(seq)} seqs, score: {score}')
return seq, score | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/layer/recurcrf.py | recurcrf.py |
__author__ = 'Paul Landes'
from typing import Union, Tuple
from dataclasses import dataclass, field
import logging
import torch
from torch import nn
from torch import Tensor
from zensols.config import ClassImporter
from zensols.deeplearn import DropoutNetworkSettings
from zensols.deeplearn.model import BaseNetworkModule
from . import LayerError
logger = logging.getLogger(__name__)
@dataclass
class RecurrentAggregationNetworkSettings(DropoutNetworkSettings):
"""Settings for a recurrent neural network. This configures a
:class:`.RecurrentAggregation` layer.
"""
network_type: str = field()
"""One of ``rnn``, ``lstm`` or ``gru``."""
aggregation: str = field()
"""A convenience operation to aggregate the parameters; this is one of:
``max``: return the max of the output states ``ave``: return the average of
the output states ``last``: return the last output state ``none``: do not
apply an aggregation function.
"""
bidirectional: bool = field()
"""Whether or not the network is bidirectional."""
input_size: int = field()
"""The input size to the network."""
hidden_size: int = field()
"""The size of the hidden states of the network."""
num_layers: int = field()
"""The number of *"stacked"* layers."""
def get_module_class_name(self) -> str:
return __name__ + '.RecurrentAggregation'
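# An illustrative sketch of what ``aggregation`` does to an input of shape
# (batch, sequence length, hidden size) after the recurrent layer:
#
#   'max'  -> element-wise max over the sequence dimension: (batch, hidden)
#   'ave'  -> mean over the sequence dimension: (batch, hidden)
#   'last' -> the last output state: (batch, hidden)
#   'none' -> unchanged: (batch, sequence length, hidden)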
class RecurrentAggregation(BaseNetworkModule):
"""A recurrent neural network model with an output aggregation. This includes
RNNs, LSTMs and GRUs.
"""
MODULE_NAME = 'recur'
def __init__(self, net_settings: RecurrentAggregationNetworkSettings,
sub_logger: logging.Logger = None):
"""Initialize the recurrent layer.
:param net_settings: the recurrent layer configuration
:param sub_logger: the logger to use for the forward process in this
layer
"""
super().__init__(net_settings, sub_logger)
ns = net_settings
if self.logger.isEnabledFor(logging.INFO):
self.logger.info(f'creating {ns.network_type} network')
class_name = f'torch.nn.{ns.network_type.upper()}'
ci = ClassImporter(class_name, reload=False)
hidden_size = ns.hidden_size // (2 if ns.bidirectional else 1)
param = {'input_size': ns.input_size,
'hidden_size': hidden_size,
'num_layers': ns.num_layers,
'bidirectional': ns.bidirectional,
'batch_first': True}
if ns.num_layers > 1 and ns.dropout is not None:
# UserWarning: dropout option adds dropout after all but last
# recurrent layer, so non-zero dropout expects num_layers greater
# than 1
param['dropout'] = ns.dropout
self.rnn: nn.RNNBase = ci.instance(**param)
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug(f'created {type(self.rnn)} with {param}')
def deallocate(self):
super().deallocate()
if hasattr(self, 'rnn'):
del self.rnn
@property
def out_features(self) -> int:
"""The number of features output from all layers of this module.
"""
ns = self.net_settings
return ns.hidden_size
def _forward(self, x: Tensor, x_init: Tensor = None) -> \
Union[Tensor, Tuple[Tensor, Tensor, Tensor]]:
self._shape_debug('input', x)
if x_init is None:
x, hidden = self.rnn(x)
else:
x, hidden = self.rnn(x, x_init)
self._shape_debug('recur', x)
agg = self.net_settings.aggregation
if agg == 'max':
x = torch.max(x, dim=1)[0]
self._shape_debug('max out shape', x)
elif agg == 'ave':
x = torch.mean(x, dim=1)
self._shape_debug('ave out shape', x)
elif agg == 'last':
x = x[:, -1, :]
self._shape_debug('last out shape', x)
elif agg == 'none':
pass
else:
raise LayerError(f'Unknown aggregate function: {agg}')
self._shape_debug('aggregation', x)
return x, hidden | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/layer/recur.py | recur.py |
__author__ = 'Kemal Kurniawan, Paul Landes'
__version__ = '0.7.5'
from typing import List, Optional, Union, Tuple
import torch
import torch.nn as nn
from . import LayerError
class CRF(nn.Module):
"""Conditional random field.
This module implements a conditional random field [LMP01]_. The forward computation
of this class computes the log likelihood of the given sequence of tags and
emission score tensor. This class also has the `~CRF.decode` method, which
finds the best tag sequence given an emission score tensor using the
`Viterbi algorithm`_.
Args:
num_tags: Number of tags.
batch_first: Whether the first dimension corresponds to the size of a minibatch.
score_reduction: how the score output is reduced over batches, and then
tags; scores have shape ``(batch size, number of tags)``
with the exception of ``tags``, which has shape
``(batch_size, sequence length, number of tags)``; how the
output is returned in :meth:`decode` by:
- **skip**: do not return scores, only the decoded output (default)
- **none**: return the scores unaltered
- **tags**: all scores
- **sum**: sum the max over batches, then divide by the batch count
- **max**: max over each batch max, then divide by the batch count
- **min**: min over each batch max, then divide by the batch count
- **mean**: average the max over batches, then divide by the batch count
Attributes:
start_transitions (`~torch.nn.Parameter`): Start transition score tensor of size
``(num_tags,)``.
end_transitions (`~torch.nn.Parameter`): End transition score tensor of size
``(num_tags,)``.
transitions (`~torch.nn.Parameter`): Transition score tensor of size
``(num_tags, num_tags)``.
.. [LMP01] Lafferty, J., McCallum, A., Pereira, F. (2001).
"Conditional random fields: Probabilistic models for segmenting and
labeling sequence data". *Proc. 18th International Conf. on Machine
Learning*. Morgan Kaufmann. pp. 282–289.
.. _Viterbi algorithm: https://en.wikipedia.org/wiki/Viterbi_algorithm
"""
def __init__(self, num_tags: int, batch_first: bool = False,
score_reduction: str = 'skip') -> None:
if num_tags <= 0:
raise LayerError(f'Invalid number of tags: {num_tags}')
super().__init__()
self.num_tags = num_tags
self.batch_first = batch_first
self.score_reduction = score_reduction
self.start_transitions = nn.Parameter(torch.empty(num_tags))
self.end_transitions = nn.Parameter(torch.empty(num_tags))
self.transitions = nn.Parameter(torch.empty(num_tags, num_tags))
self.reset_parameters()
def reset_parameters(self) -> None:
"""Initialize the transition parameters.
The parameters will be initialized randomly from a uniform distribution
between -0.1 and 0.1.
"""
nn.init.uniform_(self.start_transitions, -0.1, 0.1)
nn.init.uniform_(self.end_transitions, -0.1, 0.1)
nn.init.uniform_(self.transitions, -0.1, 0.1)
def __repr__(self) -> str:
return f'{self.__class__.__name__}(num_tags={self.num_tags})'
def forward(
self,
emissions: torch.Tensor,
tags: torch.LongTensor,
mask: Optional[torch.ByteTensor] = None,
reduction: str = 'sum',
) -> torch.Tensor:
"""Compute the conditional log likelihood of a sequence of tags given emission scores.
Args:
emissions (`~torch.Tensor`): Emission score tensor of size
``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length, num_tags)`` otherwise.
tags (`~torch.LongTensor`): Sequence of tags tensor of size
``(seq_length, batch_size)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length)`` otherwise.
mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.
reduction: Specifies the reduction to apply to the output:
``none|sum|mean|token_mean``. ``none``: no reduction will be applied.
``sum``: the output will be summed over batches. ``mean``: the output will be
averaged over batches. ``token_mean``: the output will be averaged over tokens.
Returns:
`~torch.Tensor`: The log likelihood. This will have size ``(batch_size,)`` if
reduction is ``none``, ``()`` otherwise.
"""
self._validate(emissions, tags=tags, mask=mask)
if reduction not in ('none', 'sum', 'mean', 'token_mean'):
raise LayerError(f'Invalid reduction: {reduction}')
if mask is None:
mask = torch.ones_like(tags, dtype=torch.uint8)
if self.batch_first:
emissions = emissions.transpose(0, 1)
tags = tags.transpose(0, 1)
mask = mask.transpose(0, 1)
# shape: (batch_size,)
numerator = self._compute_score(emissions, tags, mask)
# shape: (batch_size,)
denominator = self._compute_normalizer(emissions, mask)
# shape: (batch_size,)
llh = numerator - denominator
if reduction == 'none':
return llh
if reduction == 'sum':
return llh.sum()
if reduction == 'mean':
return llh.mean()
assert reduction == 'token_mean'
return llh.sum() / mask.type_as(emissions).sum()
def decode(self, emissions: torch.Tensor,
mask: Optional[torch.ByteTensor] = None) -> \
Union[List[List[int]], Tuple[List[List[int]], torch.Tensor]]:
"""Find the most likely tag sequence using Viterbi algorithm.
Args:
emissions (`~torch.Tensor`): Emission score tensor of size
``(seq_length, batch_size, num_tags)`` if ``batch_first`` is ``False``,
``(batch_size, seq_length, num_tags)`` otherwise.
mask (`~torch.ByteTensor`): Mask tensor of size ``(seq_length, batch_size)``
if ``batch_first`` is ``False``, ``(batch_size, seq_length)`` otherwise.
Returns:
List of lists containing the best tag sequence for each batch and
optionally the scores based on the ``score_reduction`` parameter given
in :meth:`__init__`.
"""
self._validate(emissions, mask=mask)
if mask is None:
mask = emissions.new_ones(emissions.shape[:2], dtype=torch.uint8)
if self.batch_first:
emissions = emissions.transpose(0, 1)
mask = mask.transpose(0, 1)
return self._viterbi_decode(emissions, mask)
def _validate(
self,
emissions: torch.Tensor,
tags: Optional[torch.LongTensor] = None,
mask: Optional[torch.ByteTensor] = None) -> None:
if emissions.dim() != 3:
raise LayerError(f'Emissions must have dimension of 3, got {emissions.dim()}')
if emissions.size(2) != self.num_tags:
raise LayerError(
f'Expected last dimension of emissions is {self.num_tags}, '
f'got {emissions.size(2)}')
if tags is not None:
if emissions.shape[:2] != tags.shape:
raise LayerError(
'The first two dimensions of emissions and tags must match, '
f'got {tuple(emissions.shape[:2])} and {tuple(tags.shape)}')
if mask is not None:
if emissions.shape[:2] != mask.shape:
raise LayerError(
'The first two dimensions of emissions and mask must match, '
f'got {tuple(emissions.shape[:2])} and {tuple(mask.shape)}')
no_empty_seq = not self.batch_first and mask[0].all()
no_empty_seq_bf = self.batch_first and mask[:, 0].all()
if not no_empty_seq and not no_empty_seq_bf:
raise LayerError('mask of the first timestep must all be on')
def _compute_score(
self, emissions: torch.Tensor, tags: torch.LongTensor,
mask: torch.ByteTensor) -> torch.Tensor:
# emissions: (seq_length, batch_size, num_tags)
# tags: (seq_length, batch_size)
# mask: (seq_length, batch_size)
assert emissions.dim() == 3 and tags.dim() == 2
assert emissions.shape[:2] == tags.shape
assert emissions.size(2) == self.num_tags
assert mask.shape == tags.shape
assert mask[0].all()
seq_length, batch_size = tags.shape
mask = mask.type_as(emissions)
# Start transition score and first emission
# shape: (batch_size,)
score = self.start_transitions[tags[0]]
score += emissions[0, torch.arange(batch_size), tags[0]]
for i in range(1, seq_length):
# Transition score to next tag, only added if next timestep is valid (mask == 1)
# shape: (batch_size,)
score += self.transitions[tags[i - 1], tags[i]] * mask[i]
# Emission score for next tag, only added if next timestep is valid (mask == 1)
# shape: (batch_size,)
score += emissions[i, torch.arange(batch_size), tags[i]] * mask[i]
# End transition score
# shape: (batch_size,)
seq_ends = mask.long().sum(dim=0) - 1
# shape: (batch_size,)
last_tags = tags[seq_ends, torch.arange(batch_size)]
# shape: (batch_size,)
score += self.end_transitions[last_tags]
return score
def _compute_normalizer(
self, emissions: torch.Tensor, mask: torch.ByteTensor) -> torch.Tensor:
# emissions: (seq_length, batch_size, num_tags)
# mask: (seq_length, batch_size)
assert emissions.dim() == 3 and mask.dim() == 2
assert emissions.shape[:2] == mask.shape
assert emissions.size(2) == self.num_tags
assert mask[0].all()
seq_length = emissions.size(0)
# Start transition score and first emission; score has size of
# (batch_size, num_tags) where for each batch, the j-th column stores
# the score that the first timestep has tag j
# shape: (batch_size, num_tags)
score = self.start_transitions + emissions[0]
for i in range(1, seq_length):
# Broadcast score for every possible next tag
# shape: (batch_size, num_tags, 1)
broadcast_score = score.unsqueeze(2)
# Broadcast emission score for every possible current tag
# shape: (batch_size, 1, num_tags)
broadcast_emissions = emissions[i].unsqueeze(1)
# Compute the score tensor of size (batch_size, num_tags, num_tags) where
# for each sample, entry at row i and column j stores the sum of scores of all
# possible tag sequences so far that end with transitioning from tag i to tag j
# and emitting
# shape: (batch_size, num_tags, num_tags)
next_score = broadcast_score + self.transitions + broadcast_emissions
# Sum over all possible current tags, but we're in score space, so a sum
# becomes a log-sum-exp: for each sample, entry i stores the sum of scores of
# all possible tag sequences so far, that end in tag i
# shape: (batch_size, num_tags)
next_score = torch.logsumexp(next_score, dim=1)
# Set score to the next score if this timestep is valid (mask == 1)
# shape: (batch_size, num_tags)
score = torch.where(mask[i].unsqueeze(1), next_score, score)
# End transition score
# shape: (batch_size, num_tags)
score += self.end_transitions
# Sum (log-sum-exp) over all possible tags
# shape: (batch_size,)
return torch.logsumexp(score, dim=1)
def _viterbi_decode(self, emissions: torch.FloatTensor,
mask: torch.ByteTensor) -> List[List[int]]:
# emissions: (seq_length, batch_size, num_tags)
# mask: (seq_length, batch_size)
assert emissions.dim() == 3 and mask.dim() == 2
assert emissions.shape[:2] == mask.shape
assert emissions.size(2) == self.num_tags
assert mask[0].all()
seq_length, batch_size = mask.shape
# Start transition and first emission
# shape: (batch_size, num_tags)
score = self.start_transitions + emissions[0]
history = []
# Keep the scores to later return if the user wants them based on
# the score reduction, which will have shape:
# (batch_size, seq_len, num_tags)
if self.score_reduction == 'tags':
tag_scores = []
else:
tag_scores = None
# score is a tensor of size (batch_size, num_tags) where for every batch,
# value at column j stores the score of the best tag sequence so far that ends
# with tag j
# history saves where the best tags candidate transitioned from; this is used
# when we trace back the best tag sequence
# Viterbi algorithm recursive case: we compute the score of the best tag sequence
# for every possible next tag
for i in range(1, seq_length):
# Broadcast viterbi score for every possible next tag
# shape: (batch_size, num_tags, 1)
broadcast_score = score.unsqueeze(2)
# Broadcast emission score for every possible current tag
# shape: (batch_size, 1, num_tags)
broadcast_emission = emissions[i].unsqueeze(1)
# Compute the score tensor of size (batch_size, num_tags, num_tags) where
# for each sample, entry at row i and column j stores the score of the best
# tag sequence so far that ends with transitioning from tag i to tag j and emitting
# shape: (batch_size, num_tags, num_tags)
next_score = broadcast_score + self.transitions + broadcast_emission
# Find the maximum score over all possible current tag
# shape: (batch_size, num_tags)
next_score, indices = next_score.max(dim=1)
# Set score to the next score if this timestep is valid (mask == 1)
# and save the index that produces the next score
# shape: (batch_size, num_tags)
score = torch.where(mask[i].unsqueeze(1), next_score, score)
# Add scores to cat them later based on the score reduction
# shape: (batch_size, num_tags)
if tag_scores is not None:
tag_scores.append(score.detach().unsqueeze(1))
history.append(indices)
# End transition score
# shape: (batch_size, num_tags)
score += self.end_transitions
# Add the final transition score to our returned scores
# shape: (batch_size, num_tags)
if tag_scores is not None:
tag_scores.append(score.detach().unsqueeze(1))
# Now, compute the best path for each sample
# shape: (batch_size,)
seq_ends = mask.long().sum(dim=0) - 1
best_tags_list = []
for idx in range(batch_size):
# Find the tag which maximizes the score at the last timestep; this is our best tag
# for the last timestep
_, best_last_tag = score[idx].max(dim=0)
best_tags = [best_last_tag.item()]
# We trace back where the best last tag comes from, append that to our best tag
# sequence, and trace it back again, and so on
for hist in reversed(history[:seq_ends[idx]]):
best_last_tag = hist[idx][best_tags[-1]]
best_tags.append(best_last_tag.item())
# Reverse the order because we start from the last timestep
best_tags.reverse()
best_tags_list.append(best_tags)
# If the user wants scores, return them in the desired format
if self.score_reduction == 'skip':
return best_tags_list
else:
if self.score_reduction == 'tags':
# shape: (batch_size, seq_length, num_tags)
score = torch.cat(tag_scores, 1)
elif self.score_reduction != 'none':
score = score.max(dim=1)[0]
score = {'sum': score.sum(),
'max': score.max(),
'min': score.min(),
'mean': score.mean()}[self.score_reduction]
score = score / batch_size
return best_tags_list, score | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/layer/crf.py | crf.py |
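# A minimal usage sketch of the class above (tensor values are illustrative
# only):
#
#   crf = CRF(num_tags=5, batch_first=True)
#   emissions = torch.randn(2, 3, 5)            # (batch, seq length, tags)
#   tags = torch.tensor([[0, 1, 2], [3, 4, 0]])
#   loss = -crf(emissions, tags)                # negative log likelihood
#   best_paths = crf.decode(emissions)          # per batch best tag sequences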
__author__ = 'Paul Landes'
from typing import Any, Tuple
from dataclasses import dataclass, field
import logging
import sys
import torch
from torch import nn
from zensols.deeplearn import (
ActivationNetworkSettings,
DropoutNetworkSettings,
BatchNormNetworkSettings,
)
from zensols.deeplearn.model import BaseNetworkModule
from . import LayerError
logger = logging.getLogger(__name__)
@dataclass
class DeepLinearNetworkSettings(ActivationNetworkSettings,
DropoutNetworkSettings,
BatchNormNetworkSettings):
"""Settings for a deep fully connected network using :class:`.DeepLinear`.
"""
## TODO: centralize on either in_features or input_size:
# embedding_output_size, RecurrentCRFNetworkSettings.input_size
in_features: int = field()
"""The number of features to the first layer."""
out_features: int = field()
"""The number of features as output from the last layer."""
middle_features: Tuple[Any] = field()
"""The number of features in the middle layers; if ``proportions`` is
``True``, then each number is how much to grow or shrink as a percentage of
the last layer, otherwise, it's the number of features.
"""
proportions: bool = field()
"""Whether or not to interpret ``middle_features`` as a proportion of the
previous layer or use directly as the size of the middle layer.
"""
repeats: int = field()
"""The number of repeats of the :obj:`middle_features` configuration."""
def get_module_class_name(self) -> str:
return __name__ + '.DeepLinear'
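# A sketch of how the settings above translate to layer sizes (the numbers
# are illustrative only):
#
#   in_features=100, middle_features=(0.5,), proportions=True, repeats=2,
#   out_features=10  ->  linear layers 100 -> 50 -> 25 -> 10
#
#   in_features=100, middle_features=(32, 16), proportions=False, repeats=1,
#   out_features=10  ->  linear layers 100 -> 32 -> 16 -> 10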
class DeepLinear(BaseNetworkModule):
"""A layer that has contains one more nested layers, including batch
normalization and activation. The input and output layer shapes are given
and an optional 0 or more middle layers are given as percent changes in
size or exact numbers.
If the network settings are configured to have batch normalization, batch
normalization layers are added after each linear layer.
The drop out and activation function (if any) are applied in between each
layer allowing other drop outs and activation functions to be applied
before and after. Note that the activation is implemented as a function,
and not a layer.
For example, if batch normalization and an activation function is
configured and two layers are configured, the network is configured as:
1. linear
2. batch normalization
3. activation
4. dropout
5. linear
6. batch normalization
7. activation
8. dropout
The module also provides the output features of each layer with
:py:meth:`n_features_after_layer` and the ability to forward through only the
first given set of layers with :meth:`forward_n_layers`.
"""
MODULE_NAME = 'linear'
def __init__(self, net_settings: DeepLinearNetworkSettings,
sub_logger: logging.Logger = None):
"""Initialize the deep linear layer.
:param net_settings: the deep linear layer configuration
:param sub_logger: the logger to use for the forward process in this
layer
"""
super().__init__(net_settings, sub_logger)
ns = net_settings
last_feat = ns.in_features
lin_layers = []
bnorm_layers = []
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'in: {ns.in_features}, ' +
f'middle: {ns.middle_features}, ' +
f'out: {ns.out_features}')
for mf in ns.middle_features:
for i in range(ns.repeats):
if ns.proportions:
next_feat = int(last_feat * mf)
else:
next_feat = int(mf)
self._add_layer(last_feat, next_feat, ns.dropout,
lin_layers, bnorm_layers)
last_feat = next_feat
if ns.out_features is not None:
self._add_layer(last_feat, ns.out_features, ns.dropout,
lin_layers, bnorm_layers)
self.lin_layers = nn.Sequential(*lin_layers)
if len(bnorm_layers) > 0:
self.bnorm_layers = nn.Sequential(*bnorm_layers)
else:
self.bnorm_layers = None
def deallocate(self):
super().deallocate()
if hasattr(self, 'lin_layers'):
del self.lin_layers
def _add_layer(self, in_features: int, out_features: int, dropout: float,
lin_layers: list, bnorm_layers):
ns = self.net_settings
n_layer = len(lin_layers)
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'add {n_layer}: in={in_features} out={out_features}')
lin_layer = nn.Linear(in_features, out_features)
lin_layers.append(lin_layer)
if ns.batch_norm_d is not None:
if out_features is None:
raise LayerError('Bad out features')
if ns.batch_norm_features is None:
bn_features = out_features
else:
bn_features = ns.batch_norm_features
layer = ns.create_batch_norm_layer(ns.batch_norm_d, bn_features)
if layer is None:
raise LayerError(f'Bad layer params: D={ns.batch_norm_d}, ' +
f'features={out_features}')
bnorm_layers.append(layer)
def get_linear_layers(self) -> Tuple[nn.Module]:
"""Return all linear layers.
"""
return tuple(self.lin_layers)
def get_batch_norm_layers(self) -> Tuple[nn.Module]:
"""Return all batch normalize layers.
"""
if self.bnorm_layers is not None:
return tuple(self.bnorm_layers)
@property
def out_features(self) -> int:
"""The number of features output from all layers of this module.
"""
n_layers = len(self.get_linear_layers())
return self.n_features_after_layer(n_layers - 1)
def n_features_after_layer(self, nth_layer) -> int:
"""Get the output features of the Nth (0 index based) layer.
:param nth_layer: the layer to use for getting the output features
"""
return self.get_linear_layers()[nth_layer].out_features
def forward_n_layers(self, x: torch.Tensor, n_layers: int,
full_forward: bool = False) -> torch.Tensor:
"""Forward throught the first 0 index based N layers.
:param n_layers: the number of layers to forward through (0-based
index)
:param full_forward: if ``True``, also return the full forward as a
second parameter
:return: the tensor output of all layers or a tuple of ``(N-th layer,
all layers)``
"""
return self._forward(x, n_layers, full_forward)
def _forward(self, x: torch.Tensor,
n_layers: int = sys.maxsize,
full_forward: bool = False) -> torch.Tensor:
lin_layers = self.get_linear_layers()
bnorm_layers = self.get_batch_norm_layers()
n_layers = min(len(lin_layers) - 1, n_layers)
x_ret = None
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'linear: num layers: {len(lin_layers)}')
self._shape_debug('input', x)
for i, layer in enumerate(lin_layers):
x = layer(x)
self._shape_debug('linear', x)
if bnorm_layers is not None:
blayer = bnorm_layers[i]
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'batch norm: {blayer}')
x = blayer(x)
x = self._forward_activation(x)
x = self._forward_dropout(x)
if i == n_layers:
x_ret = x
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'reached {i}th layer = n_layers')
self._shape_debug('x_ret', x_ret)
if not full_forward:
self._debug('breaking')
break
if full_forward:
return x_ret, x
else:
return x | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/layer/linear.py | linear.py |
from __future__ import annotations
"""This file contains a stash used to load an embedding layer.
"""
__author__ = 'Paul Landes'
from typing import Tuple, List, Any, Dict, Set, Iterable, Type
from dataclasses import dataclass, InitVar, field
from abc import ABCMeta
import sys
import logging
import collections
from functools import reduce
import itertools as it
from itertools import chain
from pathlib import Path
from zensols.util import time
from zensols.config import Writeback
from zensols.persist import (
chunks,
Deallocatable,
persisted,
PersistedWork,
Primeable,
Stash,
)
from zensols.dataset import (
SplitKeyContainer,
SplitStashContainer,
)
from zensols.deeplearn import TorchConfig, DeepLearnError
from zensols.deeplearn.vectorize import (
FeatureVectorizerManagerSet,
FeatureVectorizerManager,
)
from . import (
BatchDirectoryCompositeStash, BatchFeatureMapping, DataPointIDSet,
DataPoint, Batch, BatchMetadata, BatchFieldMetadata,
ManagerFeatureMapping, FieldFeatureMapping, TorchMultiProcessStash,
)
logger = logging.getLogger(__name__)
@dataclass
class BatchStash(TorchMultiProcessStash, SplitKeyContainer, Writeback,
Deallocatable, metaclass=ABCMeta):
"""A stash that vectorizes features in to easily consumable tensors for
training and testing. This stash produces instances of :class:`.Batch`,
which is a batch in the machine learning sense, and the first dimension of
what will become the tensor used in PyTorch. Each of these batches has a
logical one to many relationship to that batch's respective set of data
points, which is encapsulated in the :class:`.DataPoint` class.
The stash creates subprocesses to vectorize features in to tensors in
chunks of IDs (data point IDs) from the subordinate stash using
``DataPointIDSet`` instances.
To speed up experiments, all available features configured in
``vectorizer_manager_set`` are encoded on disk. However, only the
``decoded_attributes`` (see attribute below) are available to the model
regardless of what was created during encoding time.
The lifecycle of the data follows:
1. Feature data created by the client, which could be language features,
row data etc.
2. Vectorize the feature data using the vectorizers in
``vectorizer_manager_set``. This creates the feature contexts
(``FeatureContext``) specifically meant to be pickled.
3. Pickle the feature contexts when dumping to disk, which is invoked in
the child processes of this class.
4. At train time, load the feature contexts from disk.
5. Decode the feature contexts in to PyTorch tensors.
6. The model manager uses the ``to`` method to copy the CPU tensors to the
GPU (where GPUs are available).
:see _process: for details on the pickling of the batch instances
"""
_DICTABLE_WRITE_EXCLUDES = {'batch_feature_mappings'}
data_point_type: Type[DataPoint] = field()
"""A subclass type of :class:`.DataPoint` implemented for the specific
feature.
"""
batch_type: Type[Batch] = field()
"""The batch class to be instantiated when created batchs.
"""
split_stash_container: SplitStashContainer = field()
"""The source data stash that has both the data and data set keys for each
split (i.e. ``train`` vs ``test``).
"""
vectorizer_manager_set: FeatureVectorizerManagerSet = field()
"""Used to vectorize features in to tensors."""
batch_size: int = field()
"""The number of data points in each batch, except the last (unless the
data point cardinality divides the batch size).
"""
model_torch_config: TorchConfig = field()
"""The PyTorch configuration used to (optionally) copy CPU to GPU memory.
"""
data_point_id_sets_path: Path = field()
"""The path of where to store key data for the splits; note that the
container might store its key splits in some other location.
"""
decoded_attributes: InitVar[Set[str]] = field()
"""The attributes to decode; only these are avilable to the model
regardless of what was created during encoding time; if None, all are
available.
"""
batch_feature_mappings: BatchFeatureMapping = field(default=None)
"""The meta data used to encode and decode each feature in to tensors.
"""
batch_limit: int = field(default=sys.maxsize)
"""The max number of batches to process, which is useful for debugging."""
def __post_init__(self, decoded_attributes):
super().__post_init__()
Deallocatable.__init__(self)
# TODO: this class conflates key split and delegate stash functionality
# in the `split_stash_container`. An instance of this type serves the
# purpose, but it need not be. Instead it just needs to be both a
# SplitKeyContainer and a Stash. This probably should be split out in
# to two different fields.
cont = self.split_stash_container
if not isinstance(cont, SplitStashContainer) \
and (not isinstance(cont, SplitKeyContainer) or
not isinstance(cont, Stash)):
raise DeepLearnError('Expecting SplitStashContainer but got ' +
f'{self.split_stash_container.__class__}')
self._batch_data_point_sets = PersistedWork(
self.data_point_id_sets_path, self, mkdir=True)
self.priming = False
self.decoded_attributes = decoded_attributes
self._update_comp_stash_attribs()
@property
def decoded_attributes(self) -> Set[str]:
"""The attributes to decode. Only these are avilable to the model regardless
of what was created during encoding time; if None, all are available
"""
return self._decoded_attributes
@decoded_attributes.setter
def decoded_attributes(self, attribs: Set[str]):
"""The attributes to decode. Only these are avilable to the model regardless
of what was created during encoding time; if None, all are available
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'setting decoded attributes: {attribs}')
self._decoded_attributes = attribs
if isinstance(self.delegate, BatchDirectoryCompositeStash):
self.delegate.load_keys = attribs
@property
@persisted('_batch_metadata')
def batch_metadata(self) -> BatchMetadata:
mapping: BatchFeatureMapping
if self.batch_feature_mappings is not None:
mapping = self.batch_feature_mappings
else:
batch: Batch = self.batch_type(None, None, None, None)
batch.batch_stash = self
mapping = batch._get_batch_feature_mappings()
batch.deallocate()
vec_mng_set: FeatureVectorizerManagerSet = self.vectorizer_manager_set
attrib_keeps = self.decoded_attributes
vec_mng_names = set(vec_mng_set.keys())
by_attrib = {}
mmng: ManagerFeatureMapping
for mmng in mapping.manager_mappings:
vec_mng_name: str = mmng.vectorizer_manager_name
if vec_mng_name in vec_mng_names:
vec_mng: FeatureVectorizerManager = vec_mng_set[vec_mng_name]
field: FieldFeatureMapping
for field in mmng.fields:
if field.attr in attrib_keeps:
vec = vec_mng[field.feature_id]
by_attrib[field.attr] = BatchFieldMetadata(field, vec)
return BatchMetadata(self.data_point_type, self.batch_type,
mapping, by_attrib)
def _update_comp_stash_attribs(self):
"""Update the composite stash grouping if we're using one and if this class is
already configured.
"""
if isinstance(self.delegate, BatchDirectoryCompositeStash):
meta: BatchMetadata = self.batch_metadata
meta_attribs: Set[str] = set(
map(lambda f: f.attr, meta.mapping.get_attributes()))
groups: Tuple[Set[str]] = self.delegate.groups
gattribs = reduce(lambda x, y: x | y, groups)
to_remove = gattribs - meta_attribs
new_groups = []
if len(to_remove) > 0:
group: Set[str]
for group in groups:
ng: Set[str] = meta_attribs & group
if len(ng) > 0:
new_groups.append(ng)
self.delegate.groups = tuple(new_groups)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'meta attribs: {meta_attribs}, groups: {groups}')
@property
@persisted('_batch_data_point_sets')
def batch_data_point_sets(self) -> List[DataPointIDSet]:
"""Create the data point ID sets. Each instance returned will correlate
to a batch and each set of keys points to a feature :class:`.DataPoint`.
"""
psets = []
batch_id = 0
cont = self.split_stash_container
tc_seed = TorchConfig.get_random_seed_context()
if logger.isEnabledFor(logging.INFO):
logger.info(f'{self.name}: creating keys with ({type(cont)}) ' +
f'using batch size of {self.batch_size}')
for split, keys in cont.keys_by_split.items():
if logger.isEnabledFor(logging.INFO):
logger.info(f'keys for split {split}: {len(keys)}')
# keys are ordered and needed to be as such for consistency
# keys = sorted(keys, key=int)
cslice = it.islice(chunks(keys, self.batch_size), self.batch_limit)
for chunk in cslice:
chunk = tuple(chunk)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'chunked size: {len(chunk)}')
dp_set = DataPointIDSet(str(batch_id), chunk, split, tc_seed)
psets.append(dp_set)
batch_id += 1
logger.info(f'created {len(psets)} data point ID sets, each with up ' +
f'to {self.batch_size} keys (batch_limit={self.batch_limit})')
return psets
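# as an illustration: with batch_size=3 and split keys '0' through '7',
# the property above yields three DataPointIDSet instances holding data
# point IDs ('0', '1', '2'), ('3', '4', '5') and ('6', '7'), assuming
# ``chunks`` groups keys into successive batch sized chunks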
def _get_keys_by_split(self) -> Dict[str, Tuple[str]]:
by_batch = collections.defaultdict(lambda: [])
for dps in self.batch_data_point_sets:
by_batch[dps.split_name].append(dps.batch_id)
return {k: tuple(by_batch[k]) for k in by_batch.keys()}
def _create_data(self) -> List[DataPointIDSet]:
"""Data created for the sub proceesses are the first N data point ID
sets.
"""
return self.batch_data_point_sets
def populate_batch_feature_mapping(self, batch: Batch):
"""Add batch feature mappings to a batch instance."""
if self.batch_feature_mappings is not None:
batch.batch_feature_mappings = self.batch_feature_mappings
def create_batch(self, points: Tuple[DataPoint], split_name: str = None,
batch_id: str = None):
"""Create a new batch instance with data points, which happens when
primed.
"""
bcls: Type[Batch] = self.batch_type
batch: Batch = bcls(self, batch_id, split_name, points)
self.populate_batch_feature_mapping(batch)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'created batch: {batch}')
return batch
def _process(self, chunk: List[DataPointIDSet]) -> \
Iterable[Tuple[str, Any]]:
"""Create the batches by creating the set of data points for each
:class:`.DataPointIDSet` instance. When the subordinate stash dumps
the batch (specifically a subclass of :class:`.Batch`), the overridden
pickle logic is used to *detach* the batch by encoding all data in to
:class:`.FeatureContext` instances.
"""
if logger.isEnabledFor(logging.INFO):
logger.info(f'{self.name}: processing: {len(chunk)} data points')
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'chunk data points: {chunk}')
tseed = chunk[0].torch_seed_context
dpcls: Type[DataPoint] = self.data_point_type
cont = self.split_stash_container
if tseed is not None:
TorchConfig.set_random_seed(
tseed['seed'], tseed['disable_cudnn'], False)
dset: DataPointIDSet
for dset in chunk:
batch_id: str = dset.batch_id
points: Tuple[DataPoint] = tuple(
map(lambda dpid: dpcls(dpid, self, cont[dpid]),
dset.data_point_ids))
batch: Batch = self.create_batch(points, dset.split_name, batch_id)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'created batch: {batch}')
yield (batch_id, batch)
def _get_data_points_for_batch(self, batch: Any) -> Tuple[Any]:
"""Return the data points that were used to create ``batch``.
"""
dpcls = self.data_point_type
cont = self.split_stash_container
return tuple(map(lambda dpid: dpcls(dpid, self, cont[dpid]),
batch.data_point_ids))
def load(self, name: str):
with time('loaded batch {name} ({obj.split_name})'):
obj = super().load(name)
# add back the container of the batch to reconstitute the original
# features and use the CUDA for tensor device transforms
if obj is not None:
if not hasattr(obj, 'batch_stash'):
obj.batch_stash = self
if (not hasattr(obj, 'batch_feature_mappings') or
obj.batch_feature_mappings is None):
self.populate_batch_feature_mapping(obj)
return obj
def _prime_vectorizers(self):
vec_mng_set: FeatureVectorizerManagerSet = self.vectorizer_manager_set
vecs = map(lambda v: v.values(), vec_mng_set.values())
for vec in chain.from_iterable(vecs):
if isinstance(vec, Primeable):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'priming {vec}')
vec.prime()
def prime(self):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'priming {self.__class__}, is child: ' +
f'{self.is_child}, currently priming: {self.priming}')
if self.priming:
raise DeepLearnError('Already priming')
self.priming = True
try:
self.batch_data_point_sets
self._prime_vectorizers()
super().prime()
finally:
self.priming = False
def deallocate(self):
self._batch_data_point_sets.deallocate()
if id(self.delegate) != id(self.split_stash_container):
self._try_deallocate(self.delegate)
self._try_deallocate(self.split_stash_container)
self.vectorizer_manager_set.deallocate()
super().deallocate()
def _from_dictable(self, *args, **kwargs):
# avoid long Writable.write output
dct = super()._from_dictable(*args, **kwargs)
rms = tuple(filter(lambda k: k.startswith('_'), dct.keys()))
for k in rms:
del dct[k]
return dct
def clear(self):
"""Clear the batch, batch data point sets."""
logger.debug('clearing')
super().clear()
self._batch_data_point_sets.clear()
def clear_all(self):
"""Clear the batch, batch data point sets, and the source data
(:obj:`split_stash_container`).
"""
self.clear()
self.split_stash_container.clear() | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/batch/stash.py | stash.py |
from __future__ import annotations
"""This file contains a stash used to load an embedding layer. It creates
features in batches of matrices and persists only the matrices (sans
features) for efficient retrieval.
"""
__author__ = 'Paul Landes'
from typing import Tuple, List, Any, Dict, Union, Set
from dataclasses import dataclass, field
from abc import ABCMeta, abstractmethod
import sys
import logging
from io import TextIOBase
import torch
from torch import Tensor
import collections
from zensols.util import time
from zensols.config import Writable
from zensols.persist import (
persisted,
PersistedWork,
PersistableContainer,
Deallocatable,
)
from zensols.deeplearn import DeepLearnError, TorchConfig
from zensols.deeplearn.vectorize import (
FeatureContext,
NullFeatureContext,
FeatureVectorizer,
FeatureVectorizerManager,
FeatureVectorizerManagerSet,
CategoryEncodableFeatureVectorizer,
)
from . import (
BatchError,
FieldFeatureMapping,
ManagerFeatureMapping,
BatchFeatureMapping,
)
logger = logging.getLogger(__name__)
@dataclass
class DataPoint(Writable, metaclass=ABCMeta):
"""Abstract class that makes up a container class for features created from
sentences.
"""
id: int = field()
"""The ID of this data point, which maps back to the ``BatchStash`` instance's
subordinate stash.
"""
batch_stash: BatchStash = field(repr=False)
"""Ephemeral instance of the stash used during encoding only."""
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line(f'id: {self.id}', depth, writer)
def __getstate__(self):
raise DeepLearnError('Data points should not be pickled')
@dataclass
class Batch(PersistableContainer, Writable, metaclass=ABCMeta):
"""Contains a batch of data used in the first layer of a net. This class holds
the labels, but is otherwise useless without at least one embedding layer
matrix defined.
The user must subclass, add mapping meta data, and optionally (suggested)
add getters and/or properties for the specific data so the model can by
more *Pythonic* in the PyTorch :class:`torch.nn.Module`.
"""
STATES = {'n': 'nascent',
'e': 'encoded',
'd': 'decoded',
't': 'memory copied',
'k': 'deallocated'}
"""A human friendly mapping of the encoded states."""
_PERSITABLE_TRANSIENT_ATTRIBUTES = {'_data_points'}
batch_stash: BatchStash = field(repr=False)
"""Ephemeral instance of the stash used during encoding and decoding."""
id: int = field()
"""The ID of this batch instance, which is the sequence number of the batch
given during child processing of the chunked data point ID sets.
"""
split_name: str = field()
"""The name of the split for this batch (i.e. ``train`` vs ``test``)."""
data_points: Tuple[DataPoint] = field(repr=False)
"""The list of the data points given on creation for encoding, and
``None``'d out after encoding/pickling.
"""
def __post_init__(self):
super().__init__()
if hasattr(self, '_data_points') and self._data_points is not None:
self.data_point_ids = tuple(map(lambda d: d.id, self.data_points))
self._decoded_state = PersistedWork(
'_decoded_state', self, transient=True)
self.state = 'n'
@property
def data_points(self) -> Tuple[DataPoint]:
"""The data points used to create this batch. If the batch does not contain
the data points, which is the case when it has been decoded, then they
are retrieved from the :obj:`batch_stash` instance's feature stash.
"""
if not hasattr(self, '_data_points') or self._data_points is None:
stash: BatchStash = self.batch_stash
self._data_points = stash._get_data_points_for_batch(self)
return self._data_points
@data_points.setter
def data_points(self, data_points: Tuple[DataPoint]):
self._data_points = data_points
@abstractmethod
def _get_batch_feature_mappings(self) -> BatchFeatureMapping:
"""Return the feature mapping meta data for this batch and it's data points.
It is best to define a class level instance of the mapping and return
it here to avoid instantiating it for each batch.
:see: :class:`.BatchFeatureMapping`
"""
pass
def get_labels(self) -> torch.Tensor:
"""Return the label tensor for this batch.
"""
bmap: BatchFeatureMapping = self._get_batch_feature_mappings()
if bmap is None:
raise DeepLearnError('No batch feature mapping set')
label_attr = bmap.label_attribute_name
return self.attributes[label_attr]
@property
@persisted('_has_labels', transient=True)
def has_labels(self) -> bool:
"""Return whether or not this batch has labels. If it doesn't, it is a
batch used for prediction.
"""
return self.get_labels() is not None
def get_label_classes(self) -> List[str]:
"""Return the labels in this batch in their string form. This assumes the
label vectorizer is an instance of
:class:`~zensols.deeplearn.vectorize.CategoryEncodableFeatureVectorizer`.
:return: the reverse mapped, from nominal values, labels
"""
vec: FeatureVectorizer = self.get_label_feature_vectorizer()
if not isinstance(vec, CategoryEncodableFeatureVectorizer):
raise BatchError(
'Reverse label decoding is only supported with type of ' +
'CategoryEncodableFeatureVectorizer, but got: ' +
f'{vec} ({(type(vec))})')
return vec.get_classes(self.get_labels().cpu())
def get_label_feature_vectorizer(self) -> FeatureVectorizer:
"""Return the label vectorizer used in the batch. This assumes there's only
one vectorizer found in the vectorizer manager.
:return: the label vectorizer accessed via the batch stash's vectorizer set
"""
mapping: BatchFeatureMapping = self._get_batch_feature_mappings()
field_name: str = mapping.label_attribute_name
mng, f = mapping.get_field_map_by_attribute(field_name)
vec_name: str = mng.vectorizer_manager_name
vec_mng_set = self.batch_stash.vectorizer_manager_set
vec: FeatureVectorizerManager = vec_mng_set[vec_name]
return vec[f.feature_id]
def size(self) -> int:
"""Return the size of this batch, which is the number of data points.
"""
return len(self.data_point_ids)
@property
def attributes(self) -> Dict[str, torch.Tensor]:
"""Return the attribute batched tensors as a dictionary using the attribute
names as the keys.
"""
return self._get_decoded_state()
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_data_points: bool = False):
self._write_line(self.__class__.__name__, depth, writer)
self._write_line(f'size: {self.size()}', depth + 1, writer)
for k, v in self.attributes.items():
shape = None if v is None else v.shape
self._write_line(f'{k}: {shape}', depth + 2, writer)
if include_data_points:
self._write_line('data points:', depth + 1, writer)
for dp in self.get_data_points():
dp.write(depth + 2, writer)
@property
def _feature_contexts(self) -> \
Dict[str, Dict[str, Union[FeatureContext, Tuple[FeatureContext]]]]:
has_ctx = hasattr(self, '_feature_context_inst')
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'has feature contexts: {has_ctx}')
if has_ctx:
if self._feature_context_inst is None:
raise BatchError('Bad state transition, null contexts')
else:
with time(f'encoded batch {self.id}'):
self._feature_context_inst = self._encode()
if logger.isEnabledFor(logging.INFO):
logger.info(f'access context: (state={self.state}), num keys=' +
f'{len(self._feature_context_inst.keys())}')
return self._feature_context_inst
@_feature_contexts.setter
def _feature_contexts(self,
contexts: Dict[str, Dict[
str, Union[FeatureContext,
Tuple[FeatureContext]]]]):
if logger.isEnabledFor(logging.DEBUG):
obj = 'None' if contexts is None else contexts.keys()
logger.debug(f'setting context: {obj}')
self._feature_context_inst = contexts
def __getstate__(self):
if logger.isEnabledFor(logging.INFO):
logger.info(f'create batch state {self.id} (state={self.state})')
assert self.state == 'n'
if not hasattr(self, '_feature_context_inst'):
self._feature_contexts
self.state = 'e'
state = super().__getstate__()
state.pop('batch_stash', None)
state.pop('data_points', None)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'state keys: {state.keys()}')
return state
def __setstate__(self, state):
super().__setstate__(state)
if logger.isEnabledFor(logging.INFO):
logger.info(f'unpickling batch: {self.id}')
@persisted('_decoded_state')
def _get_decoded_state(self):
"""Decode the pickeled attriubtes after loaded by containing ``BatchStash`` and
remove the context information to save memory.
"""
assert self.state == 'e'
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'decoding ctxs: {self._feature_context_inst.keys()}')
assert self._feature_context_inst is not None
with time(f'decoded batch {self.id}'):
attribs = self._decode(self._feature_contexts)
self._feature_contexts = None
assert self._feature_context_inst is None
self.state = 'd'
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'return decoded attributes: {attribs.keys()}')
return attribs
@property
def torch_config(self) -> TorchConfig:
"""The torch config used to copy from CPU to GPU memory."""
return self.batch_stash.model_torch_config
def _clone(self) -> Batch:
def to(arr: Tensor) -> Tensor:
if arr is not None:
arr = torch_config.to(arr)
return arr
torch_config = self.torch_config
attribs = self._get_decoded_state()
attribs = {k: to(attribs[k]) for k in attribs.keys()}
inst = self.__class__(self.batch_stash, self.id, self.split_name, None)
inst.data_point_ids = self.data_point_ids
inst._decoded_state.set(attribs)
inst.state = 't'
return inst
def to(self) -> Batch:
"""Clone this instance and copy data to the CUDA device configured in the batch
stash.
:return: a clone of this instance with all attribute tensors copied
to the given torch configuration device
"""
if self.state == 't':
inst = self
else:
inst = self._clone()
return inst
def deallocate(self):
with time('deallocated attribute', logging.DEBUG):
if self.state == 'd' or self.state == 't':
attrs = self.attributes
for arr in tuple(attrs.values()):
del arr
attrs.clear()
del attrs
self._decoded_state.deallocate()
if hasattr(self, 'batch_stash'):
del self.batch_stash
if hasattr(self, 'data_point_ids'):
del self.data_point_ids
if hasattr(self, '_data_points'):
Deallocatable._try_deallocate(self._data_points)
del self._data_points
with time('deallocated feature context', logging.DEBUG):
if hasattr(self, '_feature_context_inst') and \
self._feature_context_inst is not None:
for ctx in self._feature_context_inst.values():
self._try_deallocate(ctx)
self._feature_context_inst.clear()
del self._feature_context_inst
self.state = 'k'
super().deallocate()
logger.debug(f'deallocated batch: {self.id}')
def _encode_field(self, vec: FeatureVectorizer, fm: FieldFeatureMapping,
vals: List[Any]) -> FeatureContext:
"""Encode a set of features in to feature contexts:
:param vec: the feature vectorizer to use to create the context
:param fm: the field metadata for the feature values
:param vals: a list of feature input values used to create the context
:see: :class:`.BatchFeatureMapping`
"""
if fm.is_agg:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'encoding aggregate with {vec}')
ctx = vec.encode(vals)
else:
ctx = tuple(map(lambda v: vec.encode(v), vals))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'encoded: {ctx.__class__}')
return ctx
def _decode_context(self, vec: FeatureVectorizer, ctx: FeatureContext,
fm: FieldFeatureMapping) -> torch.Tensor:
"""Decode ``ctx`` in to a tensor using vectorizer ``vec``.
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'decode with {fm}')
if isinstance(ctx, tuple):
arrs = tuple(map(vec.decode, ctx))
try:
arr = torch.cat(arrs)
except Exception as e:
raise BatchError(
'Batch has inconsistent data point length, e.g. in ' +
f'embedding or using combine_sentences for NLP for: {vec}') \
from e
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'decoded shape for {fm}: {arr.shape}')
else:
arr = vec.decode(ctx)
return arr
def _is_missing(self, aval: Union[Any, Tuple[Any]]):
return (aval is None) or \
(isinstance(aval, (tuple, list)) and all(v is None for v in aval))
def _encode(self) -> Dict[str, Dict[str, Union[FeatureContext,
Tuple[FeatureContext]]]]:
"""Called to create all matrices/arrays needed for the layer. After this is
called, features in this instance are removed so pickling is fast.
The returned data structure has the following form:
- feature vector manager name
- attribute name -> feature context
where feature context can be either a single context or a tuple of
context. If it is a tuple, then each is decoded in turn and the
resulting matrices will be concatenated together at decode time with
``_decode_context``. Note that the feature id is an attribute of the
feature context.
:see: meth:`_decode_context`
"""
vms = self.batch_stash.vectorizer_manager_set
attrib_to_ctx = collections.OrderedDict()
bmap: BatchFeatureMapping = self._get_batch_feature_mappings()
label_attr: str = bmap.label_attribute_name
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f"encoding with label: '{label_attr}' using {vms}")
mmap: ManagerFeatureMapping
for mmap in bmap.manager_mappings:
vm: FeatureVectorizerManager = vms[mmap.vectorizer_manager_name]
fm: FieldFeatureMapping
for fm in mmap.fields:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'field: {fm}')
if fm.feature_id in attrib_to_ctx:
raise BatchError(f'Duplicate feature: {fm.feature_id}')
vec: FeatureVectorizer = vm[fm.feature_id]
avals: List[Any] = []
ctx: FeatureContext = None
dp: DataPoint
for dp in self.data_points:
aval = getattr(dp, fm.attribute_accessor)
avals.append(aval)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'attr: {fm.attr} => {aval.__class__}')
try:
is_label = fm.is_label or (label_attr == fm.attr)
if is_label and self._is_missing(aval):
# assume prediction
if logger.isEnabledFor(logging.DEBUG):
logger.debug('skipping missing label')
ctx = NullFeatureContext(fm.feature_id)
else:
ctx = self._encode_field(vec, fm, avals)
except Exception as e:
raise BatchError(
f'Could not vectorize {fm} using {vec}: {e}') from e
if ctx is not None:
attrib_to_ctx[fm.attr] = ctx
return attrib_to_ctx
def _decode(self, ctx: Dict[str, Dict[str, Union[FeatureContext,
Tuple[FeatureContext]]]]):
"""Called to create all matrices/arrays needed for the layer. After this is
called, features in this instance are removed for so pickling is fast.
:param ctx: the context to decode
"""
attribs = collections.OrderedDict()
attrib_keeps: Set[str] = self.batch_stash.decoded_attributes
if attrib_keeps is not None:
attrib_keeps = set(attrib_keeps)
bmap: BatchFeatureMapping = self._get_batch_feature_mappings()
label_attr: str = bmap.label_attribute_name
vms: FeatureVectorizerManagerSet = \
self.batch_stash.vectorizer_manager_set
mmap: ManagerFeatureMapping
for attrib, ctx in ctx.items():
mng_fmap = bmap.get_field_map_by_attribute(attrib)
if mng_fmap is None:
raise BatchError(
f'Missing mapped attribute \'{attrib}\' on decode')
mng, fmap = mng_fmap
mmap_name = mng.vectorizer_manager_name
feature_id = fmap.feature_id
vm: FeatureVectorizerManager = vms[mmap_name]
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'vec manager: {mmap_name} -> {vm}')
# keep only the desired feature subset for speed up
if attrib_keeps is not None and attrib not in attrib_keeps:
continue
if attrib_keeps is not None:
attrib_keeps.remove(attrib)
if isinstance(ctx, tuple):
feature_id = ctx[0].feature_id
elif ctx is None:
feature_id = None
else:
feature_id = ctx.feature_id
vec: FeatureVectorizer = vm.get(feature_id)
if vec is None:
raise BatchError(
f'No such vectorizer for feature ID: {feature_id}')
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'decoding {ctx} with {vec}')
arr: Tensor = self._decode_context(vec, ctx, fmap)
if arr is None and fmap.attr != label_attr:
raise BatchError(
f'No decoded value for {fmap}, which is not ' +
f"the label attribute '{label_attr}'")
if logger.isEnabledFor(logging.DEBUG):
shape = '<none>' if arr is None else arr.shape
logger.debug(f'decoded: {attrib} -> {shape}')
if attrib in attribs:
raise BatchError(
f'Attribute collision on decode: {attrib}')
attribs[attrib] = arr
if attrib_keeps is not None and len(attrib_keeps) > 0:
            raise BatchError(f'Unknown attributes: {attrib_keeps}')
return attribs
@property
def state_name(self):
return self.STATES.get(self.state, 'unknown: ' + self.state)
def __len__(self):
return len(self.data_point_ids)
def keys(self) -> Tuple[str]:
return tuple(self.attributes.keys())
def __getitem__(self, key: str) -> torch.Tensor:
return self.attributes[key]
def __str__(self):
        # the data_points property overrides the @dataclass field, so we must
        # eclipse it with our own to stringify
return (f'{self.__class__.__name__}: id={self.id}, ' +
f'split={self.split_name}, size={self.size()}, ' +
f'state={self.state}')
def __repr__(self):
return self.__str__()
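# Example (illustrative sketch, not part of the library): once decoded, a
# batch exposes its vectorized attributes like a mapping. The stash key and
# attribute names below are hypothetical.
#
#   batch: Batch = batch_stash['0']
#   print(batch.keys())              # e.g. ('label', 'flower_dims')
#   labels: torch.Tensor = batch['label']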
@dataclass
class DefaultBatch(Batch):
"""A concrete implementation that uses a :obj:`batch_feature_mapping` usually
configured with :class:`.ConfigBatchFeatureMapping` and provided by
:class:`.BatchStash`.
"""
_PERSITABLE_REMOVE_ATTRIBUTES = {'batch_feature_mappings'}
batch_feature_mappings: BatchFeatureMapping = field(default=None)
"""The mappings used by this instance."""
def _get_batch_feature_mappings(self) -> BatchFeatureMapping:
assert self.batch_feature_mappings is not None
return self.batch_feature_mappings
def _clone(self) -> Batch:
copy = super()._clone()
copy.batch_feature_mappings = self.batch_feature_mappings
return copy | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/batch/domain.py | domain.py |
__author__ = 'Paul Landes'
from typing import Tuple, List, Union, Iterable, Set
from dataclasses import dataclass, field
import sys
import logging
from itertools import chain
from io import TextIOBase
from zensols.config import Dictable
from zensols.deeplearn.vectorize import FeatureVectorizerManager
logger = logging.getLogger(__name__)
@dataclass
class FieldFeatureMapping(Dictable):
"""Meta data describing an attribute of the data point.
"""
attr: str = field()
"""The (human readable/used) name for the mapping."""
feature_id: str = field()
"""Indicates which vectorizer to use."""
is_agg: bool = field(default=False)
"""If ``True``, tuplize across all data points and encode as one tuple of
data to create the batched tensor on decode; otherwise, each data point
feature is encoded and concatenated on decode.
"""
attr_access: str = field(default=None)
"""The attribute on the source :class:`DataPoint` instance (see
:obj:`~attribute_accessor`).
"""
is_label: bool = field(default=False)
"""Whether or not this field is a label. The is ``True`` in cases where there
is more than one label. In these cases, usually which label to use changes
based on the model (i.e. word embedding vs. BERT word piece token IDs).
This is used in :class:`.Batch` to skip label vectorization while encoding
of prediction based batches.
"""
@property
def attribute_accessor(self):
"""Return the attribute name on the :class:`DataPoint` instance. This uses
:obj:`~attr_access` if it is not ``None``, otherwise, use
:obj:`~attr`.
"""
return self.attr if self.attr_access is None else self.attr_access
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line(str(self), depth, writer)
@dataclass
class ManagerFeatureMapping(Dictable):
"""Meta data for a vectorizer manager with fields describing attributes to be
    vectorized from features into feature contexts.
"""
vectorizer_manager_name: str = field()
"""The configuration name that identifiees an instance of
``FeatureVectorizerManager``.
"""
fields: Tuple[FieldFeatureMapping] = field()
"""The fields of the data point to be vectorized."""
def remove_field(self, attr: str) -> bool:
"""Remove a field by attribute if it exists.
:param attr: the name of the field's attribute to remove
:return: ``True`` if the field was removed, ``False`` otherwise
"""
plen = len(self.fields)
self.fields = tuple(filter(lambda f: f.attr != attr, self.fields))
return plen != len(self.fields)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line(self.vectorizer_manager_name, depth, writer)
for f in self.fields:
f.write(depth + 1, writer)
@dataclass
class BatchFeatureMapping(Dictable):
"""The meta data used to encode and decode each feature in to tensors. It is
best to define a class level instance of this in the ``Batch`` class and
return it with ``_get_batch_feature_mappings``.
An example from the iris data set test::
MAPPINGS = BatchFeatureMapping(
'label',
[ManagerFeatureMapping(
'iris_vectorizer_manager',
(FieldFeatureMapping('label', 'ilabel', True),
FieldFeatureMapping('flower_dims', 'iseries')))])
"""
label_attribute_name: str = field(default='label')
"""The name of the attribute used for labels."""
manager_mappings: List[ManagerFeatureMapping] = field(default_factory=list)
"""The manager level attribute mapping meta data."""
def __post_init__(self):
attrs = tuple(map(lambda f: f.attr, self.get_attributes()))
attr_set = set(attrs)
if len(attrs) != len(attr_set):
raise ValueError(f'attribute names must be unique: {attrs}')
def get_attributes(self) -> Iterable[FieldFeatureMapping]:
return chain.from_iterable(
map(lambda m: m.fields, self.manager_mappings))
@property
def label_feature_id(self) -> Union[None, str]:
"""Return the feature id of the label. This is the vectorizer used to
transform the label data.
"""
mng, f = self.get_field_map_by_attribute(self.label_attribute_name)
if f is not None:
return f.feature_id
@property
def label_vectorizer_manager(self) -> \
Union[FeatureVectorizerManager, None]:
"""Return the feature id of the label. This is the vectorizer used to
transform the label data.
"""
mng, f = self.get_field_map_by_attribute(self.label_attribute_name)
if mng is not None:
return mng
def get_field_map_by_feature_id(self, feature_id: str) -> \
Union[None, Tuple[ManagerFeatureMapping, FieldFeatureMapping]]:
for mng in self.manager_mappings:
for f in mng.fields:
if feature_id == f.feature_id:
return mng, f
def get_field_map_by_attribute(self, attribute_name: str) -> \
Union[None, Tuple[ManagerFeatureMapping, FieldFeatureMapping]]:
for mng in self.manager_mappings:
for f in mng.fields:
if attribute_name == f.attr:
return mng, f
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line(f'label: {self.label_attribute_name}', depth, writer)
for m in self.manager_mappings:
m.write(depth + 1, writer)
@dataclass
class ConfigBatchFeatureMapping(BatchFeatureMapping):
"""A utility class that allows a easy configuration driven way of refining
:obj:`manager_mappings` by adding and deleting them both at the mapping and
field levels. These edits happen during the classes ``__init__``.
"""
batch_feature_mapping_adds: List[BatchFeatureMapping] = field(
default_factory=list, repr=False)
"""Mappings to add."""
field_remove: Set[str] = field(default_factory=set, repr=False)
"""Field removed from all batch mappings."""
field_keep: Set[str] = field(default=None, repr=False)
"""Only these field remain from all batch mappings."""
def __post_init__(self):
super().__post_init__()
self.manager_mappings.extend(
chain.from_iterable(map(lambda m: m.manager_mappings,
self.batch_feature_mapping_adds)))
        mng: ManagerFeatureMapping
for mng in self.manager_mappings:
keeps = set(map(lambda f: f.attr, mng.fields))
if self.field_keep is not None:
keeps = keeps & self.field_keep
keeps = keeps - self.field_remove
mng.fields = tuple(filter(lambda f: f.attr in keeps, mng.fields))
        if self.label_attribute_name is None:
            bmap: BatchFeatureMapping
            for bmap in self.batch_feature_mapping_adds:
                if bmap.label_attribute_name is not None:
                    self.label_attribute_name = bmap.label_attribute_name
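# Example (illustrative sketch): composing a configuration driven mapping. The
# ``MAPPINGS`` instance and field names are hypothetical and mirror the iris
# example in the :class:`BatchFeatureMapping` docstring above.
#
#   mapping = ConfigBatchFeatureMapping(
#       label_attribute_name='label',
#       batch_feature_mapping_adds=[MAPPINGS],
#       field_keep={'label', 'flower_dims'})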
__author__ = 'Paul Landes'
from typing import Dict, Type
from dataclasses import dataclass
from dataclasses import field as dc_field
import sys
from io import TextIOBase
from zensols.config import Dictable
from zensols.deeplearn import NetworkSettings
from zensols.deeplearn.vectorize import FeatureVectorizer
from . import DataPoint, Batch, BatchFeatureMapping, FieldFeatureMapping
@dataclass
class BatchFieldMetadata(Dictable):
"""Data that describes a field mapping in a batch object.
"""
field: FieldFeatureMapping = dc_field()
"""The field mapping."""
vectorizer: FeatureVectorizer = dc_field(repr=False)
"""The vectorizer used to map the field."""
@property
def shape(self):
return self.vectorizer.shape
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line(self.field.attr, depth, writer)
self._write_line('field:', depth + 1, writer)
self.field.write(depth + 2, writer)
self._write_line('vectorizer:', depth + 1, writer)
self.vectorizer.write(depth + 2, writer)
@dataclass
class BatchMetadata(Dictable):
"""Describes metadata about a :class:`.Batch` instance.
"""
data_point_class: Type[DataPoint] = dc_field()
"""The :class:`.DataPoint` class, which are created at encoding time."""
batch_class: Type[Batch] = dc_field()
"""The :class:`.Batch` class, which are created at encoding time."""
mapping: BatchFeatureMapping = dc_field()
"""The mapping used for encoding and decoding the batch."""
fields_by_attribute: Dict[str, BatchFieldMetadata] = dc_field(repr=False)
"""Mapping by field name to attribute."""
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line(f'data point: {self.data_point_class}', depth, writer)
self._write_line(f'batch: {self.batch_class}', depth, writer)
self._write_line('mapping:', depth, writer)
self.mapping.write(depth + 1, writer)
self._write_line('attributes:', depth, writer)
for attr, field in self.fields_by_attribute.items():
field.write(depth + 1, writer)
@dataclass
class MetadataNetworkSettings(NetworkSettings):
"""A network settings container that has metadata about batches it recieves for
its model.
"""
_PERSITABLE_TRANSIENT_ATTRIBUTES = {'batch_stash'}
batch_stash: 'BatchStash' = dc_field(repr=False)
"""The batch stash that created the batches and has the batch metdata.
"""
@property
def batch_metadata(self) -> BatchMetadata:
"""Return the batch metadata used by this model.
"""
return self.batch_stash.batch_metadata
def _from_dictable(self, *args, **kwargs):
dct = super()._from_dictable(*args, **kwargs)
del dct['batch_stash']
return dct | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/batch/meta.py | meta.py |
__author__ = 'Paul Landes'
from typing import Any, Tuple
from dataclasses import dataclass, field
from enum import Enum
import sys
import logging
from logging import Logger
import json
from pathlib import Path
from tqdm import tqdm
import numpy as np
import torch
from torch import nn
from torch.optim.lr_scheduler import LambdaLR
from zensols.deeplearn import ModelError
from zensols.deeplearn.result import EpochResult
class UpdateAction(Enum):
"""An action type to invoke on the :class:`.ModelExecutor` during training.
"""
ITERATE_EPOCH = 0
SET_EPOCH = 1
STOP = 2
@dataclass
class TrainStatus(object):
"""Indicates what to do in the next epoch of the training cycle.
"""
action: UpdateAction
epoch: int = field(default=None)
reason: str = field(default=None)
@dataclass
class TrainManager(object):
"""The class is used to assist in the training of the
:class:`.ModelExecutor`. It updates validation loss and helps with early
stopping decisions. It also watches for a file on the file system to
provide instructions on what to do in the next epoch.
"""
status_logger: Logger = field()
"""The logger to record status updates during training."""
progress_logger: Logger = field()
"""The logger to record progress updates during training. This is used only
when the progress bar is turned off (see
:meth:`.ModelFacade._configure_cli_logging`).
"""
update_path: Path = field()
"""See :obj:`.ModelExecutor.update_path`.
"""
max_consecutive_increased_count: int = field()
"""See :obj:`.Domain.max_consecutive_increased_count`.
"""
progress_bar_number_width: int = field(default=6)
"""The string width of the train/validation loss metrics in the progress
bar, which needs to be greater than 4.
"""
    def start(self, optimizer: torch.optim.Optimizer, scheduler: Any,
n_epochs: int, pbar: tqdm):
# clear any early stop state
if self.update_path is not None and self.update_path.is_file():
self.progress_logger.info(
f'cleaning update file: {self.update_path}')
self.update_path.unlink()
self.optimizer = optimizer
self.scheduler = scheduler
self.n_epochs = n_epochs
self.current_epoch = 0
self.consecutive_increased_count = 0
# set initial "min" to infinity
self.valid_loss_min = np.Inf
self.pbar = pbar
if self.progress_logger.isEnabledFor(logging.INFO):
self.progress_logger.info(f'watching update file {self.update_path}')
self.validation_loss_decreases = 0
def _get_optimizer_lr(self, optimizer: torch.optim.Optimizer) -> float:
"""Return the current optimizer learning rate, which can be modified by a
scheduler if one is configured.
"""
param_group = next(iter(optimizer.param_groups))
return float(param_group['lr'])
def _fixed_sci_format(self, v: str) -> str:
"""Format a number to a width resorting to scientific notation where necessary.
The returned string is left padded with space in cases where scientific
notation is too wide for ``v > 0``. The mantissa is cut off also for
``v > 0`` when the string version of the number is too wide.
"""
length: int = self.progress_bar_number_width
n: int = length
ln: int = None
pad: int = None
while n > 0:
i = len('%#.*g' % (n, v))
s = '%.*g' % (n + n - i, v)
ln = len(s)
pad = length - ln
if pad >= 0:
break
n -= 1
if pad > 0:
s = (' ' * pad) + s
return s
def update_loss(self, valid_epoch_result: EpochResult,
train_epoch_result: EpochResult,
ave_valid_loss: float) -> Tuple[float, bool]:
"""Update the training and validation loss.
:return: a tuple of the latest minimum validation loss and whether or
not the last validation loss has decreased
"""
logger = self.status_logger
progress_logger = self.progress_logger
optimizer = self.optimizer
valid_loss = valid_epoch_result.ave_loss
sfmt = self._fixed_sci_format
# adjust the learning rate if a scheduler is configured
if self.scheduler is not None:
# the LambdaLR scheduler creates warnings when it gets the
# validation loss
if isinstance(self.scheduler, LambdaLR):
self.scheduler.step()
else:
self.scheduler.step(ave_valid_loss)
if logger.isEnabledFor(logging.DEBUG):
logger.debug('epoch ave valid loss/results averaged valid_loss ' +
f'{ave_valid_loss}/{valid_loss}, ' +
f'losses: {len(valid_epoch_result.losses)}')
decreased = valid_loss < self.valid_loss_min
dec_str = '\\/' if decreased else '/\\'
if abs(ave_valid_loss - valid_loss) > 1e-10:
logger.warning('validation loss and result are not close: ' +
f'{ave_valid_loss} - {valid_loss} > 1e-10')
if train_epoch_result.contains_results:
train_loss = train_epoch_result.ave_loss
else:
train_loss = -1
msg = (f'tr:{sfmt(train_loss)}|' +
f'va min:{sfmt(self.valid_loss_min)}|' +
f'va:{sfmt(valid_loss)}')
if self.scheduler is not None:
lr = self._get_optimizer_lr(optimizer)
msg += f'|lr:{sfmt(lr)}'
msg += f' {dec_str}'
if self.pbar is not None:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(msg)
self.pbar.set_description(msg)
else:
if logger.isEnabledFor(logging.INFO):
logger.info(f'epoch {self.current_epoch}/' +
f'{self.n_epochs}: {msg}')
# save model if validation loss has decreased
if decreased:
if progress_logger.isEnabledFor(logging.DEBUG):
progress_logger.debug('validation loss decreased min/iter' +
f'({self.valid_loss_min:.6f}' +
f'/{valid_loss:.6f}); saving model')
self.valid_loss_min = valid_loss
self.consecutive_increased_count = 0
self.validation_loss_decreases += 1
else:
if progress_logger.isEnabledFor(logging.DEBUG):
progress_logger.debug('validation loss increased min/iter' +
f'({self.valid_loss_min:.6f}' +
f'/{valid_loss:.6f})')
self.consecutive_increased_count += 1
return self.valid_loss_min, decreased
def _read_status(self) -> TrainStatus:
"""Read the early stop/update file and return a value to update the current
epoch number (if any).
"""
update = TrainStatus(UpdateAction.ITERATE_EPOCH)
update_path = self.update_path
if update_path is not None:
if self.status_logger.isEnabledFor(logging.DEBUG):
self.status_logger.debug(f'update check at {update_path}')
if update_path.exists():
data = None
try:
with open(update_path) as f:
data = json.load(f)
if 'epoch' in data:
epoch = int(data['epoch'])
update.epoch = epoch
update.action = UpdateAction.SET_EPOCH
update.reason = (f'update from {update_path}: ' +
f'setting epoch to: {epoch}')
except Exception as e:
reason = f'bad format in {update_path}--assume exit: {e}'
update.action = UpdateAction.STOP
update.reason = reason
update_path.unlink()
return update
def _get_stop_reason(self) -> str:
reason = None
if self.current_epoch >= self.n_epochs:
reason = f'epoch threshold reached at {self.n_epochs}'
elif (self.consecutive_increased_count >
self.max_consecutive_increased_count):
reason = ('reached max consecutive increased count: ' +
f'{self.max_consecutive_increased_count}')
return reason
def get_status(self) -> TrainStatus:
"""Return the epoch to set in the training loop of the :class:`.ModelExecutor`.
"""
status = self._read_status()
if status.action == UpdateAction.STOP:
            # setting to the max value fails the executor's train outer loop
# causing a robust non-error exit
status.epoch = sys.maxsize
elif status.action == UpdateAction.SET_EPOCH:
self.current_epoch = status.epoch
if self.pbar is not None:
self.pbar.reset()
self.pbar.update(self.current_epoch)
elif status.action == UpdateAction.ITERATE_EPOCH:
self.current_epoch += 1
status.epoch = self.current_epoch
stop_reason = self._get_stop_reason()
if self.pbar is not None:
self.pbar.update()
if stop_reason is not None:
status.action = UpdateAction.STOP
status.reason = stop_reason
else:
            raise ModelError(f'Unknown status: {status}')
if status.reason and self.status_logger.isEnabledFor(logging.INFO):
self.status_logger.info(status.reason)
return status
def stop(self) -> bool:
"""Stops the execution of training the model. Currently this is done by
creating a file the executor monitors.
:return: ``True`` if the application is configured to early stop and
the signal has not already been given
"""
update_path = self.update_path
if update_path is not None and not update_path.is_file():
update_path.parent.mkdir(parents=True, exist_ok=True)
update_path.touch()
self.status_logger.info(f'created early stop file: {update_path}')
return True
return False | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/model/trainmng.py | trainmng.py |
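# Example (illustrative): the update file read by ``_read_status`` is plain
# JSON. Writing ``{"epoch": 3}`` to the configured ``update_path`` sets the
# training loop to epoch 3 on the next check, while an empty or malformed file
# (such as the one created by :meth:`TrainManager.stop`) triggers a graceful
# stop. The path below is hypothetical:
#
#   echo '{"epoch": 3}' > target/model/update.json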
__author__ = 'Paul Landes'
from typing import Dict, Any
from dataclasses import dataclass, field, InitVar
import logging
import collections
from pathlib import Path
import torch
from zensols.util import time
from zensols.persist import persisted, PersistedWork
from . import ModelExecutor
logger = logging.getLogger(__name__)
@dataclass
class WeightedModelExecutor(ModelExecutor):
"""A class that weighs labels non-uniformly. This class uses invert class
sampling counts to help the minority label.
"""
weighted_split_name: str = field(default='train')
"""The split name used to re-weight labels."""
weighted_split_path: InitVar[Path] = field(default=None)
"""The path to the cached weithed labels."""
use_weighted_criterion: bool = field(default=True)
"""If ``True``, use the class weights in the initializer of the criterion.
Setting this to ``False`` effectively disables this class.
"""
def __post_init__(self, weighted_split_path: Path):
super().__post_init__()
if weighted_split_path is None:
path = '_label_counts'
else:
file_name = f'weighted-labels-{self.weighted_split_name}.dat'
path = weighted_split_path / file_name
self._label_counts = PersistedWork(path, self)
def clear(self):
super().clear()
self._label_counts.clear()
@persisted('_label_counts')
def get_label_counts(self) -> Dict[int, int]:
stash = self.dataset_stash.splits[self.weighted_split_name]
label_counts = collections.defaultdict(lambda: 0)
batches = tuple(stash.values())
for batch in batches:
for label in batch.get_labels():
label_counts[label.item()] += 1
for batch in batches:
batch.deallocate()
return dict(label_counts)
@persisted('_class_weighs')
def get_class_weights(self) -> torch.Tensor:
"""Compute invert class sampling counts to return the weighted class.
"""
counts = self.get_label_counts().items()
counts = map(lambda x: x[1], sorted(counts, key=lambda x: x[0]))
counts = self.torch_config.from_iterable(counts)
return counts.mean() / counts
def get_label_statistics(self) -> Dict[str, Dict[str, Any]]:
"""Return a dictionary whose keys are the labels and values are dictionaries
containing statistics on that label.
"""
counts = self.get_label_counts()
weights = self.get_class_weights().cpu().numpy()
batch = next(iter(self.dataset_stash.values()))
vec = batch.batch_stash.get_label_feature_vectorizer(batch)
classes = vec.get_classes(range(weights.shape[0]))
return {c[0]: {'index': c[1],
'count': counts[c[1]],
'weight': weights[c[1]]}
for c in zip(classes, range(weights.shape[0]))}
    def _create_criterion(self) -> torch.nn.Module:
resolver = self.config_factory.class_resolver
criterion_class_name = self.model_settings.criterion_class_name
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'criterion: {criterion_class_name}')
criterion_class = resolver.find_class(criterion_class_name)
with time('weighted classes'):
class_weights = self.get_class_weights()
if logger.isEnabledFor(logging.INFO):
logger.info(f'using class weights: {class_weights}')
if self.use_weighted_criterion:
inst = criterion_class(weight=class_weights)
else:
inst = criterion_class()
return inst | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/model/wgtexecutor.py | wgtexecutor.py |
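# Worked example (illustrative) for ``get_class_weights``: with label counts of
# {0: 10, 1: 30, 2: 60} the counts tensor is [10, 30, 60], its mean is ~33.33,
# and the inverse weights become approximately [3.33, 1.11, 0.56], boosting the
# minority class when passed to the weighted criterion.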
__author__ = 'Paul Landes'
from typing import Tuple, Dict, Iterable, Any, Union
from dataclasses import dataclass, field
import logging
import sys
import re
from io import TextIOBase
from pathlib import Path
import yaml
import pandas as pd
from zensols.config import Writable
from zensols.deeplearn.model import ModelFacade
from zensols.deeplearn.result import (
ClassificationMetrics, PredictionsDataFrameFactory,
ModelResultError, ModelResultManager, ModelResultReporter,
)
logger = logging.getLogger(__name__)
@dataclass
class PerformanceMetricsDumper(Writable):
"""Formats performance metrics, which can be used in papers.
:see: :class:`.LatexPerformanceMetricsDumper`
"""
facade: ModelFacade = field()
"""The facade used to fetch previously written results."""
summary_columns: Tuple[str] = field(
default=tuple('mF1t mPt mRt MF1t MPt MRt'.split()))
"""The columns used in the summary report."""
by_label_columns: Tuple[str] = field(
default=tuple('mF1 mP mR MF1 MP MR acc count'.split()))
"""The columns used in the by-label report."""
name_replace: Tuple[str, str] = field(default=None)
"""If provided, a tuple of ``(regular expression, replacement)`` string
given to :func:`re.sub` in the name column of generated tables.
"""
sort_column: str = field(default='mF1')
"""The column to sort, with the exception of the majority label, which is
always first.
"""
majority_label_res_id: Union[str, bool] = field(default=True)
"""Indicates how to create (if any) the majority label performance metrics.
    If a string, use it as the result id (``res_id``) of a previous result set
    used to compute the majority label statistics to include in the summary. If
``True`` use the results from the last tested model. If ``None`` the
majority label is not added.
"""
precision: int = field(default=3)
"""The number of signification digits to format results."""
@staticmethod
def format_thousand(x: int, apply_k: bool = True,
add_comma: bool = True) -> str:
add_k = False
if x > 10000:
if apply_k:
x = round(x / 1000)
add_k = True
if add_comma:
x = f'{x:,}'
else:
x = str(x)
if add_k:
x += 'K'
return x
@staticmethod
def capitalize(name: str) -> str:
return ' '.join(map(lambda s: s.capitalize(),
re.split(r'[ _-]', name)))
@staticmethod
def _map_col(col: str) -> str:
desc = ModelResultReporter.METRIC_DESCRIPTIONS.get(col)
if desc is not None:
return f'{col} is the {desc}'
def _map_name(self, name: str) -> str:
m: re.Match = re.match(r'^(.+): (\d+)$', name)
if m is None:
raise ModelResultError(f'Unknown model name format: {name}')
run_idx = int(m.group(2))
if run_idx != 1:
raise ModelResultError(
f'Multiple runs not supported: {name} ({run_idx})')
name = m.group(1)
if self.name_replace is not None:
name = re.sub(*self.name_replace, name)
return name
@property
def summary_dataframe(self) -> pd.DataFrame:
pcols = list(self.summary_columns)
rcols = list(map(lambda x: x[:-1], pcols))
rm: ModelResultManager = self.facade.result_manager
reporter = ModelResultReporter(rm)
reporter.include_validation = False
df: pd.DataFrame = reporter.dataframe
df = df[['name'] + pcols]
df = df.rename(columns=dict(zip(pcols, rcols)))
if self.sort_column is not None:
df = df.sort_values(self.sort_column)
df['name'] = df['name'].apply(self._map_name)
if self.majority_label_res_id is not None:
params = {}
if isinstance(self.majority_label_res_id, str):
params['name'] = self.majority_label_res_id
pred_factory: PredictionsDataFrameFactory = \
self.facade.get_predictions_factory(**params)
mets: ClassificationMetrics = pred_factory.majority_label_metrics
majlab = pred_factory.metrics_to_series('Majority Label', mets)
majlab = majlab.rename({
PredictionsDataFrameFactory.LABEL_COL: 'name'})
dfm = pd.DataFrame([majlab[['name'] + rcols]])
df = pd.concat((dfm, df), ignore_index=True)
fmt = '{x:.%sf}' % self.precision
for c in rcols:
df[c] = df[c].apply(lambda x: fmt.format(x=x))
df = df.rename(columns={'name': 'Name'})
return df
def _get_best_results(self) -> pd.DataFrame:
rm: ModelResultManager = self.facade.result_manager
reporter = ModelResultReporter(rm)
reporter.include_validation = False
df: pd.DataFrame = reporter.dataframe
ix = df['wF1t'].idxmax()
name, file_name = df.loc[ix, ['name', 'file']]
df = self.facade.get_predictions_factory(
name=file_name).metrics_dataframe
return df
@property
def by_label_dataframe(self) -> pd.DataFrame:
cols = list(self.by_label_columns)
df: pd.DataFrame = self._get_best_results().copy()
df = df[['label'] + cols]
fmt = '{x:.%sf}' % self.precision
for c in cols:
if c == 'count':
continue
df[c] = df[c].apply(lambda x: fmt.format(x=x))
crenames = dict(map(lambda c: (c, self.capitalize(c)),
'label correct acc count'.split()))
df = df.rename(columns=crenames)
if self.sort_column is not None:
col = self.sort_column
if self.sort_column == 'name':
col = 'label'
df = df.sort_values(col)
return df
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
indent: int = 0):
from tabulate import tabulate
self._write_line('summary:', depth, writer)
df = self.summary_dataframe
content = tabulate(df, headers=df.columns, disable_numparse=True)
self._write_block(content, depth + indent, writer)
self._write_empty(writer)
self._write_line('label:', depth, writer)
df = self.by_label_dataframe
content = tabulate(df, headers=df.columns, disable_numparse=True)
self._write_block(content, depth + indent, writer)
def __call__(self):
self.write()
@dataclass
class LatexPerformanceMetricsDumper(PerformanceMetricsDumper):
"""Writes model performance metrics in data formats then used to import to the
LaTeX typesetting system used by the Zensols build framework. The class
writes a YAML configuration used by `mklatextbl.py` script in the Zensols
Build repo, which generates a LaTeX table. The output is a ``.sty` style
file with the table, which is included with ``usepackage`` and then added
with a command.
:see: `Zensols Build <https://github.com/plandes/zenbuild>`_
:see: `mklatextbl.py <https://github.com/plandes/zenbuild/blob/master/bin/mklatextbl.py>`_
"""
results_dir: Path = field(default=Path('results/perf'))
"""The path to the output CSV files with performance metrics."""
config_dir: Path = field(default=Path('../config'))
"""The path to the YAML configuration files used by the ``mklatextbl.py``
Zensols LaTeX table generator.
"""
def _create_table(self, name: str, output_csv: Path, caption: str,
cols: Iterable[str]) -> Dict[str, Any]:
desc = ', '.join(filter(lambda x: x is not None,
map(self._map_col, cols)))
return {
f'metrics{name}tab':
{'path': f'../model/{output_csv}',
# 'type': 'slack',
# 'slack_col': 0,
'caption': caption.format(**dict(desc=desc)),
'placement': 'VAR',
'size': 'small',
'single_column': False,
'uses': 'zentable'}}
def dump_summary(self) -> Tuple[Path, Path]:
"""Dump summary of metrics to a LaTeX mktable YAML and CSV files.
:return: a tuple of the output CSV and YAML files
"""
output_csv: Path = self.results_dir / 'metrics-summary.csv'
output_yml: Path = self.config_dir / 'metrics-summary-table.yml'
df = self.summary_dataframe
caption = 'Summarization of performance metrics where {desc}.'
rcols = df.columns.to_list()[1:]
table_def = self._create_table('summary', output_csv, caption, rcols)
for path in (output_csv, output_yml):
path.parent.mkdir(parents=True, exist_ok=True)
with open(output_yml, 'w') as f:
yaml.dump(table_def, f)
logger.info(f'wrote: {output_yml}')
df.to_csv(output_csv, index=False)
logger.info(f'wrote: {output_csv}')
return (output_csv, output_yml)
def dump_by_label(self) -> Tuple[Path, Path]:
"""Dump per label of metrics of the highest performing model to a LaTeX mktable
YAML and CSV files.
"""
output_csv: Path = self.results_dir / 'metrics-by-label.csv'
output_yml: Path = self.config_dir / 'metrics-by-label-table.yml'
df = self.by_label_dataframe
caption = 'By label performance metrics where {desc}.'
cols = self.by_label_columns
table_def = self._create_table('label', output_csv, caption, cols)
for path in (output_csv, output_yml):
path.parent.mkdir(parents=True, exist_ok=True)
with open(output_yml, 'w') as f:
yaml.dump(table_def, f)
logger.info(f'wrote: {output_yml}')
df.to_csv(output_csv, index=False)
logger.info(f'wrote: {output_csv}')
return (output_csv, output_yml)
def __call__(self):
self.dump_summary()
self.dump_by_label() | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/model/format.py | format.py |
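# Example (illustrative) of the YAML written by ``dump_summary`` via
# ``_create_table`` using the default paths; the caption's ``{desc}`` text is
# filled in from the metric descriptions:
#
#   metricssummarytab:
#     path: ../model/results/perf/metrics-summary.csv
#     caption: Summarization of performance metrics where ...
#     placement: VAR
#     size: small
#     single_column: false
#     uses: zentable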
__author__ = 'Paul Landes'
from typing import Any, Tuple, Dict
from dataclasses import dataclass, InitVar, field
import logging
from logging import Logger
from torch import Tensor
from torch import nn
from torch.optim import Optimizer
from zensols.deeplearn import ModelError, EarlyBailError, DatasetSplitType
from zensols.deeplearn.result import EpochResult
from zensols.deeplearn.batch import Batch, MetadataNetworkSettings
from . import BaseNetworkModule
@dataclass
class BatchIterator(object):
"""This class assists in the batch loop during training, validation and
testing. Any special handling of a model related to its loss function can
be overridden in this class.
.. document private functions
.. automethod:: _decode_outcomes
"""
executor: InitVar[Any] = field()
"""The owning executor."""
logger: Logger = field()
"""The status logger from the executor."""
def __post_init__(self, executor: Any):
self.model_settings = executor.model_settings
self.net_settings = executor.net_settings
self.torch_config = executor.torch_config
def _decode_outcomes(self, outcomes: Tensor) -> Tensor:
"""Transform the model output (and optionally the labels) that will be added to
the ``EpochResult``, which composes a ``ModelResult``.
        This implementation returns :py:meth:`~torch.Tensor.argmax`, which are
the indexes of the max value across columns.
"""
logger = self.logger
reduce_outcomes = self.model_settings.reduce_outcomes
# get the indexes of the max value across labels and outcomes (for the
        # discrete classification case)
if reduce_outcomes == 'argmax':
res = outcomes.argmax(dim=-1)
# softmax over each outcome
elif reduce_outcomes == 'softmax':
res = outcomes.softmax(dim=-1)
elif reduce_outcomes == 'none':
# leave when nothing, prediction/regression measure is used
res = outcomes
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'argmax outcomes: {outcomes.shape} -> {res.shape}')
return res
def _encode_labels(self, labels: Tensor) -> Tensor:
"""Encode labels to be in the same form and on the same CUDA device as the
batch data. This base class implementation only copies to the GPU.
:param labels: labels paired with the training and validation datasets
:return: labels to be used in the loss function
"""
logger = self.logger
if not self.model_settings.nominal_labels:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'labels type: {labels.dtype}')
labels = self.torch_config.to_type(labels)
return labels
def _debug_output(self, msg: str, labels: Tensor, output: Tensor):
logger = self.logger
if isinstance(self.debug, int) and self.debug > 1 and \
logger.isEnabledFor(logging.DEBUG):
logger.debug(f'{msg}:')
shape = None if labels is None else labels.shape
dtype = None if labels is None else labels.dtype
logger.debug(f'labels: {shape} ({dtype})')
if isinstance(self.debug, int) and self.debug > 1:
logger.debug(f'label values:\n{labels}')
if output is None:
logger.debug('output: <none>')
else:
logger.debug(f'output: {output.shape} ({output.dtype})')
if isinstance(self.debug, int) and self.debug > 1:
logger.debug(f'\n{output}')
def iterate(self, model: BaseNetworkModule, optimizer: Optimizer,
criterion, batch: Batch, epoch_result: EpochResult,
split_type: DatasetSplitType) -> Tensor:
"""Train, validate or test on a batch. This uses the back propogation
algorithm on training and does a simple feed forward on validation and
testing.
One call of this method represents a single batch iteration
        :param model: the model to exercise
:param optimizer: the optimization algorithm (i.e. adam) to iterate
:param criterion: the loss function (i.e. cross entropy loss) used for
            the backpropagation step
:param batch: contains the data to test, predict, and optionally the
labels for training and validation
:param epoch_result: to be populated with the results of this epoch's
run
:param split_type: indicates if we're training, validating or testing
:return: the singleton tensor containing the loss
"""
logger = self.logger
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'train/validate on {split_type}: ' +
f'batch={batch} ({id(batch)})')
logger.debug(f'model on device: {model.device}')
# copy batch to GPU if configured to do so
batch: Batch = batch.to()
outcomes: Tensor = None
output: Tensor = None
try:
if self.debug:
# write a batch sample when debugging; maybe make this a hook
if isinstance(self.net_settings, MetadataNetworkSettings):
meta = self.net_settings.batch_metadata
meta.write()
batch.write()
# when training, reset gradients for the next epoch
if split_type == DatasetSplitType.train:
optimizer.zero_grad()
            # execute the epoch
loss, labels, outcomes, output = self._execute(
model, optimizer, criterion, batch, split_type)
self._debug_output('decode', labels, outcomes)
# if debugging the model, raise the exception to interrupt the
# flow, which is caught in ModelExecutor._execute
if self.debug:
raise EarlyBailError()
if logger.isEnabledFor(logging.DEBUG):
                logger.debug(f'outcomes shape: {outcomes.shape}')
# add results for performance metrics, predictions output, etc
epoch_result.update(batch, loss, labels, outcomes, output)
return loss
finally:
            # clean up and deallocate GPU memory
biter = self.model_settings.batch_iteration
cb = self.model_settings.cache_batches
if (biter == 'cpu' and not cb) or biter == 'buffered':
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'deallocating batch: {batch}')
batch.deallocate()
def _step(self, loss: Tensor, split_type: DatasetSplitType,
optimizer, model: BaseNetworkModule):
"""Iterate over the error surface."""
        # when training, backpropagate and step
if split_type == DatasetSplitType.train:
clip_thresh: float = self.model_settings.clip_gradient_threshold
clip_params: Dict[str, Any] = \
self.model_settings.scale_gradient_params
            # invoke backpropagation on the network
loss.backward()
# clip the gradient
if clip_thresh is not None:
nn.utils.clip_grad_value_(model.parameters(), clip_thresh)
# scale the gradient
if clip_params is not None:
nn.utils.clip_grad_norm_(model.parameters(), **clip_params)
# take an update step and update the new weights
optimizer.step()
def _execute(self, model: BaseNetworkModule, optimizer: Optimizer,
criterion, batch: Batch, split_type: DatasetSplitType) -> \
Tuple[Tensor]:
"""Execute one epoch of training, testing, validation or prediction.
        :param model: the model to exercise
:param optimizer: the optimization algorithm (i.e. adam) to iterate
:param criterion: the loss function (i.e. cross entropy loss) used for
            the backpropagation step
:param batch: contains the data to test, predict, and optionally the
labels for training and validation
:param split_type: indicates if we're training, validating or testing
:return: a tuple of the loss, labels, outcomes, and the output
(i.e. logits); the outcomes are the decoded
(:meth:`_decode_outcomes`) output and represent some ready to
use data, like argmax'd classification nominal label integers
"""
logger = self.logger
labels: Tensor = batch.get_labels()
# forward pass, get our output, which are usually the logits
output: Tensor = model(batch)
# sanity check
if output is None:
raise ModelError('Null model output')
# check for sane state with labels, and munge if necessary
if labels is None:
# sanity check
if split_type != DatasetSplitType.test:
raise ModelError('Expecting no split type on prediction, ' +
f'but got: {split_type}')
if logger.isEnabledFor(logging.DEBUG):
logger.debug('skipping loss calculation on prediction execute')
loss = None
else:
# put labels in a form to be used by the loss function
labels = self._encode_labels(labels)
self._debug_output('input', labels, output)
# calculate the loss with the logps and the labels
loss = criterion(output, labels)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'split: {split_type}, loss: {loss}')
# iterate over the error surface
self._step(loss, split_type, optimizer, model)
self._debug_output('output', labels, output)
# apply the same decoding on the labels as the output if necessary
if labels is not None and not self.model_settings.nominal_labels:
labels = self._decode_outcomes(labels)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'label nom decoded: {labels.shape}')
outcomes = self._decode_outcomes(output)
loss, labels, outcomes, output = self.torch_config.to_cpu_deallocate(
loss, labels, outcomes, output)
return loss, labels, outcomes, output | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/model/batchiter.py | batchiter.py |
__author__ = 'Paul Landes'
from typing import Tuple, Iterable
from dataclasses import dataclass, field
import logging
import sys
from io import TextIOBase, StringIO
import pandas as pd
from zensols.config import Dictable
from zensols.persist import PersistedWork, persisted
from zensols.deeplearn import ModelError
from zensols.deeplearn.result import ModelResult, ModelResultManager
from . import ModelExecutor
logger = logging.getLogger(__name__)
@dataclass
class DataComparison(Dictable):
"""Contains the results from two runs used to compare. The data in this object
is used to compare the validation loss from a previous run to a run that's
currently in progress. This is provided along with the performance metrics
of the runs when written with :meth:`write`.
"""
key: str = field()
"""The results key used with a :class:`.ModelResultManager`."""
previous: ModelResult = field()
"""The previous resuls of the model from a previous run."""
current: ModelResult = field()
"""The current results, which is probably a model currently running."""
compare_df: pd.DataFrame = field()
"""A dataframe with the validation loss from the previous and current results
and that difference.
"""
def _get_dictable_attributes(self) -> Iterable[str]:
return self._split_str_to_attributes('key previous current')
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
converge_idx = self.previous.validation.converged_epoch.index
sio = StringIO()
self._write_line(f'result: {self.key}', depth, writer)
self._write_line('loss:', depth, writer)
with pd.option_context('display.max_rows', None,
'display.max_columns', None):
print(self.compare_df, file=sio)
self._write_block(sio.getvalue().strip(), depth + 1, writer)
self._write_line('previous:', depth, writer)
self.previous.validation.write(depth + 1, writer)
self._write_line(f'converged: {converge_idx}',
depth + 1, writer)
self._write_line('current:', depth, writer)
self.current.validation.write(depth + 1, writer)
@dataclass
class ResultAnalyzer(object):
"""Load results from a previous run of the :class:`ModelExecutor` and a more
recent run. This run is usually a currently running model to compare the
results during training. This might provide meaningful information such as
whether to early stop training.
"""
executor: ModelExecutor = field()
"""The executor (not the running executor necessary) that will load the
    results if not already loaded.
"""
previous_results_key: str = field()
"""The key given to retreive the previous results with
:class:`ModelResultManager`.
"""
cache_previous_results: bool = field()
"""If ``True``, globally cache the previous results to avoid having to
reload each time.
"""
def __post_init__(self):
self._previous_results = PersistedWork(
'_previous_results', self,
cache_global=self.cache_previous_results)
def clear(self):
"""Clear the previous results, if cached.
"""
self._previous_results.clear()
@property
@persisted('_previous_results')
def previous_results(self) -> ModelResult:
"""Return the previous results (see class docs).
"""
rm: ModelResultManager = self.executor.result_manager
if rm is None:
            raise ModelError('No result manager available')
return rm[self.previous_results_key]
@property
def current_results(self) -> Tuple[ModelResult, ModelResult]:
"""Return the current results (see class docs).
"""
if self.executor.model_result is None:
self.executor.load()
return self.executor.model_result
@property
def comparison(self) -> DataComparison:
"""Load the results data and create a comparison instance read to write or
jsonify.
"""
prev, cur = self.previous_results, self.current_results
prev_losses = prev.validation.losses
cur_losses = cur.validation.losses
cur_len = len(cur_losses)
df = pd.DataFrame({'epoch': range(cur_len),
'previous': prev_losses[:cur_len],
'current': cur_losses})
df['improvement'] = df['previous'] - df['current']
return DataComparison(self.previous_results_key, prev, cur, df) | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/model/analyze.py | analyze.py |
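# Example (illustrative) of the comparison dataframe built above, assuming the
# current run has completed three epochs; the loss values are made up:
#
#      epoch  previous  current  improvement
#   0      0      0.92     0.95        -0.03
#   1      1      0.81     0.78         0.03
#   2      2      0.74     0.70         0.04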
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import (
Any, Callable, List, Union, Iterable, Type, Dict, Optional, ClassVar
)
from dataclasses import dataclass, field, InitVar
import sys
import os
import logging
import pandas as pd
from io import TextIOBase
from pathlib import Path
from zensols.util import time
from zensols.config import (
Configurable,
ConfigFactory,
Writable,
ImportConfigFactory,
)
from zensols.persist import (
persisted, PersistableContainer, PersistedWork,
Deallocatable, Stash,
)
from zensols.datdesc import DataDescriber, DataFrameDescriber
from zensols.dataset import DatasetSplitStash
from zensols.deeplearn import ModelError, NetworkSettings, ModelSettings
from zensols.deeplearn.vectorize import (
SparseTensorFeatureContext, FeatureVectorizerManagerSet,
)
from zensols.deeplearn.batch import (
Batch, DataPoint, BatchStash, BatchMetadata, BatchFeatureMapping
)
from zensols.deeplearn.result import (
EpochResult, ModelResult, ModelResultManager,
PredictionsDataFrameFactory, ModelResultReporter,
)
from . import (
ModelManager, ModelExecutor, PredictionMapper,
FacadeClassExplorer, ResultAnalyzer, ModelPacker
)
logger = logging.getLogger(__name__)
@dataclass
class ModelFacade(PersistableContainer, Writable):
"""This class provides easy to use client entry points to the model
executor, which trains, validates, tests, saves and loads the model.
More common attributes, such as the learning rate and number of epochs, are
properties that dispatch to :py:obj:`executor`. For the others, go
directly to the property.
:see: :class:`zensols.deeplearn.domain.ModelSettings`
"""
_SINGLETONS: ClassVar[Dict[str, ModelFacade]] = {}
config: Configurable = field()
"""The configuraiton used to create the facade, and used to create a new
configuration factory to load models.
"""
config_factory: InitVar[ConfigFactory] = field(default=None)
"""The configuration factory used to create this facade, or ``None`` if no
factory was used.
"""
progress_bar: bool = field(default=True)
"""Create text/ASCII based progress bar if ``True``."""
progress_bar_cols: Union[str, int] = field(default='term')
"""The number of console columns to use for the text/ASCII based progress
bar. If the value is ``term``, then use the terminal width.
"""
executor_name: str = field(default='executor')
"""The configuration entry name for the executor, which defaults to
``executor``.
"""
writer: TextIOBase = field(default=sys.stdout)
"""The writer to this in methods like :meth:`train`, and :meth:`test` for
writing performance metrics results and predictions or ``None`` to not
output them.
"""
predictions_dataframe_factory_class: Type[PredictionsDataFrameFactory] = \
field(default=PredictionsDataFrameFactory)
"""The factory class used to create predictions.
:see: :meth:`get_predictions_factory`
"""
def __post_init__(self, config_factory: ConfigFactory):
super().__init__()
self._init_config_factory(config_factory)
self._config_factory = PersistedWork('_config_factory', self)
self._executor = PersistedWork('_executor', self)
self.debuged = False
if self.progress_bar_cols == 'term':
try:
term_width = os.get_terminal_size()[0]
# make space for embedded validation loss messages
self.progress_bar_cols = term_width - 5
except OSError:
logger.debug('unable to automatically determine ' +
'terminal width--skipping')
self.progress_bar_cols = None
@classmethod
def get_singleton(cls, *args, **kwargs) -> ModelFacade:
"""Return the singleton application using ``args`` and ``kwargs`` as
initializer arguments.
"""
key: str = str(cls)
inst: ModelFacade = cls._SINGLETONS.get(key)
if inst is None:
inst = cls(*args, **kwargs)
cls._SINGLETONS[key] = inst
return inst
def _init_config_factory(self, config_factory: ConfigFactory):
if isinstance(config_factory, ImportConfigFactory):
params = config_factory.__dict__
keeps = set('reload shared reload_pattern'.split())
params = {k: params[k] for k in set(params.keys()) & keeps}
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'import config factory params: {params}')
self._config_factory_params = params
else:
self._config_factory_params = {}
def _create_executor(self) -> ModelExecutor:
"""Create a new instance of an executor. Used by :obj:`executor`.
"""
logger.info('creating new executor')
executor = self.config_factory(
self.executor_name,
progress_bar=self.progress_bar,
progress_bar_cols=self.progress_bar_cols)
return executor
@property
@persisted('_config_factory')
def config_factory(self):
"""The configuration factory used to create facades.
"""
return ImportConfigFactory(self.config, **self._config_factory_params)
@property
@persisted('_executor')
def executor(self) -> ModelExecutor:
"""A cached instance of the executor tied to the instance of this class.
"""
return self._create_executor()
@property
def net_settings(self) -> NetworkSettings:
"""Return the executor's network settings.
"""
return self.executor.net_settings
@property
def model_settings(self) -> ModelSettings:
"""Return the executor's model settings.
"""
return self.executor.model_settings
@property
def result_manager(self) -> ModelResultManager:
"""Return the executor's result manager.
"""
rm: ModelResultManager = self.executor.result_manager
if rm is None:
            raise ModelError('No result manager available')
return rm
@property
def feature_stash(self) -> Stash:
"""The stash used to generate the feature, which is not to be confused
with the batch source stash ``batch_stash``.
"""
return self.executor.feature_stash
@property
def batch_stash(self) -> BatchStash:
"""The stash used to encode and decode batches by the executor.
"""
return self.executor.batch_stash
@property
def dataset_stash(self) -> DatasetSplitStash:
"""The stash used to encode and decode batches split by dataset.
"""
return self.executor.dataset_stash
@property
def vectorizer_manager_set(self) -> FeatureVectorizerManagerSet:
"""Return the vectorizer manager set used for the facade. This is taken
from the executor's batch stash.
"""
return self.batch_stash.vectorizer_manager_set
@property
def batch_metadata(self) -> BatchMetadata:
"""Return the batch metadata used on the executor.
:see: :class:`zensols.deepnlp.model.module.EmbeddingNetworkSettings`
"""
return self.batch_stash.batch_metadata
@property
def label_attribute_name(self):
"""Get the label attribute name.
"""
bmeta = self.batch_metadata
if bmeta is not None:
return bmeta.mapping.label_attribute_name
def _notify(self, event: str, context: Any = None):
"""Notify observers of events from this class.
"""
self.model_settings.observer_manager.notify(event, self, context)
def remove_metadata_mapping_field(self, attr: str) -> bool:
"""Remove a field by attribute if it exists across all metadata
mappings.
This is useful when a very expensive vectorizer slows down tasks, such
as prediction, on a single run of a program. For this use case,
override :meth:`predict` to call this method before calling the super
``predict`` method.
:param attr: the name of the field's attribute to remove
:return: ``True`` if the field was removed, ``False`` otherwise
"""
removed = False
meta: BatchMetadata = self.batch_metadata
mapping: BatchFeatureMapping
for mapping in meta.mapping.manager_mappings:
            removed = mapping.remove_field(attr) or removed
return removed
@property
def dropout(self) -> float:
"""The dropout for the entire network.
"""
return self.net_settings.dropout
@dropout.setter
def dropout(self, dropout: float):
"""The dropout for the entire network.
"""
self.net_settings.dropout = dropout
@property
def epochs(self) -> int:
"""The number of epochs for training and validation.
"""
return self.model_settings.epochs
@epochs.setter
def epochs(self, n_epochs: int):
"""The number of epochs for training and validation.
"""
self.model_settings.epochs = n_epochs
@property
def learning_rate(self) -> float:
"""The learning rate to set on the optimizer.
"""
return self.model_settings.learning_rate
@learning_rate.setter
def learning_rate(self, learning_rate: float):
"""The learning rate to set on the optimizer.
"""
self.executor.model_settings.learning_rate = learning_rate
@property
def cache_batches(self) -> bool:
"""The cache_batches for the entire network.
"""
return self.model_settings.cache_batches
@cache_batches.setter
def cache_batches(self, cache_batches: bool):
"""The cache_batches for the entire network.
"""
# if the caching strategy changed, be safe and deallocate and purge to
# lazy recreate everything
if self.model_settings.cache_batches != cache_batches:
self.clear()
self.model_settings.cache_batches = cache_batches
def clear(self):
"""Clear out any cached executor.
"""
if logger.isEnabledFor(logging.INFO):
logger.info('clearing')
executor = self.executor
config_factory = self.config_factory
executor.deallocate()
config_factory.deallocate()
self._executor.clear()
self._config_factory.clear()
def reload(self):
"""Clears all state and reloads the configuration.
"""
self.clear()
self.config.reload()
def deallocate(self):
super().deallocate()
self._SINGLETONS.pop(str(self.__class__), None)
@classmethod
def load_from_path(cls, path: Path, *args, **kwargs) -> ModelFacade:
"""Construct a new facade from the data saved in a persisted model file.
This uses the :py:meth:`.ModelManager.load_from_path` to reconstruct the
returned facade, which means some attributes are taken from default if
not taken from ``*args`` or ``**kwargs``.
:param path: the path of the model file on the file system
:param model_config_overwrites:
            a :class:`~zensols.config.Configurable` used to overwrite
configuration in the model package config
:param args: passed through to the initializer of invoking class ``cls``
:param kwargs: passed through to the initializer of invoking class
``cls``
:return: a new instance of a :class:`.ModelFacade`
:see: :meth:`.ModelManager.load_from_path`
"""
if logger.isEnabledFor(logging.INFO):
logger.info(f'loading from facade from {path}')
# load the model from disk
mm: ModelManager = ModelManager.load_from_path(path)
# the configuration section name of the executor
if 'executor_name' not in kwargs:
kwargs['executor_name'] = mm.model_executor_name
# merge in external configuration into the model config before the
# configuration factory begins to instantiate
src_conf: Optional[Configurable] = kwargs.pop(
'model_config_overwrites', None)
# instantiate the executor
executor: ModelExecutor = mm.load_executor(src_conf)
# set the path of the model
executor.model_settings.path = path
# cleanup any CUDA memory
mm.config_factory.deallocate()
# create a new facade and configure with the loaded model
facade: ModelFacade = cls(executor.config, *args, **kwargs)
facade._config_factory.set(executor.config_factory)
facade._executor.set(executor)
facade._model_config = mm.config_factory.config
return facade
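    # Example (illustrative sketch): creating a facade from a previously
    # trained and saved model; the path is hypothetical.
    #
    #   facade = ModelFacade.load_from_path(Path('model/model.pt'))
    #   facade.writer = None
    #   results = facade.test()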
@property
def model_config(self) -> Configurable:
"""The configurable packaged with the model if this facade was created
        with :meth:`load_from_path`.
"""
if hasattr(self, '_model_config'):
return self._model_config
def debug(self, debug_value: Union[bool, int] = True):
"""Debug the model by setting the configuration to debug mode and
invoking a single forward pass. Logging must be configured properly to
get the output, which is typically just invoking
:py:meth:`logging.basicConfig`.
:param debug_value: ``True`` turns on executor debugging; if an
``int``, the higher the value, the more the logging
"""
executor = self.executor
self._configure_debug_logging()
executor.debug = debug_value
executor.progress_bar = False
executor.model_settings.batch_limit = 1
self.debuged = True
executor.train()
def persist_result(self):
"""Save the last recorded result during an :py:meth:`.Executor.train` or
:py:meth:`.Executor.test` invocation to disk. Optionally also save a
plotted graphics file to disk as well when :obj:`persist_plot_result`
is set to ``True``.
Note that in Jupyter notebooks, this method has the side effect of
plotting the results in the cell when ``persist_plot_result`` is
``True``.
:param persist_plot_result: if ``True``, plot and save the graph as a
PNG file to the results directory
"""
executor = self.executor
rmng: ModelResultManager = self.result_manager
if executor.result_manager is not None:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'dumping model result: {executor.model_result}')
rmng.dump(executor.model_result)
def train(self, description: str = None) -> ModelResult:
"""Train and test or just debug the model depending on the
configuration.
:param description: a description used in the results, which is useful
when making incremental hyperparameter changes to
the model
"""
executor = self.executor
executor.reset()
logger.info('training...')
self._notify('train_start', description)
with time('trained'):
res = executor.train(description)
self._notify('train_end', description)
return res
def test(self, description: str = None) -> ModelResult:
"""Load the model from disk and test it.
"""
if self.debuged:
raise ModelError('Testing is not allowed in debug mode')
executor = self.executor
executor.load()
logger.info('testing...')
self._notify('test_start', description)
with time('tested'):
res = executor.test(description)
if self.writer is not None:
res.write(writer=self.writer)
self._notify('test_end', description)
return res
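# A typical train/test cycle with this facade (``facade`` is assumed to be
# an already configured instance; the description string is arbitrary):
#
#     facade.train(description='baseline hyperparameters')
#     facade.test()
#     facade.persist_result()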
def train_production(self, description: str = None) -> ModelResult:
"""Train on the training and test data sets, then test
:param description: a description used in the results, which is useful
when making incremental hyperparameter changes to
the model
"""
executor = self.executor
executor.reset()
if self.writer is not None:
executor.write(writer=self.writer)
logger.info('training...')
self._notify('train_production_start', description)
with time('trained'):
res = executor.train_production(description)
self._notify('train_production_end', description)
return res
def predict(self, datas: Iterable[Any]) -> Any:
"""Make ad-hoc predictions on batches without labels, and return the
results.
:param datas: the data to predict on, each element being a separate data
point in a batch
"""
executor: ModelExecutor = self.executor
ms: ModelSettings = self.model_settings
if ms.prediction_mapper_name is None:
raise ModelError(
f'The model settings ({ms.name}) is not configured to create ' +
"prediction batches: no set 'prediction_mapper'")
pm: PredictionMapper = self.config_factory.new_instance(
ms.prediction_mapper_name, datas, self.batch_stash)
self._notify('predict_start')
try:
batches: List[Batch] = pm.batches
if not executor.model_exists:
executor.load()
logger.info('predicting...')
with time('predicted'):
res: ModelResult = executor.predict(batches)
eres: EpochResult = res.results[0]
ret: Any = pm.map_results(eres)
finally:
self._notify('predict_end')
pm.deallocate()
return ret
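# Ad-hoc prediction sketch; the strings below are placeholder data points
# and assume a 'prediction_mapper' is configured in the model settings:
#
#     preds = facade.predict(['first data point', 'second data point'])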
def stop_training(self):
"""Early stop training if the model is currently training. This invokes
the :meth:`.TrainManager.stop`, communicates to the training process to
stop on the next check.
:return: ``True`` if the application is configured to early stop and
the signal has not already been given
"""
self._notify('stop_training')
return self.executor.train_manager.stop()
@property
def last_result(self) -> ModelResult:
"""The last recorded result during an :meth:`.ModelExecutor.train` or
:meth:`.ModelExecutor.test` invocation is used.
"""
res = self.executor.model_result
if res is None:
rm: ModelResultManager = self.result_manager
res = rm.load()
if res is None:
raise ModelError('No results found')
return res
def write_result(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_settings: bool = False,
include_converged: bool = False,
include_config: bool = False):
"""Load the last set of results from the file system and print them out.
The result to print is taken from :obj:`last_result`
:param depth: the number of indentation levels
:param writer: the data sink
:param include_settings: whether or not to include model and network
settings in the output
:param include_converged: whether or not to include the converged epoch
details in the output
:param include_config: whether or not to include the configuration in
the output
"""
if logger.isEnabledFor(logging.INFO):
logger.info('load previous results')
res = self.last_result
res.write(depth, writer, include_settings=include_settings,
include_converged=include_converged,
include_config=include_config)
def plot_result(self, result: ModelResult = None, save: bool = False,
show: bool = False) -> ModelResult:
"""Plot results and optionally save and show them. If this is called in
a Jupyter notebook, the plot will be rendered in a cell.
:param result: the result to plot, or if ``None``, use
:py:meth:`last_result`
:param save: if ``True``, save the plot to the results directory with
the same naming as the last data results
:param show: if ``True``, invoke ``matplotlib``'s ``show`` function to
visualize in a non-Jupyter environment
:return: the result used to graph, which comes from the executor when
none is given to the invocation
"""
result = self.last_result if result is None else result
grapher = self.executor.result_manager.get_grapher()
grapher.plot([result])
if save:
grapher.save()
if show:
grapher.show()
return result
def get_predictions_factory(self, column_names: List[str] = None,
transform: Callable[[DataPoint], tuple] = None,
batch_limit: int = sys.maxsize,
name: str = None) \
-> PredictionsDataFrameFactory:
"""Generate a predictions factoty from the test data set.
:param column_names: the list of string column names for each data item
the list returned from ``data_point_transform`` to
be added to the results for each label/prediction
:param transform:
a function that returns a tuple, each with an element respective of
``column_names`` to be added to the results for each
label/prediction; if ``None`` (the default), ``str`` used (see the
`Iris Jupyter Notebook
<https://github.com/plandes/deeplearn/blob/master/notebook/iris.ipynb>`_
example)
:param batch_limit: the max number of batches of results to output
:param name: the name/ID (name of the file sans extension in the
results directory) of the previously archived saved
results to fetch or ``None`` to get the last result
"""
rm: ModelResultManager = self.result_manager
res: ModelResult
if name is None:
res = self.last_result
key: str = rm.get_last_key(False)
else:
res = rm.results_stash[name].model_result
key: str = name
if res is None:
raise ModelError(f'No test results found: {name}')
if not res.test.contains_results:
raise ModelError('No test results found')
path: Path = rm.key_to_path(key)
return self.predictions_dataframe_factory_class(
path, res, self.batch_stash,
column_names, transform, batch_limit)
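# Example sketch of a predictions factory with hypothetical column names
# and a transform that adds one column per data point:
#
#     fac = facade.get_predictions_factory(
#         column_names=['text'],
#         transform=lambda dp: (str(dp),))
#     df = fac.dataframe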
def get_predictions(self, *args, **kwargs) -> pd.DataFrame:
"""Generate a Pandas dataframe containing all predictions from the test
data set. This method is meant to be overridden by application specific
facades to customize prediction output.
:see: :meth:`get_predictions_factory`
:param args: arguments passed to :meth:`get_predictions_factory`
:param kwargs: arguments passed to :meth:`get_predictions_factory`
"""
df_fac = self.get_predictions_factory(*args, **kwargs)
return df_fac.dataframe
def write_predictions(self, lines: int = 10):
"""Print the predictions made during the test phase of the model
execution.
:param lines: the number of lines of the predictions data frame to be
printed to :obj:`writer`
"""
preds = self.get_predictions()
print(preds.head(lines), file=self.writer)
def get_result_analyzer(self, key: str = None,
cache_previous_results: bool = False) \
-> ResultAnalyzer:
"""Return a results analyzer for comparing in flight training progress.
"""
rm: ModelResultManager = self.result_manager
if key is None:
key = rm.get_last_key()
return ResultAnalyzer(self.executor, key, cache_previous_results)
def get_described_results(self, res_id: str = None) -> DataDescriber:
"""Create Zensols LaTeX ready results. This includes a summary from the
:class:`.ModelResultReporter` and detailed results using ``res_id``.
:param res_id: the result ID or use the last if not given
"""
rm: ModelResultManager = self.result_manager
pfac: PredictionsDataFrameFactory = \
self.get_predictions_factory(name=res_id)
reporter = ModelResultReporter(rm, include_validation=True)
summary: DataFrameDescriber = reporter.dataframe_describer
res: DataFrameDescriber = pfac.metrics_dataframe_describer
summary.name = 'Summary'
res.name = f'Run {pfac.result.index}'
return DataDescriber(
name=f'{self.model_settings.model_name} Model Results',
describers=(summary, res))
@property
def class_explorer(self) -> FacadeClassExplorer:
return self._create_facade_explorer()
def _create_facade_explorer(self) -> FacadeClassExplorer:
"""Return a facade explorer used to print the facade's object graph.
"""
return FacadeClassExplorer()
def get_model_packer(self, version: str = '0.0.1') -> ModelPacker:
"""Return a new distribution model packager instance.
:param version: the version used to encode the package
"""
return ModelPacker(executor=self.executor, version=version)
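# Packaging sketch (the version, result ID and output directory are
# placeholders for illustration):
#
#     from pathlib import Path
#     packer = facade.get_model_packer(version='0.1.0')
#     zip_path = packer.pack(res_id='1', output_dir=Path('dist'))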
def write(self, depth: int = 0, writer: TextIOBase = None,
include_executor: bool = True, include_metadata: bool = True,
include_settings: bool = True, include_model: bool = True,
include_config: bool = False, include_object_graph: bool = False):
writer = self.writer if writer is None else writer
writer = sys.stdout if writer is None else writer
bmeta = None
try:
bmeta = self.batch_metadata
except AttributeError:
pass
if include_executor:
self._write_line(f'{self.executor.name}:', depth, writer)
self.executor.write(depth + 1, writer,
include_settings=include_settings,
include_model=include_model)
if include_metadata and bmeta is not None:
self._write_line('metadata:', depth, writer)
bmeta.write(depth + 1, writer)
if include_object_graph:
self._write_line('graph:', depth, writer)
ce = self._create_facade_explorer()
ce.write(self, depth=depth + 1, writer=writer)
if include_config:
self._write_line('config:', depth, writer)
self.config.write(depth + 1, writer)
def _deallocate_config_instance(self, inst: Any):
if isinstance(self.config_factory, ImportConfigFactory):
inst = self.config_factory.clear_instance(inst)
dealloc = isinstance(inst, Deallocatable)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'deallocate {inst}: {type(inst)}: {dealloc}')
if dealloc:
inst.deallocate()
def _configure_debug_logging(self):
"""When debuging the model, configure the logging system for output. The
correct loggers need to be set to debug mode to print the model
debugging information such as matrix shapes.
"""
for name in ['zensols.deeplearn.model',
__name__]:
logging.getLogger(name).setLevel(logging.DEBUG)
def _configure_cli_logging(self, info_loggers: List[str],
debug_loggers: List[str]):
info_loggers.extend([
# multi-process (i.e. batch creation)
'zensols.multi.stash',
'zensols.deeplearn.batch.multi',
# validation/training loss messages
'zensols.deeplearn.model.executor.status',
__name__])
if not self.progress_bar:
info_loggers.extend([
# load messages
'zensols.deeplearn.batch.stash',
# save results messages
'zensols.deeplearn.result',
# validation/training loss messages
'zensols.deeplearn.model.executor.progress',
# model save/load
'zensols.deeplearn.model.manager',
# early stop messages
'zensols.deeplearn.model.trainmng',
# performance metrics formatting
'zensols.deeplearn.model.format',
# model packaging
'zensols.deeplearn.model.pack',
# model save messages
'zensols.deeplearn.result.manager',
# observer module API messages
'zensols.deeplearn.observer.status',
# CLI interface
'zensols.deeplearn.cli.app'])
@staticmethod
def configure_default_cli_logging(log_level: int = logging.WARNING):
"""Configure the logging system with the defaults.
"""
fmt = '%(asctime)s[%(levelname)s]%(name)s: %(message)s'
logging.basicConfig(format=fmt, level=log_level)
def configure_cli_logging(self, log_level: int = None):
""""Configure command line (or Python REPL) debugging. Each facade can turn on
name spaces that make sense as useful information output for long
running training/testing iterations.
This calls "meth:`_configure_cli_logging` to collect the names of
loggers at various levels.
"""
info = []
debug = []
if log_level is not None:
self.configure_default_cli_logging(log_level)
self._configure_cli_logging(info, debug)
for name in info:
logging.getLogger(name).setLevel(logging.INFO)
for name in debug:
logging.getLogger(name).setLevel(logging.DEBUG)
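# Typical REPL setup sketch: configure the root logger and turn on the
# facade's informational name spaces in one call:
#
#     import logging
#     facade.configure_cli_logging(logging.INFO)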
def configure_jupyter(self, log_level: int = logging.WARNING,
progress_bar_cols: int = 120):
"""Configures logging and other configuration related to a Jupyter notebook.
This is just like :py:meth:`configure_cli_logging`, but adjusts logging
for what is conducive for reporting in Jupyter cells.
:param log_level: the default logging level for the logging system
:param progress_bar_cols: the number of columns to use for the progress
bar
"""
self.configure_cli_logging(log_level)
for name in [
# turn off loading messages
'zensols.deeplearn.batch.stash',
# turn off model save messages
'zensols.deeplearn.result.manager']:
logging.getLogger(name).setLevel(logging.WARNING)
# number of columns for the progress bar
self.executor.progress_bar_cols = progress_bar_cols
# turn off console output (non-logging)
self.writer = None
@staticmethod
def get_encode_sparse_matrices() -> bool:
"""Return whether or not sparse matricies are encoded.
:see: :meth:`set_sparse`
"""
return SparseTensorFeatureContext.USE_SPARSE
@staticmethod
def set_encode_sparse_matrices(use_sparse: bool = False):
"""If called before batches are created, encode all tensors the would be
encoded as dense rather than sparse when ``use_sparse`` is ``False``.
Oherwise, tensors will be encoded as sparse where it makes sense on a
per vectorizer basis.
"""
SparseTensorFeatureContext.USE_SPARSE = use_sparse | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/model/facade.py | facade.py |
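# Sketch: force dense encoding before any batches are created (this only
# takes effect if called before batch creation):
#
#     ModelFacade.set_encode_sparse_matrices(False)
#     assert ModelFacade.get_encode_sparse_matrices() is False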
__author__ = 'Paul Landes'
from typing import Tuple, Optional, ClassVar
from dataclasses import dataclass, field
import logging
from pathlib import Path
from zipfile import ZipFile
from zensols.persist import Stash
from zensols.install import Installer
from zensols.config import (
ConfigurableError, Configurable, DictionaryConfig, ConfigFactory
)
from ..result import ArchivedResult
from . import ModelError, ModelResultManager, ModelExecutor
logger = logging.getLogger(__name__)
@dataclass
class ModelPacker(object):
"""Creates distribution model packages by creating a zip file of everything
needed by a client to use the model.
"""
_PT_MODEL_DIR: ClassVar[str] = 'ptmodel'
executor: ModelExecutor = field()
"""The result manager used to obtain the results and model to package."""
version: str = field()
"""The version used to encode the package."""
installer: Optional[Installer] = field(default=None)
"""If set, used to create a path to the model file."""
def pack(self, res_id: str, output_dir: Path) -> Path:
"""Create a distribution model package on the file system.
:param res_id: the result ID of the previously archived results to package
:param output_dir: the directory in which to write the package zip file
:return: the path to the generated distribution model package
"""
verpath: str = 'v' + self.version.replace('.', '_')
result_manager: ModelResultManager = self.executor.result_manager
res_stash: Stash = result_manager.results_stash
result: ArchivedResult = res_stash.get(res_id)
if result is None:
raise ModelError(f'No such result ID: {res_id}')
output_file: Path = output_dir / f'{result.name}-{verpath}.zip'
arch_suffix: str = 'model'
arch_prefix: str = f'{result.name}-{verpath}'
if logger.isEnabledFor(logging.INFO):
logger.info(f'packing {res_id}')
result.write_to_log(logger, depth=1)
with ZipFile(output_file, 'w') as zf:
if result is None:
raise ModelError(f'No such result: {res_id}')
else:
for path in result.get_paths():
arch_name: str = f'{arch_prefix}/{arch_suffix}{path.suffix}'
if logger.isEnabledFor(logging.INFO):
logger.info(f'adding file: {path} -> {arch_name}')
if path.is_dir():
for subpath in path.iterdir():
m_prefix = f'{self._PT_MODEL_DIR}/{subpath.name}'
zf.write(subpath, f'{arch_prefix}/{m_prefix}')
else:
zf.write(path, arch_name)
zf.writestr(f'{arch_prefix}/{arch_suffix}.version', self.version)
if logger.isEnabledFor(logging.INFO):
logger.info(f'wrote: {output_file}')
return output_file
@property
def installed_model_path(self) -> Path:
"""Return the path to the model to be PyTorch loaded."""
if self.installer is not None:
res_path: Path = self.installer.get_singleton_path()
path: Path = res_path / self._PT_MODEL_DIR
if logger.isEnabledFor(logging.INFO):
logger.info(f'loading model {path}')
return path
def install_model(self) -> Path:
"""Install the model if it isn't already and return a path to it."""
model_path: Path = self.installed_model_path
if model_path is not None:
self.installer.install()
return model_path
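# Usage sketch for packing and later installing a model; the executor,
# version, result ID and output directory are placeholders:
#
#     packer = ModelPacker(executor=executor, version='0.0.1')
#     zip_path = packer.pack(res_id='1', output_dir=Path('dist'))
#
# On the consuming side, with an ``Installer`` configured on the packer:
#
#     model_path = packer.install_model()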
class SubsetConfig(DictionaryConfig):
"""A :class:`~zensols.config.Configurable` that takes a subset of the
application configuration. This is useful to pass to
:meth:`.ModelFacade.load_from_path` to merge application configuration into
the packed model's configuration.
"""
def __init__(self, config_factory: ConfigFactory, sections: Tuple[str, ...],
options: Tuple[str, ...], option_delim: str = ':'):
"""Initialize the instance.
:param config_factory: the application config and factory
:param sections: a list of sections to subset
:param options: a list of ``<section>:<option>``, each of which is added
to the subset
:param option_delim: the string used to delimit sections and options in
``options``
"""
super().__init__()
src: Configurable = config_factory.config
src.copy_sections(self, sections=sections)
option: str
for option in options:
sec_name: Tuple[str, str] = option.split(option_delim)
if len(sec_name) != 2:
raise ConfigurableError('Wrong format: expecting delim ' +
f'{option_delim} but got: {option}')
sec, name = sec_name
val: str = src.get_option(name, sec)
self.set_option(name, val, sec) | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/model/pack.py | pack.py |
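# Sketch of merging a subset of the application configuration into a packed
# model's configuration when loading a facade; the section and option names
# are examples only:
#
#     overrides = SubsetConfig(
#         config_factory,
#         sections=('deeplearn_default',),
#         options=('model_settings:path',))
#     facade = ModelFacade.load_from_path(
#         model_path, model_config_overwrites=overrides)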
__author__ = 'Paul Landes'
from types import ModuleType
from typing import Union, Type, ClassVar
from abc import abstractmethod, ABCMeta
import logging
import torch
from torch import nn
from torch import Tensor
from zensols.introspect import ClassImporter
from zensols.persist import PersistableContainer
from zensols.deeplearn import (
ModelError,
NetworkSettings,
ActivationNetworkSettings,
DropoutNetworkSettings,
BatchNormNetworkSettings,
EarlyBailError,
)
from zensols.deeplearn.batch import Batch
logger = logging.getLogger(__name__)
class DebugModule(nn.Module):
"""A utility base class that makes logging more understandable.
"""
DEBUG_DEVICE: ClassVar[bool] = False
"""If ``True``, add tensor devices to log messages."""
DEBUG_TYPE: ClassVar[bool] = False
"""If ``True``, add tensor shapes to log messages."""
DEBUG_CLASS: ClassVar[bool] = True
"""If ``True``, add the logging class to log messages."""
MODULE_NAME: ClassVar[str] = None
"""The module name used in the logging message. This is set in each
inherited class.
"""
_DEBUG_MESSAGE_MAX_LEN: ClassVar[int] = 100
def __init__(self, sub_logger: logging.Logger = None):
"""Initialize.
:param sub_logger: used to log activity in this module so the logged
messages appear to come from the parent model
"""
super().__init__()
if sub_logger is None:
self.logger = self._resolve_class_logger()
else:
self.logger = sub_logger
def _resolve_class_logger(self) -> logging.Logger:
cls: Type = self.__class__
mod: ModuleType = ClassImporter.get_module(cls.__module__, False)
lg: logging.Logger = logger
if hasattr(mod, 'logger'):
lg_mod = getattr(mod, 'logger')
if isinstance(lg_mod, logging.Logger):
lg = lg_mod
return lg
def _debug(self, msg: str):
"""Debug a message using the module name in the description.
"""
if self.logger.isEnabledFor(logging.DEBUG):
if msg is not None:
if len(msg) > self._DEBUG_MESSAGE_MAX_LEN:
msg = msg[:self._DEBUG_MESSAGE_MAX_LEN - 3] + '...'
mname = self.MODULE_NAME
cls = self.__class__.__name__
mname = '' if mname is None else f'[{mname}]'
if self.DEBUG_CLASS:
prefix = f'{cls}{mname}'
else:
prefix = mname if len(mname) > 0 else f'[{cls}]'
self.logger.debug(f'{prefix} {msg}')
def _shape_debug(self, msg: str, x: Tensor):
"""Debug a message using the module name in the description and include
the shape.
"""
if self.logger.isEnabledFor(logging.DEBUG):
if x is None:
shape, device, dtype = [None] * 3
else:
shape, device, dtype = x.shape, x.device, x.dtype
msg = f'{msg} shape: {shape}'
if self.DEBUG_DEVICE:
msg += f', device: {device}'
if self.DEBUG_TYPE:
msg += f', type: {dtype}'
self._debug(msg)
def _bail(self):
"""A convenience method to assist in debugging. This is useful when the
output isn't in the correct form for the :class:`.ModelExecutor`.
"""
self.logger.debug('-' * 60)
raise EarlyBailError()
class BaseNetworkModule(DebugModule, PersistableContainer, metaclass=ABCMeta):
"""A utility base network module that contains ubiquitous, but optional
layers, such as dropout and batch layeres, activation, etc.
.. document private functions
.. automethod:: _forward
"""
def __init__(self, net_settings: NetworkSettings,
sub_logger: logging.Logger = None):
"""Initialize.
:param net_settings: contains common layers such as dropout and batch
normalization
:param sub_logger: used to log activity in this module so the logged
messages appear to come from the parent model
"""
super().__init__(sub_logger)
self.net_settings = ns = net_settings
if isinstance(ns, DropoutNetworkSettings):
self.dropout = ns.dropout_layer
else:
self.dropout = None
if isinstance(ns, BatchNormNetworkSettings) and \
(ns.batch_norm_d is not None or ns.batch_norm_features is not None):
if ns.batch_norm_d is None or ns.batch_norm_features is None:
raise ModelError('Both the dimension and features must be ' +
f'set if one is set: {ns}')
self.batch_norm = ns.batch_norm_layer
else:
self.batch_norm = None
if isinstance(ns, ActivationNetworkSettings):
self.activation_function = ns.activation_function
else:
self.activation_function = None
def _deallocate_children_modules(self):
for layer in self.children():
self._try_deallocate(layer)
def __getstate__(self):
raise ModelError(f'Layers should not be pickled: {self}')
@abstractmethod
def _forward(self, x: Union[Batch, Tensor], *args, **kwargs) -> Tensor:
"""The model's forward implementation. Normal backward semantics are no
different.
:param x: the batch or tensor to train, validate or test on; the type
depends on the needs of the model
:param args: additional model specific arguments needed by classes that
need more context
:param kwargs: additional model specific arguments needed by classes
that need more context
"""
pass
@staticmethod
def device_from_module(module: nn.Module) -> torch.device:
"""Return the device on which the model is configured.
:param module: the module containing the parameters used to get the
device
"""
return next(module.parameters()).device
@property
def device(self) -> torch.device:
"""Return the device on which the model is configured."""
return self.device_from_module(self)
def _forward_dropout(self, x: Tensor) -> Tensor:
"""Forward the dropout if there is one configured.
"""
if self.dropout is None:
self._debug('skipping unset dropout')
else:
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'dropout: {self.dropout}')
x = self.dropout(x)
return x
def _forward_batch_norm(self, x: Tensor) -> Tensor:
"""Forward the batch normalization if there is one configured.
"""
if self.batch_norm is None:
self._debug('skipping unset batch norm')
else:
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'batch norm: {self.batch_norm}')
x = self.batch_norm(x)
return x
def _forward_activation(self, x: Tensor) -> Tensor:
"""Transform using the activation function if there is one configured.
"""
if self.activation_function is None:
self._debug('skipping unset activation')
else:
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'activation: {self.activation_function}')
x = self.activation_function(x)
return x
def _forward_batch_act_drop(self, x: Tensor) -> Tensor:
"""Forward convolution, batch normalization, pool, activation and
dropout for those layers that are configured.
:see: `Sunghean et al. <http://mipal.snu.ac.kr/images/1/16/Dropout_ACCV2016.pdf>`_
:see: `Ioffe et al. <https://arxiv.org/pdf/1502.03167.pdf>`_
"""
x = self._forward_batch_norm(x)
x = self._forward_activation(x)
x = self._forward_dropout(x)
return x
def forward(self, x: Union[Batch, Tensor], *args, **kwargs) -> Tensor:
"""Main forward takes a batch for top level modules, or a tensor for
framework based layers. Return the transformed tensor.
"""
if self.logger.isEnabledFor(logging.DEBUG) and isinstance(x, Batch):
self._debug(f'input batch: {x}')
return self._forward(x, *args, **kwargs) | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/model/module.py | module.py |
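# A minimal subclass sketch showing the extension points; the layer sizes
# are arbitrary and the feature extraction is elided since it depends on
# the batch's vectorized attributes:
#
#     class LinearExample(BaseNetworkModule):
#         MODULE_NAME = 'linear example'
#
#         def __init__(self, net_settings: NetworkSettings,
#                      sub_logger: logging.Logger = None):
#             super().__init__(net_settings, sub_logger)
#             self.linear = nn.Linear(10, 2)
#
#         def _forward(self, x: Tensor, *args, **kwargs) -> Tensor:
#             self._shape_debug('input', x)
#             x = self.linear(x)
#             x = self._forward_batch_act_drop(x)
#             return x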
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Any, Dict, Tuple
from dataclasses import dataclass, field
import logging
from pathlib import Path
import torch
from zensols.util import time
from zensols.config import ConfigFactory, Configurable
from .. import ModelError, TorchConfig, NetworkSettings
from . import BaseNetworkModule
logger = logging.getLogger(__name__)
@dataclass
class ModelManager(object):
"""This class manages the lifecycle of an instance of a ``ModelExecutor``.
This class is mostly used by the executor to control its lifecycle.
However, a client can also use an instance of this to revive a model that
has been saved to disk with the ``ModelResultManager``.
:see: :class:`.ModelExecutor`
"""
path: Path = field()
"""The path of where the model results saved to disk by
:class:`.zensols.deeplearn.results.ModelResultManager`.
"""
config_factory: ConfigFactory = field()
"""The configuration factory to be used to create the ``ModelExecutor``."""
model_executor_name: str = field(default=None)
"""The configuration entry and name of the ``ModelExecutor`` instance."""
persist_random_seed_context: bool = field(default=True)
"""If ``True`` persist the current random seed state, which helps in
creating consistent results across train/test/validate.
"""
keep_last_state_dict: bool = field(default=False)
"""Whether or not to store the PyTorch module state in attribute
``last_saved_state_dict``.
"""
@staticmethod
def _get_paths(path: Path) -> Tuple[Path, Path]:
return (path / 'state.pt', path / 'weight.pt')
@classmethod
def load_from_path(cls, path: Path) -> ModelManager:
"""Load and return an instance of this class from a previously saved
model. This method exists to recreate a :class:`.ModelManager` from a
saved file from scratch. The returned model manager can be used to
create the executor or :class:`ModelFacade` using
:obj:`config_factory`.
:param path: points to the model file persisted with
:py:meth:`_save_executor`
:return: an instance of :class:`.ModelManager` that was used to save
the executor pointed by ``path``
"""
checkpoint = cls._load_checkpoint(*cls._get_paths(path))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'keys: {checkpoint.keys()}')
config_factory = checkpoint['config_factory']
model_executor_name = checkpoint['model_executor_name']
persist_random = checkpoint['random_seed_context'] is not None
return cls(path, config_factory, model_executor_name, persist_random)
def load_executor(self, config_overwrites: Configurable = None) -> \
'ModelExecutor':
"""Load the model the last saved model from the disk. This is used load
an instance of a ``ModelExecutor`` with all previous state completely in
tact. It does this by using an instance of
:class:`zensols.config.factory.Configurable` and a
:class:`zensols.config.factory.ImportConfigFactory` to reconstruct the
executor and it's state by recreating all instances.
After the executor has been recreated with the factory, the previous
model results and model weights are restored.
:param load_factory: whether to load the configuration factory from the
check point; which you probably don't want when
loading from :meth:`load_from_path`
:return: an instance of :class:`.ModelExecutor`
:see: :class:`zensols.deeplearn.model.ModelExecutor`
"""
checkpoint: Dict[str, Any] = self._get_checkpoint(True)
# reload the config factory even if loaded from `load_from_path` since,
# in that case, this instance will be deallocated in the facade
config_factory: ConfigFactory = checkpoint['config_factory']
self._set_random_seed(checkpoint)
# overwrite model configuration before the executor is instantiated
if config_overwrites is not None:
config_overwrites.copy_sections(config_factory.config)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'loading config factory: {config_factory}')
# create the executor from the executor section
executor: 'Executor' = config_factory.instance(
checkpoint['model_executor_name'])
# create the PyTorch model
model: BaseNetworkModule = self._create_module(executor.net_settings)
# load and set the state
self._load_optimizer_state(executor, model, checkpoint)
return executor
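# Sketch: revive a previously trained executor from its saved state; the
# path is a placeholder for the directory written by the result manager:
#
#     mm = ModelManager.load_from_path(Path('results/model'))
#     executor = mm.load_executor()
#     executor.test()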
def _load_optimizer_state(self, executor: Any, model: BaseNetworkModule,
checkpoint: Dict[str, Any]):
model.load_state_dict(checkpoint['model_state_dict'])
executor._set_model(model, True, False)
executor.model_result = checkpoint['model_result']
criterion, optimizer, scheduler = executor.criterion_optimizer_scheduler
if 'model_scheduler_state_dict' in checkpoint:
scheduler_state = checkpoint['model_scheduler_state_dict']
else:
scheduler_state = None
optimizer.load_state_dict(checkpoint['model_optim_state_dict'])
if scheduler is not None and scheduler_state is not None:
scheduler.load_state_dict(scheduler_state)
if logger.isEnabledFor(logging.INFO):
logger.info(f'loaded model from {executor.model_settings.path} ' +
f'on device {model.device}')
def _load_model_optim_weights(self, executor):
"""Load the model and optimizer weights from the last check point. A
side effect is that the optimizer is recreated.
"""
model = executor._get_or_create_model()
checkpoint = self._get_checkpoint(True)
self._load_optimizer_state(executor, model, checkpoint)
def _save_executor(self, executor: Any):
"""Save a ``ModelExecutor`` instance.
:param executor: the executor to persist to disk
"""
logger.debug('saving model state')
if self.persist_random_seed_context:
random_seed_context = TorchConfig.get_random_seed_context()
else:
random_seed_context = None
criterion, optimizer, scheduler = executor.criterion_optimizer_scheduler
if scheduler is None:
scheduler_state = None
else:
scheduler_state = scheduler.state_dict()
state_dict = executor.model.state_dict()
if self.keep_last_state_dict:
self.last_saved_state_dict = self._copy_state_dict(state_dict)
checkpoint = {'config_factory': self.config_factory,
'random_seed_context': random_seed_context,
'model_executor_name': self.model_executor_name,
'net_settings_name': executor.net_settings.name,
'model_result': executor.model_result,
'model_optim_state_dict': optimizer.state_dict(),
'model_scheduler_state_dict': scheduler_state,
'model_state_dict': state_dict}
self._save_checkpoint(checkpoint, True)
def _create_module(self, net_settings: NetworkSettings,
reload: bool = False) -> BaseNetworkModule:
"""Create a new instance of the network model.
"""
resolver = self.config_factory.class_resolver
initial_reload = resolver.reload
try:
resolver.reload = reload
return net_settings.create_module()
finally:
resolver.reload = initial_reload
@staticmethod
def _copy_state_dict(state_dict):
"""Copy the PyTorch module state (weights) and return them as a dict.
"""
return {k: state_dict[k].clone() for k in state_dict.keys()}
def _set_random_seed(self, checkpoint: Dict[str, Any]):
random_seed_context = checkpoint['random_seed_context']
if random_seed_context is not None:
TorchConfig.set_random_seed(**random_seed_context)
def _save_final_trained_results(self, executor):
"""Save the results of the :class:`.ModelResult`, which is typically
called when the validation loss decreases. Note this does not save the
model weights since doing so might clobber the saved weights with those
of an overtrained model (assuming the model that last converged with the
lowest validation loss was already saved).
:param executor: the executor with the model results to save
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'updating results: {self.path}')
checkpoint = self._get_checkpoint(False)
checkpoint['model_result'] = executor.model_result
self._save_checkpoint(checkpoint, False)
def _save_checkpoint(self, checkpoint: Dict[str, Any], save_weights: bool):
"""Save the check point to disk.
:param checkpoint: all model state (results, random seed, weights etc)
:param save_weights: if ``True`` then save the weights to the weight
file (in addition to the state to the state file)
"""
state_path, weight_path = self._get_paths(self.path)
weights = {}
for k in 'model_optim_state_dict model_state_dict'.split():
wval = checkpoint.pop(k, None)
if save_weights and wval is None:
raise ModelError(
f'Missing checkpoint key while saving weights: {k}')
weights[k] = wval
self.path.mkdir(parents=True, exist_ok=True)
if save_weights:
with time(f'saved model weights to {weight_path}'):
torch.save(weights, str(weight_path))
with time(f'saved model state to {state_path}'):
torch.save(checkpoint, str(state_path))
def _get_checkpoint(self, load_weights: bool) -> Dict[str, Any]:
"""The check point from loaded by the PyTorch framework. This contains
the executor, model results, and model weights.
:param load_weights: if ``True`` load the weights from the weights file
and add it to the checkpoint state
"""
state_path, weight_path = self._get_paths(self.path)
if not load_weights:
weight_path = None
return self._load_checkpoint(state_path, weight_path)
@staticmethod
def _load_checkpoint(state_path: Path, weight_path: Path) -> \
Dict[str, Any]:
if not state_path.exists():
raise ModelError(f'No such state file: {state_path}')
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'loading check point from: {state_path}')
with time(f'loaded check point from {state_path}'):
cp = torch.load(str(state_path))
if weight_path is not None:
params = {}
if not torch.cuda.is_available():
params['map_location'] = torch.device('cpu')
weights = torch.load(str(weight_path), **params)
cp.update(weights)
return cp | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/model/manager.py | manager.py |
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Tuple, List, Any, Type
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass, field
from zensols.persist import PersistableContainer, persisted
from zensols.deeplearn.batch import DataPoint, Batch, BatchStash
from zensols.deeplearn.result import ResultsContainer
from .. import ModelError
@dataclass
class PredictionMapper(PersistableContainer, metaclass=ABCMeta):
"""Used by a top level client to create features used to create instances of
:class:`.DataPoint` and map label classes from nominal IDs to their string
representations.
.. document private functions
.. automethod:: _create_data_point
.. automethod:: _create_features
"""
datas: Tuple[Any] = field()
"""The input data to create ad-hoc predictions."""
batch_stash: BatchStash = field()
""""The batch stash used to own batches and data points created by this
instance.
"""
def __post_init__(self):
super().__init__()
@abstractmethod
def _create_features(self, data: Any) -> Tuple[Any]:
"""Create an instance of a feature from ``data``.
:param data: data used to create data points
:return: the data used in the initializer of the respective (in list)
:class:`.DataPoint`
"""
pass
@abstractmethod
def map_results(self, result: ResultsContainer) -> Any:
"""Map ad-hoc prediction results from the :class:`.ModelExecutor` to an
instance that makes sense to the client.
:param result: contains the predictions produced by the model as
:obj:`~zensols.deeplearn.result.ResultsContainer.predictions_raw`
:return: a first class instance suitable for easy client consumption
"""
pass
def _create_prediction_batch(self, data: Any) -> Batch:
dpcls: Type[DataPoint] = self.batch_stash.data_point_type
features: Tuple[Any] = self._create_features(data)
dps: Tuple[DataPoint] = tuple(
map(lambda f: self._create_data_point(dpcls, f), features))
return self.batch_stash.create_batch(dps)
@property
@persisted('_batches')
def batches(self) -> List[Batch]:
"""Create a prediction batch that is detached from any stash resources,
except this instance that created it. This creates a tuple of features,
each of which is used to create a :class:`.DataPoint`.
"""
return self._create_batches()
def _create_batches(self) -> List[Batch]:
bcls: Type[Batch] = self.batch_stash.batch_type
batches = []
for data in self.datas:
batch: Batch = self._create_prediction_batch(data)
state = batch.__getstate__()
dec_batch = object.__new__(bcls)
dec_batch.__setstate__(state)
dec_batch.batch_stash = self.batch_stash
dec_batch.data_points = batch.data_points
batches.append(dec_batch)
return batches
def _create_data_point(self, cls: Type[DataPoint],
feature: Any) -> DataPoint:
"""Create a data point. This base implementation creates it with the
passed parameters.
:param cls: the data point class of which to make an instance
:param feature: to be set as the third argument, generated from
:meth:`_create_features`
"""
return cls(None, self.batch_stash, feature)
def __getstate__(self):
raise ModelError('Instances are not pickleable')
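# A minimal (hypothetical) mapper sketch; feature creation and result
# mapping depend entirely on the application's data point class, and
# ``predictions_raw`` follows the attribute referenced in the docstring of
# :meth:`map_results` above:
#
#     @dataclass
#     class PassThroughPredictionMapper(PredictionMapper):
#         def _create_features(self, data):
#             return (data,)
#
#         def map_results(self, result):
#             return result.predictions_raw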
"""Sequence modules for sequence models.
"""
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import List, Union, Tuple
from dataclasses import dataclass, field
from abc import abstractmethod
import logging
import torch
from torch import Tensor
from torch import nn
from torch.optim import Optimizer
from zensols.persist import Deallocatable
from zensols.deeplearn import DatasetSplitType, ModelError
from zensols.deeplearn.batch import Batch
from . import BaseNetworkModule, BatchIterator
logger = logging.getLogger(__name__)
@dataclass
class SequenceNetworkContext(object):
"""The forward context for the :class:`.SequenceNetworkModule`. This is used
in :meth:`.SequenceNetworkModule._forward` to provide the module additional
information needed to score the model and produce the loss.
"""
split_type: DatasetSplitType = field()
"""The split type, which informs the module when decoding to produce outputs or
using the forward pass to prod.
:see: :meth:`.SequenceNetworkModule._forward`
"""
criterion: nn.Module = field()
"""The criterion used to create the loss. This is provided for modules that
produce the loss in the forward phase with the
:meth:`torch.nn.Module.forward` method.
"""
class SequenceNetworkOutput(Deallocatable):
"""The output from :clas:`.SequenceNetworkModule` modules.
"""
def __init__(self, predictions: Union[List[List[int]], Tensor],
loss: Tensor = None,
score: Tensor = None,
labels: Union[List[List[int]], Tensor] = None,
outputs: Tensor = None):
"""Initialize the output of a sequence NN.
:param predictions: list of list predictions to convert into a 1-D
tensor if given and not already a tensor; if a
tensor, the shape must also be 1-D
:param loss: the loss tensor
:param score: the score given by the CRF's Viterbi algorithm
:param labels: list of list gold labels to convert into a 1-D tensor
if given and not already a tensor
:param outputs: the logits from the model
"""
if predictions is not None and not isinstance(predictions, Tensor):
# shape: 1D
self.predictions = self._to_tensor(predictions)
else:
self.predictions = predictions
if labels is not None and not isinstance(labels, Tensor):
self.labels = self._to_tensor(labels)
else:
self.labels = labels
self.loss = loss
self.score = score
self.outputs = outputs
def _to_tensor(self, lists: List[List[int]]) -> Tensor:
"""Flatten a list of lists.
:return: a 1-D tensor by flattening of the ``lists`` data
"""
outs = []
for lst in lists:
outs.append(torch.tensor(lst, dtype=torch.int64))
arr: Tensor = torch.cat(outs, dim=0)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'output shape: {arr.shape}')
return arr
def righsize_labels(self, preds: List[List[int]]):
"""Convert the :obj:`labels` tensor as a 1-D tensor. This removes the padded
values by iterating over ``preds`` using each sub list's for copying
the gold label tensor to the new tensor.
"""
labs = []
labels = self.labels
for rix, bout in enumerate(preds):
blen = len(bout)
labs.append(labels[rix, :blen].cpu())
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'row: {rix}, len: {blen}, out/lab')
self.labels = torch.cat(labs, 0)
def deallocate(self):
for i in 'predictions loss score'.split():
if hasattr(self, i):
delattr(self, i)
class SequenceNetworkModule(BaseNetworkModule):
"""A module that has a forward training pass and a separate *scoring* phase.
Examples include layers with an ending linear CRF layer, such as a BiLSTM
CRF. This module has a ``decode`` method that returns a 2D list of integer
label indexes of a nominal class.
The context provides additional information needed to train, test and use
the module.
:see: :class:`zensols.deeplearn.layer.RecurrentCRFNetwork`
.. document private functions
.. automethod:: _forward
"""
@abstractmethod
def _forward(self, batch: Batch, context: SequenceNetworkContext) -> \
SequenceNetworkOutput:
"""The forward pass, which either trains the model and creates the loss and/or
decodes the output for testing and evaluation.
:param batch: the batch to train, validate or test on
:param context: contains the additional information needed for scoring
and decoding the sequence
"""
pass
@dataclass
class SequenceBatchIterator(BatchIterator):
"""Expects outputs as a list of lists of labels of indexes. Examples of
use cases include CRFs (e.g. BiLSTM/CRFs).
"""
def _execute(self, model: BaseNetworkModule, optimizer: Optimizer,
criterion, batch: Batch, split_type: DatasetSplitType) -> \
Tuple[Tensor]:
logger = self.logger
cctx = SequenceNetworkContext(split_type, criterion)
seq_out: SequenceNetworkOutput = model(batch, cctx)
outcomes: Tensor = seq_out.predictions
loss: Tensor = seq_out.loss
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'{batch.id}: output: {seq_out}')
if seq_out.labels is not None:
labels = seq_out.labels
else:
labels: Tensor = batch.get_labels()
labels = self._encode_labels(labels)
if logger.isEnabledFor(logging.DEBUG):
if labels is not None:
logger.debug(f'label shape: {labels.shape}')
self._debug_output('after forward', labels, outcomes)
# iterate over the error surface
self._step(loss, split_type, optimizer, model)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'split: {split_type}, loss: {loss}')
# transform the labels in the same manner as the predictions so tensor
# shapes match
if not self.model_settings.nominal_labels:
labels = self._decode_outcomes(labels)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'label nom decoded: {labels.shape}')
if outcomes is None and split_type != DatasetSplitType.train:
raise ModelError('Expecting predictions for all splits except ' +
f'{DatasetSplitType.train} on {split_type}')
if logger.isEnabledFor(logging.DEBUG):
if outcomes is not None:
logger.debug(f'outcomes: {outcomes.shape}')
if labels is not None:
logger.debug(f'labels: {labels.shape}')
loss, labels, outcomes, outputs = self.torch_config.to_cpu_deallocate(
loss, labels, outcomes, seq_out.outputs)
return loss, labels, outcomes, outputs | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/model/sequence.py | sequence.py |
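# Sketch of how a sequence module's ``_forward`` typically behaves; the
# emission, loss and decoding helpers are hypothetical and stand in for,
# e.g., a linear CRF layer:
#
#     def _forward(self, batch, context):
#         emissions = self._compute_emissions(batch)
#         loss = self._crf_loss(emissions, batch.get_labels())
#         preds = None
#         if context.split_type != DatasetSplitType.train:
#             # predictions are required for validation and test
#             preds = self._decode(emissions)  # 2D list of label indexes
#         return SequenceNetworkOutput(
#             preds, loss=loss, labels=batch.get_labels())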
__author__ = 'Paul Landes'
from dataclasses import dataclass, field
from typing import List, Callable, Tuple, Any, Union
import sys
import gc
import logging
import itertools as it
from itertools import chain
from io import TextIOBase, StringIO
import random as rand
from pathlib import Path
import torch
from torch import nn
from tqdm import tqdm
from zensols.util import time
from zensols.config import Configurable, ConfigFactory, Writable
from zensols.persist import (
Deallocatable,
persisted, PersistedWork, PersistableContainer,
Stash, UnionStash,
)
from zensols.dataset import SplitStashContainer, DatasetSplitStash
from zensols.deeplearn import (
ModelError, EarlyBailError,
TorchConfig, DatasetSplitType, NetworkSettings
)
from zensols.deeplearn.result import (
EpochResult, ModelResult, ModelSettings, ModelResultManager,
)
from zensols.deeplearn.batch import BatchStash, Batch
from . import (
ModelResourceFactory, BaseNetworkModule,
ModelManager, UpdateAction,
BatchIterator, TrainManager,
)
# default message logger
logger = logging.getLogger(__name__ + '.status')
# logger for messages, which is active when the progress bar is not
progress_logger = logging.getLogger(__name__ + '.progress')
@dataclass
class ModelExecutor(PersistableContainer, Deallocatable, Writable):
"""This class creates and uses a network to train, validate and test the
model. This class is either configured using a
:class:`~zensols.config.factory.ConfigFactory` or is unpickled with
:class:`.ModelManager`. If the latter, it is from a previously trained (and
possibly tested) state.
Typically, after creating a nascent instance, :meth:`train` is called to
train the model. This returns the results, but the results are also
available via the :class:`.ModelResultManager` using the :obj:`result_manager`
property. To load previous results, use
``executor.result_manager.load()``.
During training, the training set is used to train the weights of the model
provided by the executor in the :obj:`model_settings`, then validated using
the validation set. When the validation loss is minimized, the following
is saved to disk:
* Settings: :obj:`net_settings`, :obj:`model_settings`,
* the model weights,
* the results of the training and validation thus far,
* the entire configuration (which is later used to restore the
executor),
* random seed information, which includes Python, Torch and GPU random
state.
After the model is trained, you can immediately test the model with
:meth:`test`. To be more certain of being able to reproduce the same
results, it is recommended to load the model with
``model_manager.load_executor()``, which loads the last instance of the
model that produced a minimum validation loss.
:see: :class:`.ModelExecutor`
:see: :class:`.NetworkSettings`
:see: :class:`zensols.deeplearn.model.ModelSettings`
"""
ATTR_EXP_META = ('model_settings',)
config_factory: ConfigFactory = field()
"""The configuration factory that created this instance."""
config: Configurable = field()
"""The configuration used in the configuration factory to create this
instance.
"""
name: str = field()
"""The name given in the configuration."""
model_settings: ModelSettings = field()
"""The configuration of the model."""
net_settings: NetworkSettings = field()
"""The settings used to configure the network."""
dataset_stash: DatasetSplitStash = field()
"""The split data set stash that contains the ``BatchStash``, which
contains the batches on which to train and test.
"""
dataset_split_names: List[str] = field()
"""The list of split names in the ``dataset_stash`` in the order: train,
validation, test (see :meth:`_get_dataset_splits`)
"""
result_path: Path = field(default=None)
"""If not ``None``, a path to a directory where the results are to be
dumped; the directory will be created if it doesn't exist when the results
are generated.
"""
update_path: Path = field(default=None)
"""The path to check for commands/updates to make while training. If this is
set, and the file exists, then it is parsed as a JSON file. If the file
cannot be parsed or is zero size, then the training is (early) stopped.
If the file can be parsed, and there is a single ``epoch`` dict entry, then
the current epoch is set to that value.
"""
intermediate_results_path: Path = field(default=None)
"""If this is set, then save the model and results to this path after
validation for each training epoch.
"""
progress_bar: bool = field(default=False)
"""Create text/ASCII based progress bar if ``True``."""
progress_bar_cols: int = field(default=None)
"""The number of console columns to use for the text/ASCII based progress
bar.
"""
def __post_init__(self):
super().__init__()
if not isinstance(self.dataset_stash, DatasetSplitStash) and False:
raise ModelError('Expecting type DatasetSplitStash but ' +
f'got {self.dataset_stash.__class__}')
self._model = None
self._dealloc_model = False
self.model_result: ModelResult = None
self.batch_stash.delegate_attr: bool = True
self._criterion_optimizer_scheduler = PersistedWork(
'_criterion_optimizer_scheduler', self)
self._result_manager = PersistedWork('_result_manager', self)
self._train_manager = PersistedWork('_train_manager', self)
self.cached_batches = {}
self.debug = False
@property
def batch_stash(self) -> DatasetSplitStash:
"""The stash used to obtain the data for training and testing. This
stash should have training, validation and test splits. The names of
these splits are given in the ``dataset_split_names``.
"""
return self.dataset_stash.split_container
@property
def feature_stash(self) -> Stash:
"""The stash used to generate the feature, which is not to be confused
with the batch source stash``batch_stash``.
"""
return self.batch_stash.split_stash_container
@property
def torch_config(self) -> TorchConfig:
"""Return the PyTorch configuration used to convert models and data
(usually GPU) during training and test.
"""
return self.batch_stash.model_torch_config
@property
@persisted('_result_manager')
def result_manager(self) -> ModelResultManager:
"""Return the manager used for controlling the life cycle of the results
generated by this executor.
"""
if self.result_path is not None:
return self._create_result_manager(self.result_path)
def _create_result_manager(self, path: Path) -> ModelResultManager:
return ModelResultManager(
name=self.model_settings.model_name, path=path,
model_path=self.model_settings.path)
@property
@persisted('_model_manager')
def model_manager(self) -> ModelManager:
"""Return the manager used for controlling the train of the model.
"""
model_path = self.model_settings.path
return ModelManager(model_path, self.config_factory, self.name)
@property
@persisted('_batch_iterator')
def batch_iterator(self) -> BatchIterator:
"""The train manager that assists with the training process.
"""
resolver = self.config_factory.class_resolver
batch_iter_class_name = self.model_settings.batch_iteration_class_name
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'batch_iteration: {batch_iter_class_name}')
batch_iter_class = resolver.find_class(batch_iter_class_name)
batch_iter = batch_iter_class(self, logger)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'batch_iter={batch_iter}')
return batch_iter
@property
def debug(self) -> Union[bool, int]:
return self._debug
@debug.setter
def debug(self, debug: Union[bool, int]):
self._debug = debug
self.batch_iterator.debug = debug
@property
@persisted('_train_manager')
def train_manager(self) -> TrainManager:
"""Return the train manager that assists with the training process.
"""
return TrainManager(
logger, progress_logger, self.update_path,
self.model_settings.max_consecutive_increased_count)
def _weight_reset(self, m):
if hasattr(m, 'reset_parameters') and callable(m.reset_parameters):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'resetting parameters on {m}')
m.reset_parameters()
def reset(self):
"""Reset the executor's to it's nascent state.
"""
if logger.isEnabledFor(logging.INFO):
logger.info('resetting executor')
self._criterion_optimizer_scheduler.clear()
self._deallocate_model()
def load(self) -> nn.Module:
"""Clear all results and trained state and reload the last trained model
from the file system.
:return: the model that was loaded and registered in this instance of
the executor
"""
if logger.isEnabledFor(logging.INFO):
logger.info('reloading model weights')
self._deallocate_model()
self.model_manager._load_model_optim_weights(self)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'copied model to {self.model.device}')
return self.model
def deallocate(self):
super().deallocate()
self._deallocate_model()
self.deallocate_batches()
self._try_deallocate(self.dataset_stash)
self._deallocate_settings()
self._criterion_optimizer_scheduler.deallocate()
self._result_manager.deallocate()
self.model_result = None
def _deallocate_model(self):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('dealloc model: model exists/dealloc: ' +
f'{self._model is not None}/{self._dealloc_model}')
if self._model is not None and self._dealloc_model:
self._try_deallocate(self._model)
self._model = None
def _deallocate_settings(self):
self.model_settings.deallocate()
self.net_settings.deallocate()
def deallocate_batches(self):
set_of_ds_sets = self.cached_batches.values()
ds_sets = chain.from_iterable(set_of_ds_sets)
batches = chain.from_iterable(ds_sets)
for batch in batches:
batch.deallocate()
self.cached_batches.clear()
@property
def model_exists(self) -> bool:
"""Return whether the executor has a model.
:return: ``True`` if the model has been trained or loaded
"""
return self._model is not None
@property
def model(self) -> BaseNetworkModule:
"""Get the PyTorch module that is used for training and test.
"""
if self._model is None:
raise ModelError('No model is populated; use \'load\'')
return self._model
@model.setter
def model(self, model: BaseNetworkModule):
"""Set the PyTorch module that is used for training and test.
"""
self._set_model(model, False, True)
def _set_model(self, model: BaseNetworkModule,
take_owner: bool, deallocate: bool):
if logger.isEnabledFor(level=logging.DEBUG):
logger.debug(f'setting model: {type(model)}')
if deallocate:
self._deallocate_model()
self._model = model
self._dealloc_model = take_owner
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'setting dealloc model: {self._dealloc_model}')
self._criterion_optimizer_scheduler.clear()
def _get_or_create_model(self) -> BaseNetworkModule:
if self._model is None:
self._dealloc_model = True
model = self._create_model()
self._model = model
else:
model = self._model
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'created model as dealloc: {self._dealloc_model}')
return model
def _create_model(self) -> BaseNetworkModule:
"""Create the network model instance.
"""
mng: ModelManager = self.model_manager
model = mng._create_module(self.net_settings, self.debug)
if logger.isEnabledFor(logging.INFO):
logger.info(f'created model on {model.device} ' +
f'with {self.torch_config}')
return model
def _create_model_result(self) -> ModelResult:
res = ModelResult(
self.config,
f'{self.model_settings.model_name}: {ModelResult.get_num_runs()}',
self.model_settings, self.net_settings,
self.batch_stash.decoded_attributes)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating model result ({id(res)}): ' +
self.model_settings.model_name)
return res
@property
@persisted('_criterion_optimizer_scheduler')
def criterion_optimizer_scheduler(self) -> \
Tuple[nn.L1Loss, torch.optim.Optimizer, Any]:
"""Return the loss function and descent optimizer.
"""
criterion = self._create_criterion()
optimizer, scheduler = self._create_optimizer_scheduler()
return criterion, optimizer, scheduler
def _create_criterion(self) -> nn.Module:
"""Factory method to create the loss function (criterion).
"""
resolver = self.config_factory.class_resolver
criterion_class_name = self.model_settings.criterion_class_name
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'criterion: {criterion_class_name}')
criterion_class = resolver.find_class(criterion_class_name)
criterion = criterion_class()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'criterion={criterion}')
return criterion
def _create_optimizer_scheduler(self) -> Tuple[torch.optim.Optimizer, Any]:
"""Factory method to create the optimizer and the learning rate scheduler
(if any).
"""
model = self.model
resolver = self.config_factory.class_resolver
optimizer_class_name = self.model_settings.optimizer_class_name
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'optimizer: {optimizer_class_name}')
optimizer_class = resolver.find_class(optimizer_class_name)
if self.model_settings.optimizer_params is None:
optimizer_params = {}
else:
optimizer_params = dict(self.model_settings.optimizer_params)
optimizer_params['lr'] = self.model_settings.learning_rate
if issubclass(optimizer_class, ModelResourceFactory):
opt_call = optimizer_class()
optimizer_params['model'] = model
optimizer_params['executor'] = self
else:
opt_call = optimizer_class
optimizer = opt_call(model.parameters(), **optimizer_params)
scheduler_class_name = self.model_settings.scheduler_class_name
if scheduler_class_name is not None:
scheduler_class = resolver.find_class(scheduler_class_name)
scheduler_params = self.model_settings.scheduler_params
if scheduler_params is None:
scheduler_params = {}
else:
scheduler_params = dict(scheduler_params)
scheduler_params['optimizer'] = optimizer
if issubclass(scheduler_class, ModelResourceFactory):
# model resource factories are callable
sch_call = scheduler_class()
scheduler_params['executor'] = self
else:
sch_call = scheduler_class
scheduler = sch_call(**scheduler_params)
else:
scheduler = None
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'optimizer={optimizer}')
return optimizer, scheduler
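    # Illustrative values for the model settings these factory methods resolve
    # (a sketch only; the class names below are examples, not requirements):
    #
    #   criterion_class_name:  torch.nn.CrossEntropyLoss
    #   optimizer_class_name:  torch.optim.Adam
    #   scheduler_class_name:  torch.optim.lr_scheduler.ReduceLROnPlateau
    #
    # The scheduler gets its ``optimizer`` parameter injected above, so only
    # the remaining constructor arguments belong in ``scheduler_params``.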
def get_model_parameter(self, name: str):
"""Return a parameter of the model, found in ``model_settings``.
"""
return getattr(self.model_settings, name)
def set_model_parameter(self, name: str, value: Any):
"""Safely set a parameter of the model, found in ``model_settings``. This
makes the corresponding update in the configuration, so that when it is
restored (i.e for test) the parameters are consistent with the trained
model. The value is converted to a string as the configuration
representation stores all data values as strings.
*Important*: ``eval`` syntaxes are not supported, and probably not the
kind of values you want to set a parameters with this interface anyway.
:param name: the name of the value to set, which is the key in the
configuration file
:param value: the value to set on the model and the configuration
"""
self.config.set_option(
name, str(value), section=self.model_settings.name)
setattr(self.model_settings, name, value)
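    # Usage sketch (the executor instance is illustrative; 'learning_rate' is a
    # model settings attribute used elsewhere in this class):
    #
    #   executor.set_model_parameter('learning_rate', 1e-4)
    #   assert executor.get_model_parameter('learning_rate') == 1e-4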
def get_network_parameter(self, name: str):
"""Return a parameter of the network, found in ``network_settings``.
"""
return getattr(self.net_settings, name)
def set_network_parameter(self, name: str, value: Any):
"""Safely set a parameter of the network, found in ``network_settings``. This
makes the corresponding update in the configuration, so that when it is
restored (i.e for test) the parameters are consistent with the trained
network. The value is converted to a string as the configuration
representation stores all data values as strings.
*Important*: ``eval`` syntaxes are not supported, and probably not the
kind of values you want to set a parameters with this interface anyway.
:param name: the name of the value to set, which is the key in the
configuration file
:param value: the value to set on the network and the configuration
"""
self.config.set_option(
name, str(value), section=self.net_settings.name)
setattr(self.net_settings, name, value)
def _to_iter(self, ds):
ds_iter = ds
if isinstance(ds_iter, Stash):
ds_iter = ds_iter.values()
return ds_iter
def _gc(self, level: int):
"""Invoke the Python garbage collector if ``level`` is high enough. The
*lower* the value of ``level``, the more often it will be run during
training, testing and validation.
:param level: if priority of the need to collect--the lower the more
its needed
"""
if level <= self.model_settings.gc_level:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('garbage collecting')
self._notify('gc_start')
with time('garbage collected', logging.DEBUG):
gc.collect()
self._notify('gc_end')
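    # Behavior sketch: with ``model_settings.gc_level`` set to 2 (a
    # hypothetical configuration), the per epoch calls _gc(1) and _gc(2)
    # collect, while the more frequent per batch _gc(3) calls do not.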
def _notify(self, event: str, context: Any = None):
"""Notify observers of events from this class.
"""
self.model_settings.observer_manager.notify(event, self, context)
def _train(self, train: List[Batch], valid: List[Batch]):
"""Train the network model and record validation and training losses. Every
time the validation loss shrinks, the model is saved to disk.
"""
n_epochs = self.model_settings.epochs
# create network model, loss and optimization functions
model = self._get_or_create_model()
model = self.torch_config.to(model)
self._model = model
if logger.isEnabledFor(logging.INFO):
logger.info(f'training model {type(model)} on {model.device} ' +
f'for {n_epochs} epochs using ' +
f'learning rate {self.model_settings.learning_rate}')
criterion, optimizer, scheduler = self.criterion_optimizer_scheduler
# create a second module manager for after epoch results
if self.intermediate_results_path is not None:
model_path = self.intermediate_results_path
intermediate_manager = self._create_result_manager(model_path)
intermediate_manager.file_pattern = '{prefix}.{ext}'
else:
intermediate_manager = None
train_manager = self.train_manager
action = UpdateAction.ITERATE_EPOCH
# set up graphical progress bar
exec_logger = logging.getLogger(__name__)
if self.progress_bar and \
(exec_logger.level == 0 or
exec_logger.level > logging.INFO) and \
(progress_logger.level == 0 or
progress_logger.level > logging.INFO):
pbar = tqdm(total=n_epochs, ncols=self.progress_bar_cols)
else:
pbar = None
train_manager.start(optimizer, scheduler, n_epochs, pbar)
self.model_result.train.start()
self.model_result.validation.start()
# epochs loop
while action != UpdateAction.STOP:
epoch: int = train_manager.current_epoch
train_epoch_result = EpochResult(epoch, DatasetSplitType.train)
valid_epoch_result = EpochResult(epoch, DatasetSplitType.validation)
            if progress_logger.isEnabledFor(logging.INFO):
                progress_logger.info(f'training on epoch: {epoch}')
self.model_result.train.append(train_epoch_result)
self.model_result.validation.append(valid_epoch_result)
# train ----
# prep model for training and train
model.train()
train_epoch_result.start()
self._notify('train_start', {'epoch': epoch})
for batch in self._to_iter(train):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'training on batch: {batch.id}')
with time('trained batch', level=logging.DEBUG):
self.batch_iterator.iterate(
model, optimizer, criterion, batch,
train_epoch_result, DatasetSplitType.train)
self._gc(3)
self._notify('train_end', {'epoch': epoch})
train_epoch_result.end()
self._gc(2)
# validate ----
# prep model for evaluation and evaluate
ave_valid_loss = 0
model.eval()
valid_epoch_result.start()
self._notify('validation_start', {'epoch': epoch})
for batch in self._to_iter(valid):
# forward pass: compute predicted outputs by passing inputs
# to the model
with torch.no_grad():
loss = self.batch_iterator.iterate(
model, optimizer, criterion, batch,
valid_epoch_result, DatasetSplitType.validation)
ave_valid_loss += (loss.item() * batch.size())
self._gc(3)
self._notify('validation_end', {'epoch': epoch})
valid_epoch_result.end()
ave_valid_loss = ave_valid_loss / len(valid)
self._gc(2)
valid_loss_min, decreased = train_manager.update_loss(
valid_epoch_result, train_epoch_result, ave_valid_loss)
if decreased:
self.model_manager._save_executor(self)
if intermediate_manager is not None:
inter_res = self.model_result.get_intermediate()
intermediate_manager.save_text_result(inter_res)
intermediate_manager.save_plot_result(inter_res)
# look for indication of update or early stopping
status = train_manager.get_status()
action = status.action
val_losses = train_manager.validation_loss_decreases
if logger.isEnabledFor(logging.INFO):
logger.info('final minimum validation ' +
f'loss: {train_manager.valid_loss_min}, ' +
f'{val_losses} decreases')
if val_losses == 0:
            logger.warning('no validation loss decreases encountered, ' +
                           'so no model was saved; the model cannot be tested')
self.model_result.train.end()
self.model_result.validation.end()
self.model_manager._save_final_trained_results(self)
def _test(self, batches: List[Batch]):
"""Test the model on the test set. If a model is not given, it is unpersisted
from the file system.
"""
# create the loss and optimization functions
criterion, optimizer, scheduler = self.criterion_optimizer_scheduler
model = self.torch_config.to(self.model)
# track epoch progress
test_epoch_result = EpochResult(0, DatasetSplitType.test)
if logger.isEnabledFor(logging.INFO):
logger.info(f'testing model {type(model)} on {model.device}')
        # if for some reason the model was trained but not tested, we'll load
        # from the model file, which will have no train results (bad idea)
if self.model_result is None:
self.model_result = self._create_model_result()
self.model_result.reset(DatasetSplitType.test)
self.model_result.test.start()
self.model_result.test.append(test_epoch_result)
# prep model for evaluation
model.eval()
# run the model on test data
test_epoch_result.start()
for batch in self._to_iter(batches):
# forward pass: compute predicted outputs by passing inputs
# to the model
with torch.no_grad():
self.batch_iterator.iterate(
model, optimizer, criterion, batch,
test_epoch_result, DatasetSplitType.test)
self._gc(3)
test_epoch_result.end()
self._gc(2)
self.model_result.test.end()
def _preproces_training(self, ds_train: Tuple[Batch]):
"""Preprocess the training set, which for this method implementation, includes
a shuffle if configured in the model settings.
"""
self._notify('preprocess_training_start')
if self.model_settings.shuffle_training:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('shuffling training dataset')
# data sets are ordered with training as the first
rand.shuffle(ds_train)
self._notify('preprocess_training_end')
def _calc_batch_limit(self, src: Stash,
batch_limit: Union[int, float]) -> int:
if batch_limit <= 0:
raise ModelError(f'Batch limit must be positive: {batch_limit}')
if isinstance(batch_limit, float):
if batch_limit > 1.0:
                raise ModelError('Batch limit must be no greater than 1 ' +
                                 f'when a float: {batch_limit}')
vlim = round(len(src) * batch_limit)
if logger.isEnabledFor(logging.DEBUG):
logger.debug('batch limit calculated as a percentage: ' +
f'{vlim} = {len(src)} * {batch_limit}')
else:
vlim = batch_limit
if isinstance(src, SplitStashContainer):
desc = f' for {src.split_name}'
else:
desc = ''
if logger.isEnabledFor(logging.INFO):
logger.info(f'using batch limit: {vlim}{desc}')
return vlim
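    # Semantics sketch (hypothetical sizes): an integer ``batch_limit`` of 5
    # uses the first 5 batches, while a float such as 0.1 is a fraction, so a
    # source stash of 200 batches yields round(200 * 0.1) == 20 batches.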
def _prepare_datasets(self, batch_limit: Union[int, float],
to_deallocate: List[Batch],
ds_src: List[Stash]) -> List[List[Batch]]:
"""Return batches for each data set. The batches are returned per dataset as
given in :meth:`_get_dataset_splits`.
Return:
[(training batch 1..N), (validation batch 1..N), (test batch 1..N)]
"""
biter = self.model_settings.batch_iteration
cnt = 0
if logger.isEnabledFor(logging.INFO):
logger.info(f'preparing datasets using iteration: {biter}')
self._notify('prepare_datasets_start', biter)
if biter == 'gpu':
ds_dst = []
for src in ds_src:
vlim = self._calc_batch_limit(src, batch_limit)
cpu_batches = tuple(it.islice(src.values(), vlim))
gpu_batches = list(map(lambda b: b.to(), cpu_batches))
cnt += len(gpu_batches)
# the `to` call returns the same instance if the tensor is
# already on the GPU, so only deallocate batches copied over
for cpu_batch, gpu_batch in zip(cpu_batches, gpu_batches):
if cpu_batch is not gpu_batch:
to_deallocate.append(cpu_batch)
if not self.model_settings.cache_batches:
to_deallocate.extend(gpu_batches)
ds_dst.append(gpu_batches)
elif biter == 'cpu':
ds_dst = []
for src in ds_src:
vlim = self._calc_batch_limit(src, batch_limit)
batches = list(it.islice(src.values(), vlim))
cnt += len(batches)
if not self.model_settings.cache_batches:
to_deallocate.extend(batches)
ds_dst.append(batches)
elif biter == 'buffered':
ds_dst = ds_src
cnt = '?'
else:
raise ModelError(f'No such batch iteration method: {biter}')
self._notify('prepare_datasets_end', biter)
self._preproces_training(ds_dst[0])
return cnt, ds_dst
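    # Mode sketch: ``batch_iteration='gpu'`` preloads the batches and copies
    # them to the GPU up front, ``'cpu'`` preloads them but leaves them on the
    # CPU, and ``'buffered'`` streams batches directly from the source stashes
    # without preloading (and therefore can not be combined with batch
    # caching--see _execute below).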
def _execute(self, sets_name: str, description: str,
func: Callable, ds_src: tuple) -> bool:
"""Either train or test the model based on method ``func``.
        :param sets_name: the name of the data sets, which is either ``train``
                          or ``test``
        :param description: an optional description added to the results name
        :param func: the method to call to do the training or testing
        :param ds_src: a tuple of datasets in a form such as ``(train,
                       validation, test)`` (see :meth:`_get_dataset_splits`)
        :return: ``True`` if training/testing was successful, ``False`` if an
                 exception occurred or an early bail out was requested
"""
to_deallocate: List[Batch] = []
ds_dst: List[List[Batch]] = None
batch_limit = self.model_settings.batch_limit
biter = self.model_settings.batch_iteration
if self.model_settings.cache_batches and biter == 'buffered':
raise ModelError('Can not cache batches for batch ' +
'iteration setting \'buffered\'')
if logger.isEnabledFor(logging.INFO):
logger.info(f'batch iteration: {biter}, limit: {batch_limit}' +
f', caching: {self.model_settings.cache_batches}'
f', cached: {len(self.cached_batches)}')
self._notify('execute_start', sets_name)
self._gc(1)
ds_dst = self.cached_batches.get(sets_name)
if ds_dst is None:
cnt = 0
with time('loaded {cnt} batches'):
cnt, ds_dst = self._prepare_datasets(
batch_limit, to_deallocate, ds_src)
if self.model_settings.cache_batches:
self.cached_batches[sets_name] = ds_dst
if logger.isEnabledFor(logging.INFO):
logger.info('train/validation sets: ' +
f'{" ".join(map(lambda l: str(len(l)), ds_dst))}')
try:
with time(f'executed {sets_name}'):
func(*ds_dst)
if description is not None:
res_name = f'{self.model_result.index}: {description}'
self.model_result.name = res_name
return True
except EarlyBailError as e:
logger.warning(f'<{e}>')
self.reset()
return False
finally:
self._notify('execute_end', sets_name)
self._train_manager.clear()
if logger.isEnabledFor(logging.INFO):
logger.info(f'deallocating {len(to_deallocate)} batches')
for batch in to_deallocate:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'deallocating: {batch}')
batch.deallocate()
self._gc(1)
self.torch_config.empty_cache()
def _get_dataset_splits(self) -> Tuple[BatchStash]:
"""Return a stash, one for each respective data set tracked by this
executor.
"""
def map_split(n: str) -> DatasetSplitStash:
            s = splits.get(n)
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(f'split: {n}={"none" if s is None else len(s)}')
            if s is None:
raise ModelError(
f"No split '{n}' in {self.dataset_stash.split_names}, " +
f'executor splits: {self.dataset_split_names}')
return s
splits = self.dataset_stash.splits
return tuple(map(map_split, self.dataset_split_names))
def train(self, description: str = None) -> ModelResult:
"""Train the model.
"""
self.model_result = self._create_model_result()
train, valid, _ = self._get_dataset_splits()
self._execute('train', description, self._train, (train, valid))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'trained model result: {self.model_result}')
return self.model_result
def test(self, description: str = None) -> ModelResult:
"""Test the model.
"""
train, valid, test = self._get_dataset_splits()
if self.model_result is None:
logger.warning('no results found--loading')
self.model_result = self.result_manager.load()
self._execute('test', description, self._test, (test,))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'tested model result: {self.model_result}')
return self.model_result
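    # Typical lifecycle sketch (the executor instance is assumed to be created
    # from the application configuration; the description string is
    # illustrative):
    #
    #   executor.train('batch size experiment')
    #   res: ModelResult = executor.test('batch size experiment')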
def train_production(self, description: str = None) -> ModelResult:
"""Train and test the model on the training and test datasets. This is
used for a "production" model that is used for some purpose other than
evaluation.
"""
self.model_result = self._create_model_result()
train, valid, test = self._get_dataset_splits()
train = UnionStash((train, test))
self._execute('train production', description,
self._train, (train, valid))
return self.model_result
def predict(self, batches: List[Batch]) -> ModelResult:
"""Create predictions on ad-hoc data.
:param batches: contains the data (X) on which to predict
:return: the results of the predictions
"""
for batch in batches:
self.batch_stash.populate_batch_feature_mapping(batch)
self._test(batches)
return self.model_result.test
def write_model(self, depth: int = 0, writer: TextIOBase = sys.stdout):
model = self._get_or_create_model()
sio = StringIO()
sp = self._sp(depth + 1)
nl = '\n'
print(model, file=sio)
self._write_line('model:', depth, writer)
writer.write(nl.join(map(lambda s: sp + s, sio.getvalue().split(nl))))
def write_settings(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line('network settings:', depth, writer)
self._write_dict(self.net_settings.asdict(), depth + 1, writer)
self._write_line('model settings:', depth, writer)
self._write_dict(self.model_settings.asdict(), depth + 1, writer)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_settings: bool = False, include_model: bool = False):
sp = self._sp(depth)
writer.write(f'{sp}model: {self.model_settings.model_name}\n')
writer.write(f'{sp}feature splits:\n')
self.feature_stash.write(depth + 1, writer)
writer.write(f'{sp}batch splits:\n')
self.dataset_stash.write(depth + 1, writer)
if include_settings:
self.write_settings(depth, writer)
if include_model:
self.write_model(depth, writer) | zensols.deeplearn | /zensols.deeplearn-1.8.1-py3-none-any.whl/zensols/deeplearn/model/executor.py | executor.py |
__author__ = 'Paul Landes'
from typing import List, Dict, Tuple
from dataclasses import dataclass, field
import logging
import itertools as it
import torch
from torch import Tensor
from torch import nn
from zensols.deeplearn import DropoutNetworkSettings
from zensols.deeplearn.batch import Batch
from zensols.deeplearn.model import (
SequenceNetworkModule, SequenceNetworkContext, SequenceNetworkOutput
)
from zensols.deeplearn.layer import DeepLinearNetworkSettings, DeepLinear
from zensols.deepnlp.layer import (
EmbeddingNetworkSettings, EmbeddingNetworkModule, EmbeddingLayer,
)
from . import (
TokenizedDocument, TransformerEmbedding,
TransformerNominalFeatureVectorizer
)
logger = logging.getLogger(__name__)
class TransformerEmbeddingLayer(EmbeddingLayer):
"""A transformer (i.e. BERT) embedding layer. This class generates embeddings
on a per sentence basis. See the initializer documentation for
configuration requirements.
"""
MODULE_NAME = 'transformer embedding'
def __init__(self, *args, embed_model: TransformerEmbedding, **kwargs):
"""Initialize with an embedding model. This embedding model must configured
with :obj:`.TransformerEmbedding.output` to ``last_hidden_state``.
:param embed_model: used to generate the transformer (i.e. BERT)
embeddings
"""
super().__init__(
*args, embedding_dim=embed_model.vector_dimension, **kwargs)
self.embed_model = embed_model
if self.embed_model.trainable:
self.emb = embed_model.model
def deallocate(self):
if not self.embed_model.cache:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'deallocate: {self.__class__}')
super().deallocate()
def _forward_trainable(self, doc: Tensor) -> Tensor:
tok_doc: TokenizedDocument = TokenizedDocument.from_tensor(doc)
x = self.embed_model.transform(tok_doc)
tok_doc.deallocate()
if logger.isEnabledFor(logging.DEBUG):
self._shape_debug('embedding', x)
return x
def forward(self, x: Tensor) -> Tensor:
self._shape_debug('transformer input', x)
if self.embed_model.trainable:
x = self._forward_trainable(x)
self._shape_debug('transform', x)
return x
@dataclass
class TransformerSequenceNetworkSettings(EmbeddingNetworkSettings,
DropoutNetworkSettings):
"""Settings configuration for :class:`.TransformerSequence`.
"""
decoder_settings: DeepLinearNetworkSettings = field()
"""The decoder feed forward network."""
def get_module_class_name(self) -> str:
return __name__ + '.TransformerSequence'
class TransformerSequence(EmbeddingNetworkModule, SequenceNetworkModule):
"""A sequence based model for token classification use HuggingFace
transformers.
"""
MODULE_NAME = 'transformer sequence'
def __init__(self, net_settings: TransformerSequenceNetworkSettings,
sub_logger: logging.Logger = None):
super().__init__(net_settings, sub_logger or logger)
ns = self.net_settings
ds = ns.decoder_settings
ds.in_features = self.embedding_output_size
self._n_labels = ds.out_features
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'linear settings: {ds}')
self.decoder = DeepLinear(ds, self.logger)
self._init_range = 0.02
self.decoder.apply(self._init_weights)
def _init_weights(self, module: nn.Module):
"""Initialize the weights."""
# taken directly from HuggingFace
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self._init_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self._init_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def deallocate(self):
super().deallocate()
self.decoder.deallocate()
def _to_lists(self, tdoc: TokenizedDocument, sents: Tensor) -> \
Tuple[List[List[int]]]:
"""Convert a document of sentences from a tensor to list of lists of nominial
labels.
:param tdoc: the tokenzied document representing this batch
:param sents: the sentences to convert to the list of lists, with rows
as sentences and columns as word piece label
:return: of list of lists with each sublist represents a sentence
"""
offsets: Tensor = tdoc.offsets
preds: List[List[int]] = []
n_sents: int = sents.size(1)
labels: List[List[int]] = [] if sents.size(0) > 1 else None
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'to collapse: {sents.shape}, ' +
f'offsets: {offsets.shape}')
for six in range(n_sents):
last = None
tixes = []
for wix, tix in enumerate(offsets[six]):
if tix >= 0 and last != tix:
last = tix
tixes.append(wix)
sl = sents[:, six, tixes]
preds.append(sl[0].tolist())
if labels is not None:
labels.append(sl[1].tolist())
return preds, labels
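    # Worked example (hypothetical offsets): for offsets[six] ==
    # [-1, 0, 0, 1, -1], the special tokens map to -1 and the first word is
    # split into two word pieces, so only word piece indices [1, 3] are kept
    # and each output sublist has one entry per linguistic token.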
def _debug_preds(self, labels: Tensor, preds: List[List[str]],
tdoc: TokenizedDocument, batch: Batch, limit: int = 5):
vocab: Dict[str, int] = self.embedding.embed_model.resource.tokenizer.vocab
vocab = {vocab[k]: k for k in vocab.keys()}
input_ids = tdoc.input_ids
fsents = tuple(map(lambda d: d.doc.sents[0], batch.data_points))
for six, pred in enumerate(it.islice(preds, limit)):
print(fsents[six])
print('sent', ', '.join(
map(lambda ix: vocab[ix.item()], input_ids[six])))
print('predictions:', pred)
print('labels:', labels[six])
print('-' * 10)
def _forward(self, batch: Batch, context: SequenceNetworkContext) -> \
SequenceNetworkOutput:
DEBUG = False
if DEBUG and self.logger.isEnabledFor(logging.DEBUG):
for dp in batch.data_points:
self.logger.debug(f'data point: {dp}')
emb: Tensor = super()._forward(batch)
vec: TransformerNominalFeatureVectorizer = \
batch.get_label_feature_vectorizer()
pad_label: int = vec.pad_label
labels: Tensor = batch.get_labels()
tdoc: Tensor = self.get_embedding_tensors(batch)[0]
tdoc = TokenizedDocument.from_tensor(tdoc)
attention_mask: Tensor = tdoc.attention_mask
try:
self._shape_debug('labels', labels)
self._shape_debug('attention mask', attention_mask)
self._shape_debug('embedding', emb)
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'tokenized doc: {tdoc}, len: {len(tdoc)}')
emb = self._forward_dropout(emb)
self._shape_debug('dropout', emb)
logits = self.decoder(emb)
self._shape_debug('logits', logits)
preds = logits.argmax(dim=-1)
# labels are missing when predicting
if labels is None:
loss = batch.torch_config.singleton([0], dtype=torch.float32)
else:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self._n_labels)
active_labels = torch.where(
active_loss, labels.view(-1),
torch.tensor(pad_label).type_as(labels)
)
self._shape_debug('active_logits', active_logits)
self._shape_debug('active_labels', active_labels)
loss = context.criterion(active_logits, active_labels)
labels = labels.squeeze(-1)
if DEBUG:
sz = 5
print('active labels', active_labels.tolist()[:sz])
print(active_labels.shape)
print('active logits', active_logits.tolist()[:sz])
print(active_logits.shape)
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug(f'loss: {loss}')
self._shape_debug('predictions', preds)
if labels is None:
to_collapse = preds.unsqueeze(0)
else:
to_collapse = torch.stack((preds, labels))
preds, mapped_labels = self._to_lists(tdoc, to_collapse)
out = SequenceNetworkOutput(
preds, loss, labels=mapped_labels, outputs=logits)
if DEBUG:
self._debug_preds(labels, preds, tdoc, batch)
finally:
tdoc.deallocate()
return out | zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/transformer/layer.py | layer.py |
__author__ = 'Paul Landes'
from typing import Dict, Any, Type, Tuple
from dataclasses import dataclass, field, InitVar
import logging
import collections
from io import TextIOBase
from functools import reduce
from pathlib import Path
from torch import Tensor
from transformers import PreTrainedTokenizer, PreTrainedModel
from zensols.util.time import time
from zensols.introspect import ClassImporter
from zensols.config import Dictable
from zensols.persist import persisted, PersistedWork, PersistableContainer
from zensols.deeplearn import TorchConfig, DeepLearnError
logger = logging.getLogger(__name__)
class TransformerError(DeepLearnError):
"""Raised for any transformer specific errors in this and child modules of
the parent.
"""
pass
@dataclass
class TransformerResource(PersistableContainer, Dictable):
"""A container base class that allows configuration and creates various
huggingface models.
"""
name: str = field()
"""The name of the model given by the configuration. Used for debugging.
"""
torch_config: TorchConfig = field()
"""The config device used to copy the embedding data."""
model_id: str = field()
"""The ID of the model (i.e. ``bert-base-uncased``). If this is not set, is
derived from the ``model_name`` and ``case``.
Token embeding using :class:`.TransformerEmbedding` as been tested with:
* ``bert-base-cased``
* ``bert-large-cased``
* ``roberta-base``
* ``distilbert-base-cased``
:see: `Pretrained Models <https://huggingface.co/transformers/pretrained_models.html>`_
"""
cased: bool = field(default=None)
"""``True`` for case sensitive models, ``False`` (default) otherwise. The
negated value of it is also used as the ``do_lower_case`` parameter in the
``*.from_pretrained`` calls to huggingface transformers.
"""
trainable: bool = field(default=False)
"""If ``False`` the weights on the transformer model are frozen and the use of
the model (including in subclasses) turn off autograd when executing..
"""
args: Dict[str, Any] = field(default_factory=dict)
"""Additional arguments to pass to the `from_pretrained` method for the
tokenizer and the model.
"""
tokenizer_args: Dict[str, Any] = field(default_factory=dict)
"""Additional arguments to pass to the `from_pretrained` method for the
tokenizer.
"""
model_args: Dict[str, Any] = field(default_factory=dict)
"""Additional arguments to pass to the `from_pretrained` method for the
model.
"""
model_class: str = field(default='transformers.AutoModel')
"""The model fully qualified class used to create models with the
``from_pretrained`` static method.
"""
tokenizer_class: str = field(default='transformers.AutoTokenizer')
"""The model fully qualified class used to create tokenizers with the
``from_pretrained`` static method.
"""
cache: InitVar[bool] = field(default=False)
"""When set to ``True`` cache a global space model using the parameters from
the first instance creation.
"""
cache_dir: Path = field(default=None)
"""The directory that is contains the BERT model(s)."""
def __post_init__(self, cache: bool):
super().__init__()
if self.cache_dir is not None and not self.cache_dir.exists():
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(f'creating cache directory: {self.cache_dir}')
self.cache_dir.mkdir(parents=True, exist_ok=True)
if self.cased is None:
if self.model_id.find('uncased') >= 0:
self.cased = False
else:
logger.info("'cased' not given--assuming a cased model")
self.cased = True
        self._tokenizer = PersistedWork('_tokenizer', self, cache)
self._model = PersistedWork('_model', self, cache)
if logger.isEnabledFor(logging.DEBUG):
            logger.debug(f'id: {self.model_id}, cased: {self.cased}, ' +
                         f'cached: {cache}')
@property
def cached(self) -> bool:
"""If the model is cached.
:see: :obj:`cache`
"""
return self._tokenizer.cache_global
@cached.setter
def cached(self, cached: bool):
"""If the model is cached.
:see: :obj:`cache`
"""
self._tokenizer.cache_global = cached
self._model.cache_global = cached
def _is_roberta(self):
return self.model_id.find('roberta') > -1
def _create_tokenizer_class(self) -> Type[PreTrainedTokenizer]:
"""Create the huggingface class used for tokenizer."""
ci = ClassImporter(self.tokenizer_class)
return ci.get_class()
@property
@persisted('_tokenizer')
def tokenizer(self) -> PreTrainedTokenizer:
params = {'do_lower_case': not self.cased}
if self.cache_dir is not None:
params['cache_dir'] = str(self.cache_dir.absolute())
params.update(self.args)
params.update(self.tokenizer_args)
if self._is_roberta():
if not self.cased:
raise TransformerError('RoBERTa only supports cased models')
params['add_prefix_space'] = True
cls = self._create_tokenizer_class()
return cls.from_pretrained(self.model_id, **params)
def _create_model_class(self) -> Type[PreTrainedModel]:
ci = ClassImporter(self.model_class)
return ci.get_class()
@property
@persisted('_model')
def model(self) -> PreTrainedModel:
# load pre-trained model (weights)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'loading model: {self.model_id}')
params = {}
if self.cache_dir is not None:
params['cache_dir'] = str(self.cache_dir.absolute())
params.update(self.args)
params.update(self.model_args)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating model using: {params}')
with time(f'loaded model from pretrained {self.model_id}'):
cls = self._create_model_class()
model = cls.from_pretrained(self.model_id, **params)
# put the model in `evaluation` mode, meaning feed-forward operation.
if self.trainable:
logger.debug('model is trainable')
else:
logger.debug('turning off grad for non-trainable transformer')
model.eval()
for param in model.base_model.parameters():
param.requires_grad = False
model = self.torch_config.to(model)
return model
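    # Construction sketch (a guess at typical values; the TorchConfig instance
    # is assumed to be created elsewhere):
    #
    #   res = TransformerResource(name='transformer', torch_config=torch_config,
    #                             model_id='bert-base-cased')
    #   tokenizer = res.tokenizer
    #   model = res.model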
def _from_dictable(self, *args, **kwargs) -> Dict[str, Any]:
dct = super()._from_dictable(*args, **kwargs)
secs = collections.OrderedDict()
name: str
param: Tensor
n_total_params = 0
for name, param in self.model.named_parameters():
prefix = name[:name.find('.')]
layer: Dict[str, Tuple[int, int]] = secs.get(prefix)
if layer is None:
layer = collections.OrderedDict()
secs[prefix] = layer
shape: Tuple[int, int] = tuple(param.shape)
n_total_params += reduce(lambda x, y: x * y, shape)
layer[name] = shape
dct['model'] = {'sections': secs, 'params': n_total_params}
return dct
def _write_dict(self, data: dict, depth: int, writer: TextIOBase):
is_param = False
if len(data) > 0:
val = next(iter(data.values()))
is_param = (isinstance(val, tuple) and len(val) == 2)
super()._write_dict(data, depth, writer, is_param)
def clear(self):
self._tokenizer.clear()
self._model.clear()
def __str__(self) -> str:
return f'{self.name}: id: {self.model_id}, cased: {self.cased}' | zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/transformer/resource.py | resource.py |
__author__ = 'Paul Landes'
from typing import Optional, Iterable
from torch import nn
from torch.nn.parameter import Parameter
import logging
from torch.optim import AdamW, Optimizer
from transformers import get_scheduler
from zensols.deeplearn.model import ModelResourceFactory, ModelExecutor
logger = logging.getLogger(__name__)
class TransformerAdamFactory(ModelResourceFactory):
def __call__(self, params: Iterable[Parameter],
model: nn.Module, executor: ModelExecutor,
*args, weight_decay: float = 0.0, **kwargs):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'using weight decay: {weight_decay}')
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)],
"weight_decay": weight_decay,
},
{
"params": [p for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
return AdamW(optimizer_grouped_parameters, *args, **kwargs)
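    # Minimal usage sketch (the model, executor and hyperparameters are
    # assumptions for illustration):
    #
    #   factory = TransformerAdamFactory()
    #   optimizer = factory(model.parameters(), model, executor,
    #                       lr=2e-5, weight_decay=0.01)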
class TransformerSchedulerFactory(ModelResourceFactory):
"""Unified API to get any scheduler from its name. This simply calls
    :func:`transformers.get_scheduler` and calculates ``num_training_steps`` as
    ``epochs * number of training batches`` when it is not given.
Documentation taken directly from ``get_scheduler`` function in the
`PyTorch source tree <https://github.com/huggingface/transformers/blob/4ba203d9d3ab5f6ae8def490cbea44b61798fc54/src/transformers/optimization.py#L229>`_.
"""
def __call__(self, name: str,
optimizer: Optimizer,
executor: ModelExecutor,
num_warmup_steps: Optional[int] = None,
num_training_steps: Optional[int] = None,
split_name: Optional[str] = 'train'):
"""
Args:
name (:obj:`str` or `:obj:`SchedulerType`):
The name of the scheduler to use.
optimizer (:obj:`torch.optim.Optimizer`):
The optimizer that will be used during training.
num_warmup_steps (:obj:`int`, `optional`):
The number of warmup steps to do. This is not required by all schedulers (hence the argument being
optional), the function will raise an error if it's unset and the scheduler type requires it.
num_training_steps (:obj:`int`, `optional`):
The number of training steps to do. This is not required by all schedulers (hence the argument being
optional), the function will raise an error if it's unset and the scheduler type requires it.
split_name (:obj:`str`, `optional`):
The name of the split to use to count training data points for the calculation of ``num_training_steps``
when ``None``.
"""
n_epochs = executor.model_settings.epochs
n_train_batches = len(executor.dataset_stash.splits[split_name])
if num_training_steps is None:
num_training_steps = n_epochs * n_train_batches
if isinstance(num_warmup_steps, float):
num_warmup_steps = int(num_warmup_steps * num_training_steps)
if logger.isEnabledFor(logging.INFO):
logger.info(f'epochs: {n_epochs}, batches: {n_train_batches}, ' +
f'training steps: {num_training_steps}, ' +
f'warm up steps: {num_warmup_steps}')
return get_scheduler(name, optimizer, num_warmup_steps,
num_training_steps) | zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/transformer/optimizer.py | optimizer.py |
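# Minimal usage sketch mirroring how the executor invokes a scheduler factory
# (see the executor's _create_optimizer_scheduler); the optimizer and executor
# instances are assumed to exist and 'linear' is one of the huggingface
# scheduler names:
#
#   factory = TransformerSchedulerFactory()
#   scheduler = factory('linear', optimizer, executor, num_warmup_steps=0.1)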
from __future__ import annotations
"""Contains classes that are used to vectorize documents in to transformer
embeddings.
"""
__author__ = 'Paul Landes'
from typing import Tuple, List, Dict, Union, Sequence, Any
from dataclasses import dataclass, field
from abc import ABCMeta
import logging
from itertools import chain
import torch
from torch import Tensor
from zensols.persist import persisted, Deallocatable
from zensols.deeplearn.vectorize import (
VectorizerError, TensorFeatureContext, EncodableFeatureVectorizer,
FeatureContext, AggregateEncodableFeatureVectorizer,
NominalEncodedEncodableFeatureVectorizer, MaskFeatureVectorizer,
)
from zensols.nlp import FeatureDocument, FeatureSentence
from zensols.deepnlp.vectorize import (
EmbeddingFeatureVectorizer, TextFeatureType, FeatureDocumentVectorizer
)
from . import (
TransformerEmbedding, TransformerResource,
TransformerDocumentTokenizer, TokenizedDocument, TokenizedFeatureDocument,
)
logger = logging.getLogger(__name__)
class TransformerFeatureContext(FeatureContext, Deallocatable):
"""A vectorizer feature contex used with
:class:`.TransformerEmbeddingFeatureVectorizer`.
"""
def __init__(self, feature_id: str,
document: Union[TokenizedDocument, FeatureDocument]):
"""
        :param feature_id: the feature ID used to identify this context
        :param document: the document used to create the transformer embeddings
"""
super().__init__(feature_id)
Deallocatable.__init__(self)
self._document = document
def get_document(self, vectorizer: TransformerFeatureVectorizer) -> \
TokenizedDocument:
document = self._document
if isinstance(document, FeatureDocument):
document = vectorizer.tokenize(document)
return document
def get_feature_document(self) -> FeatureDocument:
if not isinstance(self._document, FeatureDocument):
raise VectorizerError(
f'Expecting FeatureDocument but got: {type(self._document)}')
return self._document
def deallocate(self):
super().deallocate()
self._try_deallocate(self._document)
del self._document
@dataclass
class TransformerFeatureVectorizer(EmbeddingFeatureVectorizer,
FeatureDocumentVectorizer):
"""Base class for classes that vectorize transformer models. This class also
tokenizes documents.
"""
is_labeler: bool = field(default=False)
"""If ``True``, make this a labeling specific vectorizer. Otherwise, certain
layers will use the output of the vectorizer as features rather than the
labels.
"""
encode_tokenized: bool = field(default=False)
"""Whether to tokenize the document on encoding. Set this to ``True`` only if
the huggingface model ID (i.e. ``bert-base-cased``) will not change after
vectorization/batching.
Setting this to ``True`` tells the vectorizer to tokenize during encoding,
and thus will speed experimentation by providing the tokenized tensors to
the model directly.
"""
def __post_init__(self):
if self.encode_transformed and not self.encode_tokenized:
raise VectorizerError("""\
Can not transform while not tokenizing on the encoding side. Either set
encode_transformed to False or encode_tokenized to True.""")
def _assert_token_output(self, expected: str = 'last_hidden_state'):
if self.embed_model.output != expected:
raise VectorizerError(f"""\
Expanders only work at the token level, so output such as \
`{expected}`, which provides an output for each token in the \
transformer embedding, is required, got: {self.embed_model.output}""")
@property
def feature_type(self) -> TextFeatureType:
if self.is_labeler:
return TextFeatureType.NONE
else:
return self.FEATURE_TYPE
@property
def word_piece_token_length(self) -> int:
return self.embed_model.tokenizer.word_piece_token_length
def _get_shape(self) -> Tuple[int, int]:
return self.word_piece_token_length, self.embed_model.vector_dimension
def _get_tokenizer(self) -> TransformerDocumentTokenizer:
emb: TransformerEmbedding = self.embed_model
return emb.tokenizer
def _get_resource(self) -> TransformerResource:
return self._get_tokenizer().resource
def _create_context(self, doc: FeatureDocument) -> \
TransformerFeatureContext:
if self.encode_tokenized:
doc = self.tokenize(doc).detach()
return TransformerFeatureContext(self.feature_id, doc)
def _context_to_document(self, ctx: TransformerFeatureContext) -> \
TokenizedDocument:
return ctx.get_document(self)
def tokenize(self, doc: FeatureDocument) -> TokenizedFeatureDocument:
"""Tokenize the document in to a token document used by the encoding phase.
:param doc: the document to be tokenized
"""
emb: TransformerEmbedding = self.embed_model
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'synthesized document: {doc}')
return emb.tokenize(doc)
@dataclass
class TransformerEmbeddingFeatureVectorizer(TransformerFeatureVectorizer):
"""A feature vectorizer used to create transformer (i.e. BERT) embeddings. The
class uses the :obj:`.embed_model`, which is of type
:class:`.TransformerEmbedding`.
    Note the encoding input is ideally made up of sentences shorter than 512
    tokens.
However, this vectorizer can accommodate both :class:`.FeatureSentence` and
:class:`.FeatureDocument` instances.
"""
DESCRIPTION = 'transformer document embedding'
FEATURE_TYPE = TextFeatureType.EMBEDDING
def __post_init__(self):
super().__post_init__()
if self.encode_transformed and self.embed_model.trainable:
# once the transformer last hidden state is dumped during encode
# the parameters are lost, which are needed to train the model
# properly
raise VectorizerError('a trainable model can not encode ' +
'transformed vectorized features')
def _encode(self, doc: FeatureDocument) -> FeatureContext:
return self._create_context(doc)
def _decode(self, context: TransformerFeatureContext) -> Tensor:
emb: TransformerEmbedding = self.embed_model
if logger.isEnabledFor(logging.INFO):
logger.info(f'decoding {context} with trainable: {emb.trainable}')
tok_doc: TokenizedDocument
arr: Tensor
if emb.trainable:
doc: TokenizedDocument = self._context_to_document(context)
arr = doc.tensor
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'passing through tensor: {arr.shape}')
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'transforming doc: {context}')
doc: TokenizedDocument = self._context_to_document(context)
arr = emb.transform(doc)
if logger.isEnabledFor(logging.INFO):
logger.info(f'decoded trans layer {arr.shape} on {arr.device}')
return arr
class TransformerExpanderFeatureContext(TransformerFeatureContext):
"""A vectorizer feature context used with
:class:`.TransformerExpanderFeatureVectorizer`.
"""
contexts: Tuple[FeatureContext] = field()
"""The subordinate contexts."""
def __init__(self, feature_id: str, contexts: Tuple[FeatureContext],
document: Union[TokenizedDocument, FeatureDocument]):
"""
        :param feature_id: the feature ID used to identify this context
        :param contexts: subordinate contexts given to
                         :class:`.MultiFeatureContext`
        :param document: the document used to create the transformer embeddings
"""
super().__init__(feature_id, document)
self.contexts = contexts
def deallocate(self):
super().deallocate()
if hasattr(self, 'contexts'):
self._try_deallocate(self.contexts)
del self.contexts
@dataclass
class TransformerExpanderFeatureVectorizer(TransformerFeatureVectorizer):
"""A vectorizer that expands lingustic feature vectors to their respective
locations as word piece token vectors.
This is used to concatenate lingustic features with Bert (and other
transformer) embeddings. Each lingustic token is copied in the word piece
token location across all vectorizers and sentences.
:shape: (-1, token length, X), where X is the sum of all the delegate
shapes across all three dimensions
"""
DESCRIPTION = 'transformer expander'
FEATURE_TYPE = TextFeatureType.TOKEN
delegate_feature_ids: Tuple[str] = field(default=None)
"""A list of feature IDs of vectorizers whose output will be expanded."""
def __post_init__(self):
super().__post_init__()
if self.delegate_feature_ids is None:
raise VectorizerError('expected attribute: delegate_feature_ids')
self._assert_token_output()
self._validated = False
def _validate(self):
if not self._validated:
for vec in self.delegates:
                if hasattr(vec, 'feature_type') and \
                        vec.feature_type != TextFeatureType.TOKEN:
raise VectorizerError('Only token level vectorizers are ' +
f'supported, but got {vec}')
self._validated = True
def _get_shape(self) -> Tuple[int, int]:
shape = [-1, self.word_piece_token_length, 0]
vec: FeatureDocumentVectorizer
for vec in self.delegates:
shape[2] += vec.shape[-1]
return tuple(shape)
@property
@persisted('_delegates', allocation_track=False)
def delegates(self) -> EncodableFeatureVectorizer:
"""The delegates used for encoding and decoding the lingustic features.
"""
return tuple(map(lambda f: self.manager[f], self.delegate_feature_ids))
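    # Expansion sketch: with delegate_feature_ids = ('enum', 'count')
    # (illustrative feature IDs), each linguistic token's concatenated
    # delegate vector is copied to every word piece position it maps to,
    # producing a tensor of shape
    # (|sentences|, |word piece tokens|, enum width + count width).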
def _encode(self, doc: FeatureDocument) -> FeatureContext:
udoc: Union[TokenizedDocument, FeatureDocument] = doc
self._validate()
if self.encode_tokenized:
udoc: TokenizedDocument = self.tokenize(doc).detach()
cxs = tuple(map(lambda vec: vec.encode(doc), self.delegates))
return TransformerExpanderFeatureContext(self.feature_id, cxs, udoc)
def _decode(self, context: TransformerExpanderFeatureContext) -> Tensor:
doc: TokenizedDocument = self._context_to_document(context)
arrs: List[Tensor] = []
# decode subordinate contexts
vec: FeatureDocumentVectorizer
ctx: FeatureContext
for vec, ctx in zip(self.delegates, context.contexts):
src = vec.decode(ctx)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'decoded shape ({vec.feature_id}): {src.shape}')
arrs.append(src)
# get the mapping per sentence
wps_sents = tuple(map(lambda s: doc.map_word_pieces(s), doc.offsets))
tlen = self.word_piece_token_length
# use variable length tokens
if tlen <= 0:
tlen = max(chain.from_iterable(
chain.from_iterable(
map(lambda s: map(lambda t: t[1], s), wps_sents))))
            # max finds the largest index, so add 1 for size
            tlen += 1
            # add another (to be zero) for the ending sentence boundary
tlen += 1 if doc.boundary_tokens else 0
# number of sentences
n_sents = len(wps_sents)
# feature dimension (last dimension)
dim = sum(map(lambda x: x.size(-1), arrs))
# tensor to populate
marr = self.torch_config.zeros((n_sents, tlen, dim))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'sents: {n_sents}, token length: {tlen}, dim: {dim}')
sent: Tensor
arr: Tensor
wps: Tuple[Tuple[Tensor, List[int]]]
marrix = 0
# iterate feature vectors
for arr in arrs:
ln = arr.size(-1)
meix = marrix + ln
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'feature range: [{marrix}:{meix}]')
# iterate sentences
for six, (sent, wps) in enumerate(zip(doc.offsets, wps_sents)):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'expanding for {arr.shape} in ' +
f'[{six},:,{marrix}:{meix}]')
                # iterate linguistic / word piece tokens
for tix, wpixs in wps:
# for each word piece mapping, copy the source feature
# vector to the target, thereby expanding and increasing
                    # the size of the last dimension
for wix in wpixs:
if False and logger.isEnabledFor(logging.DEBUG):
logger.debug(f'[{six}, {wix}, {marrix}:{meix}] ' +
f'= [{six}, {tix}]')
marr[six, wix, marrix:meix] = arr[six, tix]
marrix += ln
return marr
@dataclass
class LabelTransformerFeatureVectorizer(TransformerFeatureVectorizer,
metaclass=ABCMeta):
"""A base class for vectorizing by mapping tokens to transformer consumable
word piece tokens. This includes creating labels and masks.
:shape: (|sentences|, |max word piece length|)
"""
is_labeler: bool = field(default=True)
"""If ``True``, make this a labeling specific vectorizer. Otherwise, certain
layers will use the output of the vectorizer as features rather than the
labels.
"""
FEATURE_TYPE = TextFeatureType.TOKEN
def _get_shape(self) -> Tuple[int, int]:
return (-1, self.word_piece_token_length)
def _decode_sentence(self, sent_ctx: FeatureContext) -> Tensor:
arr: Tensor = super()._decode_sentence(sent_ctx)
return arr.unsqueeze(2)
@dataclass
class TransformerNominalFeatureVectorizer(AggregateEncodableFeatureVectorizer,
LabelTransformerFeatureVectorizer):
"""This creates word piece (maps to tokens) labels. This class uses a
:class:`~zensols.deeplearn.vectorize.NominalEncodedEncodableFeatureVectorizer``
to map from string labels to their nominal long values. This allows a
single instance and centralized location where the label mapping happens in
case other (non-transformer) components need to vectorize labels.
:shape: (|sentences|, |max word piece length|)
"""
DESCRIPTION = 'transformer seq labeler'
delegate_feature_id: str = field(default=None)
"""The feature ID for the aggregate encodeable feature vectorizer."""
label_all_tokens: bool = field(default=False)
"""If ``True``, label all word piece tokens with the corresponding linguistic
token label. Otherwise, the default padded value is used, and thus,
ignored by the loss function when calculating loss.
"""
annotations_attribute: str = field(default='annotations')
"""The attribute used to get the features from the
:class:`~zensols.nlp.FeatureSentence`. For example,
:class:`~zensols.nlp.TokenAnnotatedFeatureSentence` has an ``annotations``
attribute.
"""
def __post_init__(self):
super().__post_init__()
if self.delegate_feature_id is None:
raise VectorizerError('Expected attribute: delegate_feature_id')
self._assert_token_output()
def _get_attributes(self, sent: FeatureSentence) -> Sequence[Any]:
return getattr(sent, self.annotations_attribute)
def _create_decoded_pad(self, shape: Tuple[int]) -> Tensor:
return self.create_padded_tensor(shape, self.delegate.data_type)
def _encode_nominals(self, doc: FeatureDocument) -> Tensor:
delegate: NominalEncodedEncodableFeatureVectorizer = self.delegate
tdoc: TokenizedDocument = self.tokenize(doc)
by_label: Dict[str, int] = delegate.by_label
dtype: torch.dtype = delegate.data_type
lab_all: bool = self.label_all_tokens
n_sents: int = len(doc)
if self.word_piece_token_length > 0:
n_toks = self.word_piece_token_length
else:
n_toks = len(tdoc)
if logger.isEnabledFor(logging.DEBUG):
            logger.debug(f'encoding using {n_toks} tokens with wp len: ' +
                         f'{self.word_piece_token_length}')
arr = self.create_padded_tensor((n_sents, n_toks), dtype)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'output shape: {arr.shape}/{self.shape}')
sent: FeatureSentence
for six, sent in enumerate(doc):
sent_labels: Sequence[Any] = self._get_attributes(sent)
word_ids: Tensor = tdoc.offsets[six]
previous_word_idx: int = None
tix: int
word_idx: int
for tix, word_idx in enumerate(word_ids):
# special tokens have a word id that is None. We set the label
# to -100 so they are automatically ignored in the loss
# function.
if word_idx == -1:
pass
# we set the label for the first token of each word.
elif word_idx != previous_word_idx:
lab: str = sent_labels[word_idx]
arr[six][tix] = by_label[lab]
# for the other tokens in a word, we set the label to either
# the current label or -100, depending on the label_all_tokens
# flag
elif lab_all:
arr[six][tix] = by_label[sent_labels[word_idx]]
previous_word_idx = word_idx
return arr
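    # Alignment sketch (hypothetical data): with sentence labels
    # ['B-PER', 'O'] and word piece offsets [-1, 0, 0, 1, -1], only the first
    # word piece of each word receives its nominal label index; the remaining
    # positions keep the pad label (and are ignored by the loss) unless
    # label_all_tokens is True.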
def _encode(self, doc: FeatureDocument) -> FeatureContext:
ctx: FeatureContext
if self.encode_tokenized:
arr: Tensor = self._encode_nominals(doc)
ctx = TensorFeatureContext(self.feature_id, arr)
else:
ctx = self._create_context(doc)
return ctx
def _decode(self, context: FeatureContext) -> Tensor:
if isinstance(context, TransformerFeatureContext):
doc: FeatureDocument = context.get_feature_document()
arr: Tensor = self._encode_nominals(doc)
context = TensorFeatureContext(self.feature_id, arr)
return LabelTransformerFeatureVectorizer._decode(self, context)
@dataclass
class TransformerMaskFeatureVectorizer(LabelTransformerFeatureVectorizer):
"""Creates a mask of word piece tokens to ``True`` and special tokens and
padding to ``False``. This maps tokens to word piece tokens like
:class:`.TransformerNominalFeatureVectorizer`.
:shape: (|sentences|, |max word piece length|)
"""
DESCRIPTION = 'transformer mask'
data_type: Union[str, None, torch.dtype] = field(default='bool')
"""The mask tensor type. To use the int type that matches the resolution of
the manager's :obj:`torch_config`, use ``DEFAULT_INT``.
"""
def __post_init__(self):
super().__post_init__()
self.data_type = MaskFeatureVectorizer.str_to_dtype(
self.data_type, self.manager.torch_config)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'init mask data type: {self.data_type}')
def _create_decoded_pad(self, shape: Tuple[int]) -> Tensor:
return self.torch_config.zeros(shape, dtype=self.data_type)
def _encode_mask(self, doc: FeatureDocument) -> Tensor:
tdoc: TokenizedDocument = self.tokenize(doc)
arr: Tensor = tdoc.attention_mask.type(dtype=self.data_type)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'mask type: {arr.dtype}')
return arr
def _encode(self, doc: FeatureDocument) -> FeatureContext:
ctx: FeatureContext
if self.encode_tokenized:
arr: Tensor = self._encode_mask(doc)
ctx = TensorFeatureContext(self.feature_id, arr)
else:
ctx = self._create_context(doc)
return ctx
def _decode(self, context: FeatureContext) -> Tensor:
if isinstance(context, TransformerFeatureContext):
doc: FeatureDocument = context.get_feature_document()
arr: Tensor = self._encode_mask(doc)
context = TensorFeatureContext(self.feature_id, arr)
return super()._decode(context) | zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/transformer/vectorizers.py | vectorizers.py |
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Tuple, List, Dict, Any, Union, Iterable, ClassVar
from dataclasses import dataclass, field
from abc import ABCMeta
import logging
import sys
from itertools import chain
from io import TextIOBase
import torch
from torch import Tensor
from zensols.util import Hasher
from zensols.persist import PersistableContainer, Stash
from zensols.config import Dictable
from zensols.nlp import (
TokenContainer, FeatureToken, FeatureSentence, FeatureDocument,
FeatureDocumentDecorator,
)
from . import (
TransformerError, TokenizedFeatureDocument, TransformerDocumentTokenizer,
TransformerEmbedding,
)
logger = logging.getLogger(__name__)
@dataclass(repr=False)
class WordPiece(PersistableContainer, Dictable):
"""The word piece data.
"""
UNKNOWN_TOKEN: ClassVar[str] = '[UNK]'
"""The string used for out of vocabulary word piece tokens."""
word: str = field()
"""The string representation of the word piece."""
vocab_index: int = field()
"""The vocabulary index."""
index: int = field()
"""The index of the word piece subword in the tokenization tensor, which
will have the same index in the output embeddings for
:obj:`.TransformerEmbedding.output` = ``last_hidden_state``.
"""
@property
def is_unknown(self) -> bool:
"""Whether this token is out of vocabulary."""
return self.word == self.UNKNOWN_TOKEN
def __str__(self):
s: str = self.word
if s.startswith('##'):
s = s[2:]
return s
class WordPieceTokenContainer(TokenContainer, metaclass=ABCMeta):
"""Like :class:`~zensols.nlp.container.TokenContainer` but contains word
pieces.
"""
def word_iter(self) -> Iterable[WordPiece]:
"""Return an iterable over the word pieces."""
return chain.from_iterable(
map(lambda wp: wp.word_iter(), self.token_iter()))
@property
def unknown_count(self) -> int:
"""Return the number of out of vocabulary tokens in the container."""
return sum(map(lambda t: t.is_unknown, self.token_iter()))
@dataclass(repr=False)
class WordPieceFeatureToken(FeatureToken):
"""The token and the word pieces that repesent it.
"""
words: Tuple[WordPiece] = field()
"""The word pieces that make up this token."""
embedding: Tensor = field(default=None)
"""The embedding for :obj:`words` after using the transformer.
:shape: (|words|, <embedding dimension>)
"""
@property
def indexes(self) -> Tuple[int]:
"""The indexes of the word piece subwords (see :obj:`.WordPiece.index`).
"""
return tuple(map(lambda wp: wp.index, self.words))
@property
def token_embedding(self) -> Tensor:
"""The embedding of this token, which is the sum of the word piece
embeddings.
"""
return self.embedding.sum(dim=0)
def word_iter(self) -> Iterable[WordPiece]:
"""Return an iterable over the word pieces."""
return iter(self.words)
@property
def is_unknown(self) -> bool:
"""Whether this token is out of vocabulary."""
return all(map(lambda wp: wp.is_unknown, self.word_iter()))
def copy_embedding(self, target: FeatureToken):
"""Copy embedding (and children) from this instance to ``target``."""
target.embedding = self.embedding
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line(f'{self.norm}:', depth, writer)
for w in self.words:
self._write_line(f'{w}: i={w.index}, v={w.vocab_index}',
depth + 1, writer)
if self.embedding is not None:
self._write_line(f'embedding: {tuple(self.embedding.size())}',
depth + 1, writer)
def __str__(self) -> str:
return ''.join(map(str, self.words))
def __eq__(self, other: WordPieceFeatureToken) -> bool:
return self.i == other.i and \
self.idx == other.idx and \
self.norm == other.norm
@dataclass(repr=False)
class WordPieceFeatureSpan(FeatureSentence, WordPieceTokenContainer):
"""A sentence made up of word pieces.
"""
embedding: Tensor = field(default=None)
"""The sentence embedding level (i.e. ``[CLS]``) embedding from the
transformer.
:shape: (<embedding dimension>,)
"""
def copy_embedding(self, target: FeatureSentence):
"""Copy embeddings (and children) from this instance to ``target``."""
target.embedding = self.embedding
targ_tok: FeatureToken
org_tok: FeatureToken
for org_tok, targ_tok in zip(self.token_iter(), target.token_iter()):
org_tok.copy_embedding(targ_tok)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line(super().__str__(), depth, writer)
self._write_line('word pieces:', depth, writer)
self._write_line(self, depth + 1, writer)
if self.embedding is not None:
self._write_line(f'embedding: {tuple(self.embedding.size())}',
depth + 1, writer)
def __str__(self) -> str:
return ' '.join(map(str, self.tokens))
@dataclass(repr=False)
class WordPieceFeatureSentence(WordPieceFeatureSpan, FeatureSentence):
pass
@dataclass(repr=False)
class WordPieceFeatureDocument(FeatureDocument, WordPieceTokenContainer):
"""A document made up of word piece sentences.
"""
tokenized: TokenizedFeatureDocument = field(default=None)
"""The tokenized feature document."""
@property
def embedding(self) -> Tensor:
"""The document embedding (see :obj:`.WordPieceFeatureSpan.embedding`).
:shape: (|sentences|, <embedding dimension>)
"""
return torch.stack(tuple(map(lambda s: s.embedding, self.sents)), dim=0)
def copy_embedding(self, target: FeatureDocument):
"""Copy embeddings (and children) from this instance to ``target``."""
targ_sent: FeatureSentence
org_sent: WordPieceFeatureSentence
for org_sent, targ_sent in zip(self.sents, target.sents):
org_sent.copy_embedding(targ_sent)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line(self, depth, writer)
sent: WordPieceFeatureSentence
for sent in self.sents:
self._write_object(sent, depth + 1, writer)
def __str__(self) -> str:
return '. '.join(map(str, self.sents))
class _WordPieceDocKey(object):
"""A key class for caching in :class:`.WordPieceFeatureDocumentFactory`
needed to avoid token level equality comparisons with embeddings. These
token level comparisons raise a PyTorch error.
"""
def __init__(self, doc: FeatureDocument, tdoc: TokenizedFeatureDocument):
self._hash = hash(doc)
self._doc = doc
def __eq__(self, other: FeatureDocument) -> bool:
return self._doc.norm == other._doc.norm
def __hash__(self) -> int:
return self._hash
@dataclass
class WordPieceFeatureDocumentFactory(object):
"""Create instances of :class:`.WordPieceFeatureDocument` from
:class:`~zensols.nlp.container.FeatureDocument` instances. It does this by
iterating through a feature document data structure and adding
``WordPiece*`` object data and optionally adding the corresponding sentence
and/or token level embeddings.
The embeddings can also be added with :meth:`add_token_embeddings` and
:meth:`add_sent_embeddings` individually. If all you want are the sentence
level embeddings, you can use :meth:`add_sent_embeddings` on a
:class:`~zensols.nlp.container.FeatureSentence` instance.
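A minimal usage sketch (assumes ``factory`` is an instance of this class
obtained from an application context configured with a tokenizer and
embedding model, and ``doc`` is a parsed
:class:`~zensols.nlp.container.FeatureDocument`)::
    wp_doc: WordPieceFeatureDocument = factory(doc)
    # sentence level embeddings with shape (|sentences|, <embedding dimension>)
    print(wp_doc.embedding.shape)
    # copy the embeddings back on to the originally parsed document
    wp_doc.copy_embedding(doc)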
"""
tokenizer: TransformerDocumentTokenizer = field()
"""Used to tokenize documents that aren't already in :meth:`__call__`."""
embed_model: TransformerEmbedding = field()
"""Used to populate the embeddings in ``WordPiece*`` classes."""
token_embeddings: bool = field(default=True)
"""Whether to add :class:`.WordPieceFeatureToken.embeddings`.
"""
sent_embeddings: bool = field(default=True)
"""Whether to add class:`.WordPieceFeatureSentence.embeddings`.
"""
def __post_init__(self):
FeatureToken.SKIP_COMPARE_FEATURE_IDS.add('embedding')
def add_token_embeddings(self, doc: WordPieceFeatureDocument, arr: Tensor):
"""Add token embeddings to the sentences of ``doc``. This assumes
tokens are of type :class:`.WordPieceFeatureToken` since the token
indices are needed.
:param doc: the document whose tokens have ``embedding`` set to the
corresponding word piece embedding tensor with shape
``(|word pieces|, <embedding dimension>)``
"""
six: int
sent: FeatureSentence
for six, sent in enumerate(doc.sents):
tok: WordPieceFeatureToken
for tok in sent.tokens:
tok.embedding = arr[six, tok.indexes]
def add_sent_embeddings(
self, doc: Union[WordPieceFeatureDocument, FeatureDocument],
arr: Tensor):
"""Add sentence embeddings to the sentences of ``doc``.
:param doc: sentences of this doc have ``embedding`` set to the
corresponding sentence tensor with shape
``(<embedding dimension>,)``
"""
six: int
sent: FeatureSentence
for six, sent in enumerate(doc.sents):
sent.embedding = arr[six]
def create(self, fdoc: FeatureDocument,
tdoc: TokenizedFeatureDocument = None) -> \
WordPieceFeatureDocument:
"""Create a document in to an object graph that relates word pieces to
feature tokens. Note that if ``tdoc`` is provided, it must have been
tokenized from ``fdoc``.
:param fdoc: the feature document used to create `tdoc`
:param tdoc: a tokenized feature document generated by :meth:`tokenize`
:return: a data structure with the word piece information
"""
def map_tok(ftok: FeatureToken, wps: Tuple[str, int, int]) -> \
WordPieceFeatureToken:
words = tuple(map(lambda t: WordPiece(*t), wps))
return ftok.clone(cls=WordPieceFeatureToken, words=words)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating embeddings for: {fdoc}')
tdoc = self.tokenizer.tokenize(fdoc) if tdoc is None else tdoc
sents: List[WordPieceFeatureSentence] = []
wps: List[Dict[str, Any]] = tdoc.map_to_word_pieces(
sentences=fdoc,
map_wp=self.tokenizer.id2tok,
add_indices=True)
wp: Dict[str, Any]
for six, wp in enumerate(wps):
fsent: FeatureSentence = wp['sent']
tokens: Tuple[WordPieceFeatureToken] = tuple(
map(lambda t: map_tok(*t), wp['map']))
sents.append(fsent.clone(
cls=WordPieceFeatureSentence,
tokens=tokens))
doc = fdoc.clone(
cls=WordPieceFeatureDocument,
sents=tuple(sents),
tokenized=tdoc)
if self.token_embeddings or self.sent_embeddings:
arrs: Dict[str, Tensor] = self.embed_model.transform(
tdoc, TransformerEmbedding.ALL_OUTPUT)
if self.token_embeddings:
arr: Tensor = arrs[
TransformerEmbedding.LAST_HIDDEN_STATE_OUTPUT]
self.add_token_embeddings(doc, arr)
if self.sent_embeddings:
arr: Tensor = arrs[TransformerEmbedding.POOLER_OUTPUT]
self.add_sent_embeddings(doc, arr)
return doc
def __call__(self, fdoc: FeatureDocument,
tdoc: TokenizedFeatureDocument = None) -> \
WordPieceFeatureDocument:
return self.create(fdoc, tdoc)
@dataclass
class CachingWordPieceFeatureDocumentFactory(WordPieceFeatureDocumentFactory):
"""Caches the documents and their embeddings in a
:class:`~zensols.persist.stash.Stash`. For those that are cached, the
embeddings are copied over to the passed document in :meth:`create`.
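A usage sketch (assumes ``tokenizer``, ``embed_model`` and ``stash`` are
configured instances of :class:`.TransformerDocumentTokenizer`,
:class:`.TransformerEmbedding` and a persistence
:class:`~zensols.persist.stash.Stash` respectively)::
    factory = CachingWordPieceFeatureDocumentFactory(
        tokenizer=tokenizer, embed_model=embed_model, stash=stash)
    wp_doc = factory(doc)  # first call computes and caches the document
    wp_doc = factory(doc)  # subsequent calls load it from the stash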
"""
stash: Stash = field(default=None)
"""The stash that persists the feature document instances. If this is not
provided, no caching will happen.
"""
hasher: Hasher = field(default_factory=Hasher)
"""Used to hash the natural langauge text in to string keys."""
def _hash_text(self, text: str) -> str:
self.hasher.reset()
self.hasher.update(text)
return self.hasher()
def create(self, fdoc: FeatureDocument,
tdoc: TokenizedFeatureDocument = None) -> \
WordPieceFeatureDocument:
key: str = self._hash_text(fdoc.text)
wdoc: WordPieceFeatureDocument = \
    None if self.stash is None else self.stash.load(key)
if wdoc is None:
wdoc = super().create(fdoc, tdoc)
if self.stash is not None:
self.stash.dump(key, wdoc)
else:
if wdoc.text != fdoc.text:
raise TransformerError('Document text does not match: ' +
f'<{wdoc.text}> != <{fdoc.text}>')
return wdoc
def clear(self):
"""Clear the caching stash."""
self.stash.clear()
@dataclass
class WordPieceDocumentDecorator(FeatureDocumentDecorator):
"""Populates sentence and token embeddings in the documents.
:see: :class:`.WordPieceFeatureDocumentFactory`
"""
word_piece_doc_factory: WordPieceFeatureDocumentFactory = field()
"""The feature document factory that populates embeddings."""
def decorate(self, doc: FeatureDocument):
wpdoc: WordPieceFeatureDocument = self.word_piece_doc_factory(doc)
wpdoc.copy_embedding(doc) | zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/transformer/wordpiece.py | wordpiece.py |
__author__ = 'Paul Landes'
from typing import List, Dict, Any
from dataclasses import dataclass, field
import logging
import torch
from transformers import PreTrainedTokenizer
from transformers.tokenization_utils_base import BatchEncoding
from zensols.nlp import FeatureDocument
from zensols.persist import persisted, PersistableContainer
from zensols.deeplearn import TorchConfig
from zensols.deepnlp.transformer import TransformerResource
from . import TransformerError, TokenizedFeatureDocument
logger = logging.getLogger(__name__)
@dataclass
class TransformerDocumentTokenizer(PersistableContainer):
resource: TransformerResource = field()
"""Contains the model used to create the tokenizer."""
word_piece_token_length: int = field(default=None)
"""The max number of word piece tokens. The word piece length is always the
same or greater in count than linguistic tokens because the word piece
algorithm tokenizes on characters.
If this value is less than 0, than do not fix sentence lengths.
"""
def __post_init__(self):
super().__init__()
if self.word_piece_token_length is None:
self.word_piece_token_length = \
self.resource.tokenizer.model_max_length
@property
@persisted('_id2tok')
def id2tok(self) -> Dict[int, str]:
vocab = self.resource.tokenizer.vocab
return {vocab[k]: k for k in vocab.keys()}
def tokenize(self, doc: FeatureDocument,
tokenizer_kwargs: Dict[str, Any] = None) -> \
TokenizedFeatureDocument:
"""Tokenize a feature document in a form that's easy to inspect and provide to
:class:`.TransformerEmbedding` to transform.
:param doc: the document to tokenize
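Example (assumes this instance is available as ``tokenizer`` and ``doc``
is a parsed :class:`~zensols.nlp.FeatureDocument`)::
    tdoc = tokenizer.tokenize(doc)
    tdoc.write()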
"""
if not self.resource.tokenizer.is_fast:
raise TransformerError(
'only fast tokenizers are supported for needed offset mapping')
sents = list(map(lambda sent: list(
map(lambda tok: tok.text, sent)), doc))
return self._from_tokens(sents, doc, tokenizer_kwargs)
def _from_tokens(self, sents: List[List[str]], doc: FeatureDocument,
tokenizer_kwargs: Dict[str, Any] = None) -> \
TokenizedFeatureDocument:
torch_config: TorchConfig = self.resource.torch_config
tlen: int = self.word_piece_token_length
tokenizer: PreTrainedTokenizer = self.resource.tokenizer
params: Dict[str, bool] = {
'return_offsets_mapping': True,
'is_split_into_words': True,
'return_special_tokens_mask': True,
'padding': 'longest'}
for i, sent in enumerate(sents):
if len(sent) == 0:
raise TransformerError(
f'Sentence {i} is empty: can not tokenize')
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'parsing {sents} with token length: {tlen}')
if tlen > 0:
params.update({'truncation': True,
'max_length': tlen})
else:
params.update({'truncation': False})
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'using tokenizer parameters: {params}')
if tokenizer_kwargs is not None:
params.update(tokenizer_kwargs)
tok_dat: BatchEncoding = tokenizer(sents, **params)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f"lengths: {[len(i) for i in tok_dat['input_ids']]}")
logger.debug(f"inputs: {tok_dat['input_ids']}")
input_ids = tok_dat.input_ids
char_offsets = tok_dat.offset_mapping
boundary_tokens = (tok_dat.special_tokens_mask[0][0]) == 1
sent_offsets = tuple(
map(lambda s: tuple(map(lambda x: -1 if x is None else x, s)),
map(lambda si: tok_dat.word_ids(batch_index=si),
range(len(input_ids)))))
if logger.isEnabledFor(logging.DEBUG):
for six, tids in enumerate(sent_offsets):
logger.debug(f'tok ids: {tids}')
for stix, tix in enumerate(tids):
bid = tok_dat['input_ids'][six][stix]
wtok = self.id2tok[bid]
if tix >= 0:
stok = sents[six][tix]
else:
stok = '-'
logger.debug(
f'sent={six}, idx={tix}, id={bid}: {wtok} -> {stok}')
tok_data = [input_ids, tok_dat.attention_mask, sent_offsets]
if hasattr(tok_dat, 'token_type_ids'):
tok_data.append(tok_dat.token_type_ids)
arr = torch_config.singleton(tok_data, dtype=torch.long)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'tok doc mat: shape={arr.shape}, dtype={arr.dtype}')
return TokenizedFeatureDocument(
tensor=arr,
# needed by expander vectorizer
boundary_tokens=boundary_tokens,
char_offsets=char_offsets,
feature=doc,
id2tok=self.id2tok)
def __call__(self, doc: FeatureDocument,
tokenizer_kwargs: Dict[str, Any] = None) -> \
TokenizedFeatureDocument:
return self.tokenize(doc, tokenizer_kwargs) | zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/transformer/tokenizer.py | tokenizer.py |
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import List, Tuple, Dict, Any, Union, Iterable, Callable
from dataclasses import dataclass, field
import sys
import logging
import itertools as it
from io import TextIOBase
import numpy as np
import torch
from torch import Tensor
from zensols.nlp import FeatureDocument
from zensols.persist import PersistableContainer
from zensols.config import Writable
from zensols.nlp import FeatureToken, FeatureSentence
logger = logging.getLogger(__name__)
@dataclass
class TokenizedDocument(PersistableContainer, Writable):
"""This is the tokenized document output of
:class:`.TransformerDocumentTokenizer`. Instances of this class are
pickleable in a feature context, and are then given to the decoding phase
to create a tensor with a transformer model such as
:class:`.TransformerEmbedding`.
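A sketch of typical use (assumes ``tokenizer`` is a configured
:class:`.TransformerDocumentTokenizer` and ``embed_model`` a
:class:`.TransformerEmbedding`)::
    tdoc = tokenizer.tokenize(doc)
    # keep only the tensor so the instance pickles in a feature context
    detached = tdoc.detach()
    # later, transform to an embedding tensor with the model
    emb = embed_model.transform(tdoc)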
"""
tensor: Tensor = field()
"""Encodes the input IDs, attention mask, and word piece offset map."""
boundary_tokens: bool = field()
"""If the token document has sentence boundary tokens, such as ``[CLS]`` for
Bert.
"""
def __post_init__(self):
super().__init__()
@classmethod
def from_tensor(cls, tensor: Tensor) -> TokenizedDocument:
"""Create an instance of the class using a tensor. This is useful for
re-creating documents for mapping with :meth:`.map_word_pieces` after
unpickling from a document created with
:class:`.TransformerDocumentTokenizer.tokenize`.
:param tensor: the tensor to set in :obj:`.tensor`
"""
return cls(tensor, None)
@property
def input_ids(self) -> Tensor:
"""The token IDs as the output from the tokenizer."""
return self.tensor[0]
@property
def attention_mask(self) -> Tensor:
"""The attention mask (0/1s)."""
return self.tensor[1]
@property
def offsets(self) -> Tensor:
"""The offsets from word piece (transformer's tokenizer) to feature document
index mapping.
"""
return self.tensor[2]
@property
def token_type_ids(self) -> Tensor:
"""The token type IDs (0/1s)."""
if self.tensor.size(0) > 3:
return self.tensor[3]
@property
def shape(self) -> torch.Size:
"""Return the shape of the vectorized document."""
return self.tensor.shape
def __len__(self) -> int:
"""Return the size of the document in number of word pieces."""
return self.tensor.size(-1)
def detach(self) -> TokenizedDocument:
"""Return a version of the document that is pickleable."""
return self
def truncate(self, size: int) -> TokenizedDocument:
"""Truncate the the last (token) dimension to ``size``.
:return: a new instance of this class truncated to size
"""
cls = self.__class__
return cls(self.tensor[:, :, 0:size], self.boundary_tokens)
def params(self) -> Dict[str, Any]:
dct = {}
atts = 'input_ids attention_mask token_type_ids'
for att in atts.split():
val = getattr(self, att)
if val is not None:
dct[att] = val
return dct
@staticmethod
def map_word_pieces(token_offsets: List[int]) -> \
List[Tuple[FeatureToken, List[int]]]:
"""Map word piece tokens to linguistic tokens.
:return:
a list of tuples in the form:
``(<linguistic token|token index>, <list of word piece indexes>)``
if detached, the linguistic token is an index as a tensor scalar
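For example, where ``-1`` marks special tokens such as ``[CLS]`` and
``[SEP]``::
    TokenizedDocument.map_word_pieces([-1, 0, 1, 1, 2, -1])
    # -> [(0, [1]), (1, [2, 3]), (2, [4])]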
"""
ftoks = []
n_ftok = -1
for wix, tix in enumerate(token_offsets):
if tix >= 0:
if tix > n_ftok:
wptoks = []
ftoks.append((tix, wptoks))
n_ftok += 1
wptoks.append(wix)
return ftoks
def map_to_word_pieces(self, sentences: Iterable[List[Any]] = None,
map_wp: Union[Callable, Dict[int, str]] = None,
add_indices: bool = False) -> \
List[Dict[str, Union[List[Any], Tuple[FeatureToken, Tuple[str]]]]]:
"""Map word piece tokens to linguistic tokens.
:param sentences: an iteration of sentences, which is returned in the
output (i.e. :class:`~zensols.nlp.FeatureSentence`)
:param map_wp: either a function that takes the token index, sentence ID
and input IDs, or the mapping from word piece ID to
string token; return output is the string token (or
numerical output if no mapping is provided)
:param add_indices: whether to add the token ID and index after the
token string when ``id2tok`` is provided for
``map_wp``
:return: a list of sentence maps, each with:
* ``sent`` -> the ``i``th list in ``sentences``
* ``map`` -> list of ``(sentence 'token', word pieces)``
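For example (assumes ``tdoc`` was created by
:class:`.TransformerDocumentTokenizer` from feature document ``fdoc`` and
``tokenizer.id2tok`` is that tokenizer's word piece string mapping)::
    for sent_map in tdoc.map_to_word_pieces(
            sentences=fdoc, map_wp=tokenizer.id2tok):
        print(sent_map['sent'])
        for tok, word_pieces in sent_map['map']:
            print(tok, '->', word_pieces)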
"""
def map_wp_by_id(x: int, six: int, input_ids: List[int]):
tix = input_ids[x]
tok = id2tok[tix]
return (tok, tix, x) if add_indices else tok
id2tok = None
input_ids = self.input_ids.cpu().numpy()
sent_offsets = self.offsets
sents_map = []
if map_wp is None:
def map_str(x, *args, **kwargs):
return str(x)
map_wp = map_str
elif isinstance(map_wp, dict):
id2tok = map_wp
map_wp = map_wp_by_id
if sentences is None:
sentences = self.input_ids.cpu().numpy()
sents = enumerate(zip(sentences, sent_offsets))
for six, (sent, tok_offsets) in sents:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'sent idx: {six}, sent: {sent}, ' +
f'offsets: {tok_offsets}')
input_sent = input_ids[six]
wps = self.map_word_pieces(tok_offsets)
sent_map = []
sents_map.append({'sent': sent, 'map': sent_map})
for tix, ixs in wps:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'{ixs} -> {tix}')
tok = sent[tix]
ttoks = tuple(map(lambda i: map_wp(i[0], six, input_sent),
zip(ixs, it.count())))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'{tok} -> {ttoks}')
sent_map.append((tok, ttoks))
return sents_map
def deallocate(self):
super().deallocate()
del self.tensor
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_tokens: bool = True, id2tok: Dict[int, str] = None):
def maptok(tup: int) -> str:
s = id2tok[tup[0]]
if s.startswith('##'):
s = s[2:]
return s
for sent_map in self.map_to_word_pieces():
sent: np.ndarray = sent_map['sent']
tmap: Tuple[int, Tuple[str]] = sent_map['map']
self._write_line(f'sentence len: {sent.shape[0]}', depth, writer)
if include_tokens:
self._write_line('tokens:', depth, writer)
if id2tok is not None:
toks = ' '.join(map(maptok, tmap))
else:
toks = ' '.join(map(lambda t: str(t[0]), tmap))
self._write_line(toks, depth + 1, writer)
else:
self._write_line(f'tokens: {len(tmap)}', depth + 1, writer)
def __str__(self) -> str:
return f'doc: {self.tensor.shape}'
def __repr__(self) -> str:
return self.__str__()
@dataclass
class TokenizedFeatureDocument(TokenizedDocument):
"""Instance of this class are created, then a picklable version returned with
:meth:`detach` as an instance of the super class.
"""
feature: FeatureDocument = field()
"""The document to tokenize."""
id2tok: Dict[int, str] = field()
"""If provided, a mapping of indexes to transformer tokens. This attribute is
always nulled out after being persisted.
"""
char_offsets: Tuple[Tuple[int, int]] = field()
"""The valid character offsets for each word piece token."""
def detach(self) -> TokenizedDocument:
return TokenizedDocument(self.tensor, self.boundary_tokens)
def map_word_pieces_to_tokens(self) -> \
List[Dict[str, Union[FeatureSentence,
Tuple[FeatureToken, Tuple[str]]]]]:
"""Map word piece tokens to linguistic tokens.
:return: a list of sentence maps, each with:
* ``sent`` -> :class:`.FeatureSentence`
* ``map`` -> list of ``(token, word pieces)``
"""
def id2tok(x: int, six: int, input_ids: List[int]):
tix = input_ids[x]
tok = self.id2tok[tix]
off = self.char_offsets[six][x]
olen = off[1] - off[0]
if len(tok) > olen:
# bert
if tok.startswith('##'):
start = 2
# roberta
else:
start = 1
else:
start = 0
end = (off[1] - off[0]) + (start * 2)
tok = tok[start:end]
return tok
return super().map_to_word_pieces(self.feature, id2tok)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_tokens: bool = True, id2tok: Dict[int, str] = None):
id2tok = self.id2tok if id2tok is None else id2tok
sent_map: Dict[str, Union[FeatureSentence,
Tuple[FeatureToken, Tuple[str]]]]
for sent_map in self.map_word_pieces_to_tokens():
sent: FeatureSentence = sent_map['sent']
tmap: Tuple[FeatureToken, Tuple[str]] = sent_map['map']
self._write_line(f'sentence: {sent}', depth, writer)
if include_tokens:
self._write_line('tokens:', depth, writer)
tok: FeatureToken
ttoks: Tuple[str]
for tok, ttoks in tmap:
stext = tok.text.replace('\n', '\\n')
stext = f'<{stext}>'
self._write_line(f'{stext} -> {ttoks}', depth + 1, writer) | zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/transformer/domain.py | domain.py |
__author__ = 'Paul Landes'
from typing import Dict, Iterable, Tuple, ClassVar, Union
from dataclasses import dataclass, field
import logging
from itertools import chain
import torch
from torch import Tensor
from torch import nn
from transformers import PreTrainedModel
from transformers.modeling_outputs import \
BaseModelOutput, BaseModelOutputWithPoolingAndCrossAttentions
from zensols.config import Dictable
from zensols.nlp import FeatureDocument
from zensols.deeplearn import TorchTypes
from zensols.deepnlp.transformer import TransformerResource
from zensols.persist import persisted, PersistedWork, PersistableContainer
from . import (
TransformerError, TokenizedDocument, TokenizedFeatureDocument,
TransformerDocumentTokenizer
)
logger = logging.getLogger(__name__)
@dataclass
class TransformerEmbedding(PersistableContainer, Dictable):
"""An model for transformer embeddings (such as BERT) that wraps the
HuggingFace transformms API.
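A usage sketch (assumes ``embedding`` is an instance of this class
configured with a tokenizer, and ``doc`` is a parsed feature document)::
    tdoc = embedding.tokenize(doc)
    # pooled output with shape (batch, <embedding dimension>)
    arr = embedding.transform(tdoc, embedding.POOLER_OUTPUT)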
"""
_DICTABLE_WRITABLE_DESCENDANTS: ClassVar[bool] = True
LAST_HIDDEN_STATE_OUTPUT: ClassVar[str] = 'last_hidden_state'
POOLER_OUTPUT: ClassVar[str] = 'pooler_output'
ALL_OUTPUT: ClassVar[str] = 'all_output'
name: str = field()
"""The name of the embedding as given in the configuration."""
tokenizer: TransformerDocumentTokenizer = field()
"""The tokenizer used for creating the input for the model."""
output: str = field(default=POOLER_OUTPUT)
"""The output from the huggingface transformer API to return.
This is set to one of:
* :obj:`LAST_HIDDEN_STATE_OUTPUT`: with the output embeddings of the last
layer with shape: ``(batch, N sentences, hidden layer dimension)``
* :obj:`POOLER_OUTPUT`: the last layer hidden-state of the first token of
the sequence (classification token) further processed by a Linear
layer and a Tanh activation function with shape: ``(batch, hidden
layer dimension)``
* :obj:`ALL_OUTPUT`: includes both as a dictionary with corresponding
keys
"""
output_attentions: bool = field(default=False)
"""Whether or not to output the attention layer."""
def __post_init__(self):
super().__init__()
self._vec_dim = PersistedWork('_vec_dim', self, self.resource.cache)
@property
def resource(self) -> TransformerResource:
"""The transformer resource containing the model."""
return self.tokenizer.resource
@property
def cache(self):
"""When set to ``True`` cache a global space model using the parameters from
the first instance creation.
"""
return self.resource.cache
@property
def model(self) -> PreTrainedModel:
return self.resource.model
@property
@persisted('_vec_dim')
def vector_dimension(self) -> int:
"""Return the output embedding dimension of the final layer.
"""
toker: TransformerDocumentTokenizer = self.tokenizer
doc: TokenizedFeatureDocument = toker._from_tokens([['the']], None)
emb = self.transform(doc, self.POOLER_OUTPUT)
size = emb.size(-1)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'embedding dimension {size} for ' +
f'model {self.resource}')
doc.deallocate()
return size
@property
def trainable(self) -> bool:
"""Whether or not the model is trainable or frozen."""
return self.resource.trainable
def tokenize(self, doc: FeatureDocument) -> TokenizedFeatureDocument:
"""Tokenize the feature document, which is used as the input to
:meth:`transform`.
:param doc: the document to tokenize
:return: the tokenization of ``doc``
"""
return self.tokenizer.tokenize(doc)
def _get_model(self, params: Dict[str, Tensor]) -> nn.Module:
"""Prepare the model and parameters used for inference on it.
:param params: the tokenization output later used on the model's
``__call__`` method
:return: the model that is ready for inferencing
"""
model: nn.Module = self.resource.model
if self.output_attentions:
params['output_attentions'] = True
# a bug in transformers 4.4.2 requires this; 4.12.5 still does
# https://github.com/huggingface/transformers/issues/2952
for attr in 'position_ids token_type_ids'.split():
if hasattr(model.embeddings, attr):
arr: Tensor = getattr(model.embeddings, attr)
if TorchTypes.is_float(arr.dtype):
setattr(model.embeddings, attr, arr.long())
return model
def _infer_pooler(self, output: BaseModelOutput) -> Tensor:
"""Create a pooler output if one is not available, such as with
DistilBERT (and possibly RoBERTa in the future). This assumes the
output has a hidden state at index 0.
:param output: the output from the model
:return: the pooler output tensor taken from ``output``
"""
hidden_state = output[0] # (bs, seq_len, dim)
pooled_output = hidden_state[:, 0] # (bs, dim)
return pooled_output
def _narrow_tensor(self, output_res: BaseModelOutput,
output: str) -> Tensor:
"""Resolve an embedding from the HuggingFace output instance."""
arr: Tensor
if output == 'pooler_output' and \
not hasattr(output_res, output):
arr = self._infer_pooler(output_res)
else:
if not hasattr(output_res, output):
raise TransformerError(
f'No such output attribute {output} for ' +
f'output {type(output_res)}')
arr = getattr(output_res, output)
return arr
def transform(self, doc: TokenizedDocument, output: str = None) -> \
Union[Tensor, Dict[str, Tensor]]:
"""Transform the documents in to the transformer output.
:param doc: the tokenized document to transform into the embedding output
:param output: the output from the huggingface transformer API to return
(see class docs)
:return: a container object instance with the output, which contains
(among other data) ``last_hidden_state`` with the output
embeddings of the last layer with shape: ``(batch, N sentences,
hidden layer dimension)``
"""
output: str = self.output if output is None else output
output_res: BaseModelOutputWithPoolingAndCrossAttentions
output_data: Union[Tensor, Dict[str, Tensor]]
params: Dict[str, Tensor] = doc.params()
model: nn.Module = self._get_model(params)
if logger.isEnabledFor(logging.DEBUG):
for k, v in params.items():
if isinstance(v, Tensor):
logger.debug(f"{k}: dtype={v.dtype}, shape={v.shape}")
else:
logger.debug(f'{k}: {v}')
# predict hidden states features for each layer
if self.resource.trainable:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('model is trainable')
output_res = model(**params)
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('turning off gradients since model not trainable')
model.eval()
with torch.no_grad():
output_res = model(**params)
if output == self.ALL_OUTPUT:
output_data = {
self.POOLER_OUTPUT: self._narrow_tensor(
output_res, self.POOLER_OUTPUT),
self.LAST_HIDDEN_STATE_OUTPUT: self._narrow_tensor(
output_res, self.LAST_HIDDEN_STATE_OUTPUT)}
elif output in {self.POOLER_OUTPUT, self.LAST_HIDDEN_STATE_OUTPUT}:
output_data = self._narrow_tensor(output_res, output)
else:
raise TransformerError(f'Unknown output type: {output}')
if isinstance(output_data, Tensor) and logger.isEnabledFor(logging.DEBUG):
logger.debug(f'embedding dim: {output_data.size()}')
return output_data
def _get_dictable_attributes(self) -> Iterable[Tuple[str, str]]:
return chain.from_iterable(
[super()._get_dictable_attributes(), [('resource', 'resource')]]) | zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/transformer/embed.py | embed.py |
__author__ = 'Paul Landes'
from typing import Tuple, List, Iterable, Dict, Any
from dataclasses import dataclass, field
import logging
import sys
from collections import OrderedDict
from io import TextIOBase
import pandas as pd
import torch
from torch import Tensor
from torch.return_types import topk
from transformers import PreTrainedTokenizer, PreTrainedModel
from zensols.config import Dictable
from zensols.nlp import FeatureToken, TokenContainer
from zensols.deeplearn import TorchConfig
from zensols.deepnlp.transformer import TransformerResource
from . import TransformerError
logger = logging.getLogger(__name__)
@dataclass
class TokenPrediction(Dictable):
"""Couples a masked model prediction token to which it belongs and its
score.
"""
token: FeatureToken = field()
prediction: str = field()
score: float = field()
def __str__(self) -> str:
return f"{self.token} -> {self.prediction} ({self.score:.4f})"
@dataclass
class Prediction(Dictable):
"""A container class for masked token predictions produced by
:class:`.MaskFiller`.
"""
cont: TokenContainer = field()
"""The document, sentence or span to predict masked tokens."""
masked_tokens: Tuple[FeatureToken] = field()
"""The masked tokens matched."""
df: pd.DataFrame = field()
"""The predictions with dataframe columns:
* ``k``: the *k* in the top-*k* highest scored masked token match
* ``mask_id``: the N-th masked token in the source ordered by position
* ``token``: the predicted token
* ``score``: the score of the prediction (``[0, 1]``, higher the better)
"""
def get_container(self, k: int = 0) -> TokenContainer:
"""Get the *k*-th top scored sentence. This method should be called
only once for each instance since it modifies the tokens of the
container for each invocation.
A client may call this method as many times as necessary (i.e. for
multiple values of ``k``) since :obj:``cont`` tokens are modified while
retaining the original masked tokens :obj:`masked_tokens`.
:param k: as *k* increases, the mask substitutions (and thus the
sentence) become less likely; *k* = 0 is the most likely
given the sentence and masks
"""
cont: TokenContainer = self.cont
if len(self.df) == 0:
raise TransformerError(f'No predictions found for <{cont.text}>')
n_top_k: int = len(self) - 1
if k > n_top_k:
raise IndexError(f'Only {n_top_k} predictions but asked for {k}')
df: pd.DataFrame = self.df
df = df[df['k'] == k].sort_values('mask_id')
# iterate over the masked tokens, then for each, populate the prediction
tok: FeatureToken
repl: str
for tok, repl in zip(self.masked_tokens, df['token']):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'{repl} -> {tok.norm}')
# modify the previously matched token clobbering the norm for each
# iteration
tok.norm = repl
# clear to force a container level norm to be generated
cont.clear()
return cont
def get_tokens(self) -> Iterable[TokenPrediction]:
"""Return an iterable of the prediction coupled with the token it
belongs to and its score.
"""
preds: Iterable[Tuple[str, float]] = self.df.\
sort_values('mask_id')['token score'.split()].\
itertuples(name=None, index=False)
return map(lambda t: TokenPrediction(t[0], t[1][0], t[1][1]),
zip(self.masked_tokens, preds))
@property
def masked_token_dicts(self) -> Tuple[Dict[str, Any]]:
"""A tuple of :class:`.builtins.dict` each having token index, norm and
text data.
"""
feats: Tuple[str] = ('i', 'idx', 'i_sent', 'norm', 'text')
return tuple(map(lambda t: t.get_features(feats), self.masked_tokens))
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_masked_tokens: bool = True,
include_predicted_tokens: bool = True,
include_predicted_sentences: bool = True):
self._write_line(f'source: {self.cont.text}', depth, writer)
if include_masked_tokens:
self._write_line('masked:', depth, writer)
for mt in self.masked_token_dicts:
self._write_dict(mt, depth + 1, writer, one_line=True)
if include_predicted_tokens:
self._write_line('predicted:', depth, writer)
for k, df in self.df.groupby('k')['mask_id token score'.split()]:
scs: List[str] = []
for mid, r in df.groupby('mask_id'):
s = f"{r['token'].item()} ({r['score'].item():.4f})"
scs.append(s)
self._write_line(f'k={k}: ' + ', '.join(scs), depth + 1, writer)
if include_predicted_sentences:
self._write_line('sentences:', depth, writer)
self._write_iterable(tuple(map(lambda t: t.norm, self)),
depth + 1, writer)
def _from_dictable(self, *args, **kwargs):
return OrderedDict(
[['source', self.cont.text],
['masked_tokens', self.masked_token_dicts],
['pred_tokens', self.df.to_dict('records')],
['pred_sentences', tuple(map(lambda t: t.norm, self))]])
def __iter__(self) -> Iterable[TokenContainer]:
return map(self.get_container, range(len(self)))
def __getitem__(self, i: int) -> TokenContainer:
return self.get_container(i)
def __len__(self) -> int:
return len(self.df['k'].drop_duplicates())
def __str__(self) -> str:
return self.get_container().norm
@dataclass
class MaskFiller(object):
"""The class fills masked tokens with the prediction of the underlying maked
model. Masked tokens with attribute :obj:`feature_id` having value
:obj:`feature_value` (:obj:`~zensols.nlp.tok.FeatureToken.norm` and ``MASK``
by default respectively) are substituted with model values.
To use this class, parse a sentence with a
:class:`~zensols.nlp.parser.FeatureDocumentParser` with masked tokens
using the string :obj:`feature_value`.
For example (with class defaults), the sentence::
Paris is the MASK of France.
becomes::
Paris is the <mask> of France.
The ``<mask>`` string becomes the
:obj:`~transformers.PreTrainedTokenizer.mask_token` for the model's
tokenizer.
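A usage sketch (assumes ``resource`` is a :class:`.TransformerResource`
configured with a masked language model and ``doc`` was parsed from the
example sentence above)::
    filler = MaskFiller(resource, k=3)
    pred = filler.predict(doc)
    # the most likely substitution (k = 0)
    print(pred.get_container(0).norm)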
"""
resource: TransformerResource = field()
"""A container class with the Huggingface tokenizer and model."""
k: int = field(default=1)
"""The number of top K predicted masked words per mask. The total number of
predictions will be <number of masks> X ``k`` in the source document.
"""
feature_id: str = field(default='norm')
"""The :class:`~zensols.nlp.FeatureToken` feature ID to match on masked
tokens.
:see: :obj:`feature_value`
"""
feature_value: str = field(default='MASK')
"""The value of feature ID :obj:`feature_id` to match on masked tokens."""
def _predict(self, text: str) -> pd.DataFrame:
tc: TorchConfig = self.resource.torch_config
# models are created in the resource
tokenizer: PreTrainedTokenizer = self.resource.tokenizer
model: PreTrainedModel = self.resource.model
# rows of the dataframe are the k, nth mask tok, token str, score/proba
rows: List[Tuple[int, int, str, float]] = []
# tokenization produces the vocabulary wordpiece ids
input_ids: Tensor = tc.to(tokenizer.encode(text, return_tensors='pt'))
# get the wordpiece IDs of the masks
mask_token_index: Tensor = torch.where(
input_ids == tokenizer.mask_token_id)[1]
# predict and get the masked wordpiece token logits
token_logits: Tensor = model(input_ids)[0]
mask_token_logits: Tensor = token_logits[0, mask_token_index, :]
mask_token_logits = torch.softmax(mask_token_logits, dim=1)
# get the top K matches based on the masked token logits
top: topk = torch.topk(mask_token_logits, k=self.k, dim=1)
# iterate over masks
top_ix: Tensor = top.indices
mix: int
for mix in range(top_ix.shape[0]):
top_tokens = zip(top_ix[mix].tolist(), top.values[mix].tolist())
token_id: int
score: float
# iterate over the top K tokens
for k, (token_id, score) in enumerate(top_tokens):
token: str = tokenizer.decode([token_id]).strip()
rows.append((k, mix, token, score))
return pd.DataFrame(rows, columns='k mask_id token score'.split())
def predict(self, source: TokenContainer) -> Prediction:
"""Predict subtitution values for token masks.
**Important:** ``source`` is modified as a side-effect of this method.
Use :meth:`~zensols.nlp.TokenContainer.clone` on the ``source`` document
passed to this method to preserve the original if necessary.
:param source: the source document, sentence, or span for which to
substitute values
"""
mask_tok: str = self.resource.tokenizer.mask_token
fid: str = self.feature_id
fval: str = self.feature_value
# identify the masked tokens
masked_tokens: Tuple[FeatureToken] = tuple(filter(
lambda t: getattr(t, fid) == fval, source.token_iter()))
# substitute the tokenizer's token mask needed for prediction
tok: FeatureToken
for tok in masked_tokens:
tok.norm = mask_tok
# clear to force a new norm with the tokenizer mask pattern
source.clear()
df: pd.DataFrame = self._predict(source.norm)
return Prediction(source, masked_tokens, df) | zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/transformer/mask.py | mask.py |
__author__ = 'Paul Landes'
from typing import Tuple, Any, List
from dataclasses import dataclass, field
import sys
from io import TextIOBase
import logging
from pathlib import Path
import pandas as pd
from zensols.persist import dealloc, Stash
from zensols.config import Settings
from zensols.cli import ActionCliManager, ApplicationError
from zensols.nlp import FeatureDocument
from zensols.deeplearn import ModelError
from zensols.deeplearn.batch import Batch, DataPoint
from zensols.deeplearn.model import ModelFacade
from zensols.deeplearn.cli import FacadeApplication
from zensols.deepnlp.classify import (
LabeledFeatureDocumentDataPoint, LabeledFeatureDocument
)
logger = logging.getLogger(__name__)
@dataclass
class NLPFacadeBatchApplication(FacadeApplication):
"""A facade application for creating mini-batches for training.
"""
CLI_META = ActionCliManager.combine_meta(
FacadeApplication,
{'mnemonic_overrides': {'dump_batches': 'dumpbatch'}})
def _add_row(self, split_name: str, batch: Batch, dp: DataPoint):
label: str = None
text: str = None
if isinstance(dp, LabeledFeatureDocumentDataPoint):
label = dp.label
if hasattr(dp, 'doc') and isinstance(dp.doc, FeatureDocument):
doc: FeatureDocument = dp.doc
text = doc.text
if label is None and \
(isinstance(doc, LabeledFeatureDocument) or hasattr(doc, 'label')):
label = doc.label
if label is None and hasattr(dp, 'label'):
label = dp.label
if text is None:
text = str(dp)
return (batch.id, dp.id, split_name, label, text)
def dump_batches(self):
"""Dump the batch dataset with IDs, splits, labels and text.
"""
rows: List[Any] = []
with dealloc(self.create_facade()) as facade:
self._enable_cli_logging(facade)
out_csv = Path(f'{facade.model_settings.normal_model_name}.csv')
split_name: str
ss: Stash
for split_name, ss in facade.dataset_stash.splits.items():
batch: Batch
for batch in ss.values():
dp: DataPoint
for dp in batch.data_points:
rows.append(self._add_row(split_name, batch, dp))
df = pd.DataFrame(
rows, columns='batch_id data_point_id split label text'.split())
df.to_csv(out_csv)
if logger.isEnabledFor(logging.INFO):
logger.info(f'wrote {out_csv}')
@dataclass
class NLPFacadeModelApplication(FacadeApplication):
"""A base class facade application for predicting tokens or text.
"""
CLI_META = ActionCliManager.combine_meta(
FacadeApplication,
{'mnemonic_overrides': {'predict_text': 'predtext'},
'option_overrides': {'verbose': {'long_name': 'verbose',
'short_name': None}}})
def _get_sentences(self, text_input: str) -> Tuple[str]:
def map_sents(din: TextIOBase):
return map(lambda ln: ln.strip(), din.readlines())
if text_input == '-':
return tuple(map_sents(sys.stdin))
else:
return [text_input]
def _predict(self, facade: ModelFacade, data: Any) -> Any:
try:
return facade.predict(data)
except ModelError as e:
raise ApplicationError(
'Could not predict, probably need to train a model ' +
f'first: {e}') from e
class NLPClassifyFacadeModelApplication(NLPFacadeModelApplication):
"""A facade application for predicting text (for example sentiment
classification tasks).
"""
def predict_text(self, text_input: str, verbose: bool = False):
"""Classify ad-hoc text and output the results.
:param text_input: the sentence to classify, or a dash (-) to read from standard input
:param verbose: if given, print the long format version of the document
"""
sents = self._get_sentences(text_input)
with dealloc(self.create_facade()) as facade:
docs: Tuple[FeatureDocument] = self._predict(facade, sents)
for doc in docs:
if verbose:
doc.write()
else:
print(doc)
@dataclass
class NLPSequenceClassifyFacadeModelApplication(NLPFacadeModelApplication):
"""A facade application for predicting tokens (for example NER tasks).
"""
model_path: Path = field(default=None)
"""The path to the model or use the last trained model if not provided.
"""
def predict_text(self, text_input: str, verbose: bool = False):
"""Classify ad-hoc text and output the results.
:param text_input: the sentence to classify, or a dash (-) to read from standard input
:param verbose: if given, print the long format version of the document
"""
sents = self._get_sentences(text_input)
with dealloc(self.create_facade()) as facade:
pred: Settings = self._predict(facade, sents)
docs: Tuple[FeatureDocument] = pred.docs
classes: Tuple[str] = pred.classes
for labels, doc in zip(classes, docs):
for label, tok in zip(labels, doc.token_iter()):
print(label, tok) | zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/cli/app.py | app.py |
__author__ = 'Paul Landes'
from typing import Dict
from dataclasses import dataclass, field
import sys
from io import TextIOBase
from zensols.nlp import FeatureDocument
from zensols.deeplearn.batch import (
Batch,
BatchFeatureMapping,
ManagerFeatureMapping,
FieldFeatureMapping,
)
from zensols.deepnlp.batch import FeatureDocumentDataPoint
@dataclass
class LabeledFeatureDocument(FeatureDocument):
"""A feature document with a label, used for text classification.
"""
label: str = field(default=None)
"""The document level classification gold label."""
pred: str = field(default=None)
"""The document level prediction label.
:see: :obj:`.ClassificationPredictionMapper.pred_attribute`
"""
softmax_logit: Dict[str, float] = field(default=None)
"""The document level softmax of the logits.
:see: :obj:`.ClassificationPredictionMapper.softmax_logit_attribute`
"""
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
super().write(depth, writer)
sl = self.softmax_logit
sm = sl[self.pred] if sl is not None else ''
self._write_line(f'label: {self.label}', depth + 1, writer)
self._write_line(f'prediction: {self.pred}', depth + 1, writer)
self._write_line(f'softmax logits: {sm}', depth + 1, writer)
def __str__(self) -> str:
lab = '' if self.label is None else f'label: {self.label}'
pred = ''
if self.pred is not None:
pred = f'pred={self.pred}, logit={self.softmax_logit[self.pred]}'
mid = ', ' if len(lab) > 0 and len(pred) > 0 else ''
return (f'{lab}{mid}{pred}: {self.text}')
@dataclass
class LabeledFeatureDocumentDataPoint(FeatureDocumentDataPoint):
"""A representation of a data for a reivew document containing the sentiment
polarity as the label.
"""
@property
def label(self) -> str:
"""The label for the textual data point."""
return self.doc.label
@dataclass
class LabeledBatch(Batch):
"""A batch used for labeled text, usually used for text classification. This
batch class serves as a way for very basic funcationly, but also provides
an example and template from which to desigh your own batch implementation
for your custom application.
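A minimal sketch of a custom batch modeled on this class follows; the
section names and feature IDs are assumptions that must match your
application's vectorizer configuration::
    @dataclass
    class MyBatch(Batch):
        MAPPINGS = BatchFeatureMapping(
            'label',
            [ManagerFeatureMapping(
                'classify_label_vectorizer_manager',
                (FieldFeatureMapping('label', 'lblabel', True),)),
             ManagerFeatureMapping(
                 'language_vectorizer_manager',
                 (FieldFeatureMapping(
                     'glove_50_embedding', 'wvglove50', True, 'doc'),))])
        def _get_batch_feature_mappings(self) -> BatchFeatureMapping:
            return self.MAPPINGS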
"""
LANGUAGE_FEATURE_MANAGER_NAME = 'language_vectorizer_manager'
"""The configuration section of the definition of the
:class:`~zensols.deepnlp.vectorize.FeatureDocumentVectorizerManager`.
"""
GLOVE_50_EMBEDDING = 'glove_50_embedding'
"""The configuration section name of the glove embedding
:class:`~zensols.deepnlp.embed.GloveWordEmbedModel` class.
"""
GLOVE_300_EMBEDDING = 'glove_300_embedding'
"""The configuration section name of the glove embedding
:class:`~zensols.deepnlp.embed.GloveWordEmbedModel` class.
"""
WORD2VEC_300_EMBEDDING = 'word2vec_300_embedding'
"""The configuration section name of the the Google word2vec embedding
:class:`~zensols.deepnlp.embed.Word2VecModel` class.
"""
FASTTEXT_NEWS_300_EMBEDDING = 'fasttext_news_300_embedding'
"""The configuration section name of the fasttext news embedding
:class:`~zensols.deepnlp.embed.FastTextEmbedModel` class.
"""
FASTTEXT_CRAWL_300_EMBEDDING = 'fasttext_crawl_300_embedding'
"""The configuration section name of the fasttext crawl embedding
:class:`~zensols.deepnlp.embed.FastTextEmbedModel` class.
"""
TRANSFORMER_TRAINBLE_EMBEDDING = 'transformer_trainable_embedding'
"""The configuration section name of the BERT transformer contextual embedding
:class:`~zensols.deepnlp.transformer.TransformerEmbedding` class.
"""
TRANSFORMER_FIXED_EMBEDDING = 'transformer_fixed_embedding'
"""Like :obj:`TRANSFORMER_TRAINBLE_EMBEDDING`, but all layers of the
tranformer are frozen and only the static embeddings are used.
"""
EMBEDDING_ATTRIBUTES = {GLOVE_50_EMBEDDING,
GLOVE_300_EMBEDDING,
WORD2VEC_300_EMBEDDING,
FASTTEXT_NEWS_300_EMBEDDING,
FASTTEXT_CRAWL_300_EMBEDDING,
TRANSFORMER_TRAINBLE_EMBEDDING,
TRANSFORMER_FIXED_EMBEDDING}
"""All embedding feature section names."""
STATS_ATTRIBUTE = 'stats'
"""The statistics feature attribute name."""
ENUMS_ATTRIBUTE = 'enums'
"""The enumeration feature attribute name."""
COUNTS_ATTRIBUTE = 'counts'
"""The feature counts attribute name."""
DEPENDENCIES_ATTRIBUTE = 'dependencies'
"""The dependency feature attribute name."""
ENUM_EXPANDER_ATTRIBUTE = 'transformer_enum_expander'
"""Expands enumerated spaCy features to transformer wordpiece alignment."""
DEPENDENCY_EXPANDER_ATTRIBTE = 'transformer_dep_expander'
"""Expands dependency tree spaCy features to transformer wordpiece alignment.
"""
LANGUAGE_ATTRIBUTES = {
STATS_ATTRIBUTE, ENUMS_ATTRIBUTE, COUNTS_ATTRIBUTE,
DEPENDENCIES_ATTRIBUTE,
ENUM_EXPANDER_ATTRIBUTE, DEPENDENCY_EXPANDER_ATTRIBTE}
"""All linguistic feature attribute names."""
MAPPINGS = BatchFeatureMapping(
'label',
[ManagerFeatureMapping(
'classify_label_vectorizer_manager',
(FieldFeatureMapping('label', 'lblabel', True),)),
ManagerFeatureMapping(
LANGUAGE_FEATURE_MANAGER_NAME,
(FieldFeatureMapping(GLOVE_50_EMBEDDING, 'wvglove50', True, 'doc'),
FieldFeatureMapping(GLOVE_300_EMBEDDING, 'wvglove300', True, 'doc'),
FieldFeatureMapping(WORD2VEC_300_EMBEDDING, 'w2v300', True, 'doc'),
FieldFeatureMapping(FASTTEXT_NEWS_300_EMBEDDING, 'wvftnews300', True, 'doc'),
FieldFeatureMapping(FASTTEXT_CRAWL_300_EMBEDDING, 'wvftcrawl300', True, 'doc'),
FieldFeatureMapping(TRANSFORMER_TRAINBLE_EMBEDDING, 'transformer_trainable', True, 'doc'),
FieldFeatureMapping(TRANSFORMER_FIXED_EMBEDDING, 'transformer_fixed', True, 'doc'),
FieldFeatureMapping(STATS_ATTRIBUTE, 'stats', False, 'doc'),
FieldFeatureMapping(ENUMS_ATTRIBUTE, 'enum', True, 'doc'),
FieldFeatureMapping(COUNTS_ATTRIBUTE, 'count', True, 'doc'),
FieldFeatureMapping(DEPENDENCIES_ATTRIBUTE, 'dep', True, 'doc'),
FieldFeatureMapping(ENUM_EXPANDER_ATTRIBUTE, 'tran_enum_expander', True, 'doc'),
FieldFeatureMapping(DEPENDENCY_EXPANDER_ATTRIBTE, 'tran_dep_expander', True, 'doc')))])
"""The mapping from the labeled data's feature attribute to feature ID and
accessor information.
"""
def _get_batch_feature_mappings(self) -> BatchFeatureMapping:
return self.MAPPINGS | zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/classify/domain.py | domain.py |
__author__ = 'Paul Landes'
from typing import Iterable, Any, Type
from dataclasses import dataclass, field
import logging
import pandas as pd
from zensols.persist import Stash
from zensols.deeplearn import NetworkSettings
from zensols.deeplearn.result import (
PredictionsDataFrameFactory,
SequencePredictionsDataFrameFactory,
)
from zensols.deepnlp.model import (
LanguageModelFacade, LanguageModelFacadeConfig,
)
from . import LabeledBatch
logger = logging.getLogger(__name__)
@dataclass
class ClassifyModelFacade(LanguageModelFacade):
"""A facade for the text classification. See super classes for more
information on the purprose of this class.
All the ``set_*`` methods set parameters in the model.
"""
LANGUAGE_MODEL_CONFIG = LanguageModelFacadeConfig(
manager_name=LabeledBatch.LANGUAGE_FEATURE_MANAGER_NAME,
attribs=LabeledBatch.LANGUAGE_ATTRIBUTES,
embedding_attribs=LabeledBatch.EMBEDDING_ATTRIBUTES)
"""The label model configuration constructed from the batch metadata.
:see: :class:`.LabeledBatch`
"""
def __post_init__(self, *args, **kwargs):
super().__post_init__(*args, **kwargs)
settings: NetworkSettings = self.executor.net_settings
if hasattr(settings, 'dropout'):
# set to trigger writeback through to sub settings (linear, recur)
self.dropout = self.executor.net_settings.dropout
def _configure_debug_logging(self):
super()._configure_debug_logging()
for i in ['zensols.deeplearn.layer',
'zensols.deepnlp.transformer.layer',
'zensols.deepnlp.layer',
'zensols.deepnlp.classify']:
logging.getLogger(i).setLevel(logging.DEBUG)
def _get_language_model_config(self) -> LanguageModelFacadeConfig:
return self.LANGUAGE_MODEL_CONFIG
@property
def feature_stash(self) -> Stash:
"""The stash containing feature instances."""
return super().feature_stash.delegate
def get_predictions(self, *args, **kwargs) -> pd.DataFrame:
"""Return a Pandas dataframe of the predictions with columns that include the
correct label, the prediction, the text and the length of the text of
the text.
"""
return super().get_predictions(
('text', 'len'),
lambda dp: (dp.doc.text, len(dp.doc.text)),
*args, **kwargs)
def predict(self, datas: Iterable[Any]) -> Any:
# remove expensive to load vectorizers for prediction only when we're
# not using those models
if self.config.has_option('embedding', 'deeplearn_default'):
emb_conf = self.config.get_option('embedding', 'deeplearn_default')
attrs = ('glove_300_embedding fasttext_news_300 ' +
'fasttext_crawl_300 word2vec_300_embedding').split()
for feature_attr in attrs:
if emb_conf != feature_attr:
self.remove_metadata_mapping_field(feature_attr)
return super().predict(datas)
@dataclass
class TokenClassifyModelFacade(ClassifyModelFacade):
"""A token level classification model facade.
"""
predictions_datafrmae_factory_class: Type[PredictionsDataFrameFactory] = \
field(default=SequencePredictionsDataFrameFactory)
def get_predictions(self, *args, **kwargs) -> pd.DataFrame:
"""Return a Pandas dataframe of the predictions with columns that
include the correct label, the prediction, the text, and the length of
the text. This uses the token norms of the document.
:see: :meth:`get_predictions_factory`
:param args: arguments passed to :meth:`get_predictions_factory`
:param kwargs: arguments passed to :meth:`get_predictions_factory`
"""
return LanguageModelFacade.get_predictions(
self,
('text',),
lambda dp: tuple(map(lambda t: (t.norm,), dp.doc.token_iter())),
*args, **kwargs) | zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/classify/facade.py | facade.py |
__author__ = 'Paul Landes'
from typing import Tuple, List, Iterable
from dataclasses import dataclass, field
from itertools import chain as ch
import numpy as np
from zensols.config import Settings
from zensols.nlp import FeatureSentence, FeatureDocument
from zensols.deeplearn.vectorize import CategoryEncodableFeatureVectorizer
from zensols.deeplearn.model import PredictionMapper
from zensols.deeplearn.result import ResultsContainer
from zensols.deepnlp.vectorize import FeatureDocumentVectorizerManager
from . import LabeledFeatureDocument
@dataclass
class ClassificationPredictionMapper(PredictionMapper):
"""A prediction mapper for text classification. This mapper works at any
level (document, sentence, token).
"""
vec_manager: FeatureDocumentVectorizerManager = field()
"""The vectorizer manager used to parse and get the label vectorizer."""
label_feature_id: str = field()
"""The feature ID for the label vectorizer."""
pred_attribute: str = field(default='pred')
"""The prediction attribute to set on the :class:`.FeatureDocument` returned
from :meth:`map_results`.
"""
softmax_logit_attribute: str = field(default='softmax_logit')
"""The softmax of the logits attribute to set on the :class:`.FeatureDocument`
returned from :meth:`map_results`.
:see: `On Calibration of Modern Neural Networks <https://arxiv.org/abs/1706.04599>`_
"""
def __post_init__(self):
super().__post_init__()
self._docs: List[FeatureDocument] = []
@property
def label_vectorizer(self) -> CategoryEncodableFeatureVectorizer:
"""The label vectorizer used to map classes in :meth:`get_classes`."""
return self.vec_manager[self.label_feature_id]
def _create_features(self, sent_text: str) -> Tuple[FeatureDocument]:
doc: FeatureDocument = self.vec_manager.parse(sent_text)
self._docs.append(doc)
return [doc]
def _map_classes(self, result: ResultsContainer) -> List[List[str]]:
"""Return the label string values for indexes ``nominals``.
:param nominals: the integers that map to the respective string class;
each tuple is a batch, and each item in the iterable
is a data point
:return: a list for every tuple in ``nominals``
"""
vec: CategoryEncodableFeatureVectorizer = self.label_vectorizer
nominals: List[np.ndarray] = result.batch_predictions
return list(map(lambda cl: vec.get_classes(cl).tolist(), nominals))
def map_results(self, result: ResultsContainer) -> \
Tuple[LabeledFeatureDocument]:
"""Map class predictions, logits, and documents generated during use of this
instance. Each data point is aggregated across batches.
:return: a tuple of documents with the prediction and softmax logit
attributes set (see :obj:`pred_attribute` and
:obj:`softmax_logit_attribute`)
"""
class_groups: List[List[str]] = self._map_classes(result)
classes: Iterable[str] = ch.from_iterable(class_groups)
logits: Iterable[np.ndarray] = ch.from_iterable(result.batch_outputs)
docs: List[FeatureDocument] = self._docs
labels: List[str] = self.label_vectorizer.label_encoder.classes_
for cl, doc, logit in zip(classes, docs, logits):
conf = np.exp(logit) / sum(np.exp(logit))
sms = dict(zip(labels, conf))
setattr(doc, self.pred_attribute, cl)
setattr(doc, self.softmax_logit_attribute, sms)
return tuple(docs)
@dataclass
class SequencePredictionMapper(ClassificationPredictionMapper):
"""Predicts sequences as a :class:`~zensols.config.serial.Settings` with
keys ``classes`` as the token level predictions and ``docs`` containing the
parsed documents from the sentence text.
"""
def _create_features(self, sent_text: str) -> Tuple[FeatureSentence]:
doc: FeatureDocument = self.vec_manager.parse(sent_text)
self._docs.append(doc)
return doc.sents
def map_results(self, result: ResultsContainer) -> Settings:
classes = self._map_classes(result)
return Settings(classes=tuple(classes), docs=tuple(self._docs)) | zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/classify/pred.py | pred.py |
__author__ = 'Paul Landes'
from typing import Any
from dataclasses import dataclass, field
import logging
import torch
from zensols.deeplearn import DropoutNetworkSettings
from zensols.deeplearn.batch import Batch
from zensols.deeplearn.layer import (
DeepLinear,
DeepLinearNetworkSettings,
RecurrentAggregation,
RecurrentAggregationNetworkSettings,
)
from zensols.deepnlp.layer import (
EmbeddingNetworkSettings,
EmbeddingNetworkModule,
)
logger = logging.getLogger(__name__)
@dataclass
class ClassifyNetworkSettings(DropoutNetworkSettings, EmbeddingNetworkSettings):
"""A utility container settings class for convulsion network models. This
class also updates the recurrent network's drop out settings when changed.
"""
recurrent_settings: RecurrentAggregationNetworkSettings = field()
"""Contains the confgiuration for the models RNN."""
linear_settings: DeepLinearNetworkSettings = field()
"""Contains the configuration for the model's FF *decoder*."""
def _set_option(self, name: str, value: Any):
super()._set_option(name, value)
# propagate dropout to the recurrent network
if name == 'dropout' and hasattr(self, 'recurrent_settings'):
if self.recurrent_settings is not None:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'setting dropout: {value}')
self.recurrent_settings.dropout = value
self.linear_settings.dropout = value
def get_module_class_name(self) -> str:
return __name__ + '.ClassifyNetwork'
class ClassifyNetwork(EmbeddingNetworkModule):
"""A model that either allows for an RNN or a BERT transforemr to classify
text.
"""
MODULE_NAME = 'classify'
def __init__(self, net_settings: ClassifyNetworkSettings):
super().__init__(net_settings, logger)
ns = self.net_settings
rs = ns.recurrent_settings
ls = ns.linear_settings
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'embedding output size: {self.embedding_output_size}')
if rs is None:
ln_in_features = self.embedding_output_size + self.join_size
self.recur = None
else:
rs.input_size = self.embedding_output_size
self._debug(f'recur settings: {rs}')
self.recur = RecurrentAggregation(rs)
self._debug(f'embedding join size: {self.join_size}')
self.join_size += self.recur.out_features
self._debug(f'after lstm join size: {self.join_size}')
ln_in_features = self.join_size
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'linear in size: {ln_in_features}')
ls.in_features = ln_in_features
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'linear input settings: {ls}')
self.fc_deep = DeepLinear(ls, self.logger)
def _forward(self, batch: Batch) -> torch.Tensor:
if self.logger.isEnabledFor(logging.DEBUG):
self._debug('review batch:')
batch.write()
x = self.forward_embedding_features(batch)
self._shape_debug('embedding', x)
x = self.forward_token_features(batch, x)
self._shape_debug('token', x)
if self.recur is not None:
x = self.recur(x)[0]
self._shape_debug('lstm', x)
x = self.forward_document_features(batch, x)
x = self.fc_deep(x)
self._shape_debug('deep linear', x)
return x | zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/classify/model.py | model.py |
__author__ = 'Paul Landes'
from typing import List, Tuple, Set, Union, Dict, Iterable
from dataclasses import dataclass, field
import logging
import sys
from functools import reduce
import torch
import numpy as np
from torch import Tensor
from zensols.deeplearn.vectorize import (
VectorizerError,
FeatureContext,
TensorFeatureContext,
SparseTensorFeatureContext,
MultiFeatureContext,
EncodableFeatureVectorizer,
OneHotEncodedEncodableFeatureVectorizer,
AggregateEncodableFeatureVectorizer,
TransformableFeatureVectorizer,
)
from zensols.nlp import (
FeatureToken, FeatureSentence, FeatureDocument, TokenContainer,
)
from ..embed import WordEmbedModel
from . import (
SpacyFeatureVectorizer, FeatureDocumentVectorizer,
TextFeatureType, MultiDocumentVectorizer,
)
logger = logging.getLogger(__name__)
@dataclass
class EnumContainerFeatureVectorizer(FeatureDocumentVectorizer):
"""Encode tokens found in the container by aggregating the spaCy vectorizers
output. The result is a concatenated binary representation of all
configured token level features for each token. This adds only token
vectorizer features generated by the spaCy vectorizers (subclasses of
:class:`.SpacyFeatureVectorizer`), and not the features themselves (such as
``is_stop`` etc).
All spaCy features are encoded given by
:obj:`~.FeatureDocumentVectorizerManager.spacy_vectorizers`.
However, only those given in :obj:`decoded_feature_ids` are produced in the
output tensor after decoding.
The motivation for encoding all, but decoding a subset of features is for
feature selection during training. This is because encoding the features
(in a sparse matrix) takes comparatively less time and space over having to
re-encode all batches.
Rows are tokens, columns intervals of features. The encoded matrix is
sparse, and decoded as a dense matrix.
:shape: (|sentences|, |sentinel tokens|, |decoded features|)
:see: :class:`.SpacyFeatureVectorizer`
"""
ATTR_EXP_META = ('decoded_feature_ids',)
DESCRIPTION = 'spacy feature vectorizer'
FEATURE_TYPE = TextFeatureType.TOKEN
decoded_feature_ids: Set[str] = field(default=None)
"""The spaCy generated features used during *only* decoding (see class docs).
Examples include ``norm``, ``ent``, ``dep``, ``tag``. When set to
``None``, use all those given in the
:obj:`~.FeatureDocumentVectorizerManager.spacy_vectorizers`.
"""
def _get_shape_with_feature_ids(self, feature_ids: Set[str]):
"""Compute the shape based on what spacy feature ids are given.
:param feature_ids: the spacy feature ids used to filter the result
"""
flen = 0
for fvec in self.manager.spacy_vectorizers.values():
if feature_ids is None or fvec.feature_id in feature_ids:
flen += fvec.shape[1]
return -1, self.token_length, flen
def _get_shape_decode(self) -> Tuple[int, int]:
"""Return the shape needed for the tensor when encoding."""
return self._get_shape_with_feature_ids(None)
def _get_shape_for_document(self, doc: FeatureDocument):
"""Return the shape of the vectorized output for the given document."""
return (len(doc.sents),
self.manager.get_token_length(doc),
self._get_shape_decode()[-1])
def _get_shape(self) -> Tuple[int, int]:
"""Compute the shape based on what spacy feature ids are given."""
return self._get_shape_with_feature_ids(self.decoded_feature_ids)
def _populate_feature_vectors(self, sent: FeatureSentence, six: int,
fvec: SpacyFeatureVectorizer, arr: Tensor,
col_start: int, col_end: int):
"""Populate ``arr`` with every feature available from the vectorizer set
defined in the manager. This fills in the corresponding vectors from
the spacy vectorizer ``fvec`` across all tokens for a column range.
"""
attr_name = fvec.feature_id
col_end = col_start + fvec.shape[1]
toks = sent.tokens[:arr.shape[1]]
for tix, tok in enumerate(toks):
val = getattr(tok, attr_name)
vec = fvec.from_spacy(val)
if vec is not None:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'adding vec {fvec} for {tok}: {vec.shape}')
arr[six, tix, col_start:col_end] = vec
def _encode(self, doc: FeatureDocument) -> FeatureContext:
"""Encode tokens found in the container by aggregating the spaCy vectorizers
output.
"""
arr = self.torch_config.zeros(self._get_shape_for_document(doc))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'type array shape: {arr.shape}')
sent: FeatureSentence
for six, sent in enumerate(doc.sents):
col_start = 0
for fvec in self.manager.spacy_vectorizers.values():
col_end = col_start + fvec.shape[1]
self._populate_feature_vectors(
sent, six, fvec, arr, col_start, col_end)
col_start = col_end
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'encoded array shape: {arr.shape}')
return SparseTensorFeatureContext.instance(
self.feature_id, arr, self.torch_config)
def _slice_by_attributes(self, arr: Tensor) -> Tensor:
"""Create a new tensor from column based slices of the encoded tensor for each
specified feature id given in :obj:`decoded_feature_ids`.
"""
keeps = set(self.decoded_feature_ids)
col_start = 0
tensors = []
for fvec in self.manager.spacy_vectorizers.values():
col_end = col_start + fvec.shape[1]
fid = fvec.feature_id
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'type={fid}, to keep={keeps}')
if fid in keeps:
tensors.append(arr[:, :, col_start:col_end])
keeps.remove(fid)
col_start = col_end
if len(keeps) > 0:
raise VectorizerError(f'Unknown feature type IDs: {keeps}')
sarr = torch.cat(tensors, dim=2)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'slice dim: {sarr.shape}')
return sarr
def to_symbols(self, tensor: Tensor) -> List[List[Dict[str, float]]]:
"""Reverse map the tensor to spaCy features.
:return: a list of sentences, each with a list of tokens, each having a
map of name/count pairs
"""
sents = []
for six in range(tensor.size(0)):
toks = []
sents.append(toks)
for tix in range(tensor.size(1)):
col_start = 0
by_fid = {}
toks.append(by_fid)
for fvec in self.manager.spacy_vectorizers.values():
col_end = col_start + fvec.shape[1]
fid = fvec.feature_id
vec = tensor[six, tix, col_start:col_end]
cnts = dict(filter(lambda x: x[1] > 0,
zip(fvec.as_list, vec.tolist())))
by_fid[fid] = cnts
col_start = col_end
return sents
def _decode(self, context: FeatureContext) -> Tensor:
arr = super()._decode(context)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'decoded features: {self.decoded_feature_ids}, ' +
f'shape: {arr.shape}')
self._assert_decoded_doc_dim(arr, 3)
if self.decoded_feature_ids is not None:
arr = self._slice_by_attributes(arr)
return arr
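# A minimal usage sketch (hypothetical; assumes an application context with a
# configured ``FeatureDocumentVectorizerManager`` bound to ``manager`` that
# registers this vectorizer under the feature ID ``'enum'``):
#
#   doc = manager.parse('I am a citizen of the United States of America.')
#   vec = manager['enum']
#   ctx = vec.encode(doc)        # all spaCy features are encoded (sparse)
#   vec.decoded_feature_ids = {'tag', 'dep'}
#   arr = vec.decode(ctx)        # only 'tag' and 'dep' columns survive
#   # arr.shape: (|sentences|, |tokens|, |kept feature columns|)
#
# Because decoding slices the already encoded sparse tensor, trying a
# different feature subset does not require re-encoding the batches.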
@dataclass
class CountEnumContainerFeatureVectorizer(FeatureDocumentVectorizer):
"""Vectorize the counts of parsed spaCy features. This generates the count of
tokens as a S X M * N tensor where S is the number of sentences, M is the
number of token feature ids and N is the number of columns of the output of
the :class:`.SpacyFeatureVectorizer` vectorizer. Each column position's
count represents the number of counts for that spacy symol for that index
position in the output of :class:`.SpacyFeatureVectorizer`.
This class uses the same efficiency in decoding features given in
:class:`.EnumContainerFeatureVectorizer`.
:shape: (|sentences|, |decoded features|)
"""
ATTR_EXP_META = ('decoded_feature_ids',)
DESCRIPTION = 'token level feature counts'
FEATURE_TYPE = TextFeatureType.DOCUMENT
decoded_feature_ids: Set[str] = field(default=None)
def _get_shape(self) -> Tuple[int, int]:
"""Compute the shape based on what spacy feature ids are given.
"""
feature_ids = self.decoded_feature_ids
flen = 0
for fvec in self.manager.spacy_vectorizers.values():
if feature_ids is None or fvec.feature_id in feature_ids:
flen += fvec.shape[1]
return -1, flen
def get_feature_counts(self, sent: FeatureSentence,
fvec: SpacyFeatureVectorizer) -> Tensor:
"""Return the count of all tokens as a S X N tensor where S is the number of
sentences, N is the columns of the ``fvec`` vectorizer. Each column
position's count represents the number of counts for that spacy symol
for that index position in the ``fvec``.
"""
fid = fvec.feature_id
fcounts = self.torch_config.zeros(fvec.shape[1])
for tok in sent.tokens:
val = getattr(tok, fid)
fnid = fvec.id_from_spacy(val, -1)
if fnid > -1:
fcounts[fnid] += 1
return fcounts
def _encode(self, doc: FeatureDocument) -> FeatureContext:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'encoding doc: {doc}')
sent_arrs = []
for sent in doc.sents:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'encoding sentence: {sent}')
tok_arrs = []
for fvec in self.manager.spacy_vectorizers.values():
cnts: Tensor = self.get_feature_counts(sent, fvec)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'encoding with {fvec}')
tok_arrs.append(cnts)
sent_arrs.append(torch.cat(tok_arrs))
arr = torch.stack(sent_arrs)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'encoded shape: {arr.shape}')
return SparseTensorFeatureContext.instance(
self.feature_id, arr, self.torch_config)
def _slice_by_attributes(self, arr: Tensor) -> Tensor:
"""Create a new tensor from column based slices of the encoded tensor for each
specified feature id given in :obj:`decoded_feature_ids`.
"""
keeps = set(self.decoded_feature_ids)
col_start = 0
tensors = []
for fvec in self.manager.spacy_vectorizers.values():
col_end = col_start + fvec.shape[1]
fid = fvec.feature_id
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'type={fid}, to keep={keeps}')
if fid in keeps:
keep_vec = arr[:, col_start:col_end]
tensors.append(keep_vec)
keeps.remove(fid)
col_start = col_end
if len(keeps) > 0:
raise VectorizerError(f'Unknown feature type IDs: {keeps}')
sarr = torch.cat(tensors, dim=1)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'slice dim: {sarr.shape}')
return sarr
def to_symbols(self, tensor: Tensor) -> List[Dict[str, float]]:
"""Reverse map the tensor to spaCy features.
:return: a list of sentences, each a map of name/count pairs.
"""
sents = []
for six in range(tensor.size(0)):
col_start = 0
by_fid = {}
sents.append(by_fid)
arr = tensor[six]
for fvec in self.manager.spacy_vectorizers.values():
col_end = col_start + fvec.shape[1]
fid = fvec.feature_id
vec = arr[col_start:col_end]
cnts = dict(filter(lambda x: x[1] > 0,
zip(fvec.as_list, vec.tolist())))
by_fid[fid] = cnts
col_start = col_end
return sents
def _decode(self, context: FeatureContext) -> Tensor:
arr = super()._decode(context)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'decoded features: {self.decoded_feature_ids}, ' +
f'shape: {arr.shape}')
if self.decoded_feature_ids is not None:
arr = self._slice_by_attributes(arr)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'decoded shape: {arr.shape}')
return arr
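# A hedged illustration of a decoded row (feature ID and tag names are
# hypothetical): for the single sentence "The dog ran .", the slice of the
# output owned by the ``tag`` vectorizer holds per-symbol counts such as
# DT=1, NN=1, VBD=1 and .=1, with zeros everywhere else; ``to_symbols``
# reverses this back to dictionaries:
#
#   vec = manager['count']                     # assumed feature ID
#   arr = vec.decode(vec.encode(doc))          # shape: (1, |feature columns|)
#   vec.to_symbols(arr)
#   # => [{'tag': {'DT': 1.0, 'NN': 1.0, 'VBD': 1.0, '.': 1.0},
#   #      'dep': {...}, 'ent': {...}}]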
@dataclass
class DepthFeatureDocumentVectorizer(FeatureDocumentVectorizer):
"""Generate the depths of tokens based on how deep they are in a head
dependency tree.
Even though this is a document level vectorizer and is usually added in a
join layer rather than stacked on to the embedded layer, it still assumes
congruence with the token length, which is used in its shape.
**Important**: do not combine sentences into a single document with
:meth:`~zensols.nlp.container.FeatureDocument.combine_sentences` since
features are created as a dependency parse tree at the sentence level.
Otherwise, the dependency relations are broken, resulting in a zeroed
tensor.
:shape: (|sentences|, |sentinel tokens|, 1)
"""
DESCRIPTION = 'head depth'
FEATURE_TYPE = TextFeatureType.TOKEN
def _get_shape(self) -> Tuple[int, int]:
return -1, self.token_length, 1
def encode(self, doc: Union[Tuple[FeatureDocument], FeatureDocument]) -> \
FeatureContext:
ctx: TensorFeatureContext
if isinstance(doc, (tuple, list)):
self._assert_doc(doc)
docs = doc
comb_doc = FeatureDocument.combine_documents(docs)
n_toks = self.manager.get_token_length(comb_doc)
arrs = tuple(map(lambda d:
self._encode_doc(d.combine_sentences(), n_toks),
docs))
arr = torch.cat(arrs, dim=0)
arr = arr.unsqueeze(-1)
ctx = SparseTensorFeatureContext.instance(
self.feature_id, arr, self.torch_config)
else:
ctx = super().encode(doc)
return ctx
def _encode(self, doc: FeatureDocument) -> FeatureContext:
n_toks = self.manager.get_token_length(doc)
arr = self._encode_doc(doc, n_toks)
arr = arr.unsqueeze(-1)
return SparseTensorFeatureContext.instance(
self.feature_id, arr, self.torch_config)
def _encode_doc(self, doc: FeatureDocument, n_toks: int) -> Tensor:
n_sents = len(doc.sents)
arr = self.torch_config.zeros((n_sents, n_toks))
u_doc = doc.uncombine_sentences()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'encoding doc: {len(doc)}/{len(u_doc)}: {doc}')
# if the doc is combined as several sentences concatenated in one,
# unpack and write all features in one row
if len(doc) != len(u_doc):
soff = 0
for sent in u_doc.sents:
self._transform_sent(sent, arr, 0, soff, n_toks)
soff += len(sent)
else:
# otherwise, each row is a separate sentence
for six, sent in enumerate(doc.sents):
self._transform_sent(sent, arr, six, 0, n_toks)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'encoded shape: {arr.shape}')
return arr
def _transform_sent(self, sent: FeatureSentence, arr: Tensor,
six: int, soff: int, slen: int):
head_depths = self._get_head_depth(sent)
for tix, tok, depth in head_depths:
off = tix + soff
val = 1. / depth
in_range = (off < slen)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'setting ({six}, {off}) = {val}: set={in_range}')
if in_range:
arr[six, off] = val
def _dep_branch(self, node: FeatureToken, toks: Tuple[FeatureToken],
tid_to_idx: Dict[int, int], depth: int,
depths: Dict[int, int]) -> \
Dict[FeatureToken, List[FeatureToken]]:
idx = tid_to_idx.get(node.i)
if idx is not None:
depths[idx] = depth
for c in node.children:
cix = tid_to_idx.get(c)
if cix is not None:
child = toks[cix]
self._dep_branch(child, toks, tid_to_idx, depth + 1, depths)
def _get_head_depth(self, sent: FeatureSentence) -> \
Tuple[Tuple[int, FeatureToken, int]]:
"""Calculate the depth of tokens in a sentence.
:param sent: the sentence that has the tokens whose depths to compute
:return: a tuple of (sentence token index, token, depth)
"""
tid_to_idx: Dict[int, int] = {}
toks = sent.tokens
for i, tok in enumerate(toks):
tid_to_idx[tok.i] = i
if logger.isEnabledFor(logging.DEBUG):
logger.debug('|'.join(
map(lambda t: f'{tid_to_idx[t.i]}:{t.i}:{t.text}({t.dep_})',
sent.token_iter())))
logger.debug(f'tree: {sent.dependency_tree}')
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'tokens: {toks}')
root = tuple(
filter(lambda t: t.dep_ == 'ROOT' and not t.is_punctuation, toks))
if len(root) == 1:
root = root[0]
tree = {tid_to_idx[root.i]: 0}
self._dep_branch(root, toks, tid_to_idx, 1, tree)
return map(lambda x: (x[0], toks[x[0]], x[1]), tree.items())
else:
return ()
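# A worked example of the head depth encoding (the parse is hypothetical):
# for "The dog ran fast" with ``ran`` as the root, ``dog`` and ``fast`` as
# its children and ``The`` a child of ``dog``, ``_get_head_depth`` assigns
# depth 1 to the root (its initial 0 is overwritten by the ``_dep_branch``
# call), depth 2 to its children and depth 3 to grandchildren;
# ``_transform_sent`` then stores 1/depth per token:
#
#   token:  The    dog   ran   fast
#   depth:  3      2     1     2
#   value:  0.33   0.5   1.0   0.5
#
# so tokens closer to the root receive values closer to 1.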
@dataclass
class OneHotEncodedFeatureDocumentVectorizer(
FeatureDocumentVectorizer, OneHotEncodedEncodableFeatureVectorizer):
"""Vectorize nominal enumerated features in to a one-hot encoded vectors.
The feature is taken from a :class:`~zensols.nlp.FeatureToken`. If
:obj:`level` is ``token`` then the features are token attributes identified
by :obj:`feature_attribute`. If the :obj:`level` is ``document`` feature is
taken from the document.
:shape:
* level = document: (1, |categories|)
* level = token: (|<sentences>|, |<sentinel tokens>|, |categories|)
"""
DESCRIPTION = 'encoded feature document vectorizer'
feature_attribute: Tuple[str] = field(default=None)
"""The feature attributes to vectorize."""
level: str = field(default='token')
"""The level at which to take the attribute value, which is ``document``,
``sentence`` or ``token``.
"""
def __post_init__(self):
super().__post_init__()
self.optimize_bools = False
@property
def feature_type(self) -> TextFeatureType:
return {'document': TextFeatureType.DOCUMENT,
'token': TextFeatureType.TOKEN,
}[self.level]
def _get_shape(self) -> Tuple[int, int]:
if self.level == 'document':
return -1, super()._get_shape()[1]
else:
return -1, self.token_length, super()._get_shape()[1]
def _encode(self, doc: FeatureDocument) -> FeatureContext:
attr = self.feature_attribute
if self.level == 'document':
arr = self.torch_config.zeros((1, self.shape[1]))
feats = [getattr(doc, attr)]
self._encode_cats(feats, arr)
elif self.level == 'token':
# not tested
tlen = self.manager.get_token_length(doc)
arr = self.torch_config.zeros((len(doc), tlen, self.shape[2]))
for six, sent in enumerate(doc.sents):
feats = tuple(map(lambda s: getattr(s, attr), sent))
self._encode_cats(feats, arr[six])
else:
raise VectorizerError(f'Unknown doc level: {self.level}')
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'vectorized: {len(doc)} sents in to {arr.shape}')
return SparseTensorFeatureContext.instance(
self.feature_id, arr, self.torch_config)
@dataclass
class TokenEmbeddingFeatureVectorizer(
AggregateEncodableFeatureVectorizer, FeatureDocumentVectorizer):
"""A :class:`~zensols.deepnlp.vectorize.AggregateEncodableFeatureVectorizer`
that is useful for token level classification (i.e. NER). It uses a
delegate to first vectorize the features, then concatenates them into one
aggregate.
In shape terms, this takes the single sentence position. The additional
unsqueezed dimensions set with :obj:`add_dims` are useful when the
delegate vectorizer encodes booleans or any other value that does not take
an additional dimension.
:shape: (1, |tokens|, <delegate vectorizer shape>[, <unsqueeze dimensions>])
"""
DESCRIPTION = 'token aggregate vectorizer'
level: TextFeatureType = field(default=TextFeatureType.TOKEN)
"""The level at which to take the attribute value, which is ``document``,
``sentence`` or ``token``.
"""
add_dims: int = field(default=0)
"""The number of dimensions to add (see class docs)."""
def _get_shape(self):
dim = [1]
dim.extend(super()._get_shape())
dim.extend([1] * self.add_dims)
return tuple(dim)
@property
def feature_type(self) -> TextFeatureType:
return self.level
def encode(self, doc: Union[Tuple[FeatureDocument], FeatureDocument]) -> \
FeatureContext:
return TransformableFeatureVectorizer.encode(self, doc)
def _decode(self, context: MultiFeatureContext) -> Tensor:
tensor: Tensor = super()._decode(context)
# add one trailing singleton dimension per configured ``add_dims``
for _ in range(self.add_dims):
    tensor = tensor.unsqueeze(-1)
return tensor
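# A pure-torch shape sketch of the ``add_dims`` behavior (sizes are
# hypothetical): if the aggregated delegate decodes to shape (1, 10, 5) and
# ``add_dims`` is 2, the loop above unsqueezes twice:
#
#   import torch
#   t = torch.zeros(1, 10, 5)
#   for _ in range(2):
#       t = t.unsqueeze(-1)
#   assert t.shape == (1, 10, 5, 1, 1)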
@dataclass
class StatisticsFeatureDocumentVectorizer(FeatureDocumentVectorizer):
"""Vectorizes basic surface language statics which include:
* character count
* token count
* min token length in characters
* max token length in characters
* average token length in characters (|characters| / |tokens|)
* sentence count (for FeatureDocuments)
* average sentence length (|tokens| / |sentences|)
* min sentence length
* max sentence length
:shape: (1, 9,)
"""
DESCRIPTION = 'statistics'
FEATURE_TYPE = TextFeatureType.DOCUMENT
def _get_shape(self) -> Tuple[int, int]:
return -1, 9
def _encode(self, doc: FeatureDocument) -> FeatureContext:
n_toks = len(doc.tokens)
n_sents = 1
min_tlen = sys.maxsize
max_tlen = 0
ave_tlen = 1
min_slen = sys.maxsize
max_slen = 0
ave_slen = 1
n_char = 0
for t in doc.tokens:
tlen = len(t.norm)
n_char += tlen
min_tlen = min(min_tlen, tlen)
max_tlen = max(max_tlen, tlen)
ave_tlen = n_char / n_toks
if isinstance(doc, FeatureDocument):
n_sents = len(doc.sents)
ave_slen = n_toks / n_sents
for s in doc.sents:
slen = len(s.tokens)
min_slen = min(min_slen, slen)
max_slen = max(max_slen, slen)
stats = (n_char, n_toks, min_tlen, max_tlen, ave_tlen,
n_sents, ave_slen, min_slen, max_slen)
arr = self.torch_config.from_iterable(stats).unsqueeze(0)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'array shape: {arr.shape}')
return TensorFeatureContext(self.feature_id, arr)
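# A worked example of the nine statistics (the document is hypothetical): for
# a two sentence document with tokens ['The', 'dog', 'ran', '.'] and
# ['It', 'was', 'fast', '.']:
#
#   n_char   = (3 + 3 + 3 + 1) + (2 + 3 + 4 + 1) = 20
#   n_toks   = 8
#   min_tlen = 1, max_tlen = 4, ave_tlen = 20 / 8 = 2.5
#   n_sents  = 2, ave_slen = 8 / 2 = 4.0
#   min_slen = 4, max_slen = 4
#
# giving the encoded vector (20, 8, 1, 4, 2.5, 2, 4.0, 4, 4) of shape (1, 9).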
@dataclass
class OverlappingFeatureDocumentVectorizer(MultiDocumentVectorizer):
"""Vectorize the number of normalized and lemmatized tokens (in this order)
across multiple documents.
The input to this feature vectorizer are a tuple N of
:class:`.FeatureDocument` instances.
:shape: (2,)
"""
DESCRIPTION = 'overlapping token counts'
def _get_shape(self) -> Tuple[int, int]:
return 2,
@staticmethod
def _norms(ac: TokenContainer, bc: TokenContainer) -> Set[str]:
a = set(map(lambda s: s.norm.lower(), ac.token_iter()))
b = set(map(lambda s: s.norm.lower(), bc.token_iter()))
return a & b
@staticmethod
def _lemmas(ac: TokenContainer, bc: TokenContainer) -> Set[str]:
a = set(map(lambda s: s.lemma_.lower(), ac.token_iter()))
b = set(map(lambda s: s.lemma_.lower(), bc.token_iter()))
return a & b
def _encode(self, docs: Tuple[FeatureDocument]) -> FeatureContext:
norms = reduce(self._norms, docs)
lemmas = reduce(self._lemmas, docs)
arr = self.torch_config.from_iterable((len(norms), len(lemmas)))
return TensorFeatureContext(self.feature_id, arr)
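# An overlap sketch (documents hypothetical): for a document with lower cased
# norms {'the', 'dog', 'ran'} and another with {'the', 'cat', 'ran'}, the
# reduction over ``_norms`` yields {'the', 'ran'}, so the first element of
# the encoded tensor is 2; the lemma overlap is computed the same way over
# ``lemma_``. The set arithmetic in isolation:
#
#   from functools import reduce
#   sets = [{'the', 'dog', 'ran'}, {'the', 'cat', 'ran'}]
#   assert reduce(lambda a, b: a & b, sets) == {'the', 'ran'}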
@dataclass
class MutualFeaturesContainerFeatureVectorizer(MultiDocumentVectorizer):
"""Vectorize the shared count of all tokens as a S X M * N tensor, where S is
the number of sentences, M is the number of token feature ids and N is the
columns of the output of the :class:`.SpacyFeatureVectorizer` vectorizer.
This uses an instance of :class:`CountEnumContainerFeatureVectorizer` to
compute across each spacy feature and then sums them up for only those
features shared. If at least one shared document has a zero count, the
features is zeroed.
The input to this feature vectorizer are a tuple of N
:class:`.TokenContainer` instances.
:shape: (|sentences|, |decoded features|,) from the referenced
:class:`CountEnumContainerFeatureVectorizer` given by
:obj:`count_vectorizer_feature_id`
"""
DESCRIPTION = 'mutual feature counts'
count_vectorizer_feature_id: str = field()
"""The string feature ID configured in the
:class:`.FeatureDocumentVectorizerManager` of the
:class:`CountEnumContainerFeatureVectorizer` to use for the count features.
"""
@property
def count_vectorizer(self) -> CountEnumContainerFeatureVectorizer:
"""Return the count vectorizer used for the count features.
:see: :obj:`count_vectorizer_feature_id`
"""
return self.manager[self.count_vectorizer_feature_id]
@property
def ones(self) -> Tensor:
"""Return a tensor of ones for the shape of this instance.
"""
return self.torch_config.ones((1, self.shape[1]))
def _get_shape(self) -> Tuple[int, int]:
return -1, self.count_vectorizer.shape[1]
def _encode(self, docs: Tuple[FeatureDocument]) -> FeatureContext:
ctxs = tuple(map(self.count_vectorizer.encode,
map(lambda doc: doc.combine_sentences(), docs)))
return MultiFeatureContext(self.feature_id, ctxs)
def _decode(self, context: MultiFeatureContext) -> Tensor:
def decode_context(ctx):
sents = self.count_vectorizer.decode(ctx)
return torch.sum(sents, axis=0)
ones = self.ones
arrs = tuple(map(decode_context, context.contexts))
if len(arrs) == 1:
# return the single document as a mutual count against itself
return arrs[0]
else:
arrs = torch.stack(arrs, axis=0).squeeze(1)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'combined counts (doc/row): {arrs.shape}')
# clone so the operations of this vectorizer do not affect the
# tensors from the delegate count vectorizer
cnts = self.torch_config.clone(arrs)
# multiply counts across all docs so any 0 count feature will be 0 in
# the mask
prod = cnts.prod(axis=0).unsqueeze(0)
# create 2 X N with count product with ones
cat_ones = torch.cat((prod, ones))
# keep 0s for no count features or 1 if there is at least one for
# the mask
mask = torch.min(cat_ones, axis=0)[0]
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'counts mask: {cat_ones.shape}')
# use the mask to zero out counts that aren't mutual across all
# documents, then sum the counts across documents
return (cnts * mask).sum(axis=0).unsqueeze(0)
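# A pure-torch illustration of the masking logic above (counts are
# hypothetical): counts that are not shared by every document are zeroed
# before the documents are summed.
#
#   import torch
#   cnts = torch.tensor([[2., 0., 1.],      # doc 1 feature counts
#                        [1., 3., 1.]])     # doc 2 feature counts
#   ones = torch.ones((1, 3))
#   prod = cnts.prod(axis=0).unsqueeze(0)                  # [[2., 0., 1.]]
#   mask = torch.min(torch.cat((prod, ones)), axis=0)[0]   # [1., 0., 1.]
#   (cnts * mask).sum(axis=0).unsqueeze(0)                 # [[3., 0., 2.]]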
@dataclass
class WordEmbeddingFeatureVectorizer(EncodableFeatureVectorizer):
"""Vectorizes string tokens in to word embedded vectors. This class works
directly with the string tokens rather than
:class:`~zensols.nlp.FeatureDocument` instances. It can be useful when
there's a need to vectorize tokens outside of a feature document
(i.e. ``cui2vec``).
"""
FEATURE_TYPE = TextFeatureType.EMBEDDING
DESCRIPTION = 'word embedding encoder'
embed_model: WordEmbedModel = field()
"""The word embedding model that has the string tokens to vector mapping."""
def _get_shape(self):
return (-1, self.embed_model.vector_dimension)
def _encode(self, keys: Iterable[str]) -> FeatureContext:
em: WordEmbedModel = self.embed_model
vecs: np.ndarray = tuple(map(lambda k: em.get(k), keys))
arr: np.ndarray = np.stack(vecs)
return TensorFeatureContext(self.feature_id, torch.from_numpy(arr)) | zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/vectorize/vectorizers.py | vectorizers.py |
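# Usage sketch (hypothetical; assumes ``embed_model`` is a configured
# :class:`.WordEmbedModel` such as a cui2vec or GloVe model, and that the
# remaining dataclass fields are supplied by the application config):
#
#   vec = config_factory('cui2vec_vectorizer')     # assumed section name
#   ctx = vec.encode(['heart', 'attack'])
#   arr = vec.decode(ctx)     # shape: (2, embed_model.vector_dimension)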
__author__ = 'Paul Landes'
from typing import Tuple, Iterable, List, Union
from dataclasses import dataclass, field
import logging
from itertools import chain
import torch
from torch import Tensor
from zensols.config import Dictable
from zensols.persist import persisted, Primeable
from zensols.deeplearn.vectorize import FeatureContext, TensorFeatureContext
from zensols.nlp import FeatureToken, FeatureDocument, FeatureSentence
from zensols.deepnlp.embed import WordEmbedModel
from zensols.deepnlp.vectorize import TextFeatureType
from . import FoldingDocumentVectorizer
logger = logging.getLogger(__name__)
@dataclass
class EmbeddingFeatureVectorizer(FoldingDocumentVectorizer,
Primeable, Dictable):
"""Vectorize a :class:`~zensols.nlp.container.FeatureDocument` as a vector of
embedding indexes. Later, these indexes are used in a
:class:`~zensols.deepnlp.layer.embed.EmbeddingLayer` to create the input
word embedding during execution of the model.
"""
embed_model: Union[WordEmbedModel, 'TransformerEmbedding'] = field()
"""The word vector model.
Types for this value include:
* :class:`~zensols.deepnlp.embed.domain.WordEmbedModel`
* :class:`~zensols.deepnlp.transformer.embed.TransformerEmbedding`
"""
decode_embedding: bool = field(default=False)
"""Whether or not to decode the embedding during the decode phase, which is
helpful when caching batches; otherwise, the data is decoded from indexes
to embeddings each epoch.
Note that this option and functionality can not be obviated by that
implemented with the :obj:`encode_transformed` attribute. The difference
is over whether or not more work is done during decoding rather than
encoding. An example of when this is useful is for large word embeddings
(e.g. Google 300D pretrained) where the index to tensor embedding
transform is done while decoding rather than in the ``forward`` so it's not
done for every epoch.
"""
def _get_shape(self) -> Tuple[int, int]:
return self.manager.token_length, self.embed_model.vector_dimension
def prime(self):
if isinstance(self.embed_model, Primeable):
self.embed_model.prime()
def _get_dictable_attributes(self) -> Iterable[Tuple[str, str]]:
return chain.from_iterable(
[super()._get_dictable_attributes(), [('model', 'embed_model')]])
@dataclass
class WordVectorEmbeddingFeatureVectorizer(EmbeddingFeatureVectorizer):
"""Vectorize sentences using an embedding model (:obj:`embed_model`) of type
:class:`.WordEmbedModel`.
The encoder returns the indices of the word embedding for each token in
the input :class:`.FeatureDocument`. The decoder returns the corresponding
word embedding vectors if :obj:`decode_embedding` is ``True``. Otherwise
it returns the same indices, which are later used by the embedding layer
(usually :class:`~zensols.deepnlp.layer.EmbeddingLayer`).
"""
DESCRIPTION = 'word vector document embedding'
FEATURE_TYPE = TextFeatureType.EMBEDDING
token_feature_id: str = field(default='norm')
"""The :class:`~zensols.nlp.FeatureToken` attribute used to index the
embedding vectors.
"""
def _encode(self, doc: FeatureDocument) -> FeatureContext:
emodel = self.embed_model
tw: int = self.manager.get_token_length(doc)
sents: Tuple[FeatureSentence] = doc.sents
shape: Tuple[int, int] = (len(sents), tw)
tfid: str = self.token_feature_id
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'using token length: {tw} with shape: {shape}, ' +
f'sents: {len(sents)}')
arr = self.torch_config.empty(shape, dtype=torch.long)
row: int
sent: FeatureSentence
for row, sent in enumerate(sents):
tokens: List[FeatureToken] = sent.tokens[0:tw]
slen: int = len(tokens)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'row: {row}, ' + 'toks: ' +
' '.join(map(lambda x: x.norm, tokens)))
tokens = list(map(lambda t: getattr(t, tfid), tokens))
if slen < tw:
tokens += [WordEmbedModel.ZERO] * (tw - slen)
for i, tok in enumerate(tokens):
arr[row][i] = emodel.word2idx_or_unk(tok)
return TensorFeatureContext(self.feature_id, arr)
@property
@persisted('_vectors')
def vectors(self) -> Tensor:
embed_model: WordEmbedModel = self.embed_model
return embed_model.to_matrix(self.torch_config)
def _decode(self, context: FeatureContext) -> Tensor:
x: Tensor = super()._decode(context)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'indexes: {x.shape} ({x.dtype}), ' +
f'will decode in vectorizer: {self.decode_embedding}')
if self.decode_embedding:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'decoding using: {self.decode_embedding}')
src_vecs: Tensor = self.vectors
batches: List[Tensor] = []
vecs = []
for batch_idx in x:
for idxt in batch_idx:
vecs.append(src_vecs[idxt])
batches.append(torch.stack(vecs))
vecs.clear()
x = torch.stack(batches)
return x | zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/vectorize/embed.py | embed.py |
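# An index encoding sketch (vocabulary indexes hypothetical): for a document
# with sentences "the dog ran" and "it naps" and a token length of 4, the
# encoder pads the short rows with ``WordEmbedModel.ZERO`` and stores one
# embedding index per cell:
#
#   [[w2i('the'), w2i('dog'),  w2i('ran'), w2i(ZERO)],
#    [w2i('it'),  w2i('naps'), w2i(ZERO),  w2i(ZERO)]]
#
# where ``w2i`` stands for ``embed_model.word2idx_or_unk``. Decoding with
# ``decode_embedding = True`` replaces each index with its row from
# ``embed_model.to_matrix(...)``, yielding shape (2, 4, vector_dimension).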
__author__ = 'Paul Landes'
from typing import List, Union, Set, Dict, Tuple, Sequence
from dataclasses import dataclass, field
from enum import Enum, auto
from abc import abstractmethod, ABCMeta
import logging
import collections
import torch
from torch import Tensor
from zensols.persist import persisted, PersistedWork
from zensols.deeplearn.vectorize import (
FeatureContext,
FeatureVectorizerManager,
VectorizerError,
TransformableFeatureVectorizer,
MultiFeatureContext,
)
from zensols.nlp import FeatureSentence, FeatureDocument, FeatureDocumentParser
from . import SpacyFeatureVectorizer
logger = logging.getLogger(__name__)
class TextFeatureType(Enum):
"""The type of :class:`.FeatureDocumentVectorizer`.
"""
TOKEN = auto()
"""Token level with a shape congruent with the number of tokens, typically
concatenated with the embedding layer.
"""
DOCUMENT = auto()
"""Document level, typically added to a join layer."""
MULTI_DOCUMENT = auto()
""""Multiple documents for the purposes of aggregating shared features."""
EMBEDDING = auto()
"""Embedding layer, typically used as the input layer."""
NONE = auto()
"""Other type, which tells the framework to ignore the vectorized features.
:see: :class:`~zensols.deepnlp.layer.embed.EmbeddingNetworkModule`
"""
@dataclass
class FeatureDocumentVectorizer(TransformableFeatureVectorizer,
metaclass=ABCMeta):
"""Creates document or sentence level features using instances of
:class:`.TokenContainer`.
Subclasses implement specific vectorization on a single document using
:meth:`_encode`, and it is up to the subclass to decide how to vectorize
the document.
Multiple documents given as a list or tuple of documents (an aggregate) are
supported. Only the document level vectorization is supported to
provide one standard contract across framework components and vectorizers.
If more than one document is given during encoding, it will be combined
into one document as described using an
:obj:`.FoldingDocumentVectorizer.encoding_level` = ``concat_tokens``.
:see: :class:`.FoldingDocumentVectorizer`
"""
@abstractmethod
def _encode(self, doc: FeatureDocument) -> FeatureContext:
pass
def _is_mult(self, doc: Union[Tuple[FeatureDocument], FeatureDocument]) \
-> bool:
"""Return ``True`` or not the input is a tuple (multiple) documents."""
return isinstance(doc, (Tuple, List))
def _is_doc(self, doc: Union[Tuple[FeatureDocument], FeatureDocument]):
"""Return whether ``doc`` is a :class:`.FeatureDocument`."""
if self._is_mult(doc):
docs = doc
for doc in docs:
if not self._is_doc(doc):
return False
elif not isinstance(doc, FeatureDocument):
return False
return True
def _combine_documents(self, docs: Tuple[FeatureDocument]) -> \
FeatureDocument:
return FeatureDocument.combine_documents(docs)
def encode(self, doc: Union[Tuple[FeatureDocument], FeatureDocument]) -> \
FeatureContext:
"""Encode by combining documents in to one monolithic document when a tuple is
passed, otherwise default to the super class's encode functionality.
"""
self._assert_doc(doc)
if self._is_mult(doc):
doc = self._combine_documents(doc)
return super().encode(doc)
def _assert_doc(self, doc: Union[Tuple[FeatureDocument], FeatureDocument]):
if not self._is_doc(doc):
raise VectorizerError(
f'Expecting FeatureDocument, but got type: {type(doc)}')
def _assert_decoded_doc_dim(self, arr: Tensor, expect: int):
"""Check the decoded document dimesion and rase an error for those that do not
match.
"""
if len(arr.size()) != expect:
raise VectorizerError(f'Expecting {expect} tensor dimensions, ' +
f'but got shape: {arr.shape}')
@property
def feature_type(self) -> TextFeatureType:
"""The type of feature this vectorizer generates. This is used by classes such
as :class:`~zensols.deepnlp.layer.EmbeddingNetworkModule` to determine
where to add the features, such as concatenating to the embedding layer,
join layer etc.
"""
return self.FEATURE_TYPE
@property
def token_length(self) -> int:
"""The number of token features (if token level) generated."""
return self.manager.token_length
def __str__(self):
return (f'{super().__str__()}, ' +
f'feature type: {self.feature_type.name} ')
@dataclass
class FoldingDocumentVectorizer(FeatureDocumentVectorizer, metaclass=ABCMeta):
"""This class is like :class:`.FeatureDocumentVectorizer`, but provides more
options in how to fold multiple documents into a single document for
vectorization.
Based on the value of :obj:`fold_method`, this class encodes a sequence of
:class:`~zensols.nlp.container.FeatureDocument` instances differently.
Subclasses must implement :meth:`_encode`.
*Note*: this is not to be confused with the
:class:`.MultiDocumentVectorizer` vectorizer, which vectorizes multiple
documents in to document level features.
"""
_FOLD_METHODS = frozenset('raise concat_tokens sentence separate'.split())
fold_method: str = field()
"""How multiple documents are merged in to a single document for vectorization,
which is one of:
* ``raise``: raise an error allowing only single documents to be
vectorized
* ``concat_tokens``: concatenate tokens of each document into
singleton sentence documents; uses
:meth:`~zensols.nlp.container.FeatureDocument.combine_documents` with
``concat_tokens = True``
* ``sentence``: all sentences of all documents become singleton
sentence documents; uses
:meth:`~zensols.nlp.container.FeatureDocument.combine_documents` with
``concat_tokens = False``
* ``separate``: every sentence of each document is encoded separately,
then each sentence output is concatenated as the respective
document during decoding; this uses the :meth:`_encode` for each
sentence of each document and :meth:`_decode` to decode back into
the same represented document structure as the original
"""
def __post_init__(self):
super().__post_init__()
if self.fold_method not in self._FOLD_METHODS:
raise VectorizerError(f'No such fold method: {self.fold_method}')
def _combine_documents(self, docs: Tuple[FeatureDocument]) -> \
FeatureDocument:
if self.fold_method == 'raise' and len(docs) > 1:
raise VectorizerError(
f'Configured to support single document but got {len(docs)}')
concat_tokens = self.fold_method == 'concat_tokens'
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'fold method: {self.fold_method}, ' +
f'concat_tokens={concat_tokens}')
return FeatureDocument.combine_documents(
docs, concat_tokens=concat_tokens)
def _encode_sentence(self, sent: FeatureSentence) -> FeatureContext:
"""Encode a single sentence document.
"""
sent_doc: FeatureDocument = sent.to_document()
return super().encode(sent_doc)
def _encode_sentences(self, doc: FeatureDocument) -> FeatureContext:
docs: Sequence[FeatureDocument] = doc if self._is_mult(doc) else [doc]
doc_ctxs: List[List[FeatureContext]] = []
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'encoding {len(docs)} documents')
# iterate over each document passed (usually as an aggregate from the
# batch framework)
doc: FeatureDocument
for doc in docs:
sent_ctxs: List[FeatureContext] = []
# concatenate each encoded sentence to become the document
sent: FeatureSentence
for sent in doc.sents:
ctx = self._encode_sentence(sent)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'encoded {sent}: {ctx}')
sent_ctxs.append(ctx)
# add the multi-context of the sentences
doc_ctxs.append(MultiFeatureContext(
feature_id=None, contexts=tuple(sent_ctxs)))
return MultiFeatureContext(self.feature_id, tuple(doc_ctxs))
def encode(self, doc: Union[Tuple[FeatureDocument], FeatureDocument]) -> \
FeatureContext:
ctx: FeatureContext
if self.fold_method == 'concat_tokens' or \
self.fold_method == 'sentence':
ctx = super().encode(doc)
elif self.fold_method == 'separate':
self._assert_doc(doc)
ctx = self._encode_sentences(doc)
elif self.fold_method == 'raise':
if self._is_mult(doc):
raise VectorizerError(
f'Expecting single document but got: {len(doc)} documents')
ctx = super().encode(doc)
return ctx
def _create_decoded_pad(self, shape: Tuple[int]) -> Tensor:
return self.torch_config.zeros(shape)
def _decode_sentence(self, sent_ctx: FeatureContext) -> Tensor:
return super().decode(sent_ctx)
def _decode_sentences(self, context: MultiFeatureContext,
sent_dim: int = 1) -> Tensor:
darrs: List[Tensor] = []
# each multi-context represents a document with sentence context
# elements
doc_ctx: Tuple[MultiFeatureContext]
for doc_ctx in context.contexts:
sent_arrs: List[Tensor] = []
# decode each sentence and track their decoded tensors for later
# concatenation
sent_ctx: FeatureContext
for sent_ctx in doc_ctx.contexts:
arr = self._decode_sentence(sent_ctx)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'decoded sub context: {sent_ctx} ' +
f'-> {arr.size()}')
sent_arrs.append(arr)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'concat {len(sent_arrs)} along dim {sent_dim}')
# concat all sentences for this document into one long vector with
# shape (batch, |tokens|, transformer dim)
sent_arr: Tensor = torch.cat(sent_arrs, dim=sent_dim)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'sentence cat: {sent_arr.size()}')
darrs.append(sent_arr)
# create document array of shape (batch, |tokens|, transformer dim) by
# first finding the longest document token count
max_sent_len = max(map(lambda t: t.size(sent_dim), darrs))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'max sent len: {max_sent_len}')
arr = self._create_decoded_pad((
len(context.contexts),
max_sent_len,
darrs[0][0].size(-1)))
# copy over each document (from sentence concats) to the decoded tensor
for dix, doc_arr in enumerate(darrs):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'sent array: {doc_arr.shape}')
arr[dix, :doc_arr.size(1), :] = doc_arr
n_squeeze = len(arr.shape) - len(self.shape)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'squeezing {n_squeeze}, {arr.shape} -> {self.shape}')
for _ in range(n_squeeze):
arr = arr.squeeze(dim=-1)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'vectorized shape: {arr.shape}')
return arr
def decode(self, context: FeatureContext) -> Tensor:
arr: Tensor
if self.fold_method == 'separate':
arr = self._decode_sentences(context)
else:
arr = super().decode(context)
return arr
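# A hedged summary of the fold methods (documents hypothetical), given two
# documents each containing two sentences passed as a tuple:
#
#   * 'raise'         -> VectorizerError (only single documents are allowed)
#   * 'concat_tokens' -> one document with one singleton sentence per original
#                        document, each holding all of that document's tokens
#   * 'sentence'      -> one document with four singleton sentences
#   * 'separate'      -> each of the four sentences is encoded on its own and
#                        decode() concatenates them back per document, zero
#                        padding to the longest document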
@dataclass
class MultiDocumentVectorizer(FeatureDocumentVectorizer, metaclass=ABCMeta):
"""Vectorizes multiple documents into document level features. Features
generated by subclasses are sometimes used in join layers. Examples
include :class:`.OverlappingFeatureDocumentVectorizer`.
This is not to be confused with :class:`.FoldingDocumentVectorizer`, which
merges multiple documents in to a single document for vectorization.
"""
FEATURE_TYPE = TextFeatureType.DOCUMENT
def encode(self, docs: Tuple[FeatureDocument]) -> FeatureContext:
return self._encode(docs)
@dataclass
class FeatureDocumentVectorizerManager(FeatureVectorizerManager):
"""Creates and manages instances of :class:`.FeatureDocumentVectorizer`
and parses text into feature based documents.
This is used to manage the relationship of a given set of parsed features
keeping in mind that parsing will usually happen as a preprocessing step.
A second step is the vectorization of those features, which can be any
proper subset of those features parsed in the previous step. However,
these checks, of course, are not necessary if pickling isn't used across
the parse and vectorization steps.
Instances can set a hard fixed token length, in which case vectorized
tensors have a fixed width based on the setting of :obj:`token_length`.
However, this can also be set to use the longest sentence of the document,
which is useful when computing vectorized tensors from the document as a
batch, even if the input data are batched as a group of sentences in a
document.
:see: :class:`.FeatureDocumentVectorizer`
:see :meth:`parse`
"""
doc_parser: FeatureDocumentParser = field()
"""Used to :meth:`parse` documents."""
token_length: int = field()
"""The length of tokens used in fixed length features. This is used as a
dimension in decoded tensors. If this value is ``-1``, use the longest
sentence of the document as the token length, which is usually counted as
the batch.
:see: :meth:`get_token_length`
"""
token_feature_ids: Set[str] = field(default=None)
"""Indicates which spaCy parsed features to generate in the vectorizers held in
this instance. Examples include ``norm``, ``ent``, ``dep``, ``tag``.
If this is not set, it defaults to the `token_feature_ids` in
:obj:`doc_parser`.
:see: :obj:`.SpacyFeatureVectorizer.VECTORIZERS`
"""
def __post_init__(self):
super().__post_init__()
if logger.isEnabledFor(logging.DEBUG):
logger.debug('creating fd vec manager')
if self.token_feature_ids is None:
self.token_feature_ids = self.doc_parser.token_feature_ids
else:
feat_diff = self.token_feature_ids - self.doc_parser.token_feature_ids
if len(feat_diff) > 0:
fdiffs = ', '.join(feat_diff)
raise VectorizerError(
'Parser token features do not exist in vectorizer: ' +
f'{self.token_feature_ids} - ' +
f'{self.doc_parser.token_feature_ids} = {fdiffs}')
self._spacy_vectorizers = PersistedWork('_spacy_vectorizers', self)
@property
def is_batch_token_length(self) -> bool:
"""Return whether or not the token length is variable based on the longest
token length in the batch.
"""
return self.token_length < 0
def get_token_length(self, doc: FeatureDocument) -> int:
"""Get the token length for the document. If :obj:`is_batch_token_length` is
``True``, then the token length is computed based on the longest
sentence in the document ``doc``. See the class docs.
:param doc: used to compute the longest sentence if
:obj:`is_batch_token_length` is ``True``
:return: the (global) token length for the document
"""
if self.is_batch_token_length:
return doc.max_sentence_len
else:
return self.token_length
def parse(self, text: Union[str, List[str]], *args, **kwargs) -> \
FeatureDocument:
"""Parse text or a text as a list of sentences.
**Important**: Parsing documents through this manager instance is
safer since checks are made that the features used when documents were
parsed before pickling are still available.
:param text: either a string or a list of strings; if the former a
document with one sentence will be created, otherwise a
document is returned with a sentence for each string in
the list
"""
return self.doc_parser.parse(text, *args, **kwargs)
@property
@persisted('_spacy_vectorizers')
def spacy_vectorizers(self) -> Dict[str, SpacyFeatureVectorizer]:
"""Return vectorizers based on the :obj:`token_feature_ids` configured on this
instance. Keys are token level feature ids found in
:obj:`.SpacyFeatureVectorizer.VECTORIZERS`.
:return: an :class:`collections.OrderedDict` of vectorizers
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug('creating spacy vectorizers')
token_feature_ids = set(SpacyFeatureVectorizer.VECTORIZERS.keys())
token_feature_ids = token_feature_ids & self.token_feature_ids
token_feature_ids = sorted(token_feature_ids)
vectorizers = collections.OrderedDict()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating token features: {token_feature_ids}')
for feature_id in sorted(token_feature_ids):
cls = SpacyFeatureVectorizer.VECTORIZERS[feature_id]
inst = cls(name=f'spacy vectorizer: {feature_id}',
config_factory=self.config_factory,
feature_id=feature_id,
torch_config=self.torch_config,
vocab=self.doc_parser.model.vocab)
vectorizers[feature_id] = inst
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'created {len(vectorizers)} vectorizers')
return vectorizers
def deallocate(self):
if self._spacy_vectorizers.is_set():
vecs = self.spacy_vectorizers
for vec in vecs.values():
vec.deallocate()
vecs.clear()
super().deallocate() | zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/vectorize/manager.py | manager.py |
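# End to end usage sketch (hypothetical configuration; assumes an application
# config that defines a ``FeatureDocumentVectorizerManager`` under the section
# name ``fdvec_manager`` along with an 'enum' vectorizer):
#
#   manager = config_factory('fdvec_manager')
#   doc = manager.parse('The dog ran. It was fast.')
#   vec = manager['enum']
#   arr = vec.decode(vec.encode(doc))
#   # when token_length == -1, the token dimension is doc.max_sentence_len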
__author__ = 'Paul Landes'
from typing import Tuple, Any
from dataclasses import dataclass, field
import sys
import math
import itertools as it
from spacy.vocab import Vocab
from torch import Tensor
from zensols.deeplearn import TorchConfig
from zensols.deeplearn.vectorize import FeatureVectorizer
@dataclass
class SpacyFeatureVectorizer(FeatureVectorizer):
"""This normalizes feature IDs of parsed token features in to a number between
[0, 1]. This is useful for normalized feature vectors as input to neural
networks. Input to this would be strings like ``token.ent_`` found on a
:class:`zensols.nlp.feature.TokenAttributes` instance.
The class is also designed to create features using indexes, so there are
methods to resolve to a unique ID from an identifier.
Instances of this class behave like a ``dict``.
All symbols are taken from :obj:`spacy.glossary.GLOSSARY`.
:param vocab: the vocabulary used for ``from_spacy`` to compute the
normalized feature from the spacy ID (i.e. ``token.ent_``,
``token.tag_`` etc.)
:see: :obj:`spacy.glossary.GLOSSARY`
:see: :class:`zensols.nlp.feature.TokenAttributes`
"""
torch_config: TorchConfig = field()
"""The torch configuration used to create tensors."""
vocab: Vocab = field()
"""The spaCy vocabulary used to create IDs from strings.
:see meth:`id_from_spacy_symbol`
"""
def __post_init__(self):
super().__post_init__()
self.as_list = tuple(self.SYMBOLS.split())
syms = dict(zip(self.as_list, it.count()))
self.symbol_to_id = syms
self.id_to_symbol = dict(map(lambda x: (x[1], x[0]), syms.items()))
n = len(syms)
q = n - 1
arr = self._to_hot_coded_matrix(n)
rows = zip(syms, map(lambda i: arr[i], range(n)))
self.symbol_to_vector = dict(rows)
self.symbol_to_norm = {k: syms[k] / q for k in syms}
def _is_settable(self, name: str, value: Any) -> bool:
return False
def _to_hot_coded_matrix(self, rows: int):
arr = self.torch_config.zeros((rows, rows))
for i in range(rows):
arr[i][i] = 1
return arr
def _to_binary_matrix(self, rows: int):
cols = math.ceil(math.log2(rows))
arr = self.torch_config.empty((rows, rows))
for i in range(rows):
sbin = '{0:b}'.format(i).zfill(cols)
arr[i] = self.torch_config.from_iterable(map(float, sbin))
return arr
def _get_shape(self) -> Tuple[int, int]:
return 1, len(self.as_list)
def transform(self, symbol: str) -> Tensor:
return self.symbol_to_vector[symbol]
def dist(self, symbol: str) -> float:
"""Return a normalized feature float if ``symbol`` is found.
:return: a normalized value between [0 - 1] or ``None`` if the symbol
isn't found
"""
return self.symbol_to_norm[symbol]
def id_from_spacy_symbol(self, id: int, default: int = -1) -> str:
"""Return the Spacy text symbol for it's ID (``token.ent`` -> ``token.ent_``).
"""
strs = self.vocab.strings
if id in strs:
return strs[id]
else:
return default
def from_spacy(self, id: int) -> Tensor:
"""Return a binary feature from a Spacy ID or ``None`` if it doesn't have a
mapping the ID.
"""
symbol = self.id_from_spacy_symbol(id)
return self.symbol_to_vector.get(symbol, None)
def id_from_spacy(self, id: int, default: int = -1) -> int:
"""Return the ID of this vectorizer for the Spacy ID or -1 if not found.
"""
symbol = self.id_from_spacy_symbol(id)
return self.symbol_to_id.get(symbol, default)
def write(self, writer=sys.stdout):
"""Pretty print a human readable representation of this feature vectorizer.
"""
syms = self.symbol_to_id
writer.write(f'{self.description}:\n')
for k in sorted(syms.keys()):
writer.write(f' {k} => {syms[k]} ({self.transform(k)})\n')
def __str__(self):
return f'{self.description} ({self.feature_id})'
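# A minimal illustration of the symbol bookkeeping built in ``__post_init__``
# (symbols hypothetical): for SYMBOLS = 'A B C', ``symbol_to_id`` is
# {'A': 0, 'B': 1, 'C': 2}, ``transform('B')`` returns the one hot row
# [0., 1., 0.] and ``dist('C')`` returns 2 / (3 - 1) = 1.0. The normalization
# in isolation:
#
#   import itertools as it
#   syms = dict(zip('A B C'.split(), it.count()))
#   norm = {k: v / (len(syms) - 1) for k, v in syms.items()}
#   # => {'A': 0.0, 'B': 0.5, 'C': 1.0}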
@dataclass
class NamedEntityRecognitionFeatureVectorizer(SpacyFeatureVectorizer):
"""A feature vectorizor for NER tags.
:see: :class:`.SpacyFeatureVectorizer`
"""
DESCRIPTION = 'named entity recognition'
LANG = 'en'
FEATURE_ID = 'ent'
SYMBOLS = """PERSON NORP FACILITY FAC ORG GPE LOC PRODUCT EVENT WORK_OF_ART LAW LANGUAGE
DATE TIME PERCENT MONEY QUANTITY ORDINAL CARDINAL PER MISC"""
@dataclass
class DependencyFeatureVectorizer(SpacyFeatureVectorizer):
"""A feature vectorizor for dependency head trees.
:see: :class:`.SpacyFeatureVectorizer`
"""
DESCRIPTION = 'dependency'
LANG = 'en'
FEATURE_ID = 'dep'
SYMBOLS = """acl acomp advcl advmod agent amod appos attr aux auxpass case cc ccomp clf
complm compound conj cop csubj csubjpass dative dep det discourse dislocated
dobj expl fixed flat goeswith hmod hyph infmod intj iobj list mark meta neg
nmod nn npadvmod nsubj nsubjpass nounmod npmod num number nummod oprd obj obl
orphan parataxis partmod pcomp pobj poss possessive preconj prep prt punct
quantmod rcmod relcl reparandum root vocative xcomp ROOT"""
@dataclass
class PartOfSpeechFeatureVectorizer(SpacyFeatureVectorizer):
"""A feature vectorizor for POS tags.
:see: :class:`.SpacyFeatureVectorizer`
"""
DESCRIPTION = 'part of speech'
LANG = 'en'
FEATURE_ID = 'tag'
SYMBOLS = """ADJ ADP ADV AUX CONJ CCONJ DET INTJ NOUN NUM PART PRON PROPN PUNCT SCONJ SYM
VERB X EOL SPACE . , -LRB- -RRB- `` " ' $ # AFX CC CD DT EX FW HYPH IN JJ JJR
JJS LS MD NIL NN NNP NNPS NNS PDT POS PRP PRP$ RB RBR RBS RP TO UH VB VBD VBG
VBN VBP VBZ WDT WP WP$ WRB SP ADD NFP GW XX BES HVS NP PP VP ADVP ADJP SBAR PRT
PNP"""
SpacyFeatureVectorizer.VECTORIZERS = \
{cls.FEATURE_ID: cls for cls in (NamedEntityRecognitionFeatureVectorizer,
DependencyFeatureVectorizer,
PartOfSpeechFeatureVectorizer)}
"""The default set of spaCy feature vectorizers.
""" | zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/vectorize/spacy.py | spacy.py |
__author__ = 'Paul Landes'
from typing import List, Tuple
from dataclasses import dataclass, field, asdict
import logging
import sys
import copy as cp
from io import TextIOBase
from zensols.config import Writable
import torch
from torch import nn
from zensols.persist import persisted
from zensols.deeplearn import (
ActivationNetworkSettings,
DropoutNetworkSettings,
BatchNormNetworkSettings,
)
from zensols.deeplearn.layer import (
LayerError, ConvolutionLayerFactory, MaxPool1dFactory
)
from zensols.deeplearn.model import BaseNetworkModule
from . import EmbeddingNetworkModule
logger = logging.getLogger(__name__)
@dataclass
class DeepConvolution1dNetworkSettings(ActivationNetworkSettings,
DropoutNetworkSettings,
Writable):
"""Configurable repeated series of 1-dimension convolution, pooling, batch norm
and activation layers. This layer is specifically designed for natural
language processing task, which is why this configuration includes
parameters for token counts.
Each layer repeat consists of::
1. convolution
2. max pool
3. batch (optional)
4. activation
This class is used directly after embedding (and in conjunction with) a
layer class that extends :class:`.EmbeddingNetworkModule`. The lifecycle
of this class starts with being instantiated (usually configured using a
:class:`~zensols.config.factory.ImportConfigFactory`), then cloned with
:meth:`clone` during the initialization on the layer from which it's used.
:param token_length: the number of tokens processed through the layer (used
as the width kernel parameter ``W``)
:param embedding_dimension: the dimension of the embedding (word vector)
layer (height dimension ``H`` and the kernel
parameter ``F``)
:param token_kernel: the size of the kernel in number of tokens (width
dimension of kernel parameter ``F``)
:param n_filters: number of filters to use, aka filter depth/volume (``K``)
:param stride: the stride, which is the number of cells to skip for each
convolution (``S``)
:param padding: the zero'd number of cells on the ends of tokens X
embedding neurons (``P``)
:param pool_token_kernel: like ``token_kernel`` but in the pooling layer
:param pool_stride: like ``stride`` but in the pooling layer
:param pool_padding: like ``padding`` but in the pooling layer
:param repeats: number of times the convolution, max pool, batch,
activation layers are repeated
:param batch_norm_d: the dimension of the batch norm (should be ``1``) or
``None`` to disable
:see: :class:`.DeepConvolution1d`
:see :class:`.EmbeddingNetworkModule`
"""
token_length: int = field(default=None)
embedding_dimension: int = field(default=None)
token_kernel: int = field(default=2)
stride: int = field(default=1)
n_filters: int = field(default=1)
padding: int = field(default=1)
pool_token_kernel: int = field(default=2)
pool_stride: int = field(default=1)
pool_padding: int = field(default=0)
repeats: int = field(default=1)
batch_norm_d: int = field(default=None)
def _assert_module(self):
"""Raise an exception if we don't have an embedding module configured.
"""
if not hasattr(self, 'module'):
raise LayerError('Not created with embedding module')
@property
@persisted('_layer_factory')
def layer_factory(self) -> ConvolutionLayerFactory:
"""Return the factory used to create convolution layers.
"""
self._assert_module()
return ConvolutionLayerFactory(
width=self.token_length,
height=self.embedding_dimension,
n_filters=self.n_filters,
kernel_filter=(self.token_kernel, self.embedding_dimension),
stride=self.stride,
padding=self.padding)
@property
@persisted('_pool_factory')
def pool_factory(self) -> MaxPool1dFactory:
"""Return the factory used to create max 1D pool layers.
"""
self._assert_module()
return MaxPool1dFactory(
layer_factory=self.layer_factory,
kernel_filter=self.pool_token_kernel,
stride=self.pool_stride,
padding=self.pool_padding)
def clone(self, module: EmbeddingNetworkModule, **kwargs):
"""Clone this network settings configuration with a different embedding
settings.
:param module: the embedding settings to use in the clone
:param kwargs: arguments as attributes on the clone
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'cloning module from module with {kwargs}')
if hasattr(self, 'module'):
raise LayerError('Not nascent: module already set')
params = {
'token_length': module.embedding.token_length,
'embedding_dimension': module.embedding_output_size,
'module': module,
}
params.update(kwargs)
clone = cp.copy(self)
clone.__dict__.update(params)
return clone
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line('embedding layer factory:', depth, writer)
self._write_dict(asdict(self), depth + 1, writer)
self._write_line('convolution layer factory:', depth, writer)
self._write_dict(asdict(self.layer_factory),
depth + 1, writer)
def get_module_class_name(self) -> str:
return __name__ + '.DeepConvolution1d'
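# A clone usage sketch (attribute names are hypothetical): the settings
# instance created from the application config is nascent (no ``module``
# attribute); the embedding module clones it during its own initialization so
# the convolution width and height track the token length and embedding
# dimension:
#
#   # inside an EmbeddingNetworkModule subclass's __init__ (sketch):
#   conv_settings = self.net_settings.conv_settings.clone(self)
#   # conv_settings.token_length == self.embedding.token_length
#   # conv_settings.embedding_dimension == self.embedding_output_size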
class DeepConvolution1d(BaseNetworkModule):
"""Configurable repeated series of 1-dimension convolution, pooling, batch norm
and activation layers. See :meth:`get_layers`.
:see: :class:`.DeepConvolution1dNetworkSettings`
"""
MODULE_NAME = 'conv'
def __init__(self, net_settings: DeepConvolution1dNetworkSettings,
logger: logging.Logger):
"""Initialize the deep convolution layer.
*Implementation note*: all layers are stored sequentially using a
:class:`torch.nn.Sequential` to get normal weight persistence on torch
save/loads.
:param net_settings: the deep convolution layer configuration
:param logger: the logger to use for the forward process in this layer
"""
super().__init__(net_settings, logger)
layers = []
self.layer_sets = []
self._create_layers(layers, self.layer_sets)
self.seq_layers = nn.Sequential(*layers)
def _create_layers(self, layers: List[nn.Module],
layer_sets: List[Tuple[nn.Module, nn.Module, nn.Module]]):
"""Create the convolution, max pool and batch norm layers used to forward
through.
:param layers: the layers to populate used in an
:class:`torch.nn.Sequential`
:param layer_sets: tuples of (conv, pool, batch_norm) layers
"""
pool_factory: MaxPool1dFactory = self.net_settings.pool_factory
conv_factory: ConvolutionLayerFactory = pool_factory.layer_factory
repeats = self.net_settings.repeats
for n_set in range(repeats):
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'conv_factory: {conv_factory}')
self._debug(f'pool factory: {pool_factory}')
pool = pool_factory.create_pool()
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'pool: {pool}')
conv = conv_factory.conv1d()
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'conv: {conv}')
if self.net_settings.batch_norm_d is not None:
batch_norm = BatchNormNetworkSettings.create_batch_norm_layer(
self.net_settings.batch_norm_d, pool_factory.out_shape[0])
else:
batch_norm = None
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'batch_norm: {batch_norm}')
layer_set = (conv, pool, batch_norm)
layer_sets.append(layer_set)
layers.extend(layer_set)
pool_out = pool_factory.flatten_dim
if n_set < repeats:
conv_factory.width = pool_out
conv_factory.height = 1
conv_factory.kernel_filter = (conv_factory.kernel_filter[0], 1)
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'pool out: {pool_out}')
self.out_features = pool_out
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'out features: {self.out_features}')
def deallocate(self):
super().deallocate()
self._deallocate_attribute('seq_layers')
def get_layers(self) -> Tuple[Tuple[nn.Module, nn.Module, nn.Module]]:
"""Return a tuple of layer sets, with each having the form: ``(convolution, max
pool, batch_norm)``. The ``batch_norm`` element is ``None`` if not
configured.
"""
return tuple(self.seq_layers)
def _forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward convolution, batch normalization, pool, activation and dropout for
those layers that are configured.
:see: `Sunghean et al <http://mipal.snu.ac.kr/images/1/16/Dropout_ACCV2016.pdf>`_
:see: `Ioffe et al <https://arxiv.org/pdf/1502.03167.pdf>`_
"""
layer_sets = self.layer_sets
ls_len = len(layer_sets)
for i, (conv, pool, batch_norm) in enumerate(layer_sets):
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'layer set iter: {i}')
x = conv(x)
self._shape_debug('conv', x)
if batch_norm is not None:
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'batch norm: {batch_norm}')
x = batch_norm(x)
x = x.view(x.shape[0], 1, -1)
self._shape_debug('flatten', x)
x = pool(x)
self._shape_debug('pool', x)
self._forward_activation(x)
self._forward_dropout(x)
if i < ls_len - 1:
x = x.unsqueeze(3)
self._shape_debug('unsqueeze', x)
        return x
# source: zensols/deepnlp/layer/conv.py (zensols.deepnlp 1.10.1)
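# A minimal inspection sketch for the :class:`DeepConvolution1d` module above
# (hypothetical usage; the module is normally instantiated by the framework
# from a configured ``DeepConvolution1dNetworkSettings``): it shows the layer
# set structure built by ``_create_layers`` and the flattened output size.
def _inspect_deep_conv(conv_module: DeepConvolution1d) -> None:
    # each entry is a (convolution, max pool, batch norm) triple; the batch
    # norm entry is None when ``batch_norm_d`` is not configured
    for conv, pool, batch_norm in conv_module.layer_sets:
        print(type(conv).__name__, type(pool).__name__, batch_norm)
    # the flattened dimension produced by the final pooling layer
    print(conv_module.out_features)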
__author__ = 'Paul Landes'
from typing import Tuple, List, Optional, Union
from dataclasses import dataclass, field
import logging
import torch
from torch import Tensor
from zensols.deeplearn import ModelError, DatasetSplitType
from zensols.deeplearn.model import (
SequenceNetworkModule, SequenceNetworkContext, SequenceNetworkOutput
)
from zensols.deeplearn.batch import Batch
from zensols.deeplearn.layer import (
RecurrentCRFNetworkSettings,
RecurrentCRF,
)
from zensols.deepnlp.layer import (
EmbeddingNetworkSettings,
EmbeddingNetworkModule,
)
logger = logging.getLogger(__name__)
@dataclass
class EmbeddedRecurrentCRFSettings(EmbeddingNetworkSettings):
    """A utility container settings class for embedded recurrent CRF network
    models.
"""
recurrent_crf_settings: RecurrentCRFNetworkSettings = field()
"""The RNN settings (configure this with an LSTM for (Bi)LSTM CRFs)."""
mask_attribute: str = field()
"""The vectorizer attribute name for the mask feature."""
tensor_predictions: bool = field(default=False)
"""Whether or not to return predictions as tensors. There are currently no
identified use cases to do this as setting this to ``True`` will inflate
    performance metrics.  This is because the batch iterator will create a
    tensor for the entire batch, adding many default padded values that are
    then counted as results.
"""
    use_crf: bool = field(default=True)
    """Whether to use the CRF output layer; if ``False``, decoding uses the
    recurrent network output with the loss computed from the context's
    criterion."""
def get_module_class_name(self) -> str:
return __name__ + '.EmbeddedRecurrentCRF'
class EmbeddedRecurrentCRF(EmbeddingNetworkModule, SequenceNetworkModule):
    """A recurrent neural network composed of an embedding input, a recurrent
network, and a linear conditional random field output layer. When
configured with an LSTM, this becomes a (Bi)LSTM-CRF. More specifically,
this network has the following:
1. Input embeddings mapped from tokens.
2. Recurrent network (i.e. LSTM).
3. Fully connected feed forward deep linear layer(s) as the decoder.
4. Linear chain conditional random field (CRF) layer.
5. Output the labels.
"""
MODULE_NAME = 'emb-recur-crf'
def __init__(self, net_settings: EmbeddedRecurrentCRFSettings,
sub_logger: logging.Logger = None):
super().__init__(net_settings, sub_logger)
ns = self.net_settings
rc = ns.recurrent_crf_settings
rc.input_size = self.embedding_output_size
self.mask_attribute = ns.mask_attribute
if self.logger.isEnabledFor(logging.DEBUG):
self.logger.debug(f'recur emb settings: {rc}')
self.recurcrf: RecurrentCRF = rc.create_module(
sub_logger=sub_logger, use_crf=ns.use_crf)
def deallocate(self):
super().deallocate()
self.recurcrf.deallocate()
def _get_mask(self, batch: Batch) -> Tensor:
mask = batch[self.mask_attribute]
self._shape_debug('mask', mask)
return mask
def _forward_train_with_crf(self, batch: Batch) -> Tensor:
labels = batch.get_labels()
self._shape_debug('labels', labels)
mask = self._get_mask(batch)
self._shape_debug('mask', mask)
x = super()._forward(batch)
self._shape_debug('super emb', x)
x = self.recurcrf.forward(x, mask, labels)
self._shape_debug('recur', x)
return x
    def _forward_train_no_crf(self, batch: Batch,
                              context: SequenceNetworkContext) -> \
            Tuple[List[List[int]], Tensor]:
recur_crf: RecurrentCRF = self.recurcrf
# no implementation yet for prediction sans-training
labels: Optional[Tensor] = batch.get_labels()
emb: Tensor = EmbeddingNetworkModule._forward(self, batch)
self._shape_debug('embedding', emb)
logits: Tensor = recur_crf.forward_recur_decode(emb)
self._shape_debug('logits', logits)
logits_flat: Tensor = logits.flatten(0, 1)
labels_flat: Tensor = labels.flatten(0, 1)
self._shape_debug('flat logits', logits_flat)
self._shape_debug('flat labels', labels_flat)
loss: Tensor = context.criterion(logits_flat, labels_flat)
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'loss: {loss}')
pred_labels: Tensor = logits.argmax(dim=2)
self._shape_debug('predictions (agg)', pred_labels)
mask: Tensor = self._get_mask(batch)
assert len(mask.size()) == 2
pred_lsts: List[List[int]] = []
for bix in range(mask.size(0)):
bmask = mask[bix]
plst = torch.masked_select(pred_labels[bix], bmask)
pred_lsts.append(plst.tolist())
return pred_lsts, loss
    def _decode(self, batch: Batch, add_loss: bool) -> \
            Tuple[Tensor, Tensor, Tensor]:
loss: Tensor = None
mask: Tensor = self._get_mask(batch)
self._shape_debug('mask', mask)
x: Tensor = super()._forward(batch)
self._shape_debug('super emb', x)
if add_loss:
labels = batch.get_labels()
loss = self.recurcrf.forward(x, mask, labels)
x, score = self.recurcrf.decode(x, mask)
self._debug(f'recur {len(x)}')
self._shape_debug('score', score)
return x, loss, score
def _map_labels(self, batch: Batch, context: SequenceNetworkContext,
labels: Union[List[List[int]], Tensor]) -> List[List[int]]:
return labels
def _shape_or_list_debug(self, msg: str,
data: Union[List[List[int]], Tensor],
full: bool = False):
        if self.logger.isEnabledFor(logging.DEBUG):
if data is None:
self.logger.debug(f'{msg}: None')
else:
if isinstance(data, Tensor):
self._shape_debug(msg, data)
else:
dtype = 'none'
if len(data) > 0:
dtype = type(data[0])
if dtype == list:
dtype = f'{dtype} ({len(data)})'
self.logger.debug(
f'{msg}: length={len(data)}, type={dtype}')
if full:
from zensols.deeplearn import printopts
with printopts(profile='full'):
self.logger.debug('full data:\n' + str(data))
def _forward(self, batch: Batch, context: SequenceNetworkContext) -> \
SequenceNetworkOutput:
use_crf = self.net_settings.use_crf
split_type: DatasetSplitType = context.split_type
preds: List[List[int]] = None
labels: Optional[Tensor] = batch.get_labels()
loss: Tensor = None
score: Tensor = None
if self.logger.isEnabledFor(logging.DEBUG):
            self.logger.debug(f'forward on split: {context.split_type}')
if context.split_type != DatasetSplitType.train and self.training:
raise ModelError(
f'Attempting to use split {split_type} while training')
if context.split_type == DatasetSplitType.train:
if use_crf:
loss = self._forward_train_with_crf(batch)
else:
preds, loss = self._forward_train_no_crf(batch, context)
elif context.split_type == DatasetSplitType.validation:
if use_crf:
preds, loss, score = self._decode(batch, True)
else:
preds, loss = self._forward_train_no_crf(batch, context)
elif context.split_type == DatasetSplitType.test:
if use_crf:
preds, _, score = self._decode(batch, False)
loss = batch.torch_config.singleton([0], dtype=torch.float32)
else:
preds, loss = self._forward_train_no_crf(batch, context)
else:
raise ModelError(f'Unknown data split type: {split_type}')
# list of lists of the predictions, which are the CRF output when
# enabled
if preds is not None:
preds = self._map_labels(batch, context, preds)
# padded tensor of shape (batch, data i.e. token length)
if labels is not None:
labels = self._map_labels(batch, context, labels)
self._shape_or_list_debug('output preds', preds)
self._shape_or_list_debug('output labels', labels)
out = SequenceNetworkOutput(preds, loss, score, labels)
if preds is not None and labels is not None and len(labels.size()) > 1:
out.righsize_labels(preds)
        return out
# source: zensols/deepnlp/layer/embrecurcrf.py (zensols.deepnlp 1.10.1)
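# A minimal inference sketch for the :class:`EmbeddedRecurrentCRF` module
# above (hypothetical; the model executor framework normally drives this
# module): for the test split with ``use_crf`` enabled, the forward pass
# decodes with the CRF and returns a ``SequenceNetworkOutput`` holding the
# per-sentence label predictions unpadded by the mask.
def _predict_sequence(model: EmbeddedRecurrentCRF, batch: Batch,
                      context: SequenceNetworkContext) -> SequenceNetworkOutput:
    # the module must not be in training mode for a non-train split (see
    # ``_forward``), which otherwise raises a ``ModelError``
    assert not model.training
    return model._forward(batch, context)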
__author__ = 'Paul Landes'
from typing import Dict, List, Tuple
from dataclasses import dataclass, field
from typing import Callable
import logging
import torch
from torch import nn
from torch import Tensor
from zensols.persist import Deallocatable
from zensols.deeplearn import ModelError
from zensols.deeplearn.vectorize import FeatureVectorizer
from zensols.deeplearn.model import BaseNetworkModule, DebugModule
from zensols.deeplearn.layer import LayerError
from zensols.deeplearn.batch import (
Batch,
BatchMetadata,
BatchFieldMetadata,
MetadataNetworkSettings,
)
from zensols.deepnlp.vectorize import FeatureDocumentVectorizerManager
from zensols.deepnlp.embed import WordEmbedModel
from zensols.deepnlp.vectorize import (
TextFeatureType,
FeatureDocumentVectorizer,
EmbeddingFeatureVectorizer,
)
logger = logging.getLogger(__name__)
class EmbeddingLayer(DebugModule, Deallocatable):
"""A class used as an input layer to provide word embeddings to a deep neural
network.
**Important**: you must always check for attributes in
:meth:`~zensols.persist.dealloc.Deallocatable.deallocate` since it might be
called more than once (i.e. from directly deallocating and then from the
factory).
    **Implementation note**: No dataclasses are usable since pytorch is picky
about initialization order.
"""
def __init__(self, feature_vectorizer_manager: FeatureDocumentVectorizerManager,
embedding_dim: int, sub_logger: logging.Logger = None,
trainable: bool = False):
"""Initialize.
:param feature_vectorizer_manager: the feature vectorizer manager that
manages this instance
:param embedding_dim: the vector dimension of the embedding
:param trainable: ``True`` if the embedding layer is to be trained
"""
super().__init__(sub_logger)
self.feature_vectorizer_manager = feature_vectorizer_manager
self.embedding_dim = embedding_dim
self.trainable = trainable
@property
def token_length(self):
return self.feature_vectorizer_manager.token_length
@property
def torch_config(self):
return self.feature_vectorizer_manager.torch_config
def deallocate(self):
super().deallocate()
if hasattr(self, 'emb'):
if logger.isEnabledFor(logging.DEBUG):
em = '<deallocated>'
if hasattr(self, 'embed_model'):
em = self.embed_model.name
self._debug(f'deallocating: {em} and {type(self.emb)}')
self._try_deallocate(self.emb)
del self.emb
if hasattr(self, 'embed_model'):
del self.embed_model
class TrainableEmbeddingLayer(EmbeddingLayer):
    """An embedding layer that can be trained (i.e. gradients are kept on its
    parameters); when frozen, the embedding weights are excluded from saved
    checkpoints (see :meth:`state_dict`).
"""
def reset_parameters(self):
if self.trainable:
self.emb.load_state_dict({'weight': self.vecs})
def _get_emb_key(self, prefix: str):
return f'{prefix}emb.weight'
def state_dict(self, *args, destination=None, prefix='', keep_vars=False):
state = super().state_dict(
*args,
destination=destination,
prefix=prefix,
keep_vars=keep_vars)
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'state_dict: trainable: {self.trainable}')
if not self.trainable:
emb_key = self._get_emb_key(prefix)
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'state_dict: embedding key: {emb_key}')
if emb_key is not None:
if emb_key not in state:
raise ModelError(f'No key {emb_key} in {state.keys()}')
arr = state[emb_key]
if arr is not None:
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'state_dict: emb state: {arr.shape}')
assert arr.shape == self.embed_model.matrix.shape
state[emb_key] = None
return state
def _load_from_state_dict(self, state_dict, prefix, *args, **kwargs):
if not self.trainable:
emb_key = self._get_emb_key(prefix)
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'load_state_dict: {emb_key}')
if emb_key is not None:
state_dict[emb_key] = self.vecs
super()._load_from_state_dict(state_dict, prefix, *args, **kwargs)
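# A minimal sketch (hypothetical; ``layer`` is a concrete subclass instance
# such as a word vector embedding layer) of the behavior documented above:
# when frozen (``trainable`` is ``False``) the embedding weight is nulled in
# the saved state so checkpoints do not duplicate the potentially huge
# embedding matrix; on load it is re-injected from the in-memory ``vecs``
# tensor before the normal torch loading takes place.
def _frozen_embedding_checkpoint(layer: TrainableEmbeddingLayer) -> None:
    state = layer.state_dict()
    if not layer.trainable:
        # 'emb.weight' is the key produced by ``_get_emb_key('')``
        assert state['emb.weight'] is None
    # ``_load_from_state_dict`` restores the weight from ``layer.vecs``
    layer.load_state_dict(state)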
@dataclass
class EmbeddingNetworkSettings(MetadataNetworkSettings):
"""A utility container settings class for models that use an embedding input
layer that inherit from :class:`.EmbeddingNetworkModule`.
"""
embedding_layer: EmbeddingLayer = field()
"""The word embedding layer used to vectorize."""
def get_module_class_name(self) -> str:
return __name__ + '.EmbeddingNetworkModule'
@dataclass
class _EmbeddingContainer(object):
    """Contains the matching of vectorizer, embedding layer and field mapping.
"""
field_meta: BatchFieldMetadata = field()
"""The mapping that has the batch attribute name for the embedding."""
vectorizer: FeatureDocumentVectorizer = field()
"""The vectorizer used to encode the batch data."""
embedding_layer: EmbeddingLayer = field()
"""The word embedding layer used to vectorize."""
@property
def dim(self) -> int:
"""The embedding's dimension."""
return self.embedding_layer.embedding_dim
@property
def attr(self) -> str:
"""The attribute name of the layer's mapping."""
return self.field_meta.field.attr
    def get_embedding_tensor(self, batch: Batch) -> Tensor:
        """Get the embedding (or indexes, depending on how it was vectorized)."""
return batch[self.attr]
def __str__(self) -> str:
return self.attr
def __repr__(self) -> str:
return self.__str__()
class EmbeddingNetworkModule(BaseNetworkModule):
"""An module that uses an embedding as the input layer. This class uses an
instance of :class:`.EmbeddingLayer` provided by the network settings
configuration for resolving the embedding during the *forward* phase.
The following attributes are created and/or set during initialization:
* ``embedding`` the :class:`.EmbeddingLayer` instance used get the input
embedding tensors
* ``embedding_attribute_names`` the name of the word embedding vectorized
feature attribute names (usually one, but possible to have more)
    * ``embedding_output_size`` the output size of the embedding layer; note
      this includes any features layered/concatenated given in all token level
      vectorizers' configuration
* ``join_size`` if a join layer is to be used, this has the size of the
part of the join layer that will have the document level features
* ``token_attribs`` the token level feature names (see
:meth:`forward_token_features`)
* ``doc_attribs`` the doc level feature names (see
:meth:`forward_document_features`)
The initializer adds additional attributes conditional on the
:class:`.EmbeddingNetworkSettings` instance's
:obj:`~zensols.deeplearn.batch.meta.MetadataNetworkSettings.batch_metadata`
property (type :class:`~zensols.deeplearn.batch.meta.BatchMetadata`). For
each meta data field's vectorizer that extends class
:class:`.FeatureDocumentVectorizer` the following is set on this
instance based on the value of ``feature_type`` (of type
:class:`.TextFeatureType`):
* :obj:`~.TextFeatureType.TOKEN`: ``embedding_output_size`` is
increased by the vectorizer's shape
* :obj:`~.TextFeatureType.DOCUMENT`: ``join_size`` is increased
by the vectorizer's shape
Fields can be filtered by passing a filter function to the initializer.
See :meth:`__init__` for more information.
"""
MODULE_NAME = 'embed'
def __init__(self, net_settings: EmbeddingNetworkSettings,
module_logger: logging.Logger = None,
filter_attrib_fn: Callable[[BatchFieldMetadata], bool] = None):
"""Initialize the embedding layer.
:param net_settings: the embedding layer configuration
:param logger: the logger to use for the forward process in this layer
:param filter_attrib_fn:
if provided, called with a :class:`.BatchFieldMetadata` for each
field returning ``True`` if the batch field should be retained and
used in the embedding layer (see class docs); if ``None`` all
fields are considered
"""
super().__init__(net_settings, module_logger)
self.embedding_output_size: int = 0
self.join_size: int = 0
self.token_attribs: List[str] = []
self.doc_attribs: List[str] = []
self._embedding_containers: List[_EmbeddingContainer] = []
self._embedding_layers = self._map_embedding_layers()
self._embedding_sequence = nn.Sequential(
*tuple(self._embedding_layers.values()))
field: BatchFieldMetadata
meta: BatchMetadata = self.net_settings.batch_metadata
fba: Dict[str, BatchFieldMetadata] = meta.fields_by_attribute
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'batch field metadata: {fba}')
for name in sorted(fba.keys()):
field_meta: BatchFieldMetadata = fba[name]
if filter_attrib_fn is not None and \
not filter_attrib_fn(field_meta):
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'skipping: {name}')
continue
vec: FeatureVectorizer = field_meta.vectorizer
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'{name} -> {field_meta}')
if isinstance(vec, FeatureDocumentVectorizer):
try:
self._add_field(vec, field_meta)
except Exception as e:
raise ModelError(
f'Could not create field {field_meta}: {e}') from e
if len(self._embedding_containers) == 0:
raise LayerError('No embedding vectorizer feature type found')
    def _map_embedding_layers(self) -> Dict[int, EmbeddingLayer]:
        """Return a mapping of embedding layers keyed by the in-memory location
        (:func:`id`) of their respective embedding models.
"""
els: Tuple[EmbeddingLayer]
els = self.net_settings.embedding_layer
if not isinstance(els, (tuple, list)):
els = [els]
return {id(el.embed_model): el for el in els}
def _add_field(self, vec: FeatureDocumentVectorizer,
                   field_meta: BatchFieldMetadata):
        """Add a batch metadata field and its respective vectorizer to class
        member data structures.
"""
attr = field_meta.field.attr
if vec.feature_type == TextFeatureType.TOKEN:
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'adding tok type {attr}: {vec.shape[2]}')
self.embedding_output_size += vec.shape[2]
self.token_attribs.append(attr)
elif vec.feature_type == TextFeatureType.DOCUMENT:
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'adding doc type {attr} ' +
f'({field_meta.shape}/{vec.shape})')
self.join_size += field_meta.shape[1]
self.doc_attribs.append(attr)
elif vec.feature_type == TextFeatureType.EMBEDDING:
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'adding embedding: {attr}')
embedding_layer: EmbeddingLayer = \
self._embedding_layers.get(id(vec.embed_model))
if embedding_layer is None:
raise ModelError(f'No embedding layer found for {attr}')
if self.logger.isEnabledFor(logging.INFO):
we_model: WordEmbedModel = embedding_layer.embed_model
self.logger.info(f'embeddings: {we_model.name}')
ec = _EmbeddingContainer(field_meta, vec, embedding_layer)
self._embedding_containers.append(ec)
self.embedding_output_size += ec.dim
    def get_embedding_tensors(self, batch: Batch) -> Tuple[Tensor]:
        """Get the embedding tensors (or indexes, depending on how it was
        vectorized) from a batch.
:param batch: contains the vectorized embeddings
:return: the vectorized embedding as tensors, one for each embedding
"""
return tuple(map(lambda ec: batch[ec.attr], self._embedding_containers))
@property
def embedding_dimension(self) -> int:
"""Return the dimension of the embeddings, which doesn't include any additional
token or document features potentially added.
"""
return sum(map(lambda ec: ec.dim, self._embedding_containers))
def vectorizer_by_name(self, name: str) -> FeatureVectorizer:
"""Utility method to get a vectorizer by name.
:param name: the name of the vectorizer as given in the vectorizer
manager
"""
meta: BatchMetadata = self.net_settings.batch_metadata
field_meta: BatchFieldMetadata = meta.fields_by_attribute[name]
vec: FeatureVectorizer = field_meta.vectorizer
return vec
def _forward(self, batch: Batch) -> Tensor:
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'batch: {batch}')
x = self.forward_embedding_features(batch)
x = self.forward_token_features(batch, x)
x = self.forward_document_features(batch, x)
return x
def _forward_embedding_layer(self, ec: _EmbeddingContainer,
batch: Batch) -> Tensor:
decoded: bool = False
is_tok_vec: bool = isinstance(ec.vectorizer, EmbeddingFeatureVectorizer)
x: Tensor = ec.get_embedding_tensor(batch)
self._shape_debug('input', x)
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'vectorizer type: {type(ec.vectorizer)}')
if is_tok_vec:
decoded = ec.vectorizer.decode_embedding
if self.logger.isEnabledFor(logging.DEBUG):
self._debug(f'is embedding already decoded: {decoded}')
if not decoded:
x = ec.embedding_layer(x)
return x
    def forward_embedding_features(self, batch: Batch) -> Tensor:
        """Use the embedding layer to return the word embedding tensors.
"""
arr: Tensor
arrs: List[Tensor] = []
ec: _EmbeddingContainer
for ec in self._embedding_containers:
x: Tensor = self._forward_embedding_layer(ec, batch)
self._shape_debug(f'decoded sub embedding ({ec}):', x)
arrs.append(x)
if len(arrs) == 1:
arr = arrs[0]
else:
arr = torch.concat(arrs, dim=-1)
self._shape_debug(f'decoded concat embedding ({ec}):', arr)
return arr
def forward_token_features(self, batch: Batch, x: Tensor = None) -> Tensor:
"""Concatenate any token features given by the vectorizer configuration.
:param batch: contains token level attributes to concatenate to ``x``
:param x: if given, the first tensor to be concatenated
"""
self._shape_debug('forward token features', x)
arrs = []
if x is not None:
self._shape_debug('adding passed token features', x)
arrs.append(x)
for attrib in self.token_attribs:
feats = batch.attributes[attrib]
self._shape_debug(f"token attrib '{attrib}'", feats)
arrs.append(feats)
if len(arrs) == 1:
x = arrs[0]
elif len(arrs) > 1:
self._debug(f'concating {len(arrs)} token features')
x = torch.cat(arrs, 2)
self._shape_debug('token concat', x)
return x
def forward_document_features(self, batch: Batch, x: Tensor = None,
include_fn: Callable = None) -> Tensor:
"""Concatenate any document features given by the vectorizer configuration.
"""
self._shape_debug('forward document features', x)
arrs = []
if x is not None:
arrs.append(x)
for attrib in self.doc_attribs:
if include_fn is not None and not include_fn(attrib):
continue
st = batch.attributes[attrib]
self._shape_debug(f'doc attrib {attrib}', st)
arrs.append(st)
if len(arrs) > 0:
x = torch.cat(arrs, 1)
self._shape_debug('doc concat', x)
        return x
# source: zensols/deepnlp/layer/embed.py (zensols.deepnlp 1.10.1)
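# A minimal subclass sketch for :class:`EmbeddingNetworkModule` above
# (hypothetical class name): concrete models typically compose the inherited
# forward helpers to build the input representation before adding their own
# decoder layers; each helper concatenates the respective vectorized features
# configured in the batch metadata.
class _ExampleEmbeddingModel(EmbeddingNetworkModule):
    MODULE_NAME = 'example'
    def _forward(self, batch: Batch) -> Tensor:
        # word embedding lookup (or pass-through when already decoded)
        x = self.forward_embedding_features(batch)
        # concatenate any token level features on the last dimension
        x = self.forward_token_features(batch, x)
        # concatenate any document level features for a downstream join layer
        x = self.forward_document_features(batch, x)
        return x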
__author__ = 'Paul Landes'
import logging
import torch
from torch import Tensor
from torch import nn
from zensols.deeplearn.model import BaseNetworkModule
from zensols.deepnlp.embed import WordEmbedModel
from . import TrainableEmbeddingLayer
logger = logging.getLogger(__name__)
class WordVectorEmbeddingLayer(TrainableEmbeddingLayer):
"""An input embedding layer. This uses an instance of
:class:`~zensols.deepnlp.embed.WordEmbedModel` to compose the word
    embeddings from indexes.  Each index is that of a word vector, which is
stacked to create the embedding. This happens in the PyTorch framework,
and is fast.
    This class overrides PyTorch methods that disable persistence of the
    embedding weights when configured to be frozen (not trainable).  Otherwise,
    the entire embedding model is saved *every* time the model is saved for
    each epoch, which is both unnecessary and costly in terms of time and
    memory.
"""
def __init__(self, embed_model: WordEmbedModel, *args, **kwargs):
"""Initialize
:param embed_model: contains the word embedding model, such as
``glove``, and ``word2vec``
"""
super().__init__(*args, embedding_dim=embed_model.matrix.shape[1],
**kwargs)
self.embed_model = embed_model
self.num_embeddings = embed_model.matrix.shape[0]
self.vecs = embed_model.to_matrix(self.torch_config)
if self.trainable:
self.logger.info('cloning embedding for trainability')
self.vecs = torch.clone(self.vecs)
else:
self.logger.info('layer is not trainable')
self.requires_grad = not self.trainable
if self.logger.isEnabledFor(logging.INFO):
self.logger.info(f'setting embedding matrix: {self.vecs.shape}, ' +
f'device={self.vecs.device}, ' +
                             f'trainable: {self.trainable}')
self.emb = nn.Embedding.from_pretrained(
self.vecs, freeze=not self.trainable)
def forward(self, x: Tensor) -> Tensor:
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'forward: {x.shape}, device: {x.device} = ' +
f'{BaseNetworkModule.device_from_module(self.emb)}')
        return self.emb.forward(x)
# source: zensols/deepnlp/layer/wordvec.py (zensols.deepnlp 1.10.1)
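# A minimal usage sketch for :class:`WordVectorEmbeddingLayer` above
# (hypothetical wiring; the layer is normally created from the application
# configuration): the layer maps a tensor of word indexes, such as those
# produced by a word embedding vectorizer, to stacked word vectors using the
# wrapped ``nn.Embedding``.
def _embed_token_indexes(layer: WordVectorEmbeddingLayer,
                         token_indexes: Tensor) -> Tensor:
    # (batch, tokens) integer indexes -> (batch, tokens, embedding_dim)
    return layer(token_indexes)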
__author__ = 'Paul Landes'
from dataclasses import dataclass, field
import logging
from pathlib import Path
import numpy as np
import gensim
from gensim.models import KeyedVectors, Word2Vec
from zensols.util import time
from zensols.install import Installer, Resource
from zensols.deepnlp.embed import WordVectorModel, WordEmbedModel
logger = logging.getLogger(__name__)
@dataclass
class Word2VecModel(WordEmbedModel):
"""Load keyed or non-keyed Gensim models.
"""
    installer: Installer = field(default=None)
    """The installer used for the text vector zip file."""
resource: Resource = field(default=None)
"""The zip resource used to find the path to the model files."""
dimension: int = field(default=300)
"""The dimension of the word embedding."""
model_type: str = field(default='keyed')
"""The type of the embeddings, which is either ``keyed`` or ``gensim``."""
@property
def path(self) -> Path:
self.installer()
return self.installer[self.resource]
def _get_model_id(self) -> str:
return f'word2vec: type={self.model_type}, dim={self.dimension}'
def _get_model(self) -> KeyedVectors:
"""The word2vec model.
"""
with time('loaded word2vec model'):
if self.model_type == 'keyed':
model = self._get_keyed_model()
else:
model = self._get_trained_model().wv
return model
def _get_keyed_model(self) -> KeyedVectors:
"""Load a model from a pretrained word2vec model.
"""
if logger.isEnabledFor(logging.INFO):
logger.info(f'loading keyed file: {self.path}')
fname = str(self.path.absolute())
with time(f'loaded key model from {fname}'):
return KeyedVectors.load_word2vec_format(fname, binary=True)
def _get_trained_model(self) -> Word2Vec:
"""Load a model trained with gensim.
"""
path = self.path
if path.exists():
if logger.isEnabledFor(logging.INFO):
logger.info('loading trained file: {}'.format(path))
model = Word2Vec.load(str(path.absolute()))
else:
model = self._train()
if logger.isEnabledFor(logging.INFO):
logger.info('saving trained vectors to: {}'.format(path))
model.save(str(path.absolute()))
return model
def _create_data(self) -> WordVectorModel:
logger.info('reading binary vector file')
# https://github.com/RaRe-Technologies/gensim/wiki/Migrating-from-Gensim-3.x-to-4
if gensim.__version__[0] >= '4':
logger.debug('using version 4')
wv = self._get_model()
words = wv.index_to_key
else:
logger.debug('using version 3')
wv = self._get_model().wv
words = wv.index2entity
word2vec = {}
word2idx = {}
vectors = []
with time('created data structures'):
for i, word in enumerate(words):
word2idx[word] = i
vec = wv[word]
vectors.append(vec)
word2vec[word] = vec
vectors = np.array(vectors)
unknown_vec = np.expand_dims(np.zeros(self.dimension), axis=0)
vectors = np.concatenate((vectors, unknown_vec))
word2idx[self.UNKNOWN] = len(words)
words.append(self.UNKNOWN)
word2vec[self.UNKNOWN] = unknown_vec
return WordVectorModel(vectors, word2vec, words, word2idx)
def _create_keyed_vectors(self) -> KeyedVectors:
        return self._get_model()
# source: zensols/deepnlp/embed/word2vec.py (zensols.deepnlp 1.10.1)
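# A minimal construction sketch for :class:`Word2VecModel` above (hypothetical
# installer/resource names from the application configuration): with
# ``model_type='keyed'`` the pretrained binary word2vec file located by the
# installer is loaded lazily on first access through the inherited
# :class:`.WordEmbedModel` API.
def _load_word2vec(installer: Installer, resource: Resource) -> Word2VecModel:
    model = Word2VecModel(name='word2vec_300', installer=installer,
                          resource=resource, dimension=300,
                          model_type='keyed')
    # dict-like access from the base class triggers the (cached) load
    if 'dog' in model:
        print(model['dog'].shape)
    return model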
__author__ = 'Paul Landes'
from typing import List, Dict, Tuple, Iterable, ClassVar, Optional
from dataclasses import dataclass, field
from abc import ABCMeta, abstractmethod
import logging
import numpy as np
import torch
from torch import Tensor
import gensim
from gensim.models.keyedvectors import Word2VecKeyedVectors, KeyedVectors
from zensols.persist import persisted, PersistableContainer, PersistedWork
from zensols.deeplearn import TorchConfig, DeepLearnError
logger = logging.getLogger(__name__)
class WordEmbedError(DeepLearnError):
"""Raised for any errors pertaining to word vectors.
"""
pass
@dataclass
class WordVectorModel(object):
"""Vector data from the model
"""
vectors: np.ndarray = field()
"""The word vectors."""
word2vec: Dict[str, np.ndarray] = field()
"""The word to word vector mapping."""
words: List[str] = field()
"""The vocabulary."""
word2idx: Dict[str, int] = field()
"""The word to word vector index mapping."""
def __post_init__(self):
self.tensors = {}
def to_matrix(self, torch_config: TorchConfig) -> torch.Tensor:
dev = torch_config.device
if dev in self.tensors:
if logger.isEnabledFor(logging.INFO):
logger.info(f'reusing already cached from {torch_config}')
vecs = self.tensors[dev]
else:
if logger.isEnabledFor(logging.INFO):
                logger.info(f'created tensor vector matrix on {torch_config}')
vecs = torch_config.from_numpy(self.vectors)
self.tensors[dev] = vecs
return vecs
@dataclass
class _WordEmbedVocabAdapter(object):
"""Adapts a :class:`.WordEmbedModel` to a gensim :class:`.KeyedVectors`,
which is used in :meth:`.WordEmbedModel._create_keyed_vectors`.
"""
model: WordVectorModel = field()
def __post_init__(self):
self._index = -1
@property
def index(self):
return self._index
def __iter__(self):
words: List[str] = self.model.words
return iter(words)
def get(self, word: int, default: str):
self._index = self.model.word2idx.get(word, default)
def __getitem__(self, word: str):
self._index = self.model.word2idx[word]
return self
@dataclass
class WordEmbedModel(PersistableContainer, metaclass=ABCMeta):
"""This is an abstract base class that represents a set of word vectors
    (e.g. GloVe).
"""
UNKNOWN: ClassVar[str] = '<unk>'
"""The unknown symbol used for out of vocabulary words."""
ZERO: ClassVar[str] = UNKNOWN
"""The zero vector symbol used for padding vectors."""
_CACHE: ClassVar[Dict[str, WordVectorModel]] = {}
"""Contains cached embedding model that point to the same source."""
name: str = field()
"""The name of the model given by the configuration and must be unique
across word vector type and dimension.
"""
    cache: bool = field(default=True)
    """If ``True``, globally cache all data structures, which should be ``False``
if more than one embedding across a model type is used.
"""
lowercase: bool = field(default=False)
"""If ``True``, downcase each word for all methods that take a word as input.
Use this for embeddings that are only lower case in order to find more hits
when querying for words that have uppercase characters.
"""
def __post_init__(self):
super().__init__()
self._data_inst = PersistedWork('_data_inst', self, transient=True)
@abstractmethod
def _get_model_id(self) -> str:
"""Return a string that uniquely identifies this instance of the embedding
model. This should have the type, size and dimension of the embedding.
:see: :obj:`model_id`
"""
pass
@abstractmethod
    def _create_data(self) -> WordVectorModel:
        """Return the vector data from the model in the form::
            (vectors, word2vec, words, word2idx)
        where ``vectors`` is the word vector matrix, ``word2vec`` maps words to
        vectors, ``words`` is the vocabulary and ``word2idx`` maps words to
        their matrix row index.
"""
pass
def clear_cache(self):
for model in self._CACHE.values():
self._try_deallocate(model)
self._CACHE.clear()
def deallocate(self):
self.clear_cache()
super().deallocate()
@property
def model_id(self) -> str:
"""Return a string that uniquely identifies this instance of the embedding
model. This should have the type, size and dimension of the embedding.
This string is used to cache models in both CPU and GPU memory so the
layers can have the benefit of reusing the same in memeory word
embedding matrix.
"""
return self._get_model_id()
@persisted('_data_inst', transient=True)
def _data(self) -> WordVectorModel:
model_id = self.model_id
wv_model: WordVectorModel = self._CACHE.get(model_id)
if wv_model is None:
wv_model = self._create_data()
if self.cache:
self._CACHE[model_id] = wv_model
return wv_model
@property
def matrix(self) -> np.ndarray:
"""The word vector matrix."""
return self._data().vectors
@property
    def shape(self) -> Tuple[int, int]:
        """The shape of the word vector :obj:`matrix`."""
return self.matrix.shape
    def to_matrix(self, torch_config: TorchConfig) -> Tensor:
        """Return a matrix that represents the entire vector embedding as a tensor.
:param torch_config: indicates where to load the new tensor
"""
return self._data().to_matrix(torch_config)
@property
def vectors(self) -> Dict[str, np.ndarray]:
"""Return all word vectors with the string words as keys.
"""
return self._data().word2vec
@property
def vector_dimension(self) -> int:
"""Return the dimension of the word vectors.
"""
return self.matrix.shape[1]
def keys(self) -> Iterable[str]:
"""Return the keys, which are the word2vec words.
"""
return self.vectors.keys()
@property
@persisted('_unk_idx')
    def unk_idx(self) -> int:
        """The index of the out-of-vocabulary (unknown) token."""
model: WordVectorModel = self._data()
word2idx: Dict[str, int] = model.word2idx
return word2idx.get(self.UNKNOWN)
    def word2idx(self, word: str, default: int = None) -> Optional[int]:
        """Return the index of ``word``, or ``default`` if the word is not indexed.
"""
if self.lowercase:
word = word.lower()
model: WordVectorModel = self._data()
word2idx: Dict[str, int] = model.word2idx
idx: int = word2idx.get(word)
if idx is None:
idx = default
return idx
    def word2idx_or_unk(self, word: str) -> int:
        """Return the index of ``word`` or the :obj:`UNKNOWN` index if not indexed.
"""
return self.word2idx(word, self.unk_idx)
def prime(self):
pass
    def get(self, key: str, default: np.ndarray = None) -> np.ndarray:
        """Just like ``dict.get()``, but return the vector for a word.
:param key: the word to get the vector
:param default: what to return if ``key`` doesn't exist in the dict
:return: the word vector
"""
if self.lowercase:
key = key.lower()
return self.vectors.get(key, default)
@property
@persisted('_keyed_vectors', transient=True)
def keyed_vectors(self) -> KeyedVectors:
"""Adapt instances of this class to a gensim keyed vector instance."""
return self._create_keyed_vectors()
def _create_keyed_vectors(self) -> KeyedVectors:
kv = Word2VecKeyedVectors(vector_size=self.vector_dimension)
if gensim.__version__[0] >= '4':
kv.key_to_index = self._data().word2idx
else:
kv.vocab = _WordEmbedVocabAdapter(self._data())
kv.vectors = self.matrix
kv.index2entity = list(self._data().words)
return kv
def __getitem__(self, key: str):
if self.lowercase:
key = key.lower()
return self.vectors[key]
def __contains__(self, key: str):
if self.lowercase:
key = key.lower()
return key in self.vectors
def __len__(self):
return self.matrix.shape[0]
def __str__(self):
s = f'{self.__class__.__name__} ({self.name}): id={self.model_id}'
if self._data_inst.is_set():
s += f', num words={len(self)}, dim={self.vector_dimension}'
        return s
# source: zensols/deepnlp/embed/domain.py (zensols.deepnlp 1.10.1)
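# A minimal usage sketch for :class:`WordEmbedModel` above (hypothetical;
# ``emb`` is any concrete subclass instance such as a GloVe or word2vec
# model): the model behaves like a read-only mapping from words to vectors
# and also exposes the embedding matrix for use in a torch embedding layer.
def _word_embedding_lookup(emb: WordEmbedModel,
                           torch_config: TorchConfig) -> Tensor:
    if 'dog' in emb:                          # vocabulary membership
        print(emb['dog'].shape)               # a single word vector
    row: Optional[int] = emb.word2idx('dog')  # row index into ``emb.matrix``
    row = emb.word2idx_or_unk('dog')          # falls back to the unknown index
    print(row, emb.shape)
    # the whole embedding as a tensor on the configured device
    return emb.to_matrix(torch_config)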
__author__ = 'Paul Landes'
from typing import List, Dict
from dataclasses import dataclass, field, InitVar
from abc import abstractmethod, ABCMeta
import logging
from pathlib import Path
import pickle
import numpy as np
import h5py
from h5py import Dataset
from zensols.util import time
from zensols.config import Dictable
from zensols.persist import Primeable
from zensols.install import Installer, Resource
from zensols.deepnlp.embed import WordVectorModel, WordEmbedModel
from . import WordEmbedError
logger = logging.getLogger(__name__)
@dataclass
class TextWordModelMetadata(Dictable):
    """Describes a text based :class:`.WordEmbedModel`.  The information in this
    class is used to construct paths to both the text source vector file and
    all generated binary files.
"""
name: str = field()
"""The name of the word vector set (i.e. glove)."""
desc: str = field()
"""A descriptor about this particular word vector set (i.e. 6B)."""
dimension: int = field()
"""The dimension of the word vectors."""
n_vocab: int = field()
"""The number of words in the vocabulary."""
source_path: Path = field()
"""The path to the text file."""
    sub_directory: InitVar[Path] = field(default=None)
    """The subdirectory (appended to the source path's parent) used as
    :obj:`bin_dir`, which defaults to ``bin/<name>.<description>.<dimension>``.
"""
def __post_init__(self, sub_directory: Path):
if sub_directory is None:
fname: str = f'{self.name}.{self.desc}.{self.dimension}'
sub_directory = Path('bin', fname)
self.bin_dir = self.source_path.parent / sub_directory
self.bin_file = self.bin_dir / 'vec.dat'
self.words_file = self.bin_dir / 'words.dat'
self.idx_file = self.bin_dir / 'idx.dat'
@dataclass
class TextWordEmbedModel(WordEmbedModel, Primeable, metaclass=ABCMeta):
    """Extensions of this class read a text vector file, compile it, and then
    write a binary representation for fast loading.
"""
DATASET_NAME = 'vec'
"""Name of the dataset in the HD5F file."""
path: Path = field(default=None)
"""The path to the model file(s)."""
    installer: Installer = field(default=None)
    """The installer used for the text vector zip file."""
resource: Resource = field(default=None)
"""The zip resource used to find the path to the model files."""
@abstractmethod
    def _get_metadata(self) -> TextWordModelMetadata:
        """Create the metadata used to construct paths to both the text source
        vector file and all generated binary files.
"""
pass
def _install(self) -> Path:
"""Install any missing word vector models."""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'install resource for {self.name}: {self.resource}')
self.installer()
return self.installer[self.resource]
@property
    def metadata(self):
        """Return the metadata used to construct paths to both the text source
        vector file and all generated binary files.
"""
if not hasattr(self, '_metadata'):
if self.path is None and self.installer is None:
                raise WordEmbedError('No path is set')
if self.installer is not None and self.resource is None:
                raise WordEmbedError("Installer given but no 'resource'")
if self.installer is not None:
self.path = self._install()
self._metadata = self._get_metadata()
return self._metadata
def _get_model_id(self) -> str:
"""Return a string used to uniquely identify this model.
"""
meta = self.metadata
return f'{meta.name}: description={meta.desc}, dim={meta.dimension}'
def _populate_vec_lines(self, words: List[str], word2idx: Dict[str, int],
ds: Dataset):
"""Add word vectors to the h5py dataset, vocab and vocab index.
:param words: the list of vocabulary words
:param word2idx: dictionary of word to word vector index (row)
:param ds: the h5py data structure to add the word vectors
"""
meta = self.metadata
idx = 0
lc = 0
with open(meta.source_path, 'rb') as f:
for rix, ln in enumerate(f):
lc += 1
line = ln.decode().strip().split(' ')
word = line[0]
words.append(word)
word2idx[word] = idx
idx += 1
try:
ds[rix, :] = line[1:]
except Exception as e:
raise WordEmbedError(
f'Could not parse line {lc} (word: {word}): ' +
f'{e}; line: {ln}') from e
    def _write_vecs(self) -> np.ndarray:
        """Write the h5py binary files.  They are calculated and written only
        when they do not already exist on the file system.
"""
meta = self.metadata
meta.bin_dir.mkdir(parents=True, exist_ok=True)
words = []
word2idx = {}
if logger.isEnabledFor(logging.INFO):
logger.info(f'writing binary vectors {meta.source_path} ' +
f'-> {meta.bin_dir}')
shape = (meta.n_vocab, meta.dimension)
if logger.isEnabledFor(logging.INFO):
logger.info(f'creating h5py binary vec files with shape {shape}:')
meta.write_to_log(logger, logging.INFO, 1)
with time(f'wrote h5py to {meta.bin_file}'):
with h5py.File(meta.bin_file, 'w') as f:
dset: Dataset = f.create_dataset(
self.DATASET_NAME, shape, dtype='float64')
self._populate_vec_lines(words, word2idx, dset)
with open(meta.words_file, 'wb') as f:
pickle.dump(words[:], f)
with open(meta.idx_file, 'wb') as f:
pickle.dump(word2idx, f)
def _assert_binary_vecs(self):
meta = self.metadata
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'{meta.bin_file} exists: {meta.bin_file.exists()}')
if not meta.bin_file.exists():
if logger.isEnabledFor(logging.INFO):
logger.info(f'writing binary vectors to: {meta.bin_file}')
self._write_vecs()
def prime(self):
self._assert_binary_vecs()
    def _create_data(self) -> WordVectorModel:
        """Read the binary vector (h5py), vocabulary and index files from disk.
"""
self._assert_binary_vecs()
meta = self.metadata
if logger.isEnabledFor(logging.INFO):
logger.info(f'reading binary vector file: {meta.bin_file}')
with time('loaded {cnt} vectors'):
with h5py.File(meta.bin_file, 'r') as f:
ds: Dataset = f[self.DATASET_NAME]
vectors: np.ndarray = ds[:]
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'word embedding type: {vectors.dtype}')
with open(meta.words_file, 'rb') as f:
words = pickle.load(f)
with open(meta.idx_file, 'rb') as f:
word2idx = pickle.load(f)
cnt = len(word2idx)
with time('prepared vectors'):
unknown_vec: np.ndarray = np.expand_dims(
np.zeros(self.dimension), axis=0)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'unknown type: {unknown_vec.dtype}')
vectors: np.ndarray = np.concatenate((vectors, unknown_vec))
word2idx[self.UNKNOWN] = len(words)
words.append(self.UNKNOWN)
word2vec = {w: vectors[word2idx[w]] for w in words}
return WordVectorModel(vectors, word2vec, words, word2idx)
@dataclass
class DefaultTextWordEmbedModel(TextWordEmbedModel):
    """This class uses the Stanford pretrained GloVe embeddings as a ``dict``-like
    Python object.  It loads the GloVe vectors from a text file and then
creates a binary file that's quick to load on subsequent uses.
An example configuration would be::
[glove_embedding]
class_name = zensols.deepnlp.embed.GloveWordEmbedModel
path = path: ${default:corpus_dir}/glove
desc = 6B
dimension = 50
"""
name: str = field(default='unknown_name')
"""The name of the word vector set (i.e. glove)."""
desc: str = field(default='unknown_desc')
"""The size description (i.e. 6B for the six billion word trained vectors).
"""
dimension: int = field(default=50)
"""The word vector dimension."""
vocab_size: int = field(default=0)
"""Vocabulary size."""
file_name_pattern: str = field(default='{name}.{desc}.{dimension}d.txt')
"""The format of the file to create."""
@property
def file_name(self) -> str:
return self.file_name_pattern.format(
name=self.name,
desc=self.desc,
dimension=self.dimension)
def _get_metadata(self) -> TextWordModelMetadata:
name: str = self.name
dim: int = self.dimension
desc: str = self.desc
path: Path = self.path / self.file_name
        return TextWordModelMetadata(name, desc, dim, self.vocab_size, path)
# source: zensols/deepnlp/embed/wordtext.py (zensols.deepnlp 1.10.1)
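# A minimal sketch for :class:`DefaultTextWordEmbedModel` above (hypothetical
# values) of how paths are derived from the configuration fields: with
# ``name='glove'``, ``desc='6B'`` and ``dimension=50`` the model reads
# ``<path>/glove.6B.50d.txt`` and caches the compiled binaries under
# ``<path>/bin/glove.6B.50/``.
def _example_glove_metadata(path: Path) -> TextWordModelMetadata:
    model = DefaultTextWordEmbedModel(
        name='glove', desc='6B', dimension=50,
        vocab_size=400000, path=path)
    assert model.file_name == 'glove.6B.50d.txt'
    return model.metadata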
__author__ = 'Paul Landes'
from typing import List, Dict
from dataclasses import dataclass, field
import logging
from h5py import Dataset
from zensols.deepnlp.embed import TextWordEmbedModel, TextWordModelMetadata
logger = logging.getLogger(__name__)
@dataclass
class FastTextEmbedModel(TextWordEmbedModel):
    """This class reads the FastText word vector text data format and provides
    an instance of a :class:`.WordEmbedModel`.  Files in this format, such as
    ``crawl-300d-2M.vec``, can be downloaded with the link below.
:see: `English word vectors <https://fasttext.cc/docs/en/english-vectors.html>`_
"""
desc: str = field(default='2M')
"""The size description (i.e. 6B for the six billion word trained vectors).
"""
    dimension: int = field(default=300)
"""The word vector dimension."""
corpus: str = field(default='crawl')
"""The corpus the embeddings were trained on, such as ``crawl`` and ``web``.
"""
def _get_metadata(self) -> TextWordModelMetadata:
name = 'fasttext'
# crawl-300d-2M.vec
path = self.path
desc = f'{self.corpus}-{self.desc}'
with open(path, encoding='utf-8',
newline='\n', errors='ignore') as f:
vocab_size, dim = map(int, f.readline().split())
return TextWordModelMetadata(name, desc, dim, vocab_size, path)
def _populate_vec_lines(self, words: List[str], word2idx: Dict[str, int],
ds: Dataset):
meta = self.metadata
idx = 0
lc = 0
with open(meta.source_path, encoding='utf-8',
newline='\n', errors='ignore') as f:
n_vocab, dim = map(int, f.readline().split())
for rix, ln in enumerate(f):
lc += 1
line = ln.rstrip().split(' ')
word = line[0]
words.append(word)
word2idx[word] = idx
idx += 1
try:
ds[rix, :] = line[1:]
except Exception as e:
logger.error(f'could not parse line {lc} ' +
f'(word: {word}): {e}; line: {ln}')
                    raise e
# source: zensols/deepnlp/embed/fasttext.py (zensols.deepnlp 1.10.1)
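# A minimal construction sketch for :class:`FastTextEmbedModel` above
# (hypothetical local path): ``path`` must point at a fastText text vector
# file such as ``crawl-300d-2M.vec``, whose first line holds the vocabulary
# size and dimension read by ``_get_metadata``.
from pathlib import Path  # assumed import used only by this sketch
def _load_fasttext(path: Path) -> FastTextEmbedModel:
    return FastTextEmbedModel(name='fasttext_crawl_300', path=path,
                              desc='2M', dimension=300, corpus='crawl')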
__author__ = 'Paul Landes'
from typing import Optional, Union, List
from dataclasses import dataclass, field
import numpy as np
import torch
from torch import Tensor
from zensols.deeplearn import TorchConfig
from zensols.nlp import (
FeatureToken, FeatureSentence, FeatureDocument, FeatureDocumentDecorator
)
from . import WordEmbedModel
@dataclass
class WordEmbedDocumentDecorator(FeatureDocumentDecorator):
    """Populates sentence and token embeddings in the documents.  Tokens have
    shape ``(1, d)`` where ``d`` is the embedding dimension; the first dimension is
always 1 to be compatible with word piece embeddings populated by
:class:`..transformer.WordPieceDocumentDecorator`.
:see: :class:`.WordEmbedModel`
"""
model: WordEmbedModel = field()
"""The word embedding model for populating tokens and sentences."""
torch_config: Optional[TorchConfig] = field(default=None)
"""The Torch configuration to allocate the embeddings from either the GPU or
the CPU. If ``None``, then Numpy :class:`numpy.ndarray` arrays are used
instead of :class:`torch.Tensor`.
"""
token_embeddings: bool = field(default=True)
"""Whether to add :class:`.WordPieceFeatureToken.embeddings`.
"""
    sent_embeddings: bool = field(default=True)
    """Whether to add :class:`.WordPieceFeatureSentence.embeddings`.
"""
skip_oov: bool = field(default=False)
"""Whether to skip out-of-vocabulary tokens that have no embeddings."""
def _add_sent_embedding(self, sent: FeatureSentence):
use_np: bool = self.torch_config is None
add_tok_emb: bool = self.token_embeddings
model: WordEmbedModel = self.model
# our embedding will be a numpy array when no torch config is provided
emb: Union[np.ndarray, Tensor]
sembs: List[Union[np.ndarray, Tensor]] = []
if use_np:
# already a numpy array
emb = model.matrix
else:
# convert to a torch tensor based on our configuration (i.e. device)
emb = model.to_matrix(self.torch_config)
tok: FeatureToken
for tok in sent.token_iter():
norm: str = tok.norm
idx: int = model.word2idx(norm)
if not self.skip_oov or idx is not None:
if idx is None:
idx = model.unk_idx
vec: Union[np.ndarray, Tensor] = emb[idx]
sembs.append(vec)
if add_tok_emb:
if use_np:
vec = np.expand_dims(vec, axis=0)
else:
vec = vec.unsqueeze(axis=0)
tok.embedding = vec
        # sentence embeddings are the centroid for non-contextual embeddings
if len(sembs) > 0 and self.sent_embeddings:
if use_np:
sent.embedding = np.stack(sembs).mean(axis=0)
else:
sent.embedding = torch.stack(sembs).mean(axis=0)
def decorate(self, doc: FeatureDocument):
assert isinstance(self.model, WordEmbedModel)
if self.token_embeddings or self.sent_embeddings:
sent: FeatureSentence
for sent in doc.sents:
                self._add_sent_embedding(sent)
# source: zensols/deepnlp/embed/doc.py (zensols.deepnlp 1.10.1)
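# A minimal usage sketch for :class:`WordEmbedDocumentDecorator` above
# (hypothetical; the decorator is normally registered on a document parser
# via configuration): applying it directly to a parsed document populates
# ``embedding`` on each token and sentence.  With ``torch_config`` left as
# ``None`` the embeddings are numpy arrays: shape (1, d) per token and (d,)
# for each sentence centroid.
def _decorate_document(model: WordEmbedModel, doc: FeatureDocument) -> None:
    decorator = WordEmbedDocumentDecorator(model=model)
    decorator.decorate(doc)
    for sent in doc.sents:
        print(sent.embedding.shape)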
__author__ = 'Paul Landes'
from typing import Tuple, Iterable, Any, Dict
from dataclasses import dataclass, field
import logging
import numpy as np
from sklearn.decomposition import TruncatedSVD
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.preprocessing import Normalizer
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy.sparse import csr_matrix
from zensols.util import time
from zensols.nlp import FeatureDocument, TokenContainer
from zensols.deeplearn import DeepLearnError
from zensols.deeplearn.vectorize import FeatureContext, TensorFeatureContext
from zensols.deepnlp.vectorize import TextFeatureType
from . import DocumentIndexVectorizer
logger = logging.getLogger(__name__)
@dataclass
class LatentSemanticDocumentIndexerVectorizer(DocumentIndexVectorizer):
"""Train a latent semantic indexing (LSI, aka LSA) model from::
Deerwester, S., Dumais, S.T., Furnas, G.W., Landauer, T.K., and Harshman,
R. 1990. Indexing by Latent Semantic Analysis. Journal of the American
Society for Information Science; New York, N.Y. 41, 6, 391–407.
    This class can also be used to only index TF/IDF; to skip the LSI training,
    set :obj:`iterations` to zero.
:shape: ``(1,)``
:see: :class:`sklearn.decomposition.TruncatedSVD`
"""
DESCRIPTION = 'latent semantic indexing'
FEATURE_TYPE = TextFeatureType.DOCUMENT
components: int = field(default=100)
"""The number of components for the output."""
iterations: int = field(default=10)
"""Number of iterations for randomized SVD solver if greater than 0 (see
class docs).
"""
vectorizer_params: Dict[str, Any] = field(default_factory=dict)
"""Additional parameters passed to
:class:`~sklearn.feature_extraction.text.TfidfVectorizer` when vectorizing
TF/IDF features.
"""
def _get_shape(self) -> Tuple[int, int]:
return 1,
def _create_model(self, docs: Iterable[FeatureDocument]) -> Dict[str, Any]:
"""Train using a singular value decomposition, then truncate to get the
        most salient terms in a document/term matrix.
"""
vectorizer = TfidfVectorizer(
lowercase=False,
tokenizer=self.feat_to_tokens,
**self.vectorizer_params,
)
model: Dict[str, Any] = {'vectorizer': vectorizer}
with time('TF/IDF vectorized {X_train_tfidf.shape[0]} documents'):
X_train_tfidf = vectorizer.fit_transform(docs)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'tfidf shape: {X_train_tfidf.shape}')
svd = TruncatedSVD(self.components, n_iter=self.iterations)
if self.iterations > 0:
lsa: Pipeline = make_pipeline(svd, Normalizer(copy=False))
with time('SVD complete'):
X_train_lsa = lsa.fit_transform(X_train_tfidf)
if logger.isEnabledFor(logging.INFO):
logger.info(f'created model w/{self.components} components, ' +
f'over {self.iterations} iterations with ' +
f'TF/IDF matrix shape: {X_train_tfidf.shape}, ' +
f'SVD matrix shape: {X_train_lsa.shape}')
model['lsa'] = lsa
return model
@property
def vectorizer(self) -> TfidfVectorizer:
"""The vectorizer trained on the document set."""
return self.model['vectorizer']
@property
def lsa(self) -> Pipeline:
"""The LSA pipeline trained on the document set."""
if 'lsa' not in self.model:
raise DeepLearnError('SVD model was not trained')
return self.model['lsa']
def _transform_doc(self, doc: FeatureDocument, vectorizer: TfidfVectorizer,
lsa: Pipeline) -> np.ndarray:
X_test_tfidf: csr_matrix = vectorizer.transform([doc])
X_test_lsa: csr_matrix = lsa.transform(X_test_tfidf)
return X_test_lsa
def similarity(self, a: FeatureDocument, b: FeatureDocument) -> float:
"""Return the semantic similarity between two documents.
"""
vectorizer: TfidfVectorizer = self.vectorizer
lsa: Pipeline = self.lsa
emb_a = self._transform_doc(a, vectorizer, lsa)
emb_b = self._transform_doc(b, vectorizer, lsa)
return np.dot(emb_a, emb_b.T)[0][0]
def _encode(self, containers: Tuple[TokenContainer]) -> FeatureContext:
measure = self.similarity(*containers)
arr = self.torch_config.singleton([measure])
        return TensorFeatureContext(self.feature_id, arr)
# source: zensols/deepnlp/index/lsi.py (zensols.deepnlp 1.10.1)
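# A minimal usage sketch for :class:`LatentSemanticDocumentIndexerVectorizer`
# above (hypothetical documents): once the vectorizer's :obj:`model` has been
# trained over the indexed corpus, the semantic similarity of two parsed
# documents can be computed directly.
def _doc_similarity(vec: LatentSemanticDocumentIndexerVectorizer,
                    doc_a: FeatureDocument, doc_b: FeatureDocument) -> float:
    # a dot product of the two documents' LSA projections
    return vec.similarity(doc_a, doc_b)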
from typing import Tuple, Iterable, Any
from dataclasses import dataclass, field
import logging
import torch
import gensim.corpora as corpora
from gensim.models.ldamodel import LdaModel
from zensols.util import time
from zensols.nlp import TokenContainer, FeatureDocument
from zensols.deeplearn import TorchConfig
from zensols.deeplearn.vectorize import FeatureContext, TensorFeatureContext
from zensols.deepnlp.vectorize import TextFeatureType
from . import DocumentIndexVectorizer
logger = logging.getLogger(__name__)
@dataclass
class TopicModelDocumentIndexerVectorizer(DocumentIndexVectorizer):
"""Train a model using LDA for topic modeling.
Citation:
Hoffman, M., Bach, F., and Blei, D. 2010. Online Learning for Latent
Dirichlet Allocation. Advances in Neural Information Processing Systems 23.
    :shape: ``(topics,)`` when ``decode_as_flat`` is ``True``,
        otherwise ``(1, topics)``
:see: :class:`gensim.models.ldamodel.LdaModel`
"""
    DESCRIPTION = 'latent Dirichlet allocation'
FEATURE_TYPE = TextFeatureType.DOCUMENT
topics: int = field(default=20)
"""The number of topics (usually denoted ``K``)."""
decode_as_flat: bool = field(default=True)
"""If ``True``, flatten the tensor after decoding."""
def _get_shape(self) -> Tuple[int, int]:
if self.decode_as_flat:
return self.topics,
else:
return 1, self.topics
def _create_model(self, docs: Iterable[FeatureDocument]) -> Any:
if logger.isEnabledFor(logging.INFO):
logger.info(f'creating {self.topics} topics')
docs = tuple(map(lambda doc: self.feat_to_tokens(doc), docs))
id2word = corpora.Dictionary(docs)
corpus = tuple(map(lambda doc: id2word.doc2bow(doc), docs))
rand_state = TorchConfig.get_random_seed()
if rand_state is None:
rand_state = 0
params = {
'corpus': corpus,
'id2word': id2word,
'num_topics': self.topics,
'random_state': rand_state,
'update_every': 1,
'chunksize': 100,
'passes': 10,
'alpha': 'auto',
'per_word_topics': True
}
        with time(f'modeled {self.topics} topics across {len(docs)} documents'):
lda = LdaModel(**params)
return {'lda': lda, 'corpus': corpus, 'id2word': id2word}
def query(self, tokens: Tuple[str]) -> Tuple[float]:
"""Return a distribution over the topics for a query set of tokens.
:param tokens: the string list of tokens to use for inferencing in the
model
:return: a list of tuples in the form ``(topic_id, probability)``
"""
lda = self.model['lda']
id2word = self.model['id2word']
docs_q = [tokens]
corpus_q = tuple(map(lambda doc: id2word.doc2bow(doc), docs_q))
return lda.get_document_topics(corpus_q, minimum_probability=0)[0]
def _encode(self, containers: Tuple[TokenContainer]) -> FeatureContext:
arrs = []
for container in containers:
terms = tuple(map(lambda t: t.lemma, container.tokens))
arr = self.torch_config.from_iterable(
map(lambda x: x[1], self.query(terms)))
arrs.append(arr)
arrs = torch.stack(arrs)
return TensorFeatureContext(self.feature_id, arrs)
def _decode(self, context: FeatureContext) -> torch.Tensor:
arr = super()._decode(context)
if self.decode_as_flat:
shape = arr.shape
arr = arr.flatten()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'decode shape {shape} -> {arr.shape}')
        return arr
# source: zensols/deepnlp/index/lda.py (zensols.deepnlp 1.10.1)
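# A minimal usage sketch for :class:`TopicModelDocumentIndexerVectorizer`
# above (hypothetical tokens): :meth:`query` infers a topic distribution for
# a bag of lemmas against the trained LDA model and returns
# ``(topic_id, probability)`` pairs, one per topic.
def _topic_distribution(vec: TopicModelDocumentIndexerVectorizer) -> \
        Tuple[float, ...]:
    dist = vec.query(('film', 'director', 'screenplay'))
    # probabilities over ``vec.topics`` topics (approximately summing to one)
    return tuple(float(prob) for _, prob in dist)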
__author__ = 'Paul Landes'
from typing import Tuple, Iterable, Any
from abc import ABC, ABCMeta, abstractmethod
from dataclasses import dataclass, field
import logging
from itertools import chain
from pathlib import Path
from zensols.util import time
from zensols.persist import (
persisted,
PersistedWork,
PersistableContainer,
Primeable
)
from zensols.nlp import FeatureToken, FeatureDocument
from zensols.deepnlp.vectorize import FeatureDocumentVectorizer
logger = logging.getLogger(__name__)
@dataclass
class IndexedDocumentFactory(ABC):
    """Creates training documents used to generate indexed features (e.g. latent
    Dirichlet allocation, latent semantic indexing, etc).
:see: :class:`.DocumentIndexVectorizer`
"""
@abstractmethod
def create_training_docs(self) -> Iterable[FeatureDocument]:
"""Create the documents used to index in the model during training.
"""
pass
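# A minimal implementation sketch of :class:`IndexedDocumentFactory`
# (hypothetical): a concrete factory only needs to supply the parsed feature
# documents the index model is trained on, here assumed to already be in
# memory.
@dataclass
class _InMemoryDocumentFactory(IndexedDocumentFactory):
    docs: Tuple[FeatureDocument, ...] = field()
    def create_training_docs(self) -> Iterable[FeatureDocument]:
        return self.docs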
@dataclass
class DocumentIndexVectorizer(FeatureDocumentVectorizer,
                              PersistableContainer, Primeable,
                              metaclass=ABCMeta):
    """A vectorizer that generates vectorized features based on the indexed
    documents of the training set.  For example, latent Dirichlet allocation
    may be used to generate a distribution of the likelihood that a document
    belongs to a topic.
    Subclasses of this abstract class are both vectorizers and models.  The
    model is created once and then cached.  To clear the cache and force it to be
retrained, use :meth:`clear`.
The method :meth:`_create_model` must be implemented.
:see: :class:`.TopicModelDocumentIndexerVectorizer`
.. document private functions
.. automethod:: _create_model
"""
    doc_factory: IndexedDocumentFactory = field()
    """The document factory used to create training documents for the model
vectorizer.
"""
    index_path: Path = field()
    """The path to the pickled cache file of the trained model.
"""
def __post_init__(self):
PersistableContainer.__init__(self)
self.index_path.parent.mkdir(parents=True, exist_ok=True)
self._model = PersistedWork(self.index_path, self)
@staticmethod
def feat_to_tokens(docs: Tuple[FeatureDocument, ...]) -> Tuple[str, ...]:
"""Create a tuple of string tokens from a set of documents suitable for
document indexing. The strings are the lemmas of the tokens.
**Important**: this method must remain static since the LSI instance of
this class uses it as a factory function in the a vectorizer.
"""
def filter_tok(t: FeatureToken) -> bool:
return not t.is_space and not t.is_stop and not t.is_punctuation
toks = map(lambda d: d.lemma_.lower(),
filter(filter_tok, chain.from_iterable(
map(lambda d: d.tokens, docs))))
return tuple(toks)
@abstractmethod
def _create_model(self, docs: Iterable[FeatureDocument]) -> Any:
"""Create the model for this indexer. The model is implementation specific.
        The model must be picklable and is cached as :obj:`model`.
"""
pass
@property
@persisted('_model')
def model(self):
"""Return the trained model for this vectorizer. See the class docs on how it
is cached and cleared.
"""
docs: Iterable[FeatureDocument] = \
self.doc_factory.create_training_docs()
with time('trained model'):
if logger.isEnabledFor(logging.INFO):
logger.info(f'creating model at {self.index_path}')
return self._create_model(docs)
def __getstate__(self):
return self.__dict__
def prime(self):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'priming {self}')
self.model
def clear(self):
        self._model.clear()
# source: zensols/deepnlp/index/domain.py (zensols.deepnlp 1.10.1)
from typing import Iterable, List, Tuple, Any
from dataclasses import dataclass, field
import logging
import sys
from abc import abstractmethod, ABCMeta
import itertools as it
import pandas as pd
from zensols.persist import Stash, PrimeableStash
from zensols.multi import MultiProcessStash
from zensols.nlp import FeatureDocument
from zensols.deepnlp.vectorize import FeatureDocumentVectorizerManager
logger = logging.getLogger(__name__)
@dataclass
class DocumentFeatureStash(MultiProcessStash, metaclass=ABCMeta):
"""This class parses natural language text in to :class:`.FeatureDocument`
instances in multiple sub processes.
.. document private functions
.. automethod:: _parse_document
"""
ATTR_EXP_META = ('document_limit',)
factory: Stash = field()
"""The stash that creates the ``factory_data`` given to
:meth:`_parse_document`.
"""
vec_manager: FeatureDocumentVectorizerManager = field()
"""Used to parse text in to :class:`.FeatureDocument` instances.
"""
document_limit: int = field(default=sys.maxsize)
"""The maximum number of documents to process."""
def prime(self):
if isinstance(self.factory, PrimeableStash):
self.factory.prime()
super().prime()
@abstractmethod
    def _parse_document(self, id: int, factory_data: Any) -> FeatureDocument:
        """Parse the data created by :obj:`factory` for key ``id`` into a
        :class:`.FeatureDocument`.
        """
        pass
    def _create_data(self) -> Iterable[str]:
return it.islice(self.factory.keys(), self.document_limit)
def _process(self, chunk: List[str]) -> \
Iterable[Tuple[str, FeatureDocument]]:
logger.info(f'processing chunk with {len(chunk)} ids')
for id, factory_data in map(lambda id: (id, self.factory[id]), chunk):
data = self._parse_document(id, factory_data)
yield (id, data)
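# Processing flow of DocumentFeatureStash above (a hedged summary based on the
# MultiProcessStash contract): ``_create_data`` yields the factory keys capped
# by ``document_limit``, the superclass chunks those keys across worker
# processes, and each worker calls ``_process`` to parse and yield
# (id, FeatureDocument) pairs that are then persisted by the stash.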
@dataclass
class DataframeDocumentFeatureStash(DocumentFeatureStash):
"""Creates :class:`.FeatureDocument` instances from :class:`pandas.Series` rows
from the :class:`pandas.DataFrame` stash values.
"""
text_column: str = field(default='text')
"""The column name for the text to be parsed by the document parser."""
    additional_columns: Tuple[str, ...] = field(default=None)
    """A tuple of column names whose values are passed as keyword arguments to
    the document parser (see :meth:`_parse_document`)."""
def _parse_document(self, id: int, row: pd.Series) -> FeatureDocument:
# text to parse with SpaCy
text = row[self.text_column]
if self.additional_columns is None:
return self.vec_manager.parse(text)
else:
vals = dict(map(lambda c: (c, row[c]), self.additional_columns))
            return self.vec_manager.parse(text, **vals)
zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/feature/stash.py | stash.py
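# An illustrative sketch (not part of the library) of what
# DataframeDocumentFeatureStash._parse_document above forwards to the parser.
# The stash wiring (factory, vec_manager, multiprocessing settings) comes from
# configuration and is omitted; ``stash`` and the column names are assumptions
# made for this example only.
example_row = pd.Series({'text': 'The movie was great.', 'label': 'pos'})
# with additional_columns=('label',), the call:
#     stash._parse_document(0, example_row)
# reduces to:
#     stash.vec_manager.parse('The movie was great.', label='pos')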
__author__ = 'Paul Landes'
from typing import Set, List
from dataclasses import dataclass, field
from abc import ABCMeta, abstractmethod
import logging
from zensols.persist import Stash
from zensols.deeplearn import NetworkSettings, ModelSettings
from zensols.deeplearn.batch import BatchMetadata, ManagerFeatureMapping
from zensols.deeplearn.model import ModelError, ModelFacade
from zensols.deeplearn.vectorize import (
FeatureVectorizerManagerSet,
FeatureVectorizerManager,
FeatureVectorizer,
)
from zensols.nlp import FeatureDocumentParser, FeatureDocument
from zensols.deepnlp.transformer import suppress_warnings
from zensols.deepnlp.transformer.vectorizers import \
TransformerEmbeddingFeatureVectorizer
from zensols.deepnlp.transformer import (
TransformerResource,
TransformerDocumentTokenizer,
)
logger = logging.getLogger(__name__)
@dataclass
class LanguageModelFacadeConfig(object):
"""Configuration that defines how and what to access language configuration
data. Note that this data reflects how you have the model configured per
the configuration file. Parameter examples are given per the Movie Review
example.
"""
manager_name: str = field()
"""The name of the language based feature vectorizer, such as
``language_vectorizer_manager``.
"""
attribs: Set[str] = field()
"""The language attributes (all levels: token, document etc), such as
``enum``, ``count``, ``dep`` etc.
"""
embedding_attribs: Set[str] = field()
"""All embedding attributes using in the configuration, such as
``glove_50_embedding``, ``word2vec_300``, ``bert_embedding``, etc.
"""
@dataclass
class LanguageModelFacade(ModelFacade, metaclass=ABCMeta):
"""A facade that supports natural language model feature updating through a
facade. This facade also provides logging configuration for NLP domains
for this package.
This class makes assumptions on the naming of the embedding layer
vectorizer naming. See :obj:`embedding`.
"""
suppress_transformer_warnings: bool = field(default=True)
"""If ``True``, suppress the ```Some weights of the model checkpoint...```
warnings from huggingface transformers library.
"""
def __post_init__(self, *args, **kwargs):
super().__post_init__(*args, **kwargs)
if self.suppress_transformer_warnings:
suppress_warnings()
@abstractmethod
def _get_language_model_config(self) -> LanguageModelFacadeConfig:
"""Get the langauge model configuration.
"""
pass
def _create_facade_explorer(self):
from zensols.deepnlp.vectorize import FeatureDocumentVectorizer
ce = super()._create_facade_explorer()
ce.include_classes.update({NetworkSettings, ModelSettings})
ce.exclude_classes.update({FeatureDocumentVectorizer})
ce.dictify_dataclasses = True
return ce
@property
def enum_feature_ids(self) -> Set[str]:
"""Spacy enumeration encodings used to token wise to widen the input
embeddings.
"""
return self._get_vectorizer_feature_ids('enum')
@enum_feature_ids.setter
def enum_feature_ids(self, feature_ids: Set[str]):
"""Spacy enumeration encodings used to token wise to widen the input
embeddings.
"""
self._set_vectorizer_feature_ids('enum', feature_ids)
@property
def count_feature_ids(self) -> Set[str]:
"""The spacy token features are used in the join layer.
"""
return self._get_vectorizer_feature_ids('count')
@count_feature_ids.setter
def count_feature_ids(self, feature_ids: Set[str]):
"""The spacy token features are used in the join layer.
"""
self._set_vectorizer_feature_ids('count', feature_ids)
@property
def language_attributes(self) -> Set[str]:
"""The language attributes to be used.
"""
lc = self._get_language_model_config()
stash = self.batch_stash
return stash.decoded_attributes & lc.attribs
@language_attributes.setter
def language_attributes(self, attributes: Set[str]):
"""The language attributes to be used.
:param attributes:
            the set of attributes to use, which are a (sub)set of the
:class:`~zensols.deeplearn.batch.BatchStash`'s
``decoded_attributes``
"""
stash = self.batch_stash
lc = self._get_language_model_config()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'all language attributes: {lc.attribs}')
        non_existent = attributes - lc.attribs
        if len(non_existent) > 0:
            raise ModelError(f'No such language attributes: {non_existent}')
cur_attribs = self.batch_stash.decoded_attributes
to_set = (cur_attribs - lc.attribs) | attributes
if logger.isEnabledFor(logging.DEBUG):
            logger.debug(f'setting decoded batch stash attribs: {to_set}')
if cur_attribs == to_set:
logger.info('no attribute changes--skipping')
else:
stash.decoded_attributes = to_set
self.clear()
def _get_default_token_length(self, embedding: str) -> int:
lvm: FeatureVectorizerManager = self.language_vectorizer_manager
#return self.config.get_option_int('token_length', 'language_defaults')
return lvm.token_length
@property
def embedding(self) -> str:
"""The embedding layer.
**Important**: the naming of the ``embedding`` parameter is that which
is given in the configuration without the ``_layer`` postfix. For
example, ``embedding`` is ``glove_50_embedding`` for:
* ``glove_50_embedding`` is the name of the
:class:`~zensols.deepnlp.embed.GloveWordEmbedModel`
* ``glove_50_feature_vectorizer`` is the name of the
:class:`~zensols.deepnlp.vectorize.WordVectorEmbeddingFeatureVectorizer`
          * ``glove_50_embedding_layer`` is the name of the
            :class:`~zensols.deepnlp.vectorize.WordVectorEmbeddingLayer`
        :param embedding: the kind of embedding, e.g. ``glove_50_embedding``
"""
stash = self.batch_stash
cur_attribs = stash.decoded_attributes
lang_attribs = self._get_language_model_config()
emb = lang_attribs.embedding_attribs & cur_attribs
assert len(emb) == 1
return next(iter(emb))
@embedding.setter
def embedding(self, embedding: str):
"""The embedding layer.
**Important**: the naming of the ``embedding`` parameter is that which
is given in the configuration without the ``_layer`` postfix. For
example, ``embedding`` is ``glove_50_embedding`` for:
* ``glove_50_embedding`` is the name of the
:class:`~zensols.deepnlp.embed.GloveWordEmbedModel`
* ``glove_50_feature_vectorizer`` is the name of the
:class:`~zensols.deepnlp.vectorize.WordVectorEmbeddingFeatureVectorizer`
          * ``glove_50_embedding_layer`` is the name of the
            :class:`~zensols.deepnlp.vectorize.WordVectorEmbeddingLayer`
        :param embedding: the kind of embedding, e.g. ``glove_50_embedding``
"""
self._set_embedding(embedding)
def _set_embedding(self, embedding: str):
lang_attribs = self._get_language_model_config()
emb_sec = embedding
if emb_sec not in lang_attribs.embedding_attribs:
raise ModelError(f'No such embedding attribute: {embedding}')
stash = self.batch_stash
cur_attribs = stash.decoded_attributes
attribs = (cur_attribs - lang_attribs.embedding_attribs) | {emb_sec}
        already_set = cur_attribs == attribs
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'decoded batch stash attribs: {attribs}')
logger.debug(f'embedding layer: {emb_sec}')
        if already_set:
            logger.info('no attribute changes--skipping')
else:
vec_mng = self.language_vectorizer_manager
old_emb = self.embedding
self._deallocate_config_instance(f'{old_emb}_layer')
stash.decoded_attributes = attribs
elayer = f'instance: {emb_sec}_layer'
self.executor.net_settings.embedding_layer = elayer
vec_mng.token_length = self._get_default_token_length(embedding)
self.clear()
        return already_set
@property
def language_vectorizer_manager(self) -> FeatureVectorizerManager:
"""Return the language vectorizer manager for the class.
"""
lc = self._get_language_model_config()
return self.vectorizer_manager_set[lc.manager_name]
def get_transformer_vectorizer(self) -> \
TransformerEmbeddingFeatureVectorizer:
"""Return the first found tranformer token vectorizer.
"""
mng_set: FeatureVectorizerManagerSet = self.vectorizer_manager_set
mng: FeatureVectorizerManager
for mng in mng_set.values():
            vec: FeatureVectorizer
            for vec in mng.values():
                if isinstance(vec, TransformerEmbeddingFeatureVectorizer):
                    return vec
def get_max_word_piece_len(self) -> int:
"""Get the longest word piece length for the first found configured transformer
embedding feature vectorizer.
"""
vec: TransformerEmbeddingFeatureVectorizer = \
self.get_transformer_vectorizer()
if vec is None:
raise ModelError('No transformer vectorizer found')
tres: TransformerResource = vec.embed_model
tokenizer: TransformerDocumentTokenizer = tres.tokenizer
meta: BatchMetadata = self.batch_metadata
field: ManagerFeatureMapping = \
meta.mapping.get_field_map_by_feature_id(vec.feature_id)[1]
attr_name: str = field.attr_access
batch_stash: Stash = self.batch_stash
mlen = 0
params = {'padding': 'longest',
'truncation': False}
for bn, batch in enumerate(batch_stash.values()):
sents = map(lambda dp: getattr(dp, attr_name).to_sentence(),
batch.data_points)
doc = FeatureDocument(sents)
tok_doc = tokenizer.tokenize(doc, params)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'max word piece tokens for batch {bn}: ' +
f'{len(tok_doc)}')
mlen = max(mlen, len(tok_doc))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'max word piece token length: {mlen}')
return mlen
@property
def doc_parser(self) -> FeatureDocumentParser:
"""Return the document parser assocated with the language vectorizer manager.
:see: obj:`language_vectorizer_manager`
"""
mng: FeatureVectorizerManager = self.language_vectorizer_manager
return mng.doc_parser
def _get_vectorizer_feature_ids(self, name: str) -> Set[str]:
lang_vec = self.language_vectorizer_manager[name]
return lang_vec.decoded_feature_ids
def _set_vectorizer_feature_ids(self, name: str, feature_ids: Set[str]):
lang_vec_mng = self.language_vectorizer_manager
lang_vec = lang_vec_mng[name]
spacy_feat_ids = set(lang_vec_mng.spacy_vectorizers.keys())
        non_existent = feature_ids - spacy_feat_ids
        if len(non_existent) > 0:
            raise ModelError(f'No such spaCy feature IDs: {non_existent}')
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug(f'setting {feature_ids} on {lang_vec}')
lang_vec.decoded_feature_ids = feature_ids
def _configure_debug_logging(self):
super()._configure_debug_logging()
for name in ['zensols.deeplearn.layer',
'zensols.deepnlp.layer',
'zensols.deepnlp.transformer.layer',
__name__]:
logging.getLogger(name).setLevel(logging.DEBUG)
for name in ['zensols.deepnlp.vectorize.vectorizers',
'zensols.deepnlp.transformer.vectorizers',
'zensols.deepnlp.model.module']:
logging.getLogger(name).setLevel(logging.INFO)
def _configure_cli_logging(self, info_loggers: List[str],
debug_loggers: List[str]):
super()._configure_cli_logging(info_loggers, debug_loggers)
info_loggers.extend([
# installed files
'zensols.install',
# show (slow) embedding loading
'zensols.deepnlp.embed',
# LSI/LDA indexing
'zensols.deepnlp.index',
# CLI interface
            'zensols.deepnlp.cli.app'])
zensols.deepnlp | /zensols.deepnlp-1.10.1-py3-none-any.whl/zensols/deepnlp/model/facade.py | facade.py
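# An illustrative sketch (not part of the library): the minimal shape of a
# project-specific facade built on LanguageModelFacade above.  Only the
# language-configuration hook is required; the attribute and embedding names
# reuse the docstring examples and are assumptions made for this sketch.
@dataclass
class _ExampleNLPFacade(LanguageModelFacade):
    LANG_CONFIG = LanguageModelFacadeConfig(
        manager_name='language_vectorizer_manager',
        attribs={'enum', 'count', 'dep'},
        embedding_attribs={'glove_50_embedding', 'bert_embedding'})
    def _get_language_model_config(self) -> LanguageModelFacadeConfig:
        return self.LANG_CONFIG
# Typical interactive use once a facade instance has been created from an
# application configuration (not shown here):
#   facade.embedding = 'glove_50_embedding'         # swap the embedding layer
#   facade.language_attributes = {'enum', 'count'}  # choose decoded attributes
#   facade.enum_feature_ids = {'tag', 'ent'}        # hypothetical spaCy feature IDs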