__author__ = 'Paul Landes'
from typing import (
Type, Iterable, Sequence, Set, Dict, Any, List, Tuple, ClassVar
)
from dataclasses import dataclass, field
import logging
import sys
import re
import itertools as it
from io import TextIOBase
import spacy
from spacy.language import Language
from spacy.symbols import ORTH
from spacy.tokens import Doc, Span, Token
from zensols.config import Dictable, ConfigFactory
from zensols.persist import persisted, PersistedWork
from . import (
FeatureSentenceDecorator, FeatureTokenDecorator, FeatureDocumentDecorator,
Component, FeatureDocumentParser,
)
from . import (
ParseError, TokenNormalizer, FeatureToken, SpacyFeatureToken,
FeatureSentence, FeatureDocument,
)
logger = logging.getLogger(__name__)
@dataclass
class _DictableDoc(Dictable):
"""Utility class to pretty print and serialize Spacy documents.
"""
doc: Doc = field(repr=False)
"""The document from which to create a :class:`.dict`."""
def _write_token(self, tok: Token, depth: int, writer: TextIOBase):
s = (f'{tok}: tag={tok.tag_}, pos={tok.pos_}, stop={tok.is_stop}, ' +
f'lemma={tok.lemma_}, dep={tok.dep_}')
self._write_line(s, depth, writer)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
token_limit: int = sys.maxsize):
"""Pretty print the document.
:param token_limit: the max number of tokens to write, which defaults
to all of them
"""
text = self._trunc(str(self.doc.text))
self._write_line(f'text: {text}', depth, writer)
self._write_line('tokens:', depth, writer)
for sent in self.doc.sents:
self._write_line(self._trunc(str(sent)), depth + 1, writer)
for t in it.islice(sent, token_limit):
self._write_token(t, depth + 2, writer)
self._write_line('entities:', depth, writer)
for ent in self.doc.ents:
self._write_line(f'{ent}: {ent.label_}', depth + 1, writer)
def _from_dictable(self, *args, **kwargs) -> Dict[str, Any]:
sents = tuple(self.doc.sents)
em = {}
for e in self.doc.ents:
for tok in self.doc[e.start:e.end]:
em[tok.i] = e.label_
def tok_json(t):
return {'tag': t.tag_, 'pos': t.pos_,
'is_stop': t.is_stop, 'lemma': t.lemma_, 'dep': t.dep_,
'text': t.text, 'idx': t.idx,
'ent': None if t.i not in em else em[t.i],
'childs': tuple(map(lambda c: c.i, t.children))}
def sent_json(idx):
s = sents[idx]
return {t.i: tok_json(t) for t in self.doc[s.start:s.end]}
return {'text': self.doc.text,
'sents': {i: sent_json(i) for i in range(len(sents))},
'ents': [(str(e), e.label_,) for e in self.doc.ents]}
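
# Illustrative sketch (not part of the original source): one way
# ``_DictableDoc`` might be used to inspect a parsed spaCy document; the
# model name and sample text are assumptions.
def _example_dictable_doc() -> Dict[str, Any]:
    nlp: Language = spacy.load('en_core_web_sm')
    ddoc = _DictableDoc(nlp('Dan throws the ball.'))
    # pretty print the text, tokens and entities
    ddoc.write()
    # Dictable.asdict uses _from_dictable to build the object graph
    return ddoc.asdict()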
@dataclass
class SpacyFeatureDocumentParser(FeatureDocumentParser):
"""This langauge resource parses text in to Spacy documents. Loaded spaCy
models have attribute ``doc_parser`` set enable creation of factory
instances from registered pipe components (i.e. specified by
:class:`.Component`).
Configuration example::
[doc_parser]
class_name = zensols.nlp.SpacyFeatureDocumentParser
lang = en
model_name = ${lang}_core_web_sm
"""
_MODELS = {}
"""Contains cached models, such as ``en_core_web_sm``."""
config_factory: ConfigFactory = field()
"""A configuration parser optionally used by pipeline :class:`.Component`
instances.
"""
name: str = field()
"""The name of the parser, which is taken from the section name when created
with a :class:`~zensols.config.ConfigFactory`.
"""
lang: str = field(default='en')
"""The natural language the identify the model."""
model_name: str = field(default=None)
"""The Spacy model name (defualts to ``en_core_web_sm``); this is ignored
if ``model`` is not ``None``.
"""
token_feature_ids: Set[str] = field(
default_factory=lambda: FeatureDocumentParser.TOKEN_FEATURE_IDS)
"""The features to keep from spaCy tokens.
:see: :obj:`TOKEN_FEATURE_IDS`
"""
components: Sequence[Component] = field(default=())
"""Additional Spacy components to add to the pipeline."""
token_decorators: Sequence[FeatureTokenDecorator] = field(default=())
"""A list of decorators that can add, remove or modify features on a token.
"""
sentence_decorators: Sequence[FeatureSentenceDecorator] = field(
default=())
"""A list of decorators that can add, remove or modify features on a
sentence.
"""
document_decorators: Sequence[FeatureDocumentDecorator] = field(
default=())
"""A list of decorators that can add, remove or modify features on a
document.
"""
disable_component_names: Sequence[str] = field(default=None)
"""Components to disable in the spaCy model when creating documents in
:meth:`parse`.
"""
token_normalizer: TokenNormalizer = field(default=None)
"""The token normalizer for methods that use it, i.e. ``features``."""
special_case_tokens: List = field(default_factory=list)
"""Tokens that will be parsed as one token, i.e. ``</s>``."""
doc_class: Type[FeatureDocument] = field(default=FeatureDocument)
"""The type of document instances to create."""
sent_class: Type[FeatureSentence] = field(default=FeatureSentence)
"""The type of sentence instances to create."""
token_class: Type[FeatureToken] = field(default=SpacyFeatureToken)
"""The type of document instances to create."""
remove_empty_sentences: bool = field(default=None)
"""Deprecated and will be removed from future versions. Use
:class:`.FilterSentenceFeatureDocumentDecorator` instead.
"""
reload_components: bool = field(default=False)
"""Removes, then re-adds components for cached models. This is helpful for
when there are component configurations that change on reruns with a
difference application context but in the same Python interpreter session.
A spaCy component can get other instances via :obj:`config_factory`, but if
this is ``False`` it will be paired with the first instance of this class
and not the new ones created with a new configuration factory.
"""
auto_install_model: bool = field(default=False)
"""Whether to install models not already available. Note that this uses the
pip command to download model requirements, which might have an adverse
effect of replacing currently installed Python packages.
"""
def __post_init__(self):
super().__post_init__()
self._model = PersistedWork('_model', self)
if self.remove_empty_sentences is not None:
import warnings
warnings.warn(
'remove_empty_sentences is deprecated (use ' +
                'FilterSentenceFeatureDocumentDecorator instead)',
DeprecationWarning)
def _assert_model(self, model_name: str):
import spacy.util
import spacy.cli
if not spacy.util.is_package(model_name):
spacy.cli.download(model_name)
def _create_model_key(self) -> str:
"""Create a unique key used for storing expensive-to-create spaCy language
models in :obj:`_MODELS`.
"""
comps = sorted(map(lambda c: f'{c.pipe_name}:{hash(c)}',
self.components))
comp_str = '-' + '|'.join(comps)
return f'{self.model_name}{comp_str}'
def _create_model(self) -> Language:
"""Load, configure and return a new spaCy model instance."""
if self.auto_install_model:
self._assert_model(self.model_name)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'loading model: {self.model_name}')
nlp = spacy.load(self.model_name)
return nlp
def _add_components(self, nlp: Language):
"""Add components to the pipeline that was just created."""
if self.components is not None:
comp: Component
for comp in self.components:
if comp.pipe_name in nlp.pipe_names:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'{comp} already registered--skipping')
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'adding {comp} ({id(comp)}) to pipeline')
comp.init(nlp)
def _remove_components(self, nlp: Language):
for comp in self.components:
name, comp = nlp.remove_pipe(comp.pipe_name)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'removed {name} ({id(comp)})')
@property
@persisted('_model')
def model(self) -> Language:
"""The spaCy model. On first access, this creates a new instance using
``model_name``.
"""
mkey: str = self._create_model_key()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'model key: {mkey}')
if self.model_name is None:
self.model_name = f'{self.lang}_core_web_sm'
# cache model in class space
nlp: Language = self._MODELS.get(mkey)
if nlp is None:
nlp: Language = self._create_model()
            # pipe components can create other application context instances
            # via the :obj:`config_factory` with access to this instance
nlp.doc_parser = self
self._add_components(nlp)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
f'adding {mkey} to cached models ({len(self._MODELS)})')
self._MODELS[mkey] = nlp
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'cached models: {len(self._MODELS)}')
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'cached model: {mkey} ({self.model_name})')
if self.reload_components:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f're-adding components to {id(self)}')
nlp.doc_parser = self
self._remove_components(nlp)
self._add_components(nlp)
if self.token_normalizer is None:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('adding default tokenizer')
self.token_normalizer = TokenNormalizer()
for stok in self.special_case_tokens:
rule = [{ORTH: stok}]
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'adding special token: {stok} with rule: {rule}')
nlp.tokenizer.add_special_case(stok, rule)
return nlp
@classmethod
    def clear_models(cls):
        """Clears all cached models."""
        cls._MODELS.clear()
def parse_spacy_doc(self, text: str) -> Doc:
"""Parse ``text`` in to a Spacy document.
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating document with model: {self.model_name}, ' +
f'disable components: {self.disable_component_names}')
if self.disable_component_names is None:
doc = self.model(text)
else:
doc = self.model(text, disable=self.disable_component_names)
if logger.isEnabledFor(logging.INFO):
logger.info(f'parsed text: <{self._trunc(text)}>')
if logger.isEnabledFor(logging.DEBUG):
doc_text = self._trunc(str(doc))
logger.debug(f'parsed document: <{doc_text}>')
return doc
def get_dictable(self, doc: Doc) -> Dictable:
"""Return a dictionary object graph and pretty prints spaCy docs.
"""
return _DictableDoc(doc)
def _normalize_tokens(self, doc: Doc, *args, **kwargs) -> \
Iterable[FeatureToken]:
"""Generate an iterator of :class:`.FeatureToken` instances with features on a
per token level.
"""
if logger.isEnabledFor(logging.DEBUG):
doc_text = self._trunc(str(doc))
logger.debug(f'normalizing features in {doc_text}')
logger.debug(f'args: <{args}>')
logger.debug(f'kwargs: <{kwargs}>')
        tokens: Iterable[FeatureToken] = \
map(lambda tup: self._create_token(*tup, *args, **kwargs),
self.token_normalizer.normalize(doc))
return tokens
def _decorate_token(self, spacy_tok: Token, feature_token: FeatureToken):
decorator: FeatureTokenDecorator
for decorator in self.token_decorators:
decorator.decorate(feature_token)
    def _create_token(self, tok: Token, norm: str,
*args, **kwargs) -> FeatureToken:
tp: Type[FeatureToken] = self.token_class
ft: FeatureToken = tp(tok, norm, *args, **kwargs)
self._decorate_token(tok, ft)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'detaching using features: {self.token_feature_ids}')
return ft.detach(self.token_feature_ids)
def _decorate_sent(self, spacy_sent: Span, feature_sent: FeatureSentence):
decorator: FeatureSentenceDecorator
for decorator in self.sentence_decorators:
decorator.decorate(feature_sent)
def _create_sent(self, spacy_sent: Span, stoks: Iterable[FeatureToken],
text: str) -> FeatureSentence:
sent: FeatureSentence = self.sent_class(tuple(stoks), text, spacy_sent)
self._decorate_sent(spacy_sent, sent)
return sent
def _create_sents(self, doc: Doc) -> List[FeatureSentence]:
"""Create sentences from a spaCy doc."""
toks: Tuple[FeatureToken, ...] = tuple(self._normalize_tokens(doc))
sents: List[FeatureSentence] = []
ntoks: int = len(toks)
tix: int = 0
sent: Span
for sent in doc.sents:
e: int = sent[-1].i
stoks: List[FeatureToken] = []
while tix < ntoks:
tok = toks[tix]
if tok.i <= e:
stoks.append(tok)
else:
break
tix += 1
fsent: FeatureSentence = self._create_sent(sent, stoks, sent.text)
sents.append(fsent)
return sents
def from_spacy_doc(self, doc: Doc, *args, text: str = None,
**kwargs) -> FeatureDocument:
"""Create s :class:`.FeatureDocument` from a spaCy doc.
:param doc: the spaCy generated document to transform in to a feature
document
:param text: either a string or a list of strings; if the former a
document with one sentence will be created, otherwise a
document is returned with a sentence for each string in
the list
:param args: the arguments used to create the FeatureDocument instance
:param kwargs: the key word arguments used to create the
FeatureDocument instance
"""
text = doc.text if text is None else text
sents: List[FeatureSentence] = self._create_sents(doc)
try:
return self.doc_class(tuple(sents), text, doc, *args, **kwargs)
except Exception as e:
raise ParseError(
f'Could not parse <{text}> for {self.doc_class} ' +
f"with args {args} for parser '{self.name}'") from e
def _decorate_doc(self, spacy_doc: Span, feature_doc: FeatureDocument):
decorator: FeatureDocumentDecorator
for decorator in self.document_decorators:
decorator.decorate(feature_doc)
def parse(self, text: str, *args, **kwargs) -> FeatureDocument:
if not isinstance(text, str):
raise ParseError(
                f'Expecting string text but got: {text} ({type(text)})')
sdoc: Doc = self.parse_spacy_doc(text)
fdoc: FeatureDocument = self.from_spacy_doc(
sdoc, *args, text=text, **kwargs)
self._decorate_doc(sdoc, fdoc)
return fdoc
def to_spacy_doc(self, doc: FeatureDocument, norm: bool = True,
add_features: Set[str] = None) -> Doc:
"""Convert a feature document back in to a spaCy document.
**Note**: not all data is copied--only text, ``pos_``, ``tag_``,
``lemma_`` and ``dep_``.
        :param doc: the feature document to convert
        :param norm: whether to use the normalized text as the ``orth_`` spaCy
                     token attribute or ``text``
        :param add_features: the features to add, among POS, NER tags, lemmas,
                             heads and dependencies
        :return: the spaCy document with data copied from ``doc``
"""
def conv_iob(t: FeatureToken) -> str:
if t.ent_iob_ == 'O':
return 'O'
return f'{t.ent_iob_}-{t.ent_}'
if norm:
words = list(doc.norm_token_iter())
else:
words = [t.text for t in doc.token_iter()]
if add_features is None:
add_features = set('pos tag lemma head dep ent'.split())
sent_starts = [False] * len(words)
sidx = 0
for sent in doc:
sent_starts[sidx] = True
sidx += len(sent)
params = dict(vocab=self.model.vocab,
words=words,
spaces=[True] * len(words),
sent_starts=sent_starts)
if add_features and doc.token_len > 0:
assert len(words) == doc.token_len
tok = next(iter(doc.token_iter()))
if hasattr(tok, 'pos_') and 'pos' in add_features:
params['pos'] = [t.pos_ for t in doc.token_iter()]
if hasattr(tok, 'tag_') and 'tag' in add_features:
params['tags'] = [t.tag_ for t in doc.token_iter()]
if hasattr(tok, 'lemma_') and 'lemma' in add_features:
params['lemmas'] = [t.lemma_ for t in doc.token_iter()]
if hasattr(tok, 'head_') and 'head' in add_features:
params['heads'] = [t.head_ for t in doc.token_iter()]
if hasattr(tok, 'dep_') and 'dep' in add_features:
params['deps'] = [t.dep_ for t in doc.token_iter()]
if hasattr(tok, 'ent_') and 'ent' in add_features:
params['ents'] = [conv_iob(t) for t in doc.token_iter()]
return Doc(**params)
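
# Illustrative sketch (not part of the original source): creating the parser
# programmatically rather than from an application config as shown in the
# class docstring; passing ``config_factory=None`` and the model name are
# assumptions of this sketch.
def _example_parse() -> FeatureDocument:
    parser = SpacyFeatureDocumentParser(
        config_factory=None, name='doc_parser', model_name='en_core_web_sm')
    doc: FeatureDocument = parser.parse('Dan throws the ball. Then he left.')
    sent: FeatureSentence
    for sent in doc.sents:
        # print the normalized text of each parsed sentence
        print(sent.norm)
    return doc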
@dataclass
class WhiteSpaceTokenizerFeatureDocumentParser(SpacyFeatureDocumentParser):
"""This class parses text in to instances of :class:`.FeatureDocument`
instances using :meth:`parse`. This parser does no sentence chunking so
documents have one and only one sentence for each parse.
"""
_TOK_REGEX: ClassVar[re.Pattern] = re.compile(r'\S+')
"""The whitespace regular expression for splitting tokens."""
def parse(self, text: str, *args, **kwargs) -> FeatureDocument:
toks: List[FeatureToken] = []
m: re.Match
for i, m in zip(it.count(), re.finditer(self._TOK_REGEX, text)):
tok = FeatureToken(i, m.start(), 0, m.group(0))
tok.default_detached_feature_ids = \
FeatureToken.REQUIRED_FEATURE_IDS
toks.append(tok)
sent = self.sent_class(tokens=tuple(toks), text=text)
        return self.doc_class(sents=(sent,), text=text, *args, **kwargs)

# --- file: zensols/nlp/sparser.py (package zensols.nlp, from /zensols.nlp-1.8.0-py3-none-any.whl) ---
__author__ = 'Paul Landes'
from typing import List, Tuple, Dict, Any, Union, Sequence, Optional
from dataclasses import dataclass, field
import logging
import re
from itertools import chain
import json
from spacy.language import Language
from spacy.tokens.doc import Doc
from spacy.matcher import Matcher
from spacy.tokens import Span
logger = logging.getLogger(__name__)
@Language.component('remove_sent_boundaries')
def create_remove_sent_boundaries_component(doc: Doc):
"""Remove sentence boundaries from tokens.
    :param doc: the spaCy document from which to remove sentence boundaries
"""
for token in doc:
# this will entirely disable spaCy's sentence detection
token.is_sent_start = False
return doc
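
# Illustrative sketch (not part of the original source): registering the
# component; adding it before the parser (which would otherwise set the
# boundaries) is an assumption about pipeline ordering.
def _example_remove_boundaries() -> List[Span]:
    import spacy
    nlp: Language = spacy.load('en_core_web_sm')
    nlp.add_pipe('remove_sent_boundaries', before='parser')
    doc: Doc = nlp('One sentence here. Another sentence there.')
    # expect a single sentence span since the boundaries were removed
    return list(doc.sents)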
@dataclass
class EntityRecognizer(object):
"""Base class regular expression and spaCy match patterns named entity
recognizer. Both subclasses allow for an optional label for each
respective pattern or regular expression. If the label is provided, then
the match is made a named entity with a label. In any case, a span is
created on the token, and in some cases, retokenized.
"""
nlp: Language = field()
"""The NLP model."""
name: str = field()
"""The component name."""
import_file: Optional[str] = field()
"""An optional JSON file used to append the pattern configuration."""
patterns: List = field()
"""A list of the regular expressions to find."""
def __post_init__(self):
if self.import_file is not None:
self._append_config(self.patterns)
def _append_config(self, patterns: List):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating regex component for: {self.name}')
if self.import_file is not None:
if logger.isEnabledFor(logging.DEBUG):
                logger.debug(f'reading config file: {self.import_file}')
with open(self.import_file) as f:
add_pats = json.load(f)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'adding to patterns: {add_pats}')
patterns.extend(add_pats)
def _make_span(self, doc: Doc, start: int, end: int, label: str,
is_char: bool, retok: bool):
span: Span
if is_char:
if label is None:
span = doc.char_span(start, end)
else:
span = doc.char_span(start, end, label=label)
else:
if label is None:
span = Span(doc, start, end)
else:
span = Span(doc, start, end, label=label)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'span ({start}, {end}) for {label}: {span}')
if span is not None:
# this is a span object or none if match doesn't map to valid token
# sequence
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'match: {span.text}')
if label is not None:
doc.ents += (span,)
if retok:
# https://github.com/explosion/spaCy/discussions/4806
with doc.retokenize() as retokenizer:
# Iterate over all spans and merge them into one
# token. This is done after setting the entities –
# otherwise, it would cause mismatched indices!
retokenizer.merge(span)
@dataclass
class RegexEntityRecognizer(EntityRecognizer):
"""Merges regular expression matches as a :class:`~spacy.tokens.Span`. After
    matches are found, re-tokenization merges them into one token per match.
"""
patterns: List[Tuple[str, Tuple[re.Pattern]]] = field()
"""A list of the regular expressions to find."""
def __call__(self, doc: Doc) -> Doc:
for label, regex_list in self.patterns:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'label: {label}, regex: {regex_list}')
matches = map(lambda r: re.finditer(r, doc.text), regex_list)
for match in chain.from_iterable(matches):
start, end = match.span()
self._make_span(doc, start, end, label, True, True)
return doc
@Language.factory(
'regexner', default_config={'patterns': [], 'path': None})
def create_regexner_component(
nlp: Language, name: str,
patterns: Sequence[Tuple[Optional[str],
Sequence[Union[re.Pattern, str]]]],
path: str = None):
def map_rlist(rlist):
rl = map(lambda x: x if isinstance(x, re.Pattern) else re.compile(x),
rlist)
return tuple(rl)
regexes = map(lambda x: (x[0], map_rlist(x[1])), patterns)
return RegexEntityRecognizer(nlp, name, path, list(regexes))
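
# Illustrative sketch (not part of the original source): adding the
# 'regexner' component to a blank pipeline; the PHONE label and pattern are
# assumptions.
def _example_regexner() -> Doc:
    import spacy
    nlp: Language = spacy.blank('en')
    nlp.add_pipe('regexner', config={
        'patterns': [('PHONE', [r'\d{3}-\d{4}'])]})
    # the matched text becomes a PHONE entity merged into one token
    return nlp('call me at 555-1234')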
@dataclass
class PatternEntityRecognizer(EntityRecognizer):
"""Adds entities based on regular epxressions.
:see: `Rule matching <https://spacy.io/usage/rule-based-matching>`_
"""
_NULL_LABEL = '<_>'
patterns: List[Tuple[str, List[List[Dict[str, Any]]]]] = field()
"""The patterns given to the :class:`~spacy.matcher.Matcher`."""
def __post_init__(self):
super().__post_init__()
self._matchers = []
self._labels = {}
for label, patterns in self.patterns:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'label: {label}')
logger.debug(f'pattern: {patterns}')
matcher = Matcher(self.nlp.vocab)
label = self._NULL_LABEL if label is None else label
matcher.add(label, patterns, on_match=self._add_event_ent)
self._labels[id(matcher)] = label
self._matchers.append(matcher)
def _add_event_ent(self, matcher, doc, i, matches):
match_id, start, end = matches[i]
label = self._labels[id(matcher)]
label = None if label == self._NULL_LABEL else label
self._make_span(doc, start, end, label, False, False)
def __call__(self, doc: Doc) -> Doc:
for matcher in self._matchers:
            matches: List[Tuple[int, int, int]] = matcher(doc)
            if logger.isEnabledFor(logging.DEBUG):
                logger.debug(f'matched: {matches}')
logger.debug(f'doc ents: {doc.ents}')
return doc
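
# Illustrative sketch (not part of the original source): the 'patner'
# component (registered by the factory below) with a spaCy Matcher token
# pattern; the VERSION label and pattern are assumptions.
def _example_patner() -> Doc:
    import spacy
    nlp: Language = spacy.blank('en')
    nlp.add_pipe('patner', config={
        'patterns': [('VERSION',
                      [[{'LOWER': 'version'}, {'IS_DIGIT': True}]])]})
    # matching token sequences are added as VERSION entities
    return nlp('Upgrade to version 3 today.')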
@Language.factory(
'patner', default_config={'patterns': [], 'path': None})
def create_patner_component(
nlp: Language, name: str,
patterns: List[Tuple[Optional[str], List[List[Dict[str, Any]]]]],
path: str = None):
    return PatternEntityRecognizer(nlp, name, path, list(patterns))

# --- file: zensols/nlp/component.py (package zensols.nlp, from /zensols.nlp-1.8.0-py3-none-any.whl) ---
__author__ = 'Paul Landes'
from typing import List, Tuple
from dataclasses import dataclass, field
import re
from . import (
LexicalSpan, FeatureToken, FeatureSentence, FeatureDocument,
FeatureSentenceDecorator, FeatureDocumentDecorator
)
@dataclass
class SplitTokenSentenceDecorator(FeatureSentenceDecorator):
"""A decorator that splits feature tokens by white space.
"""
def _split_tok(self, ftok: FeatureToken, matches: Tuple[re.Match]):
toks: List[FeatureToken] = []
norm: str
for match in matches:
ctok: FeatureToken = ftok.clone()
ctok.norm = match.group(0)
ctok.lexspan = LexicalSpan(ftok.lexspan.begin + match.start(0),
ftok.lexspan.begin + match.end(0))
ctok.idx = ctok.lexspan.begin
toks.append(ctok)
return toks
def decorate(self, sent: FeatureSentence):
split_toks: List[FeatureToken] = []
        ftok: FeatureToken
        for ftok in sent.token_iter():
            matches: Tuple[re.Match, ...] = \
                tuple(re.finditer(r'\S+', ftok.norm))
            if len(matches) == 1:
                split_toks.append(ftok)
            else:
                split_toks.extend(self._split_tok(ftok, matches))
if sent.token_len != len(split_toks):
sent.tokens = tuple(split_toks)
@dataclass
class StripSentenceDecorator(FeatureSentenceDecorator):
"""A decorator that strips whitespace from sentences.
:see: :meth:`.TokenContainer.strip`
"""
def decorate(self, sent: FeatureSentence):
sent.strip()
@dataclass
class FilterTokenSentenceDecorator(FeatureSentenceDecorator):
"""A decorator that strips whitespace from sentences.
:see: :meth:`.TokenContainer.strip`
"""
remove_stop: bool = field(default=False)
remove_space: bool = field(default=False)
remove_pronouns: bool = field(default=False)
remove_punctuation: bool = field(default=False)
remove_determiners: bool = field(default=False)
remove_empty: bool = field(default=False)
def decorate(self, sent: FeatureSentence):
def filter_tok(t: FeatureToken) -> bool:
return \
(not self.remove_stop or not t.is_stop) and \
(not self.remove_space or not t.is_space) and \
(not self.remove_pronouns or not t.pos_ == 'PRON') and \
(not self.remove_punctuation or not t.is_punctuation) and \
(not self.remove_determiners or not t.tag_ == 'DT') and \
(not self.remove_empty or len(t.norm) > 0)
toks: Tuple[FeatureToken] = tuple(filter(filter_tok, sent))
if sent.token_len != len(toks):
sent.tokens = toks
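
# Illustrative sketch (not part of the original source): applying the filter
# decorator to an already parsed sentence; decorators are more typically
# configured on the parser's ``sentence_decorators`` field.
def _example_filter_sentence(sent: FeatureSentence) -> FeatureSentence:
    dec = FilterTokenSentenceDecorator(
        remove_stop=True, remove_punctuation=True, remove_space=True)
    # removes stop word, punctuation and whitespace tokens in place
    dec.decorate(sent)
    return sent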
@dataclass
class FilterEmptySentenceDocumentDecorator(FeatureDocumentDecorator):
"""Filter zero length sentences.
"""
filter_space: bool = field(default=True)
"""Whether to filter space tokens when comparing zero length sentences."""
def _filter_empty_sentences(self, fsent: FeatureSentence) -> bool:
toks: Tuple[FeatureToken] = fsent.tokens
if self.filter_space:
toks = tuple(filter(lambda t: not t.is_space, fsent.token_iter()))
return len(toks) > 0
def decorate(self, doc: FeatureDocument):
olen: int = len(doc)
fsents: Tuple[FeatureSentence] = tuple(filter(
self._filter_empty_sentences, doc.sents))
nlen: int = len(fsents)
if olen != nlen:
doc.sents = fsents
@dataclass
class UpdateDocumentDecorator(FeatureDocumentDecorator):
"""Updates document indexes and spans (see fields).
"""
update_indexes: bool = field(default=True)
"""Whether to update the document indexes with
:meth:`.FeatureDocument.update_indexes`.
"""
update_entity_spans: bool = field(default=True)
"""Whether to update the document indexes with
:meth:`.FeatureDocument.update_entity_spans`.
"""
def decorate(self, doc: FeatureDocument):
if self.update_indexes:
doc.update_indexes()
if self.update_entity_spans:
            doc.update_entity_spans()

# --- file: zensols/nlp/decorate.py (package zensols.nlp, from /zensols.nlp-1.8.0-py3-none-any.whl) ---
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Tuple, Union, Optional, ClassVar, Set, Iterable, List, Type
from abc import ABCMeta
import sys
from io import TextIOBase
import textwrap as tw
from spacy.tokens import Token
from spacy.tokens import Span
from spacy.tokens import Doc
from zensols.util import APIError
from zensols.config import Dictable
class NLPError(APIError):
"""Raised for any errors for this library."""
pass
class ParseError(APIError):
"""Raised for any parsing errors."""
pass
class LexicalSpan(Dictable):
"""A lexical character span of text in a document. The span has two
positions: :obj:`begin` and :obj:`end`, which is indexed respectively as an
operator as well..
One span is less than the other when the beginning position is less. When
the beginnign positions are the same, the one with the smaller end position
is less.
The length of the span is the distance between the end and the beginning
positions.
"""
_DICTABLE_ATTRIBUTES: ClassVar[Set[str]] = {'begin', 'end'}
EMPTY_SPAN: ClassVar[LexicalSpan]
def __init__(self, begin: int, end: int):
"""Initialize the interval.
:param begin: the begin of the span
:param end: the end of the span
"""
self.begin = begin
self.end = end
@property
def astuple(self) -> Tuple[int, int]:
"""The span as a ``(begin, end)`` tuple."""
return (self.begin, self.end)
@classmethod
def from_tuples(cls: Type, tups: Iterable[Tuple[int, int]]) -> \
Iterable[LexicalSpan]:
"""Create spans from tuples.
        :param tups: an iterable of ``(<begin>, <end>)`` tuples
"""
return map(lambda t: cls(*t), tups)
@classmethod
    def from_token(cls, tok: Union[Token, Span]) -> LexicalSpan:
"""Create a span from a spaCy :class:`~spacy.tokens.Token` or
:class:`~spacy.tokens.Span`.
"""
if isinstance(tok, Span):
doc: Doc = tok.doc
etok = doc[tok.end - 1]
start = doc[tok.start].idx
end = etok.idx + len(etok.orth_)
else:
start = tok.idx
end = tok.idx + len(tok.orth_)
return cls(start, end)
@staticmethod
def overlaps(a0: int, a1: int, b0: int, b1: int, inclusive: bool = True):
"""Return whether or not one text span overlaps with another.
        :param inclusive: whether the end positions are inclusive in the check
        :return: ``True`` if any overlap is detected
"""
if inclusive:
m = (a0 <= b0 and a1 >= b0) or (b0 <= a0 and b1 >= a0)
else:
m = (a0 <= b0 and a1 > b0) or (b0 <= a0 and b1 > a0)
return m
def overlaps_with(self, other: LexicalSpan,
inclusive: bool = True) -> bool:
"""Return whether or not one text span overlaps non-inclusively with another.
:param other: the other location
:param inclusive: whether to check include +1 on the end component
:return: any overlap detected returns ``True``
"""
return self.overlaps(
self.begin, self.end, other.begin, other.end, inclusive)
def narrow(self, other: LexicalSpan) -> Optional[LexicalSpan]:
"""Return the shortest span that inclusively fits in both this and
``other``.
:param other: the second span to narrow with this span
        :return: a span whose beginning is maximized and end is minimized, or
                 ``None`` if the two spans do not overlap
"""
nar: LexicalSpan = None
if self.overlaps_with(other):
beg = max(self.begin, other.begin)
end = min(self.end, other.end)
if beg == self.begin and end == self.end:
nar = self
elif beg == other.begin and end == other.end:
nar = other
else:
nar = LexicalSpan(beg, end)
return nar
@staticmethod
def widen(others: Iterable[LexicalSpan]) -> Optional[LexicalSpan]:
"""Take the span union by using the left most :obj:`begin` and the right
most :obj:`end`.
:param others: the spans to union
:return: the widest span that inclusively aggregates ``others``, or None
if an empty sequence is passed
"""
begs = sorted(others, key=lambda s: s.begin)
if len(begs) > 0:
ends = sorted(begs, key=lambda s: s.end)
return LexicalSpan(begs[0].begin, ends[-1].end)
@staticmethod
def gaps(spans: Iterable[LexicalSpan], end: Optional[int] = None,
nudge_begin: int = 0, nudge_end: int = 0) -> List[LexicalSpan]:
"""Return the spans for the "holes" in ``spans``. For example, if
``spans`` is ``((0, 5), (10, 12), (15, 17))``, then return ``((5, 10),
(12, 15))``.
:param spans: the spans used to find gaps
:param end: an end position for the last gap so that if the last item in
``spans`` end does not match, another is added
:return: a list of spans that "fill" any holes in ``spans``
"""
spans: List[LexicalSpan] = sorted(spans)
gaps: List[LexicalSpan] = []
spiter: Iterable[LexicalSpan] = iter(spans)
last: LexicalSpan = next(spiter)
if last.begin > 0:
last = LexicalSpan(0, last.begin)
gaps.append(last)
spiter = iter(spans)
ns: LexicalSpan
for ns in spiter:
gap: int = ns.begin - last.end
if gap > 0:
gs = LexicalSpan(last.end + nudge_begin, ns.begin + nudge_end)
gaps.append(gs)
last = ns
# add ending span if the last didn't cover it
if end is not None and last.end != end:
            gaps.append(LexicalSpan(last.end + nudge_begin,
                                    end + nudge_end))
return gaps
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line(str(self), depth, writer)
def _from_dictable(self, *args, **kwargs):
# prettier printing
return dict(super()._from_dictable(*args, **kwargs))
def __eq__(self, other: LexicalSpan) -> bool:
if self is other:
return True
return isinstance(other, LexicalSpan) and \
self.begin == other.begin and self.end == other.end
def __lt__(self, other):
if self.begin == other.begin:
return self.end < other.end
else:
return self.begin < other.begin
def __hash__(self) -> int:
return hash(self.begin) + (13 * hash(self.end))
def __setattr__(self, name, value):
if hasattr(self, 'end'):
raise AttributeError(f'{self.__class__.__name__} is immutable')
super().__setattr__(name, value)
def __getitem__(self, ix: int) -> int:
if ix == 0:
return self.begin
elif ix == 1:
return self.end
raise KeyError(f'LexicalSpan index: {ix}')
def __len__(self) -> int:
return self.end - self.begin
def __str__(self) -> str:
return f'({self.begin}, {self.end})'
def __repr__(self):
return self.__str__()
LexicalSpan.EMPTY_SPAN = LexicalSpan(0, 0)
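
# Illustrative sketch (not part of the original source) demonstrating the
# span operations documented above.
def _example_lexical_span() -> List[LexicalSpan]:
    a, b = LexicalSpan(0, 5), LexicalSpan(3, 8)
    assert a.overlaps_with(b)
    # the shortest span that inclusively fits in both: (3, 5)
    assert a.narrow(b) == LexicalSpan(3, 5)
    spans = list(LexicalSpan.from_tuples([(0, 5), (10, 12), (15, 17)]))
    # the "holes" between the spans: [(5, 10), (12, 15)]
    return LexicalSpan.gaps(spans)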
class TextContainer(Dictable, metaclass=ABCMeta):
"""A *writable* class that has a ``text`` property or attribute. All
subclasses need a ``norm`` attribute or property.
"""
    _DEFAULT_TOSTR_LEN: ClassVar[int] = 80
"""Default length of string when rendering :meth:`__str__`."""
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_original: bool = True, include_normalized: bool = True):
if (include_original or include_normalized) and self.text == self.norm:
self._write_line(f'[T]: {self.text}', depth, writer)
else:
if include_original:
self._write_line(f'[O]: {self.text}', depth, writer)
if include_normalized:
self._write_line(f'[N]: {self.norm}', depth, writer)
def __str__(self):
return f'<{tw.shorten(self.norm, width=self._DEFAULT_TOSTR_LEN-2)}>'
def __repr__(self):
        return self.__str__()

# --- file: zensols/nlp/domain.py (package zensols.nlp, from /zensols.nlp-1.8.0-py3-none-any.whl) ---
__author__ = 'Paul Landes'
from typing import List, Iterable, Tuple, Union, Dict
from dataclasses import dataclass, field
from abc import abstractmethod, ABC
import logging
import re
from itertools import chain
from spacy.tokens import Token, Span, Doc
from zensols.config import ConfigFactory
from . import LexicalSpan
logger = logging.getLogger(__name__)
@dataclass
class TokenNormalizer(object):
"""Base token extractor returns tuples of tokens and their normalized
version.
Configuration example::
[default_token_normalizer]
class_name = zensols.nlp.TokenNormalizer
embed_entities = False
"""
embed_entities: bool = field(default=True)
"""Whether or not to replace tokens with their respective named entity
version.
"""
def __embed_entities(self, doc: Doc):
"""For each token, return the named entity form if it exists.
:param doc: the spacy document to iterate over
"""
tlen = len(doc)
ents = {}
for ent in doc.ents:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'adding entity start: {ent.start} -> {ent}')
ents[ent.start] = ent
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'entities: {ents}')
i = 0
while i < tlen:
if i in ents:
ent = ents[i]
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'adding entity: {ent}')
yield ent
i = ent.end
else:
tok = doc[i]
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'adding token: {tok}')
yield tok
i += 1
def _to_token_tuple(self, doc: Doc) -> Iterable[Tuple[Token, str]]:
"Normalize the document in to (token, normal text) tuples."
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'embedding entities: {self.embed_entities}')
if self.embed_entities:
toks = self.__embed_entities(doc)
else:
toks = doc
toks = map(lambda t: (t, t.orth_,), toks)
return toks
def _map_tokens(self, token_tups: Iterable[Tuple[Token, str]]) -> \
Iterable[Tuple[Token, str]]:
"""Map token tuples in sub classes.
:param token_tups: tuples generated from ``_to_token_tuple``
"""
return None
def normalize(self, doc: Doc) -> Iterable[Tuple[Token, str]]:
"""Normalize Spacey document ``doc`` in to (token, normal text) tuples.
"""
tlist = self._to_token_tuple(doc)
maps = self._map_tokens(tlist)
if maps is not None:
tlist = tuple(maps)
return iter(tlist)
def __str__(self):
if hasattr(self, 'name'):
name = self.name
else:
name = type(self).__name__
return f'{name}: embed={self.embed_entities}'
def __repr__(self):
return self.__str__()
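
# Illustrative sketch (not part of the original source): normalizing a parsed
# document into (token, normal text) tuples; the model name and text are
# assumptions.
def _example_normalize():
    import spacy
    nlp = spacy.load('en_core_web_sm')
    doc: Doc = nlp('Paul Landes lives in Chicago.')
    # multi-word entities such as 'Paul Landes' are kept as single spans
    norm = TokenNormalizer(embed_entities=True)
    return tuple(norm.normalize(doc))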
@dataclass
class TokenMapper(ABC):
"""Abstract class used to transform token tuples generated from
:meth:`.TokenNormalizer.normalize`.
"""
@abstractmethod
def map_tokens(self, token_tups: Iterable[Tuple[Token, str]]) -> \
Iterable[Tuple[Token, str]]:
"""Transform token tuples.
"""
pass
@dataclass
class SplitTokenMapper(TokenMapper):
"""Splits the normalized text on a per token basis with a regular expression.
Configuration example::
[split_token_mapper]
class_name = zensols.nlp.SplitTokenMapper
regex = r'[ ]'
"""
regex: Union[re.Pattern, str] = field(default=r'[ ]')
"""The regular expression to use for splitting tokens."""
def __post_init__(self):
if not isinstance(self.regex, re.Pattern):
self.regex = re.compile(eval(self.regex))
def map_tokens(self, token_tups: Iterable[Tuple[Token, str]]) -> \
Iterable[Tuple[Token, str]]:
rg = self.regex
return map(lambda t: map(lambda s: (t[0], s), re.split(rg, t[1])),
token_tups)
@dataclass
class JoinTokenMapper(object):
"""Join tokens based on a regular expression. It does this by creating spans
in the spaCy component (first in the tuple) and using the span text as the
normalized token.
"""
regex: Union[re.Pattern, str] = field(default=r'[ ]')
"""The regular expression to use for joining tokens"""
separator: str = field(default=None)
"""The string used to separate normalized tokens in matches. If ``None``, use
the token text.
"""
def __post_init__(self):
if not isinstance(self.regex, re.Pattern):
self.regex = re.compile(eval(self.regex))
def _loc(self, doc: Doc, tok: Union[Token, Span]) -> Tuple[int, int]:
if isinstance(tok, Span):
etok = doc[tok.end - 1]
start = doc[tok.start].idx
end = etok.idx + len(etok.orth_)
else:
start = tok.idx
end = tok.idx + len(tok.orth_)
return start, end
def map_tokens(self, token_tups: Iterable[Tuple[Token, str]]) -> \
Iterable[Tuple[Token, str]]:
def map_match(t: Token) -> str:
tup = tix2tup.get(t.idx)
if tup is not None:
return tup[1]
tups = tuple(token_tups)
stok: Token = tups[0][0]
etok: Token = tups[-1][0]
doc: Doc = stok.doc
src: Span = doc.char_span(stok.idx, etok.idx + len(etok.orth_))
matches: List[Span] = []
tix2tup: Dict[int, int]
if self.separator is not None:
tix2tup = {doc[t[0].start].idx
if isinstance(t[0], Span) else t[0].idx: t
for t in tups}
for match in re.finditer(self.regex, src.text):
start, end = match.span()
span: Span = doc.char_span(start, end)
# this is a Span object or None if match doesn't map to valid token
# sequence
if span is not None:
matches.append(span)
if len(matches) > 0:
mtups = []
mix = 0
mlen = len(matches)
stack = list(tups)
while len(stack) > 0:
tup = stack.pop(0)
tok = tup[0]
tok_loc = LexicalSpan.from_token(tok)
next_tup = tup
if mix < mlen:
match: Span = matches[mix]
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'matched: {match}')
mloc = LexicalSpan.from_token(match)
if mloc.overlaps_with(tok_loc):
mix += 1
match_text = match.text
if self.separator is not None:
norms = map(map_match, doc[match.start:match.end])
norms = filter(lambda t: t is not None, norms)
match_text = self.separator.join(norms)
next_tup = (match, match_text)
while len(stack) > 0:
tup = stack.pop(0)
tok = tup[0]
tok_loc = self._loc(doc, tok)
if not mloc.overlaps_with(tok_loc):
stack.insert(0, tup)
break
mtups.append(next_tup)
tups = (mtups,)
return tups
@dataclass
class SplitEntityTokenMapper(TokenMapper):
"""Splits embedded entities (or any :class:`~spacy.token.span.Span`) in to
separate tokens. This is useful for splitting up entities as tokens after
being grouped with :obj:`.TokenNormalizer.embed_entities`. Note,
``embed_entities`` must be ``True`` to create the entities as they come
from spaCy as spans. This then can be used to create
:class:`.SpacyFeatureToken` with spans that have the entity.
"""
token_unit_type: bool = field(default=False)
"""Whether to generate tokens for each split span or a one token span."""
copy_attributes: Tuple[str, ...] = field(default=('label', 'label_'))
"""Attributes to copy from the span to the split token."""
def map_tokens(self, token_tups: Iterable[Tuple[Token, str]]) -> \
Iterable[Tuple[Token, str]]:
def map_tup(tup):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'setm: mapping tup: {tup}')
if isinstance(tup[0], Span):
span = tup[0]
for tix in range(span.end - span.start):
if not token_unit_type:
tok = span[tix:tix + 1]
else:
tok = span[tix]
for attr in cp_attribs:
setattr(tok, attr, getattr(span, attr))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'setm: split: {tok}')
yield (tok, tok.orth_)
else:
yield tup
token_unit_type = self.token_unit_type
cp_attribs = self.copy_attributes
return map(map_tup, token_tups)
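
# Illustrative sketch (not part of the original source): re-splitting entity
# spans produced by ``embed_entities`` back into one-token spans; the model
# name is an assumption.
def _example_split_entity():
    import spacy
    nlp = spacy.load('en_core_web_sm')
    doc: Doc = nlp('Paul Landes lives in Chicago.')
    tups = TokenNormalizer(embed_entities=True).normalize(doc)
    setm = SplitEntityTokenMapper()
    # each mapper yields an iterable per input tuple, hence the chain
    return tuple(chain.from_iterable(setm.map_tokens(tups)))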
@dataclass
class LemmatizeTokenMapper(TokenMapper):
"""Lemmatize tokens and optional remove entity stop words.
**Important:** This completely ignores the normalized input token string
and essentially just replaces it with the lemma found in the token
instance.
Configuration example::
[lemma_token_mapper]
class_name = zensols.nlp.LemmatizeTokenMapper
    :param lemmatize: lemmatize if ``True``; this is an option to allow (only)
                      the removal of the first stop word in named entities
    :param remove_first_stop: whether to remove the first stop word in named
                              entities when ``embed_entities`` is ``True``
"""
lemmatize: bool = field(default=True)
remove_first_stop: bool = field(default=False)
def _lemmatize(self, tok_or_ent):
if isinstance(tok_or_ent, Token):
stok = tok_or_ent.lemma_
else:
if self.remove_first_stop and tok_or_ent[0].is_stop:
tok_or_ent = tok_or_ent[1:]
stok = tok_or_ent.text.lower()
return stok
def map_tokens(self, token_tups: Iterable[Tuple[Token, str]]) -> \
Iterable[Tuple[Token, str]]:
return (map(lambda x: (x[0], self._lemmatize(x[0])), token_tups),)
@dataclass
class FilterTokenMapper(TokenMapper):
"""Filter tokens based on token (Spacy) attributes.
Configuration example::
[filter_token_mapper]
class_name = zensols.nlp.FilterTokenMapper
remove_stop = True
remove_punctuation = True
"""
remove_stop: bool = field(default=False)
remove_space: bool = field(default=False)
remove_pronouns: bool = field(default=False)
remove_punctuation: bool = field(default=False)
remove_determiners: bool = field(default=False)
def __post_init__(self):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'created {self.__class__}: ' +
f'remove_stop: {self.remove_stop}, ' +
f'remove_space: {self.remove_space}, ' +
f'remove_pronouns: {self.remove_pronouns}, ' +
f'remove_punctuation: {self.remove_punctuation}, ' +
f'remove_determiners: {self.remove_determiners}')
def _filter(self, tok_or_ent_tup):
tok_or_ent = tok_or_ent_tup[0]
keep = False
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'filter: {tok_or_ent} ({type(tok_or_ent)})')
if isinstance(tok_or_ent, Token):
t = tok_or_ent
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'token {t}: l={len(t)}, ' +
f's={t.is_stop}, p={t.is_punct}')
if (not self.remove_stop or not t.is_stop) and \
(not self.remove_space or not t.is_space) and \
(not self.remove_pronouns or not t.pos_ == 'PRON') and \
(not self.remove_punctuation or not t.is_punct) and \
(not self.remove_determiners or not t.tag_ == 'DT') and \
len(t) > 0:
keep = True
else:
keep = True
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'filter: keeping={keep}')
return keep
def map_tokens(self, token_tups: Iterable[Tuple[Token, str]]) -> \
Iterable[Tuple[Token, str]]:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('filter mapper: map_tokens')
return (filter(self._filter, token_tups),)
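
# Illustrative sketch (not part of the original source): chaining a mapper's
# output the way :class:`MapTokenNormalizer` does; the model name and text
# are assumptions.
def _example_filter_mapper():
    import spacy
    nlp = spacy.load('en_core_web_sm')
    doc: Doc = nlp('The dog ran over the hill.')
    tups = TokenNormalizer().normalize(doc)
    mapper = FilterTokenMapper(remove_stop=True, remove_punctuation=True)
    # stop words and the final period are filtered from the stream
    return tuple(chain.from_iterable(mapper.map_tokens(tups)))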
@dataclass
class FilterRegularExpressionMapper(TokenMapper):
"""Filter tokens based on normalized form regular expression.
"""
regex: Union[re.Pattern, str] = field(default=r'[ ]+')
"""The regular expression to use for splitting tokens."""
invert: bool = field(default=False)
"""If ``True`` then remove rather than keep everything that matches.."""
def __post_init__(self):
if not isinstance(self.regex, re.Pattern):
self.regex = re.compile(eval(self.regex))
def _filter(self, tup: Tuple[Token, str]):
token, norm = tup
match = self.regex.match(norm) is not None
if self.invert:
match = not match
return match
def map_tokens(self, token_tups: Iterable[Tuple[Token, str]]) -> \
Iterable[Tuple[Token, str]]:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('filter mapper: map_tokens')
return (filter(self._filter, token_tups),)
@dataclass
class SubstituteTokenMapper(TokenMapper):
"""Replace a regular expression in normalized token text.
Configuration example::
[subs_token_mapper]
class_name = zensols.nlp.SubstituteTokenMapper
regex = r'[ \\t]'
replace_char = _
"""
regex: str = field(default='')
"""The regular expression to use for substitution."""
replace_char: str = field(default='')
"""The character that is used for replacement."""
def __post_init__(self):
self.regex = re.compile(eval(self.regex))
def map_tokens(self, token_tups: Iterable[Tuple[Token, str]]) -> \
Iterable[Tuple[Token, str]]:
return (map(lambda x: (x[0], re.sub(
self.regex, self.replace_char, x[1])), token_tups),)
@dataclass
class LambdaTokenMapper(TokenMapper):
"""Use a lambda expression to map a token tuple.
This is handy for specialized behavior that can be added directly to a
configuration file.
Configuration example::
[lc_lambda_token_mapper]
class_name = zensols.nlp.LambdaTokenMapper
map_lambda = lambda x: (x[0], f'<{x[1].lower()}>')
"""
add_lambda: str = field(default=None)
map_lambda: str = field(default=None)
def __post_init__(self):
if self.add_lambda is None:
self.add_lambda = lambda x: ()
else:
self.add_lambda = eval(self.add_lambda)
if self.map_lambda is None:
self.map_lambda = lambda x: x
else:
self.map_lambda = eval(self.map_lambda)
def map_tokens(self, token_tups: Iterable[Tuple[Token, str]]) -> \
Iterable[Tuple[Token, str]]:
return (map(self.map_lambda, token_tups),)
@dataclass
class MapTokenNormalizer(TokenNormalizer):
"""A normalizer that applies a sequence of :class:`.TokenMapper` instances to
transform the normalized token text. The members of the
``mapper_class_list`` are sections of the application configuration.
Configuration example::
[map_filter_token_normalizer]
class_name = zensols.nlp.MapTokenNormalizer
mapper_class_list = list: filter_token_mapper
"""
config_factory: ConfigFactory = field(default=None)
"""The factory that created this instance and used to create the mappers.
"""
mapper_class_list: List[str] = field(default_factory=list)
"""The configuration section names to create from the application configuration
factory, which is added to :obj:`mappers`. This field settings is
deprecated; use :obj:`mappers` instead.
"""
def __post_init__(self):
self.mappers = list(map(self.config_factory, self.mapper_class_list))
def _map_tokens(self, token_tups: Iterable[Tuple[Token, str]]) -> \
Iterable[Tuple[Token, str]]:
for mapper in self.mappers:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'mapping token_tups with {mapper}')
token_tups = chain.from_iterable(mapper.map_tokens(token_tups))
return token_tups
def __str__(self) -> str:
s = super().__str__()
maps = ', '.join(map(str, self.mapper_class_list))
        return f'{s}, {maps}'

# --- file: zensols/nlp/norm.py (package zensols.nlp, from /zensols.nlp-1.8.0-py3-none-any.whl) ---
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import List, Tuple, Iterable, Dict, Type, Any, ClassVar, Set, Union
from dataclasses import dataclass, field
import dataclasses
from abc import ABCMeta, abstractmethod
import sys
import logging
import textwrap as tw
import itertools as it
from itertools import chain
from io import TextIOBase
from frozendict import frozendict
from interlap import InterLap
from spacy.tokens import Doc, Span, Token
from zensols.persist import PersistableContainer, persisted, PersistedWork
from . import NLPError, TextContainer, FeatureToken, LexicalSpan
from .spannorm import SpanNormalizer, DEFAULT_FEATURE_TOKEN_NORMALIZER
logger = logging.getLogger(__name__)
class TokenContainer(PersistableContainer, TextContainer, metaclass=ABCMeta):
"""A base class for token container classes such as
:class:`.FeatureSentence` and :class:`.FeatureDocument`. In addition to the
defined methods, each instance has a ``text`` attribute, which is the
original text of the document.
"""
_PERSITABLE_TRANSIENT_ATTRIBUTES: ClassVar[Set[str]] = {'_token_norm'}
def __post_init__(self):
super().__init__()
self._norm = PersistedWork('_norm', self, transient=True)
self._entities = PersistedWork('_entities', self, transient=True)
self._token_norm: SpanNormalizer = DEFAULT_FEATURE_TOKEN_NORMALIZER
@abstractmethod
def token_iter(self, *args, **kwargs) -> Iterable[FeatureToken]:
"""Return an iterator over the token features.
:param args: the arguments given to :meth:`itertools.islice`
"""
pass
@staticmethod
def strip_tokens(token_iter: Iterable[FeatureToken]) -> \
Iterable[FeatureToken]:
"""Strip beginning and ending whitespace. This uses
:obj:`~.tok.SpacyFeatureToken.is_space`, which is ``True`` for spaces,
tabs and newlines.
        :param token_iter: a stream of tokens
:return: non-whitespace middle tokens
"""
first_tok: bool = False
space_toks: List[FeatureToken] = []
tok: FeatureToken
for tok in token_iter:
if tok.is_space:
if first_tok:
space_toks.append(tok)
else:
first_tok = True
stok: FeatureToken
for stok in space_toks:
yield stok
space_toks.clear()
yield tok
def strip_token_iter(self, *args, **kwargs) -> Iterable[FeatureToken]:
"""Strip beginning and ending whitespace (see :meth:`strip_tokens`)
using :meth:`token_iter`.
"""
return self.strip_tokens(self.token_iter(*args, **kwargs))
def strip(self, in_place: bool = True) -> TokenContainer:
"""Strip beginning and ending whitespace (see :meth:`strip_tokens`) and
:obj:`text`.
"""
self._clear_persistable_state()
cont: TokenContainer = self if in_place else self.clone()
cont._strip()
return cont
@abstractmethod
def _strip(self):
pass
def norm_token_iter(self, *args, **kwargs) -> Iterable[str]:
"""Return a list of normalized tokens.
:param args: the arguments given to :meth:`itertools.islice`
"""
return map(lambda t: t.norm, self.token_iter(*args, **kwargs))
@property
@persisted('_norm')
def norm(self) -> str:
"""The normalized version of the sentence."""
return self._token_norm.get_norm(self.token_iter())
@property
@persisted('_canonical', transient=True)
def canonical(self) -> str:
"""A canonical representation of the container, which are non-space
tokens separated by :obj:`CANONICAL_DELIMITER`.
"""
return self._token_norm.get_canonical(self.token_iter())
@property
@persisted('_tokens', transient=True)
def tokens(self) -> Tuple[FeatureToken, ...]:
"""Return the token features as a tuple.
"""
return tuple(self.token_iter())
@property
@persisted('_token_len', transient=True)
def token_len(self) -> int:
"""Return the number of tokens."""
return sum(1 for i in self.token_iter())
@property
@persisted('_lexspan', transient=True)
def lexspan(self) -> LexicalSpan:
"""The document indexed lexical span using :obj:`idx`.
"""
toks: Tuple[FeatureToken, ...] = self.tokens
if len(toks) == 0:
return LexicalSpan.EMPTY_SPAN
else:
return LexicalSpan(toks[0].lexspan.begin, toks[-1].lexspan.end)
@persisted('_interlap', transient=True)
def _get_interlap(self) -> InterLap:
"""Create an interlap with all tokens of the container added."""
il = InterLap()
        # adding with an inline tuple is ~3 times faster than a list, and ~9
        # times faster than individual adds in a for loop
spans: Tuple[Tuple[int, int]] = tuple(
map(lambda t: (t.lexspan.begin, t.lexspan.end - 1, t),
self.token_iter()))
if len(spans) > 0:
il.add(spans)
return il
def map_overlapping_tokens(self, spans: Iterable[LexicalSpan],
inclusive: bool = True) -> \
Iterable[Tuple[FeatureToken, ...]]:
"""Return a tuple of tokens, each tuple in the range given by the
respective span in ``spans``.
:param spans: the document 0-index character based inclusive spans to
compare with :obj:`.FeatureToken.lexspan`
        :param inclusive: whether the end positions are inclusive in the check
:return: a tuple of matching tokens for the respective ``span`` query
"""
def map_span(s: LexicalSpan) -> Tuple[FeatureToken]:
toks = map(lambda m: m[2], il.find(s.astuple))
# we have to manually check non-inclusive right intervals since
# InterLap includes it
if not inclusive:
toks = filter(lambda t: t.lexspan.overlaps_with(s, False), toks)
return tuple(toks)
il = self._get_interlap()
return map(map_span, spans)
def get_overlapping_tokens(self, span: LexicalSpan,
inclusive: bool = True) -> \
Iterable[FeatureToken]:
"""Get all tokens that overlap lexical span ``span``.
:param span: the document 0-index character based inclusive span to
compare with :obj:`.FeatureToken.lexspan`
        :param inclusive: whether the end positions are inclusive in the check
:return: a token sequence containing the 0 index offset of ``span``
"""
return next(iter(self.map_overlapping_tokens((span,), inclusive)))
def get_overlapping_span(self, span: LexicalSpan,
inclusive: bool = True) -> TokenContainer:
"""Return a feature span that includes the lexical scope of ``span``."""
sent = FeatureSentence(tokens=self.tokens, text=self.text)
doc = FeatureDocument(sents=(sent,), text=self.text)
return doc.get_overlapping_document(span, inclusive=inclusive)
@abstractmethod
def to_sentence(self, limit: int = sys.maxsize,
contiguous_i_sent: Union[str, bool] = False,
delim: str = '') -> FeatureSentence:
"""Coerce this instance to a single sentence. No tokens data is updated
so :obj:`.FeatureToken.i_sent` keep their original indexes. These
sentence indexes will be inconsistent when called on
:class:`.FeatureDocument` unless contiguous_i_sent is set to ``True``.
:param limit: the max number of sentences to create (only starting kept)
:param contiguous_i_sent: if ``True``, ensures all tokens have
:obj:`.FeatureToken.i_sent` value that is
contiguous for the returned instance; if this
                                   value is ``reset``, the token indices start
from 0
:param delim: a string added between each constituent sentence
:return: an instance of ``FeatureSentence`` that represents this token
sequence
"""
pass
def _set_contiguous_tokens(self, contiguous_i_sent: Union[str, bool],
reference: TokenContainer):
if contiguous_i_sent is False:
pass
elif contiguous_i_sent == 'reset':
for i, tok in enumerate(self.token_iter()):
tok.i_sent = i
elif contiguous_i_sent is True:
            ref_tok: FeatureToken
            tok: FeatureToken
for ref_tok, tok in zip(reference.token_iter(), self.token_iter()):
tok.i_sent = ref_tok.i
else:
raise ValueError(
f'Bad value for contiguous_i_sent: {contiguous_i_sent}')
@abstractmethod
def to_document(self, limit: int = sys.maxsize) -> FeatureDocument:
"""Coerce this instance in to a document.
"""
pass
def clone(self, cls: Type[TokenContainer] = None, **kwargs) -> \
TokenContainer:
"""Clone an instance of this token container.
:param cls: the type of the new instance
:param kwargs: arguments to add to as attributes to the clone
:return: the cloned instance of this instance
"""
cls = self.__class__ if cls is None else cls
return cls(**kwargs)
@property
@persisted('_entities')
def entities(self) -> Tuple[FeatureSpan, ...]:
"""The named entities of the container with each multi-word entity as
elements.
"""
return self._get_entities()
@abstractmethod
def _get_entities(self) -> Tuple[FeatureSpan, ...]:
pass
@property
@persisted('_tokens_by_idx', transient=True)
def tokens_by_idx(self) -> Dict[int, FeatureToken]:
"""A map of tokens with keys as their character offset and values as
tokens.
        **Limitations**: Multi-word entities will have a mapping only for
        the first word of that entity if tokens were split by spaces (for
        example with :class:`~zensols.nlp.SplitTokenMapper`).  However,
        :obj:`tokens_by_i` does not have this limitation.
        :see: :obj:`tokens_by_i`
:see: :obj:`zensols.nlp.FeatureToken.idx`
"""
by_idx = {}
cnt = 0
tok: FeatureToken
for tok in self.token_iter():
by_idx[tok.idx] = tok
cnt += 1
assert cnt == self.token_len
return frozendict(by_idx)
@property
@persisted('_tokens_by_i', transient=True)
def tokens_by_i(self) -> Dict[int, FeatureToken]:
"""A map of tokens with keys as their position offset and values as
tokens. The entries also include named entity tokens that are grouped
as multi-word tokens. This is helpful for multi-word entities that were
split (for example with :class:`~zensols.nlp.SplitTokenMapper`), and
thus, have many-to-one mapped indexes.
:see: :obj:`zensols.nlp.FeatureToken.i`
"""
return frozendict(self._get_tokens_by_i())
@abstractmethod
def _get_tokens_by_i(self) -> Dict[int, FeatureToken]:
pass
def update_indexes(self):
"""Update all :obj:`.FeatureToken.i` attributes to those provided by
:obj:`tokens_by_i`. This corrects the many-to-one token index mapping
for split multi-word named entities.
:see: :obj:`tokens_by_i`
"""
i: int
ft: FeatureToken
for i, ft in self.tokens_by_i.items():
ft.i = i
@abstractmethod
def update_entity_spans(self, include_idx: bool = True):
"""Update token entity to :obj:`norm` text. This is helpful when
entities are embedded after splitting text, which becomes
:obj:`.FeatureToken.norm` values. However, the token spans still index
the original entities that are multi-word, which leads to norms that are
not equal to the text spans. This synchronizes the token span indexes
with the norms.
:param include_idx: whether to update :obj:`.SpacyFeatureToken.idx` as
well
"""
pass
def reindex(self, reference_token: FeatureToken = None):
"""Re-index tokens, which is useful for situtations where a 0-index
offset is assumed for sub-documents created with
:meth:`.FeatureDocument.get_overlapping_document` or
:meth:`.FeatureDocument.get_overlapping_sentences`. The following data
are modified:
* :obj:`.FeatureToken.i`
* :obj:`.FeatureToken.idx`
* :obj:`.FeatureToken.i_sent`
* :obj:`.FeatureToken.sent_i` (see :obj:`.SpacyFeatureToken.sent_i`)
* :obj:`.FeatureToken.lexspan` (see :obj:`.SpacyFeatureToken.lexspan`)
* :obj:`entities`
* :obj:`lexspan`
* :obj:`tokens_by_i`
* :obj:`tokens_by_idx`
* :obj:`.FeatureSpan.tokens_by_i_sent`
* :obj:`.FeatureSpan.dependency_tree`
"""
toks: Tuple[FeatureToken] = self.tokens
if len(toks) > 0:
if reference_token is None:
reference_token = toks[0]
self._reindex(reference_token.clone())
self.clear()
def _reindex(self, tok: FeatureToken):
offset_i, offset_idx = tok.i, tok.idx
sent_i = tok.sent_i if hasattr(tok, 'sent_i') else None
tok: FeatureToken
for tok in self.tokens:
idx: int = tok.idx - offset_idx
span = LexicalSpan(idx, idx + len(tok.text))
tok.i -= offset_i
tok.idx = idx
tok.lexspan = span
if sent_i is not None:
for tok in self.tokens:
tok.sent_i -= sent_i
def clear(self):
"""Clear all cached state."""
self._clear_persistable_state()
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_original: bool = False, include_normalized: bool = True,
n_tokens: int = sys.maxsize, inline: bool = False):
"""Write the text container.
:param include_original: whether to include the original text
:param include_normalized: whether to include the normalized text
:param n_tokens: the number of tokens to write
:param inline: whether to print the tokens on one line each
"""
super().write(depth, writer,
include_original=include_original,
include_normalized=include_normalized)
if n_tokens > 0:
self._write_line('tokens:', depth, writer)
for t in it.islice(self.token_iter(), n_tokens):
if inline:
t.write_attributes(depth + 1, writer,
inline=True, include_type=False)
else:
t.write(depth + 1, writer)
def write_text(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_original: bool = False,
include_normalized: bool = True,
limit: int = sys.maxsize):
"""Write only the text of the container.
:param include_original: whether to include the original text
:param include_normalized: whether to include the normalized text
:param limit: the max number of characters to print
"""
inc_both: bool = include_original and include_normalized
add_depth = 1 if inc_both else 0
if include_original:
if inc_both:
self._write_line('[O]:', depth, writer)
text: str = tw.shorten(self.text, limit)
self._write_wrap(text, depth + add_depth, writer)
if include_normalized:
if inc_both:
self._write_line('[N]:', depth, writer)
norm: str = tw.shorten(self.norm, limit)
self._write_wrap(norm, depth + add_depth, writer)
def __getitem__(self, key: Union[LexicalSpan, int]) -> \
Union[FeatureToken, TokenContainer]:
if isinstance(key, LexicalSpan):
return self.get_overlapping_span(key, inclusive=False)
return self.tokens[key]
def __setstate__(self, state: Dict[str, Any]):
super().__setstate__(state)
self._token_norm: SpanNormalizer = DEFAULT_FEATURE_TOKEN_NORMALIZER
def __eq__(self, other: TokenContainer) -> bool:
if self is other:
return True
else:
a: FeatureToken
b: FeatureToken
for a, b in zip(self.token_iter(), other.token_iter()):
if a != b:
return False
return self.token_len == other.token_len and self.text == other.text
    def __lt__(self, other: TokenContainer) -> bool:
return self.norm < other.norm
def __hash__(self) -> int:
return sum(map(hash, self.token_iter()))
def __str__(self):
return TextContainer.__str__(self)
def __repr__(self):
return TextContainer.__repr__(self)
@dataclass(eq=False, repr=False)
class FeatureSpan(TokenContainer):
"""A span of tokens as a :class:`.TokenContainer`, much like
:class:`spacy.tokens.Span`.
"""
_PERSITABLE_TRANSIENT_ATTRIBUTES: ClassVar[Set[str]] = \
TokenContainer._PERSITABLE_TRANSIENT_ATTRIBUTES | {'spacy_span'}
"""Don't serialize the spacy document on persistance pickling."""
tokens: Tuple[FeatureToken, ...] = field()
"""The tokens that make up the span."""
text: str = field(default=None)
"""The original raw text of the span."""
spacy_span: Span = field(default=None, repr=False, compare=False)
"""The parsed spaCy span this feature set is based.
:see: :meth:`.FeatureDocument.spacy_doc`
"""
def __post_init__(self):
super().__post_init__()
if self.text is None:
self.text = ' '.join(map(lambda t: t.text, self.tokens))
        # the _tokens setter is called to set the tokens before the
        # spacy_span is set; so call it again now that spacy_span is set
self._set_entity_spans()
@property
def _tokens(self) -> Tuple[FeatureToken, ...]:
return self._tokens_val
@_tokens.setter
def _tokens(self, tokens: Tuple[FeatureToken, ...]):
if not isinstance(tokens, tuple):
raise NLPError(
f'Expecting tuple of tokens, but got {type(tokens)}')
self._tokens_val = tokens
self._ents: List[Tuple[int, int]] = []
self._set_entity_spans()
if hasattr(self, '_norm'):
# the __post_init__ is called after this setter for EMPTY_SENTENCE
self._norm.clear()
    def _set_entity_spans(self):
        if self.spacy_span is not None:
            for ent in self.spacy_span.ents:
                start, end = None, None
                toks = iter(ent)
                try:
                    start = end = next(toks)
                    while True:
                        end = next(toks)
                except StopIteration:
                    pass
                if start is not None:
                    self._ents.append((start.idx, end.idx))
def _strip(self):
self.tokens = tuple(self.strip_tokens(self.tokens))
self.text = self.text.strip()
def to_sentence(self, limit: int = sys.maxsize,
contiguous_i_sent: Union[str, bool] = False,
delim: str = '') -> FeatureSentence:
if limit == 0:
return iter(())
else:
clone = self.clone(FeatureSentence)
if contiguous_i_sent:
clone._set_contiguous_tokens(contiguous_i_sent, self)
return clone
def to_document(self) -> FeatureDocument:
return FeatureDocument((self.to_sentence(),))
def clone(self, cls: Type = None, **kwargs) -> TokenContainer:
params = dict(kwargs)
if 'tokens' not in params:
params['tokens'] = tuple(
map(lambda t: t.clone(), self._tokens_val))
if 'text' not in params:
params['text'] = self.text
clone = super().clone(cls, **params)
clone._ents = list(self._ents)
return clone
def token_iter(self, *args, **kwargs) -> Iterable[FeatureToken]:
if len(args) == 0:
return iter(self._tokens_val)
else:
return it.islice(self._tokens_val, *args, **kwargs)
@property
def token_len(self) -> int:
return len(self._tokens_val)
def _is_mwe(self) -> bool:
"""True when this is a span with the same indexes because it was parsed
as a single token in to a multi-word expressions (i.e. entity).
"""
if self.token_len > 1:
return self._tokens_val[0].i != self._tokens_val[1].i
return False
@property
@persisted('_tokens_by_i_sent', transient=True)
def tokens_by_i_sent(self) -> Dict[int, FeatureToken]:
"""A map of tokens with keys as their sentanal position offset and
values as tokens.
:see: :obj:`zensols.nlp.FeatureToken.i`
"""
by_i_sent: Dict[int, FeatureToken] = {}
cnt: int = 0
tok: FeatureToken
for tok in self.token_iter():
by_i_sent[tok.i_sent] = tok
cnt += 1
assert cnt == self.token_len
# add indexes for multi-word entities that otherwise have mappings for
# only the first word of the entity
ent_span: FeatureSpan
for ent_span in self.entities:
im: int = 0 if ent_span._is_mwe() else 1
t: FeatureToken
for i, t in enumerate(ent_span):
by_i_sent[t.i_sent + (im * i)] = t
return frozendict(by_i_sent)
def _get_tokens_by_i(self) -> Dict[int, FeatureToken]:
by_i: Dict[int, FeatureToken] = {}
cnt: int = 0
tok: FeatureToken
for tok in self.token_iter():
by_i[tok.i] = tok
cnt += 1
assert cnt == self.token_len
# add indexes for multi-word entities that otherwise have mappings for
# only the first word of the entity
ent_span: Tuple[FeatureToken, ...]
for ent_span in self.entities:
im: int = 0 if ent_span._is_mwe() else 1
t: FeatureToken
for i, t in enumerate(ent_span):
by_i[t.i + (im * i)] = t
return by_i
def _get_entities(self) -> Tuple[FeatureSpan, ...]:
ents: List[FeatureSpan] = []
for start, end in self._ents:
ent: List[FeatureToken] = []
tok: FeatureToken
for tok in self.token_iter():
if tok.idx >= start and tok.idx <= end:
ent.append(tok)
if len(ent) > 0:
span = FeatureSpan(
tokens=tuple(ent),
text=' '.join(map(lambda t: t.norm, ent)))
ents.append(span)
return tuple(ents)
def update_indexes(self):
super().update_indexes()
i_sent: int
ft: FeatureToken
for i_sent, ft in self.tokens_by_i_sent.items():
ft.i_sent = i_sent
def update_entity_spans(self, include_idx: bool = True):
split_ents: List[Tuple[int, int]] = []
fspan: FeatureSpan
for fspan in self.entities:
beg: int = fspan[0].idx
tok: FeatureToken
for tok in fspan:
ls: LexicalSpan = tok.lexspan
end: int = beg + len(tok.norm)
if ls.begin != beg or ls.end != end:
ls = LexicalSpan(beg, end)
tok.lexspan = ls
if include_idx:
tok.idx = beg
split_ents.append((beg, beg))
beg = end + 1
self._ents = split_ents
self._entities.clear()
def _reindex(self, tok: FeatureToken):
offset_idx: int = tok.idx
super()._reindex(tok)
for i, tok in enumerate(self.tokens):
tok.i_sent = i
self._ents = list(map(
lambda t: (t[0] - offset_idx, t[1] - offset_idx), self._ents))
    def _branch(self, node: FeatureToken, toks: Tuple[FeatureToken, ...],
                tid_to_idx: Dict[int, int]) -> \
            Dict[FeatureToken, Dict[FeatureToken, Any]]:
clds = {}
for c in node.children:
            cix = tid_to_idx.get(c)
            if cix is not None:
child = toks[cix]
clds[child] = self._branch(child, toks, tid_to_idx)
return clds
@property
@persisted('_dependency_tree', transient=True)
    def dependency_tree(self) -> Dict[FeatureToken, Dict[FeatureToken, Any]]:
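        """The dependency parse tree of the span as a nested :class:`dict`
        rooted at the sentence head (the non-punctuation token with the
        ``ROOT`` dependency), with each token mapped to a :class:`dict` of its
        syntactic children.  An empty :class:`dict` is returned when no single
        root is found.
        """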
tid_to_idx: Dict[int, int] = {}
toks = self.tokens
for i, tok in enumerate(toks):
tid_to_idx[tok.i] = i
root = tuple(
filter(lambda t: t.dep_ == 'ROOT' and not t.is_punctuation, toks))
if len(root) == 1:
return {root[0]: self._branch(root[0], toks, tid_to_idx)}
else:
return {}
def _from_dictable(self, recurse: bool, readable: bool,
class_name_param: str = None) -> Dict[str, Any]:
return {'text': self.text,
'tokens': self._from_object(self.tokens, recurse, readable)}
def __len__(self) -> int:
return self.token_len
def __iter__(self):
return self.token_iter()
# keep the dataclass semantics, but allow for a setter
FeatureSpan.tokens = FeatureSpan._tokens
@dataclass(eq=False, repr=False)
class FeatureSentence(FeatureSpan):
"""A container class of tokens that make a sentence. Instances of this class
iterate over :class:`.FeatureToken` instances, and can create documents
with :meth:`to_document`.
"""
EMPTY_SENTENCE: ClassVar[FeatureSentence]
def to_sentence(self, limit: int = sys.maxsize,
contiguous_i_sent: Union[str, bool] = False,
delim: str = '') -> FeatureSentence:
if limit == 0:
return iter(())
else:
if not contiguous_i_sent:
return self
else:
clone = self.clone(FeatureSentence)
clone._set_contiguous_tokens(contiguous_i_sent, self)
return clone
def to_document(self) -> FeatureDocument:
return FeatureDocument((self,))
def get_overlapping_span(self, span: LexicalSpan,
inclusive: bool = True) -> TokenContainer:
doc = FeatureDocument(sents=(self,), text=self.text)
return doc.get_overlapping_document(span, inclusive=inclusive)
FeatureSentence.EMPTY_SENTENCE = FeatureSentence(tokens=(), text='')
@dataclass(eq=False, repr=False)
class FeatureDocument(TokenContainer):
"""A container class of tokens that make a document. This class contains a
one to many of sentences. However, it can be treated like any
:class:`.TokenContainer` to fetch tokens. Instances of this class iterate
over :class:`.FeatureSentence` instances.
:param sents: the sentences defined for this document
.. document private functions
.. automethod:: _combine_documents
"""
EMPTY_DOCUMENT: ClassVar[FeatureDocument] = None
"""A zero length document."""
_PERSITABLE_TRANSIENT_ATTRIBUTES: ClassVar[Set[str]] = \
TokenContainer._PERSITABLE_TRANSIENT_ATTRIBUTES | {'spacy_doc'}
"""Don't serialize the spacy document on persistance pickling."""
sents: Tuple[FeatureSentence, ...] = field()
"""The sentences that make up the document."""
text: str = field(default=None)
"""The original raw text of the sentence."""
spacy_doc: Doc = field(default=None, repr=False, compare=False)
"""The parsed spaCy document this feature set is based. As explained in
:class:`~zensols.nlp.FeatureToken`, spaCy documents are heavy weight and
problematic to pickle. For this reason, this attribute is dropped when
pickled, and only here for ad-hoc predictions.
"""
def __post_init__(self):
super().__post_init__()
if self.text is None:
self.text = ''.join(map(lambda s: s.text, self.sent_iter()))
if not isinstance(self.sents, tuple):
raise NLPError(
f'Expecting tuple of sentences, but got {type(self.sents)}')
def set_spacy_doc(self, doc: Doc):
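        """Set the spaCy document this feature document is based on, linking
        each feature token to its spaCy token and each feature sentence to its
        spaCy sentence span.
        """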
ft_to_i: Dict[int, FeatureToken] = self.tokens_by_i
st_to_i: Dict[int, Token] = {st.i: st for st in doc}
i: int
ft: FeatureToken
for i, ft in ft_to_i.items():
st: Token = st_to_i.get(i)
if st is not None:
ft.spacy_token = st
        fs: FeatureSentence
        ss: Span
        for fs, ss in zip(self.sents, doc.sents):
            fs.spacy_span = ss
self.spacy_doc = doc
def _strip(self):
sent: FeatureSentence
for sent in self.sents:
sent.strip()
self.text = self.text.strip()
def clone(self, cls: Type = None, **kwargs) -> TokenContainer:
"""
:param kwargs: if `copy_spacy` is ``True``, the spacy document is
copied to the clone in addition parameters passed to new
clone initializer
"""
params = dict(kwargs)
if 'sents' not in params:
params['sents'] = tuple(map(lambda s: s.clone(), self.sents))
if 'text' not in params:
params['text'] = self.text
if params.pop('copy_spacy', False):
for ss, cs in zip(self.sents, params['sents']):
cs.spacy_span = ss.spacy_span
params['spacy_doc'] = self.spacy_doc
return super().clone(cls, **params)
def token_iter(self, *args, **kwargs) -> Iterable[FeatureToken]:
sent_toks = chain.from_iterable(
map(lambda s: s.token_iter(), self.sents))
if len(args) == 0:
return sent_toks
else:
return it.islice(sent_toks, *args, **kwargs)
def sent_iter(self, *args, **kwargs) -> Iterable[FeatureSentence]:
if len(args) == 0:
return iter(self.sents)
else:
return it.islice(self.sents, *args, **kwargs)
@property
def max_sentence_len(self) -> int:
"""Return the length of tokens from the longest sentence in the document.
"""
return max(map(len, self.sent_iter()))
def _sent_class(self) -> Type[FeatureSentence]:
if len(self.sents) > 0:
cls = self.sents[0].__class__
else:
cls = FeatureSentence
return cls
def to_sentence(self, limit: int = sys.maxsize,
contiguous_i_sent: Union[str, bool] = False,
delim: str = '') -> FeatureSentence:
sents: Tuple[FeatureSentence, ...] = tuple(self.sent_iter(limit))
toks: Iterable[FeatureToken] = chain.from_iterable(
map(lambda s: s.tokens, sents))
stext: str = delim.join(map(lambda s: s.text, sents))
cls: Type = self._sent_class()
sent: FeatureSentence = cls(tokens=tuple(toks), text=stext)
sent._ents = list(chain.from_iterable(map(lambda s: s._ents, sents)))
sent._set_contiguous_tokens(contiguous_i_sent, self)
return sent
def _combine_update(self, other: FeatureDocument):
"""Update internal data structures from another combined document. This
includes merging entities.
:see :class:`.CombinerFeatureDocumentParser`
:see: :class:`.MappingCombinerFeatureDocumentParser`
"""
ss: FeatureSentence
ts: FeatureSentence
for ss, ts in zip(other, self):
ents = set(ss._ents) | set(ts._ents)
ts._ents = sorted(ents, key=lambda x: x[0])
def to_document(self) -> FeatureDocument:
return self
@persisted('_id_to_sent_pw', transient=True)
def _id_to_sent(self) -> Dict[int, int]:
id_to_sent = {}
for six, sent in enumerate(self):
for tok in sent:
id_to_sent[tok.idx] = six
return id_to_sent
def _get_tokens_by_i(self) -> Dict[int, FeatureToken]:
by_i = {}
for sent in self.sents:
by_i.update(sent.tokens_by_i)
return by_i
def update_indexes(self):
sent: FeatureSentence
for sent in self.sents:
sent.update_indexes()
def update_entity_spans(self, include_idx: bool = True):
sent: FeatureSentence
for sent in self.sents:
sent.update_entity_spans(include_idx)
self._entities.clear()
def _reindex(self, *args):
sent: FeatureSentence
for sent in self.sents:
sent._reindex(*args)
def clear(self):
"""Clear all cached state."""
super().clear()
sent: FeatureSentence
for sent in self.sents:
sent.clear()
def sentence_index_for_token(self, token: FeatureToken) -> int:
"""Return index of the parent sentence having ``token``."""
return self._id_to_sent()[token.idx]
def sentence_for_token(self, token: FeatureToken) -> FeatureSentence:
"""Return the parent sentence that has ``token``."""
six: int = self.sentence_index_for_token(token)
return self.sents[six]
def sentences_for_tokens(self, tokens: Tuple[FeatureToken, ...]) -> \
Tuple[FeatureSentence, ...]:
"""Find sentences having a set of tokens.
:param tokens: the query used to finding containing sentences
:return: the document ordered tuple of sentences containing `tokens`
"""
id_to_sent = self._id_to_sent()
sent_ids = sorted(set(map(lambda t: id_to_sent[t.idx], tokens)))
return tuple(map(lambda six: self[six], sent_ids))
def _combine_documents(self, docs: Tuple[FeatureDocument, ...],
cls: Type[FeatureDocument],
concat_tokens: bool,
**kwargs) -> FeatureDocument:
"""Override if there are any fields in your dataclass. In most cases,
the only time this is called is by an embedding vectorizer to batch
muultiple sentences in to a single document, so the only feature that
matter are the sentence level.
:param docs: the documents to combine in to one
:param cls: the class of the instance to create
        :param concat_tokens:
            if ``True``, each sentence of the returned document is the
            concatenation of tokens from each respective document; otherwise,
            simply concatenate sentences into one document
:param kwargs: additional keyword arguments to pass to the new feature
document's initializer
"""
if concat_tokens:
sents = tuple(chain.from_iterable(
map(lambda d: d.combine_sentences(), docs)))
else:
sents = tuple(chain.from_iterable(docs))
if 'text' not in kwargs:
kwargs = dict(kwargs)
kwargs['text'] = ' '.join(map(lambda d: d.text, docs))
return cls(sents, **kwargs)
@classmethod
def combine_documents(cls, docs: Iterable[FeatureDocument],
concat_tokens: bool = True,
**kwargs) -> FeatureDocument:
"""Coerce a tuple of token containers (either documents or sentences) in
to one synthesized document.
:param docs: the documents to combine in to one
:param cls: the class of the instance to create
        :param concat_tokens:
            if ``True``, each sentence of the returned document is the
            concatenation of tokens from each respective document; otherwise,
            simply concatenate sentences into one document
:param kwargs: additional keyword arguments to pass to the new feature
document's initializer
"""
docs = tuple(docs)
if len(docs) == 0:
doc = cls([], **kwargs)
else:
fdoc = docs[0]
doc = fdoc._combine_documents(
docs, type(fdoc), concat_tokens, **kwargs)
return doc
@persisted('_combine_all_sentences_pw', transient=True)
def _combine_all_sentences(self) -> FeatureDocument:
if len(self.sents) == 1:
return self
else:
sent_cls = self._sent_class()
sent = sent_cls(self.tokens)
doc = dataclasses.replace(self)
doc.sents = [sent]
doc._combined = True
return doc
def combine_sentences(self, sents: Iterable[FeatureSentence] = None) -> \
FeatureDocument:
"""Combine the sentences in this document in to a new document with a
single sentence.
:param sents: the sentences to combine in the new document or all if
``None``
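
        Example (a minimal sketch assuming ``doc`` is a parsed
        :class:`.FeatureDocument` with more than one sentence)::

            >>> single = doc.combine_sentences()
            >>> len(single)
            1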
"""
if sents is None:
return self._combine_all_sentences()
else:
return self.__class__(tuple(sents))
def _reconstruct_sents_iter(self) -> Iterable[FeatureSentence]:
sent: FeatureSentence
for sent in self.sents:
stoks: List[FeatureToken] = []
ip_sent: int = -1
tok: FeatureToken
for tok in sent:
                # when the token's sentence index decreases, the previous
                # tokens make up a complete sentence
if tok.i_sent < ip_sent:
sent = FeatureSentence(tuple(stoks))
stoks = []
yield sent
stoks.append(tok)
ip_sent = tok.i_sent
if len(stoks) > 0:
yield FeatureSentence(tuple(stoks))
def uncombine_sentences(self) -> FeatureDocument:
"""Reconstruct the sentence structure that we combined in
:meth:`combine_sentences`. If that has not been done in this instance,
then return ``self``.
"""
if hasattr(self, '_combined'):
return FeatureDocument(tuple(self._reconstruct_sents_iter()))
else:
return self
def _get_entities(self) -> Tuple[FeatureSpan, ...]:
return tuple(chain.from_iterable(
map(lambda s: s.entities, self.sents)))
def get_overlapping_span(self, span: LexicalSpan,
inclusive: bool = True) -> TokenContainer:
"""Return a feature span that includes the lexical scope of ``span``."""
return self.get_overlapping_document(span, inclusive=inclusive)
def get_overlapping_sentences(self, span: LexicalSpan,
inclusive: bool = True) -> \
Iterable[FeatureSentence]:
"""Return sentences that overlaps with ``span`` from this document.
:param span: indicates the portion of the document to retain
        :param inclusive: whether to treat the end component of ``span`` as
                          inclusive (i.e. add +1 to the end in the check)
"""
for sent in self.sents:
if sent.lexspan.overlaps_with(span):
yield sent
def get_overlapping_document(self, span: LexicalSpan,
inclusive: bool = True) -> FeatureDocument:
"""Get the portion of the document that overlaps ``span``. Sentences
completely enclosed in a span are copied. Otherwise, new sentences are
created from those tokens that overlap the span.
:param span: indicates the portion of the document to retain
        :param inclusive: whether to treat the end component of ``span`` as
                          inclusive (i.e. add +1 to the end in the check)
:return: a new document that contains the 0 index offset of ``span``
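
        Example (a minimal sketch assuming ``doc`` was parsed from the
        hypothetical text ``I am a citizen of the US. I live here.``)::

            >>> doc.get_overlapping_document(LexicalSpan(0, 24)).text
            'I am a citizen of the US.'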
"""
send: int = 1 if inclusive else 0
doc = self.clone()
if span != self.lexspan:
doc_text: str = self.text
sents: List[FeatureSentence] = []
for sent in self.sent_iter():
toks: List[FeatureToken] = list(
sent.get_overlapping_tokens(span, inclusive))
if len(toks) == 0:
continue
elif len(toks) == len(sent):
pass
else:
text: str = doc_text[toks[0].lexspan.begin:
toks[-1].lexspan.end - 1 + send]
hang: int = (span.end + send) - toks[-1].lexspan.end
if hang < 0:
tok: FeatureToken = toks[-1]
clone = tok.clone()
clone.norm = tok.norm[:hang]
clone.text = tok.text[:hang]
toks[-1] = clone
hang = toks[0].lexspan.begin - span.begin
if hang < 0:
hang *= -1
tok = toks[0]
clone = tok.clone()
clone.norm = tok.norm[hang:]
clone.text = tok.text[hang:]
toks[0] = clone
sent = sent.clone(tokens=tuple(toks), text=text)
sents.append(sent)
text: str = doc_text[span.begin:span.end + send]
doc.sents = tuple(sents)
doc.text = text
body_len = sum(
1 for _ in doc.get_overlapping_tokens(span, inclusive))
assert body_len == doc.token_len
return doc
def from_sentences(self, sents: Iterable[FeatureSentence],
deep: bool = False) -> FeatureDocument:
"""Return a new cloned document using the given sentences.
:param sents: the sentences to add to the new cloned document
:param deep: whether or not to clone the sentences
:see: :meth:`clone`
"""
if deep:
sents = tuple(map(lambda s: s.clone(), sents))
clone = self.clone(sents=sents)
clone.text = ' '.join(map(lambda s: s.text, sents))
clone.spacy_doc = None
return clone
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
n_sents: int = sys.maxsize, n_tokens: int = 0,
include_original: bool = False,
include_normalized: bool = True):
"""Write the document and optionally sentence features.
:param n_sents: the number of sentences to write
:param n_tokens: the number of tokens to print across all sentences
:param include_original: whether to include the original text
:param include_normalized: whether to include the normalized text
"""
TextContainer.write(self, depth, writer,
include_original=include_original,
include_normalized=include_normalized)
self._write_line('sentences:', depth, writer)
s: FeatureSentence
for s in it.islice(self.sents, n_sents):
s.write(depth + 1, writer, n_tokens=n_tokens,
include_original=include_original,
include_normalized=include_normalized)
def _from_dictable(self, recurse: bool, readable: bool,
class_name_param: str = None) -> Dict[str, Any]:
return {'text': self.text,
'sentences': self._from_object(self.sents, recurse, readable)}
def __getitem__(self, key: Union[LexicalSpan, int]) -> \
Union[FeatureSentence, TokenContainer]:
if isinstance(key, LexicalSpan):
return self.get_overlapping_span(key, inclusive=False)
return self.sents[key]
def __eq__(self, other: FeatureDocument) -> bool:
if self is other:
return True
else:
a: FeatureSentence
b: FeatureSentence
for a, b in zip(self.sents, other.sents):
if a != b:
return False
return len(self.sents) == len(other.sents) and \
self.text == other.text
def __hash__(self) -> int:
return sum(map(hash, self.sents))
def __len__(self):
return len(self.sents)
def __iter__(self):
return self.sent_iter()
FeatureDocument.EMPTY_DOCUMENT = FeatureDocument(sents=(), text='')
@dataclass(eq=False, repr=False)
class TokenAnnotatedFeatureSentence(FeatureSentence):
"""A feature sentence that contains token annotations.
"""
annotations: Tuple[Any, ...] = field(default=())
"""A token level annotation, which is one-to-one to tokens."""
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
n_tokens: int = 0):
super().write(depth, writer, n_tokens=n_tokens)
n_ann = len(self.annotations)
self._write_line(f'annotations ({n_ann}): {self.annotations}',
depth, writer)
@dataclass(eq=False, repr=False)
class TokenAnnotatedFeatureDocuemnt(FeatureDocument):
"""A feature sentence that contains token annotations. Sentences can be
modeled with :class:`.TokenAnnotatedFeatureSentence` or just
:class:`.FeatureSentence` since this sets the `annotations` attribute when
combining.
"""
@persisted('_combine_sentences', transient=True)
def combine_sentences(self) -> FeatureDocument:
"""Combine all the sentences in this document in to a new document with
a single sentence.
"""
if len(self.sents) == 1:
return self
else:
sent_cls = self._sent_class()
anns = chain.from_iterable(map(lambda s: s.annotations, self))
sent = sent_cls(self.tokens)
sent.annotations = tuple(anns)
doc = dataclasses.replace(self)
doc.sents = [sent]
doc._combined = True
return doc
def _combine_documents(self, docs: Tuple[FeatureDocument, ...],
cls: Type[FeatureDocument],
concat_tokens: bool) -> FeatureDocument:
if concat_tokens:
return super()._combine_documents(docs, cls, concat_tokens)
else:
sents = chain.from_iterable(docs)
            text = ' '.join(map(lambda d: d.text, docs))
            anns = chain.from_iterable(
                map(lambda s: s.annotations, chain.from_iterable(docs)))
doc = cls(tuple(sents), text)
doc.sents[0].annotations = tuple(anns)
return doc | zensols.nlp | /zensols.nlp-1.8.0-py3-none-any.whl/zensols/nlp/container.py | container.py |
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import (
Tuple, Union, Optional, Any, Set, Iterable, Dict, Sequence, ClassVar, Type
)
from dataclasses import dataclass, field
from functools import reduce
from itertools import chain
import sys
from io import TextIOBase
from frozendict import frozendict
from spacy.tokens.token import Token
from spacy.tokens.doc import Doc
from spacy.tokens.span import Span
from zensols.persist import PersistableContainer
from . import NLPError, TextContainer, LexicalSpan
@dataclass
class FeatureToken(PersistableContainer, TextContainer):
"""A container class for features about a token. Subclasses such as
:class:`.SpacyFeatureToken` extracts only a subset of features from the
heavy Spacy C data structures and is hard/expensive to pickle.
**Feature note**: features :obj:`i`, :obj:`idx` and :obj:`i_sent` are always
added to features tokens to be able to reconstruct sentences (see
:meth:`.FeatureDocument.uncombine_sentences`), and alwyas included.
"""
_DICTABLE_WRITABLE_DESCENDANTS: ClassVar[bool] = True
"""Use write method."""
REQUIRED_FEATURE_IDS: ClassVar[Set[str]] = frozenset(
'i idx i_sent norm'.split())
"""Features retained regardless of configuration for basic functionality.
"""
FEATURE_IDS_BY_TYPE: ClassVar[Dict[str, Set[str]]] = frozendict({
'bool': frozenset(('is_space is_stop is_ent is_wh is_contraction ' +
'is_superlative is_pronoun').split()),
'int': frozenset(('i idx i_sent sent_i is_punctuation tag ' +
'ent ent_iob dep shape norm_len').split()),
'str': frozenset(('norm lemma_ tag_ pos_ ent_ ent_iob_ ' +
'dep_ shape_').split()),
'list': frozenset('children'.split()),
'object': frozenset('lexspan'.split())})
"""Map of class type to set of feature IDs."""
TYPES_BY_FEATURE_ID: ClassVar[Dict[str, str]] = frozendict(
chain.from_iterable(
map(lambda itm: map(lambda f: (f, itm[0]), itm[1]),
FEATURE_IDS_BY_TYPE.items())))
"""A map of feature ID to string type. This is used by
:meth:`.FeatureToken.write_attributes` to dump the type features.
"""
FEATURE_IDS: ClassVar[Set[str]] = frozenset(
reduce(lambda res, x: res | x, FEATURE_IDS_BY_TYPE.values()))
"""All default available feature IDs."""
SKIP_COMPARE_FEATURE_IDS: ClassVar[Set[str]] = set()
"""A set of feature IDs to avoid comparing in :meth:`__eq__`."""
WRITABLE_FEATURE_IDS: ClassVar[Tuple[str, ...]] = tuple(
('text norm idx sent_i i i_sent tag pos ' +
'is_wh entity dep children').split())
"""Feature IDs that are dumped on :meth:`write` and :meth:`write_attributes`.
"""
NONE: ClassVar[str] = '-<N>-'
"""Default string for *not a feature*, or missing features."""
i: int = field()
"""The index of the token within the parent document."""
idx: int = field()
"""The character offset of the token within the parent document."""
i_sent: int = field()
"""The index of the token within the parent sentence.
The index of the token in the respective sentence. This is not to be
confused with the index of the sentence to which the token belongs, which
is :obj:`sent_i`.
"""
norm: str = field()
"""Normalized text, which is the text/orth or the named entity if tagged as a
named entity.
"""
def __post_init__(self):
super().__init__()
self._detatched_feature_ids = None
def detach(self, feature_ids: Set[str] = None,
skip_missing: bool = False,
cls: Type[FeatureToken] = None) -> FeatureToken:
"""Create a detected token (i.e. from spaCy artifacts).
:param feature_ids: the features to write, which defaults to
:obj:`FEATURE_IDS`
:param skip_missing: whether to only keep ``feature_ids``
:param cls: the type of the new instance
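
        Example (a minimal sketch assuming ``tok`` is a parsed
        :class:`.FeatureToken`)::

            >>> lean = tok.detach({'norm', 'pos_'})
            >>> sorted(lean.default_detached_feature_ids)
            ['i', 'i_sent', 'idx', 'norm', 'pos_']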
"""
cls = FeatureToken if cls is None else cls
if feature_ids is None:
feature_ids = set(self.FEATURE_IDS)
else:
feature_ids = set(feature_ids)
feature_ids.update(self.REQUIRED_FEATURE_IDS)
feats: Dict[str, Any] = self.get_features(feature_ids, skip_missing)
clone = FeatureToken.__new__(cls)
clone.__dict__.update(feats)
if hasattr(self, '_text'):
clone.text = self._text
if feature_ids is not None:
clone._detatched_feature_ids = feature_ids
return clone
@property
def default_detached_feature_ids(self) -> Optional[Set[str]]:
"""The default set of feature IDs used when cloning or detaching
with :meth:`clone` or :meth:`detatch`.
"""
return self._detatched_feature_ids
@default_detached_feature_ids.setter
def default_detached_feature_ids(self, feature_ids: Set[str]):
"""The default set of feature IDs used when cloning or detaching
with :meth:`clone` or :meth:`detatch`.
"""
self._detatched_feature_ids = feature_ids
def clone(self, cls: Type = None, **kwargs) -> FeatureToken:
"""Clone an instance of this token.
:param cls: the type of the new instance
:param kwargs: arguments to add to as attributes to the clone
:return: the cloned instance of this instance
"""
clone = self.detach(self._detatched_feature_ids, cls=cls)
clone.__dict__.update(kwargs)
return clone
@property
def text(self) -> str:
"""The initial text before normalized by any :class:`.TokenNormalizer`.
"""
if hasattr(self, '_text'):
return self._text
else:
return self.norm
@text.setter
def text(self, text: str):
"""The initial text before normalized by any :class:`.TokenNormalizer`.
"""
self._text = text
@property
def is_none(self) -> bool:
"""Return whether or not this token is represented as none or empty."""
return self._is_none(self.norm)
@classmethod
def _is_none(cls, targ: Any) -> bool:
return targ is None or targ == cls.NONE or targ == 0
def get_value(self, attr: str) -> Optional[Any]:
"""Get a value by attribute.
:return: ``None`` when the value is not set
"""
val = None
if hasattr(self, attr):
targ = getattr(self, attr)
if not self._is_none(targ):
val = targ
return val
def get_features(self, feature_ids: Iterable[str] = None,
skip_missing: bool = False) -> Dict[str, Any]:
"""Get features as a :class:`dict`.
:param feature_ids: the features to write, which defaults to
:obj:`FEATURE_IDS`
:param skip_missing: whether to only keep ``feature_ids``
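
        Example (a minimal sketch assuming ``tok`` is a parsed
        :class:`.SpacyFeatureToken` for the hypothetical word ``cats``)::

            >>> feats = tok.get_features({'norm', 'norm_len'})
            >>> feats['norm'], feats['norm_len']
            ('cats', 4)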
"""
feature_ids = self.FEATURE_IDS if feature_ids is None else feature_ids
if skip_missing:
feature_ids = filter(lambda fid: hasattr(self, fid), feature_ids)
return {k: getattr(self, k) for k in feature_ids}
def _from_dictable(self, recurse: bool, readable: bool,
class_name_param: str = None) -> Dict[str, Any]:
dct = {}
for k, v in self.__dict__.items():
if not k.startswith('_'):
dct[k] = self._from_object(v, recurse, readable)
return dct
def to_vector(self, feature_ids: Sequence[str] = None) -> Iterable[str]:
"""Return an iterable of feature data.
"""
if feature_ids is None:
feature_ids = set(self.__dict__.keys()) - \
{'_detatched_feature_ids'}
return map(lambda a: getattr(self, a), sorted(feature_ids))
def write_attributes(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_type: bool = True,
feature_ids: Iterable[str] = None,
inline: bool = False,
include_none: bool = True):
"""Write feature attributes.
:param depth: the starting indentation depth
:param writer: the writer to dump the content of this writable
:param include_type: if ``True`` write the type of value (if available)
:param feature_ids: the features to write, which defaults to
:obj:`WRITABLE_FEATURE_IDS`
        :param inline: whether to print attributes all on the same line
        :param include_none: whether to write features with ``None`` values
"""
if feature_ids is None:
feature_ids = self._detatched_feature_ids
if feature_ids is None:
feature_ids = self.WRITABLE_FEATURE_IDS
dct = self.get_features(feature_ids, True)
if 'text' in dct and dct['norm'] == dct['text']:
del dct['text']
for i, k in enumerate(sorted(dct.keys())):
val: str = dct[k]
ptype: str = None
if not include_none and self._is_none(val):
continue
if include_type:
ptype = self.TYPES_BY_FEATURE_ID.get(k)
if ptype is not None:
ptype = f' ({ptype})'
ptype = '' if ptype is None else ptype
sout = f'{k}={val}{ptype}'
if inline:
if i == 0:
writer.write(self._sp(depth))
else:
writer.write(', ')
writer.write(sout)
else:
self._write_line(sout, depth, writer)
if inline:
self._write_empty(writer)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_type: bool = True,
feature_ids: Iterable[str] = None):
con = f'norm=<{self.norm}>'
if self.text != self.norm:
con += f' org=<{self.text}>'
self._write_line(f'{self.__class__.__name__}: ' + con, depth, writer)
self._write_line('attributes:', depth + 1, writer)
self.write_attributes(depth + 2, writer, include_type, feature_ids)
def __eq__(self, other: FeatureToken) -> bool:
if self is other:
return True
if self.i == other.i and self.idx == other.idx:
a = dict(self.__dict__)
b = dict(other.__dict__)
del a['_detatched_feature_ids']
del b['_detatched_feature_ids']
for attr in self.SKIP_COMPARE_FEATURE_IDS:
a.pop(attr)
b.pop(attr)
return a == b
return False
def __lt__(self, other: FeatureToken) -> int:
return self.idx < other.idx
def __hash__(self) -> int:
return ((self.i + 1) * 13) + \
((self.idx + 1) * 29) + \
((self.i_sent + 1) * 71)
def __str__(self) -> str:
return TextContainer.__str__(self)
def __repr__(self) -> str:
return self.__str__()
# speed up none compares by using interned NONE
def __getstate__(self) -> Dict[str, Any]:
state = super().__getstate__()
if self.norm == self.NONE:
del state['norm']
return state
# speed up none compares by using interned NONE
def __setstate__(self, state: Dict[str, Any]):
if 'norm' not in state:
state['norm'] = self.NONE
super().__setstate__(state)
def long_repr(self) -> str:
attrs = []
for s in 'norm lemma_ tag_ ent_'.split():
v = getattr(self, s) if hasattr(self, s) else None
if v is not None:
attrs.append(f'{s}: {v}')
return ', '.join(attrs)
@dataclass(init=False)
class SpacyFeatureToken(FeatureToken):
"""Contains and provides the same features as a spaCy
:class:`~spacy.tokens.Token`.
"""
spacy_token: Union[Token, Span] = field(repr=False, compare=False)
"""The parsed spaCy token (or span if entity) this feature set is based.
:see: :meth:`.FeatureDocument.spacy_doc`
"""
def __init__(self, spacy_token: Union[Token, Span], norm: str):
self.spacy_token = spacy_token
self.is_ent: bool = not isinstance(self.spacy_token, Token)
self._doc: Doc = self.spacy_token.doc
i = self.token.i
idx = self.token.idx
i_sent = self.token.i - self.token.sent.start
self._text = spacy_token.orth_
super().__init__(i, idx, i_sent, norm)
def __getstate__(self):
raise NLPError('Not persistable')
@property
def token(self) -> Token:
"""Return the SpaCy token.
"""
tok = self.spacy_token
if isinstance(tok, Span):
tok = self._doc[tok.start]
return tok
@property
def is_wh(self) -> bool:
"""Return ``True`` if this is a WH word (i.e. what, where).
"""
return self.token.tag_.startswith('W')
@property
def is_stop(self) -> bool:
"""Return ``True`` if this is a stop word.
"""
return not self.is_ent and self.token.is_stop
@property
def is_punctuation(self) -> bool:
"""Return ``True`` if this is a punctuation (i.e. '?') token.
"""
return self.token.is_punct
@property
def is_pronoun(self) -> bool:
"""Return ``True`` if this is a pronoun (i.e. 'he') token.
"""
return False if self.is_ent else self.spacy_token.pos_ == 'PRON'
@staticmethod
def _is_apos(tok: Token) -> bool:
"""Return whether or not ``tok`` is an apostrophy (') symbol.
:param tok: the token to copmare
"""
return (tok.orth != tok.lemma_) and (tok.orth_.find('\'') >= 0)
@property
def lemma_(self) -> str:
"""Return the string lemma or text of the named entitiy if tagged as a named
entity.
"""
return self.spacy_token.orth_ if self.is_ent else self.spacy_token.lemma_
@property
def is_contraction(self) -> bool:
"""Return ``True`` if this token is a contradiction.
"""
if self.is_ent:
return False
else:
t = self.spacy_token
if self._is_apos(t):
return True
else:
doc = t.doc
dl = len(doc)
return ((t.i + 1) < dl) and self._is_apos(doc[t.i + 1])
@property
def ent(self) -> int:
"""Return the entity numeric value or 0 if this is not an entity.
"""
return self.spacy_token.label if self.is_ent else 0
@property
def ent_(self) -> str:
"""Return the entity string label or ``None`` if this token has no entity.
"""
return self.spacy_token.label_ if self.is_ent else self.NONE
@property
def ent_iob(self) -> int:
"""Return the entity IOB tag, which ``I`` for in, ```O`` for out, `B`` for
begin.
"""
return self.token.ent_iob if self.is_ent else 0
@property
def ent_iob_(self) -> str:
"""Return the entity IOB nominal index for :obj:``ent_iob``.
"""
return self.token.ent_iob_ if self.is_ent else 'O'
def conll_iob_(self) -> str:
"""Return the CoNLL formatted IOB tag, such as ``B-ORG`` for a beginning
organization token.
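
        Example (a minimal sketch assuming ``tok`` is the first token of an
        organization named entity)::

            >>> tok.conll_iob_()
            'B-ORG'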
"""
if not self.is_ent:
return 'O'
        return f'{self.token.ent_iob_}-{self.token.ent_type_}'
@property
def is_superlative(self) -> bool:
"""Return ``True`` if this token is the superlative.
"""
return self.token.tag_ == 'JJS'
@property
def is_space(self) -> bool:
"""Return ``True`` if this token is white space only.
"""
return self.token.is_space
@property
def sent_i(self) -> int:
"""The index of the sentence to which the token belongs. This is not to be
confused with the index of the token in the respective sentence, which
is :obj:`.FeatureToken.i_sent`.
This attribute does not exist in a spaCy token, and was named as such
to follow the naming conventions of their API.
"""
targ = self.i
for six, sent in enumerate(self._doc.sents):
for tok in sent:
if tok.i == targ:
return six
@property
def lexspan(self) -> LexicalSpan:
"""The document indexed lexical span using :obj:`idx`.
"""
return LexicalSpan.from_token(self.spacy_token)
@property
def tag(self) -> int:
"""Fine-grained part-of-speech text.
"""
return self.token.tag
@property
def tag_(self) -> str:
"""Fine-grained part-of-speech text.
"""
return self.token.tag_
@property
def pos(self) -> int:
"""The simple UPOS part-of-speech tag.
"""
return self.token.pos
@property
def pos_(self) -> str:
"""The simple UPOS part-of-speech tag.
"""
return self.token.pos_
@property
def shape(self) -> int:
"""Transform of the tokens’s string, to show orthographic features. For
example, “Xxxx” or “d.
"""
return self.token.shape
@property
def shape_(self) -> str:
"""Transform of the tokens’s string, to show orthographic features. For
example, “Xxxx” or “d.
"""
return self.token.shape_
@property
def children(self):
"""A sequence of the token’s immediate syntactic children.
"""
return [c.i for c in self.token.children]
@property
def dep(self) -> int:
"""Syntactic dependency relation.
"""
return self.token.dep
@property
def dep_(self) -> str:
"""Syntactic dependency relation string representation.
"""
return self.token.dep_
@property
def norm_len(self) -> int:
"""The length of the norm in characters."""
return len(self.norm)
def __str__(self):
if hasattr(self, 'spacy_token'):
tokstr = self.spacy_token
else:
tokstr = self.norm
return f'{tokstr} ({self.norm})' | zensols.nlp | /zensols.nlp-1.8.0-py3-none-any.whl/zensols/nlp/tok.py | tok.py |
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import (
Tuple, Dict, Set, List, Optional, Any, Iterable, ClassVar, Type
)
from dataclasses import dataclass, field, fields
import numpy as np
from zensols.nlp import TokenContainer, FeatureSpan
from zensols.nlp.score import (
Score, ErrorScore, ScoreMethod, ScoreContext, HarmonicMeanScore
)
@dataclass
class SemEvalHarmonicMeanScore(HarmonicMeanScore):
"""A harmonic mean score with the additional SemEval computed scores (see
module :mod:`zensols.nlp.nerscore` docs).
"""
NAN_INSTANCE: ClassVar[SemEvalHarmonicMeanScore] = None
correct: int = field()
"""The number of correct (COR): both are the same."""
incorrect: int = field()
"""The number of incorrect (INC): the output of a system and the golden
annotation don’t match.
"""
partial: int = field()
"""The number of partial (PAR): system and the golden annotation are
somewhat “similar” but not the same.
"""
missed: int = field()
"""The number of missed (MIS): a golden annotation is not captured by a
system."""
spurious: int = field()
"""The number of spurious (SPU): system produces a response which does not
exist in the golden annotation.
"""
possible: int = field()
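    """The number of possible (POS) annotations: the sum of correct,
    incorrect, partial and missed.
    """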
actual: int = field()
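    """The number of actual (ACT) annotations produced by the system: the sum
    of correct, incorrect, partial and spurious.
    """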
SemEvalHarmonicMeanScore.NAN_INSTANCE = SemEvalHarmonicMeanScore(
*[np.nan] * 10)
@dataclass
class SemEvalScore(Score):
"""Contains all four harmonic mean SemEval scores (see module
:mod:`zensols.nlp.nerscore` docs). This score has four harmonic means
providing various levels of accuracy.
"""
NAN_INSTANCE: ClassVar[SemEvalScore] = None
strict: SemEvalHarmonicMeanScore = field()
"""Exact boundary surface string match and entity type."""
exact: SemEvalHarmonicMeanScore = field()
"""Exact boundary match over the surface string, regardless of the type."""
partial: SemEvalHarmonicMeanScore = field()
"""Partial boundary match over the surface string, regardless of the type.
"""
ent_type: SemEvalHarmonicMeanScore = field()
"""Some overlap between the system tagged entity and the gold annotation is
required.
"""
def asrow(self, meth: str) -> Dict[str, float]:
row: Dict[str, Any] = {}
f: field
for f in fields(self):
score: Score = getattr(self, f.name)
row.update(score.asrow(f'{meth}_{f.name}'))
return row
SemEvalScore.NAN_INSTANCE = SemEvalScore(
partial=SemEvalHarmonicMeanScore.NAN_INSTANCE,
strict=SemEvalHarmonicMeanScore.NAN_INSTANCE,
exact=SemEvalHarmonicMeanScore.NAN_INSTANCE,
ent_type=SemEvalHarmonicMeanScore.NAN_INSTANCE)
@dataclass
class SemEvalScoreMethod(ScoreMethod):
"""A Semeval-2013 Task 9.1 scor (see module :mod:`zensols.nlp.nerscore`
docs). This score has four harmonic means providing various levels of
accuracy. Sentence pairs are ordered as ``(<gold>, <prediction>)``.
"""
labels: Optional[Set[str]] = field(default=None)
"""The NER labels on which to evaluate. If not provided, text is evaluated
under a (stubbed tag) label.
"""
@classmethod
def _get_external_modules(cls: Type) -> Tuple[str, ...]:
return ('nervaluate',)
def _score_pair(self, gold: TokenContainer, pred: TokenContainer) -> \
SemEvalScore:
from nervaluate import Evaluator
def nolab(c: TokenContainer, label: str) -> Tuple[Dict[str, Any], ...]:
return tuple(map(
lambda t: dict(label=label, start=t.lexspan.begin,
end=t.lexspan.end),
c.token_iter()))
        def withlab(c: TokenContainer) -> \
                Tuple[Tuple[Dict[str, Any], ...], ...]:
            ent_set: List[Tuple[Dict[str, Any], ...]] = []
ent: FeatureSpan
for ent in c.entities:
ents: Tuple[Dict[str, Any], ...] = tuple(map(
lambda t: dict(label=t.ent_, start=t.lexspan.begin,
end=t.lexspan.end), ent))
ent_set.append(ents)
return tuple(ent_set)
tags: Tuple[str, ...]
gold_ents: Tuple[Dict[str, Any], ...]
pred_ents: Tuple[Dict[str, Any], ...]
if self.labels is None:
label: str = '_'
gold_ents, pred_ents = nolab(gold, label), nolab(pred, label)
gold_ents, pred_ents = (gold_ents,), (pred_ents,)
tags = (label,)
else:
gold_ents, pred_ents = withlab(gold), withlab(pred)
tags = tuple(self.labels)
evaluator = Evaluator(gold_ents, pred_ents, tags=tags)
res: Dict[str, Any] = evaluator.evaluate()[0]
hscores: Dict[str, SemEvalHarmonicMeanScore] = {}
k: str
hdat: Dict[str, float]
for k, hdat in res.items():
hdat['f_score'] = hdat.pop('f1')
hscores[k] = (SemEvalHarmonicMeanScore(**hdat))
return SemEvalScore(**hscores)
def _score(self, meth: str, context: ScoreContext) -> \
Iterable[SemEvalScore]:
gold: TokenContainer
pred: TokenContainer
for gold, pred in context.pairs:
try:
yield self._score_pair(gold, pred)
except Exception as e:
yield ErrorScore(meth, e, SemEvalScore.NAN_INSTANCE) | zensols.nlp | /zensols.nlp-1.8.0-py3-none-any.whl/zensols/nlp/nerscore.py | nerscore.py |
__author__ = 'Paul Landes'
from typing import ClassVar, Tuple, List, Iterable, Optional
from dataclasses import dataclass, field
from abc import ABCMeta, abstractmethod
import textwrap as tw
import re
import logging
from . import LexicalSpan, TokenContainer, FeatureSentence, FeatureDocument
logger = logging.getLogger(__name__)
@dataclass
class Chunker(object, metaclass=ABCMeta):
"""Splits :class:`~zensols.nlp.container.TokenContainer` instances using
regular expression :obj:`pattern`. Matched container (implementation of the
container is based on the subclass) are given if used as an iterable. The
document of all parsed containers is given if used as a callable.
"""
doc: FeatureDocument = field()
"""The document that contains the entire text (i.e. :class:`.Note`)."""
pattern: re.Pattern = field()
"""The chunk regular expression. There should be a default for each
subclass.
"""
sub_doc: FeatureDocument = field(default=None)
"""A lexical span created document of :obj:`doc`, which defaults to the
global document. Providing this and :obj:`char_offset` allows use of a
document without having to use :meth:`.TokenContainer.reindex`.
"""
char_offset: int = field(default=None)
"""The 0-index absolute character offset where :obj:`sub_doc` starts.
    However, if the value is -1, then the offset is used as the beginning
character offset of the first token in the :obj:`sub_doc`.
"""
def __post_init__(self):
if self.sub_doc is None:
self.sub_doc = self.doc
def _get_coff(self) -> int:
coff: int = self.char_offset
if coff is None:
coff = self.doc.lexspan.begin
if coff == -1:
coff = next(self.sub_doc.token_iter()).lexspan.begin
return coff
def __iter__(self) -> Iterable[TokenContainer]:
def match_to_span(m: re.Match) -> LexicalSpan:
s: Tuple[int, int] = m.span(1)
return LexicalSpan(s[0] + coff, s[1] + coff)
        def trunc(s: str) -> str:
            sh: str = tw.shorten(s, 50).replace('\n', '\\n')
            return f'<<{sh}>>'
conts: List[TokenContainer] = []
if self.sub_doc.token_len > 0:
# offset from the global document (if a subdoc from get_overlap...)
coff: int = self._get_coff()
# the text to match on, or ``gtext`` if there is no subdoc
subdoc_text: str = self.sub_doc.text
# the global document
gtext: str = self.doc.text
# all regular expression matches found in ``subdoc_text``
matches: List[LexicalSpan] = \
list(map(match_to_span, self.pattern.finditer(subdoc_text)))
# guard on no-matches-found edge case
if len(matches) > 0:
subdoc_len: int = len(subdoc_text) + coff
start: int = matches[0].begin
end: int = matches[-1].end
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'coff: {coff}, start={start}, end={end}')
                # add the leading content as a match when the first match
                # does not start at the first character
if start > coff:
fms = LexicalSpan(coff, start - 1)
matches.insert(0, fms)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'adding start match: {start}, {coff}: ' +
f'{gtext[fms[0]:fms[1]]}')
# and any trailing content when match doesn't include last char
if subdoc_len > end:
matches.append(LexicalSpan(end, subdoc_len))
# treat matches as a LIFO stack
while len(matches) > 0:
# pop the first match in the stack
span: LexicalSpan = matches.pop(0)
cont: TokenContainer = None
if logger.isEnabledFor(logging.DEBUG):
st: str = trunc(gtext[span[0]:span[1]])
logger.debug(
f'span begin: {span.begin}, start: {start}, ' +
f'match {span}: {st}')
if span.begin > start:
# when the match comes after the last ending marker,
# added this content to the last match entry
cont = self._create_container(
LexicalSpan(start, span.begin - 1))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'create (trailing): {cont}')
# content exists if it's text we keep (ie non-space)
if cont is not None:
if len(conts) > 0:
# tack on to the last entry since it trailed
# (probably after a newline)
conts[-1] = self._merge_containers(
conts[-1], cont)
else:
# add a new entry
conts.append(cont)
                            # indicate we already added the content so we
                            # don't double add it
cont = None
# we dealt with the last trailling content from the
# previous span, but we haven't taken care of this span
matches.insert(0, span)
else:
# create and add the content for the exact match (again,
# we skip empty space etc.)
cont = self._create_container(span)
if logger.isEnabledFor(logging.DEBUG):
st: str = trunc(gtext[span[0]:span[1]])
logger.debug(f'create (not empty) {st} -> {cont}')
if cont is not None:
conts.append(cont)
# walk past this span to detect unmatched content for the
# next iteration (if there is one)
start = span.end + 1
# adhere to iterable contract for potentially more dynamic subclasses
return iter(conts)
def _merge_containers(self, a: TokenContainer, b: TokenContainer) -> \
TokenContainer:
"""Merge two token containers into one, which is used for straggling
content tacked to previous entries for text between matches.
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'merging: {a}||{b}')
return FeatureDocument((a, b)).to_sentence()
@abstractmethod
def _create_container(self, span: LexicalSpan) -> Optional[TokenContainer]:
"""Create content from :obj:`doc` and :obj:`sub_doc` as a subdocument
for span ``span``.
"""
pass
@abstractmethod
def to_document(self, conts: Iterable[TokenContainer]) -> FeatureDocument:
pass
def __call__(self) -> FeatureDocument:
return self.to_document(self)
@dataclass
class ParagraphChunker(Chunker):
"""A :class:`.Chunker` that splits list item and enumerated lists into
separate sentences. Matched sentences are given if used as an iterable.
For this reason, this class will probably be used as an iterable since
clients will usually want just the separated paragraphs as documents
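
    An example sketch, assuming ``doc`` is a :class:`.FeatureDocument` parsed
    from hypothetical text with two paragraphs separated by a blank line::

        >>> for para in ParagraphChunker(doc):
        ...     print(para.text)
        First paragraph.
        Second paragraph.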
"""
DEFAULT_SPAN_PATTERN: ClassVar[re.Pattern] = re.compile(
r'(.+?)(?:(?=\n{2})|\Z)', re.MULTILINE | re.DOTALL)
"""The default paragraph regular expression, which uses two newline positive
lookaheads to avoid matching on paragraph spacing.
"""
pattern: re.Pattern = field(default=DEFAULT_SPAN_PATTERN)
"""The list regular expression, which defaults to
:obj:`DEFAULT_SPAN_PATTERN`.
"""
def _merge_containers(self, a: TokenContainer, b: TokenContainer) -> \
TokenContainer:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'merging: {a}||{b}')
# return documents to keep as much of the sentence structure as possible
return FeatureDocument.combine_documents((a, b))
def _create_container(self, span: LexicalSpan) -> Optional[TokenContainer]:
doc: FeatureDocument = self.doc.get_overlapping_document(span)
slen: int = len(doc.sents)
        # remove empty sentences from double newlines at the beginning or end
sents: Tuple[FeatureSentence] = tuple(
filter(lambda s: len(s) > 0, map(lambda x: x.strip(), doc)))
if slen != len(sents):
# when we find surrounding whitespace, create a (sentence) stripped
# document
doc = FeatureDocument(sents=tuple(sents), text=doc.text.strip())
if len(doc.sents) > 0:
# we still need to strip per sentence for whitespace added at the
# sentence level
return doc.strip()
def to_document(self, conts: Iterable[TokenContainer]) -> FeatureDocument:
"""It usually makes sense to use instances of this class as an iterable
rather than this (see class docs).
"""
return FeatureDocument.combine_documents(conts)
@dataclass
class ListItemChunker(Chunker):
"""A :class:`.Chunker` that splits list item and enumerated lists into
separate sentences. Matched sentences are given if used as an iterable.
    This is useful when spaCy chunks lists into sentences incorrectly; it
    finds lists using a regular expression matching lines that start with a
    decimal or list characters such as ``-`` and ``+``.
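
    An example sketch, assuming ``doc`` is a :class:`.FeatureDocument` parsed
    from hypothetical text containing the lines ``- first item`` and
    ``- second item``::

        >>> for sent in ListItemChunker(doc):
        ...     print(sent.text)
        - first item
        - second item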
"""
DEFAULT_SPAN_PATTERN: ClassVar[re.Pattern] = re.compile(
r'^((?:[0-9-+]+|[a-zA-Z]+:)[^\n]+)$', re.MULTILINE)
"""The default list item regular expression, which uses an initial character
item notation or an initial enumeration digit.
"""
pattern: re.Pattern = field(default=DEFAULT_SPAN_PATTERN)
"""The list regular expression, which defaults to
:obj:`DEFAULT_SPAN_PATTERN`.
"""
def _create_container(self, span: LexicalSpan) -> Optional[TokenContainer]:
doc: FeatureDocument = self.doc.get_overlapping_document(span)
sent: FeatureSentence = doc.to_sentence()
# skip empty sentences, usually (spaCy) sentence chunked from text with
# two newlines in a row
sent.strip()
if sent.token_len > 0:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'narrowed sent: <{sent.text}>')
return sent
def to_document(self, conts: Iterable[TokenContainer]) -> FeatureDocument:
sents: Tuple[FeatureSentence] = tuple(conts)
if logger.isEnabledFor(logging.DEBUG):
logger.debug('creating doc from:')
for s in sents:
logger.debug(f' {s}')
return FeatureDocument(
sents=sents,
text='\n'.join(map(lambda s: s.text, sents))) | zensols.nlp | /zensols.nlp-1.8.0-py3-none-any.whl/zensols/nlp/chunker.py | chunker.py |
__author__ = 'Paul Landes'
from typing import Set, List, Dict, Tuple
from dataclasses import dataclass, field
import logging
from spacy.tokens.token import Token
from . import (
ParseError, TokenContainer, FeatureDocumentParser,
FeatureDocument, FeatureSentence, FeatureToken,
)
logger = logging.getLogger(__name__)
@dataclass
class CombinerFeatureDocumentParser(FeatureDocumentParser):
"""A class that combines features from two :class:`.FeatureDocumentParser`
instances. Features parsed using each :obj:`source_parser` are optionally
copied or overwritten on a token by token basis in the feature document
parsed by this instance.
The target tokens are sometimes added to or clobbered from the source,
but not the other way around.
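
    An example configuration sketch (the ``ner_doc_parser`` section and the
    feature choices are hypothetical)::

        [combine_doc_parser]
        class_name = zensols.nlp.CombinerFeatureDocumentParser
        target_parser = instance: doc_parser
        source_parsers = instance: list: ner_doc_parser
        overwrite_features = list: ent_, ent_iob_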
"""
target_parser: FeatureDocumentParser = field()
"""The parser in to which data and features are merged."""
source_parsers: List[FeatureDocumentParser] = field(default=None)
"""The language resource used to parse documents and create token
attributes.
"""
validate_features: Set[str] = field(default_factory=set)
"""A set of features to compare across all tokens when copying. If any of
the given features don't match, an mismatch token error is raised.
"""
yield_features: List[str] = field(default_factory=list)
"""A list of features to be copied (in order) if the target token is not
set.
"""
overwrite_features: List[str] = field(default_factory=list)
"""A list of features to be copied/overwritten in order given in the list.
"""
overwrite_nones: bool = field(default=False)
"""Whether to write ``None`` for missing :obj:`overwrite_features`."""
include_detached_features: bool = field(default=True)
"""Whether to include copied (yielded or overwritten) features as listed
detected features. This controls what is compared, cloned and for printed
in :meth:`~zensols.config.writable.Writable.write`.
:see: :obj:`.FeatureToken.default_detached_feature_ids`
"""
def _validate_features(self, target_tok: FeatureToken,
source_tok: FeatureToken,
context_container: TokenContainer):
for f in self.validate_features:
prim = getattr(target_tok, f)
rep = getattr(source_tok, f)
if prim != rep:
raise ParseError(
f'Mismatch tokens: {target_tok.text}({f}={prim}) ' +
f'!= {source_tok.text}({f}={rep}) ' +
f'in container: {context_container}')
def _merge_tokens(self, target_tok: FeatureToken,
source_tok: FeatureToken,
context_container: TokenContainer):
overwrite_nones: bool = self.overwrite_nones
include_detached: bool = self.include_detached_features
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'merging tokens: {source_tok} ({type(source_tok)}) '
f'-> {target_tok} ({type(target_tok)})')
self._validate_features(target_tok, source_tok, context_container)
f: str
for f in self.yield_features:
targ = target_tok.get_value(f)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'yield feature: {f}, target={targ}')
if targ is None:
src = source_tok.get_value(f)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'yield feature: {f}, src={src}')
if src is not None:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'{src} -> {target_tok.text}.{f}')
setattr(target_tok, f, src)
if include_detached and \
target_tok._detatched_feature_ids is not None:
target_tok._detatched_feature_ids.add(f)
for f in self.overwrite_features:
if overwrite_nones:
src = getattr(source_tok, f)
else:
src = source_tok.get_value(f)
src = FeatureToken.NONE if src is None else src
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'overwrite: {src} -> {target_tok.text}.{f}')
setattr(target_tok, f, src)
if include_detached and \
target_tok._detatched_feature_ids is not None:
target_tok._detatched_feature_ids.add(f)
def _debug_sentence(self, sent: FeatureSentence, name: str):
logger.debug(f'{name}:')
for i, tok in enumerate(sent.tokens):
logger.debug(f' {i}: i={tok.i}, pos={tok.pos_}, ' +
f'ent={tok.ent_}: {tok}')
def _merge_sentence(self):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'merging sentences: {self._source_sent.tokens} ' +
f'-> {self._target_sent.tokens}')
        n_toks: int = 0
        assert len(self._target_sent) == len(self._source_sent)
        for target_tok, source_tok in zip(
                self._target_sent, self._source_sent):
            assert target_tok.idx == source_tok.idx
            self._merge_tokens(target_tok, source_tok, self._target_sent)
            n_toks += 1
        assert n_toks == len(self._target_sent)
def _prepare_merge_doc(self):
pass
def _complete_merge_doc(self):
pass
def _merge_doc(self):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'merging docs: {self._source_doc} -> ' +
f'{self._target_doc}')
for target_sent, source_sent in zip(
self._target_doc, self._source_doc):
self._target_sent = target_sent
self._source_sent = source_sent
self._prepare_merge_doc()
try:
self._merge_sentence()
finally:
del self._target_sent
del self._source_sent
self._complete_merge_doc()
self._target_doc._combine_update(self._source_doc)
def parse(self, text: str, *args, **kwargs) -> FeatureDocument:
target_doc = self.target_parser.parse(text, *args, **kwargs)
if self.source_parsers is None or len(self.source_parsers) == 0:
logger.warning(f'No source parsers set on {self}, ' +
'which disables feature combining')
else:
for source_parser in self.source_parsers:
source_doc = source_parser.parse(text, *args, **kwargs)
self._target_doc = target_doc
self._source_doc = source_doc
try:
self._merge_doc()
finally:
del self._target_doc
del self._source_doc
return target_doc
def __getattr__(self, attr, default=None):
"""Delegate attribute requests such as
:obj:`.SpacyFeatureDocumentParser.token_feature_ids`.
"""
try:
return super().__getattribute__(attr)
except AttributeError:
return self.target_parser.__getattribute__(attr)
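# Illustrative usage sketch (not part of the original module): combine part of
# speech tags from a secondary parser in to documents created by a primary
# parser.  Both parser arguments are hypothetical stand-ins for instances
# typically created by a ConfigFactory.
def _example_combiner(target_parser: FeatureDocumentParser,
                      pos_parser: FeatureDocumentParser) -> FeatureDocument:
    parser = CombinerFeatureDocumentParser(
        target_parser=target_parser,
        source_parsers=[pos_parser],
        overwrite_features=['pos_'])
    return parser('The combined document gets part of speech tags.')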
@dataclass
class MappingCombinerFeatureDocumentParser(CombinerFeatureDocumentParser):
"""Maps the source to respective tokens in the target document using spaCy
artifacts.
"""
validate_features: Set[str] = field(default=frozenset({'idx'}))
"""A set of features to compare across all tokens when copying. If any of
    the given features don't match, a token mismatch error is raised.  The
default is the token's index in the document, which should not change in
most cases.
"""
merge_sentences: bool = field(default=True)
"""If ``False`` ignore sentences and map everything at the token level.
    Otherwise, it uses the same hierarchy mapping as the super class.  This is
useful when sentence demarcations are not aligned across source document
parsers and this parser.
"""
def _merge_entities_by_token(self, target_tok, source_tok):
"""Add the source sentence entity spans to the target sentence. This is
important so the original spaCy predicted entities are merged from the
source to the target.
"""
        tdoc, sdoc = self._target_doc, self._source_doc
        ssent = None
        try:
            ssent = next(sdoc.get_overlapping_sentences(source_tok.lexspan))
        except StopIteration:
            pass
        if ssent is not None:
            tsent = next(tdoc.get_overlapping_sentences(target_tok.lexspan))
            skips = set(tsent._ents)
            for ent in ssent._ents:
                begin, end = ent
                if begin == source_tok.idx and ent not in skips:
                    tsent._ents.append(ent)
def _merge_token_containers(self, target_container: TokenContainer,
rmap: Dict[int, FeatureToken]):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'merge: {target_container}, mapping: {rmap}')
visited: Set[FeatureToken] = set()
targ_toks: Set[FeatureToken] = set(target_container.token_iter())
for target_tok in target_container.token_iter():
source_tok: FeatureToken = rmap.get(target_tok.idx)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'entry: {target_tok.idx}/{target_tok} ' +
f'-> {source_tok}')
if source_tok is not None:
visited.add(source_tok)
self._merge_tokens(target_tok, source_tok, target_container)
if source_tok.ent_ != FeatureToken.NONE:
self._merge_entities_by_token(target_tok, source_tok)
for target_tok in (targ_toks - visited):
fname: str
for fname in self.overwrite_features:
if not hasattr(target_tok, fname):
setattr(target_tok, fname, FeatureToken.NONE)
def _merge_sentence(self):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'merging sentences: {self._source_sent.tokens} ' +
f'-> {self._target_sent.tokens}')
rmp: Dict[int, FeatureToken] = self._source_token_mapping
self._merge_token_containers(self._target_sent, rmp)
self._target_doc._combine_update(self._source_doc)
def _prepare_merge_doc(self):
if self.merge_sentences:
self._source_token_mapping = self._source_doc.tokens_by_idx
def _complete_merge_doc(self):
if self.merge_sentences:
del self._source_token_mapping
def _merge_doc(self):
if self.merge_sentences:
super()._merge_doc()
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'merging docs: {self._source_doc} -> ' +
f'{self._target_doc}')
source_token_mapping = self._source_doc.tokens_by_idx
self._merge_token_containers(self._target_doc, source_token_mapping)
self._target_doc._combine_update(self._source_doc) | zensols.nlp | /zensols.nlp-1.8.0-py3-none-any.whl/zensols/nlp/combine.py | combine.py |
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Tuple, Dict, Any, Sequence, Set, ClassVar
from dataclasses import dataclass, field
from abc import abstractmethod, ABCMeta, ABC
import logging
from io import StringIO
from spacy.language import Language
from zensols.util import Hasher
from zensols.config import ImportIniConfig, ImportConfigFactory
from zensols.persist import PersistableContainer, Stash
from zensols.config import Dictable
from . import NLPError, FeatureToken, FeatureSentence, FeatureDocument
logger = logging.getLogger(__name__)
class ComponentInitializer(ABC):
"""Called by :class:`.Component` to do post spaCy initialization.
"""
@abstractmethod
def init_nlp_model(self, model: Language, component: Component):
"""Do any post spaCy initialization on the the referred framework."""
pass
@dataclass
class Component(object):
"""A pipeline component to be added to the spaCy model. There are a list of
these set in the :class:`.LanguageResource`.
"""
name: str = field()
"""The section name."""
pipe_name: str = field(default=None)
"""The pipeline component name to add to the pipeline. If ``None``, use
:obj:`name`.
"""
pipe_config: Dict[str, str] = field(default=None)
"""The configuration to add with the ``config`` kwarg in the
:meth:`.Language.add_pipe` call to the spaCy model.
"""
pipe_add_kwargs: Dict[str, Any] = field(default_factory=dict)
"""Arguments to add along with the call to
:meth:`~spacy.language.Language.add_pipe`.
"""
modules: Sequence[str] = field(default=())
"""The module to import before adding component pipelines. This will register
components mentioned in :obj:`components` when the resepctive module is
loaded.
"""
initializers: Tuple[ComponentInitializer, ...] = field(default=())
"""Instances to initialize upon this object's initialization."""
def __post_init__(self):
if self.pipe_name is None:
self.pipe_name = self.name
def __hash__(self) -> int:
x = hash(self.name)
x += 13 * hash(self.pipe_name)
if self.pipe_config:
x += 13 * hash(str(self.pipe_config.values()))
return x
def init(self, model: Language):
"""Initialize the component and add it to the NLP pipe line. This base class
implementation loads the :obj:`module`, then calls
:meth:`.Language.add_pipe`.
:param model: the model to add the spaCy model (``nlp`` in their
parlance)
"""
for mod in self.modules:
__import__(mod)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating pipe {self.pipe_name} with args: ' +
f'{self.pipe_add_kwargs}')
if self.pipe_config is None:
model.add_pipe(self.pipe_name, **self.pipe_add_kwargs)
else:
model.add_pipe(self.pipe_name, config=self.pipe_config,
**self.pipe_add_kwargs)
for to_init in self.initializers:
to_init.init_nlp_model(model, self)
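# Illustrative sketch (hypothetical names): register a custom spaCy pipe and
# describe it with a Component.  In practice the registering module would be
# listed in :obj:`Component.modules` so :meth:`Component.init` imports it; the
# registration is done inline here only for brevity.
@Language.component('doc_stats')
def _doc_stats_pipe(doc):
    # a no-op pipe; a real component might attach statistics to the doc
    return doc
def _example_component(model: Language):
    comp = Component(name='doc_stats', pipe_add_kwargs={'last': True})
    comp.init(model)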
@dataclass
class FeatureDocumentParser(PersistableContainer, Dictable, metaclass=ABCMeta):
"""This class parses text in to instances of :class:`.FeatureDocument`
instances using :meth:`parse`.
"""
TOKEN_FEATURE_IDS: ClassVar[Set[str]] = FeatureToken.FEATURE_IDS
"""The default value for :obj:`token_feature_ids`."""
def __post_init__(self):
super().__init__()
@staticmethod
def default_instance() -> FeatureDocumentParser:
"""Create the parser as configured in the resource library of the
package.
"""
config: str = (
'[import]\n' +
'config_file = resource(zensols.nlp): resources/obj.conf')
factory = ImportConfigFactory(ImportIniConfig(StringIO(config)))
return factory('doc_parser')
@abstractmethod
def parse(self, text: str, *args, **kwargs) -> FeatureDocument:
"""Parse text or a text as a list of sentences.
:param text: either a string or a list of strings; if the former a
document with one sentence will be created, otherwise a
document is returned with a sentence for each string in
the list
:param args: the arguments used to create the FeatureDocument instance
:param kwargs: the key word arguments used to create the
FeatureDocument instance
"""
def __call__(self, text: str, *args, **kwargs) -> FeatureDocument:
"""Invoke :meth:`parse` with the context arguments.
:see: :meth:`parse`
"""
return self.parse(text, *args, **kwargs)
def __str__(self):
return f'model_name: {self.model_name}, lang: {self.lang}'
def __repr__(self):
return self.__str__()
class FeatureTokenDecorator(ABC):
"""Implementations can add, remove or modify features on a token.
"""
@abstractmethod
def decorate(self, token: FeatureToken):
pass
class FeatureSentenceDecorator(ABC):
"""Implementations can add, remove or modify features on a sentence.
"""
@abstractmethod
def decorate(self, sent: FeatureSentence):
pass
class FeatureDocumentDecorator(ABC):
"""Implementations can add, remove or modify features on a document.
"""
@abstractmethod
def decorate(self, doc: FeatureDocument):
pass
@dataclass
class DecoratedFeatureDocumentParser(FeatureDocumentParser):
"""This class adapts the :class:`.FeatureDocumentParser` adaptors to the
general case using a GoF decorator pattern. This is useful for any post
processing needed on existing configured document parsers.
"""
delegate: FeatureDocumentParser = field()
"""Used to create the feature documents."""
token_decorators: Sequence[FeatureTokenDecorator] = field(default=())
"""A list of decorators that can add, remove or modify features on a token.
"""
sentence_decorators: Sequence[FeatureSentenceDecorator] = field(
default=())
"""A list of decorators that can add, remove or modify features on a
sentence.
"""
document_decorators: Sequence[FeatureDocumentDecorator] = field(
default=())
"""A list of decorators that can add, remove or modify features on a
document.
"""
def decorate(self, doc: FeatureDocument):
td: FeatureTokenDecorator
for td in self.token_decorators:
tok: FeatureToken
for tok in doc.token_iter():
td.decorate(tok)
sd: FeatureSentenceDecorator
for sd in self.sentence_decorators:
sent: FeatureSentence
for sent in doc.sents:
sd.decorate(sent)
dd: FeatureDocumentDecorator
for dd in self.document_decorators:
dd.decorate(doc)
def parse(self, text: str, *args, **kwargs) -> FeatureDocument:
doc: FeatureDocument = self.delegate.parse(text, *args, **kwargs)
self.decorate(doc)
return doc
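# Illustrative sketch (hypothetical): a token decorator that lower-cases each
# token's normalized text, wired around an existing (stand-in) parser.  It
# assumes ``norm`` is a settable attribute on the token implementation.
class _LowerCaseTokenDecorator(FeatureTokenDecorator):
    def decorate(self, token: FeatureToken):
        token.norm = token.norm.lower()
def _example_decorated(base_parser: FeatureDocumentParser) -> FeatureDocument:
    parser = DecoratedFeatureDocumentParser(
        delegate=base_parser,
        token_decorators=(_LowerCaseTokenDecorator(),))
    return parser.parse('Decorators Post-Process Parsed Documents.')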
@dataclass
class CachingFeatureDocumentParser(FeatureDocumentParser):
"""A document parser that persists previous parses using the hash of the
text as a key. Caching is optional given the value of :obj:`stash`, which
    is useful when this class is extended for use cases other than just
    caching.
"""
delegate: FeatureDocumentParser = field()
"""Used to parse in to documents on cache misses."""
stash: Stash = field(default=None)
"""The stash that persists the feature document instances. If this is not
provided, no caching will happen.
"""
hasher: Hasher = field(default_factory=Hasher)
"""Used to hash the natural langauge text in to string keys."""
@property
def token_feature_ids(self) -> Set[str]:
return self.delegate.token_feature_ids
def _hash_text(self, text: str) -> str:
self.hasher.reset()
self.hasher.update(text)
return self.hasher()
def _load_or_parse(self, text: str, dump: bool, *args, **kwargs) -> \
Tuple[FeatureDocument, str, bool]:
key: str = self._hash_text(text)
doc: FeatureDocument = None
loaded: bool = False
if self.stash is not None:
doc = self.stash.load(key)
if doc is None:
doc = self.delegate.parse(text, *args, **kwargs)
if dump and self.stash is not None:
self.stash.dump(key, doc)
else:
if doc.text != text:
raise NLPError(
                    f'Document text does not match: <{text}> != <{doc.text}>')
loaded = True
return doc, key, loaded
def parse(self, text: str, *args, **kwargs) -> FeatureDocument:
return self._load_or_parse(text, True, *args, **kwargs)[0]
def clear(self):
"""Clear the caching stash."""
if self.stash is not None:
self.stash.clear() | zensols.nlp | /zensols.nlp-1.8.0-py3-none-any.whl/zensols/nlp/parser.py | parser.py |
__author__ = 'Paul Landes'
from typing import Tuple, Any, Dict, List, Set, Union
from dataclasses import dataclass, field
from abc import ABCMeta, abstractmethod
from enum import Enum, auto
from collections import OrderedDict
from zensols.config import Dictable
from . import (
NLPError, FeatureToken, TokenContainer,
FeatureSpan, FeatureSentence, FeatureDocument
)
class Include(Enum):
"""Indicates what to include at each level.
"""
original = auto()
"""The original text."""
normal = auto()
"""The normalized form of the text."""
tokens = auto()
"""The tokens of the :class:`.TokenContainer`."""
sentences = auto()
"""The sentences of the :class:`.FeatureDocument`."""
@dataclass
class Serialized(Dictable, metaclass=ABCMeta):
"""A base strategy class that can serialize :class:`.TokenContainer`
instances.
"""
container: TokenContainer = field()
"""The container to be serialized."""
includes: Set[Include] = field()
"""The things to be included at the level of the subclass serializer."""
feature_ids: Tuple[str, ...] = field()
"""The feature IDs used when serializing tokens."""
@abstractmethod
def _serialize(self) -> Dict[str, Any]:
"""Implemented to serialize :obj:`container` in to a dictionary."""
pass
def _from_dictable(self, recurse: bool, readable: bool,
class_name_param: str = None) -> Dict[str, Any]:
return self._serialize()
@dataclass
class SerializedTokenContainer(Serialized):
"""Serializes instance of :class:`.TokenContainer`. This is used to
serialize spans and sentences.
"""
def _feature_tokens(self, container: TokenContainer) -> \
List[Dict[str, Any]]:
"""Serialize tokens of ``container`` in to a list of dictionary
features.
"""
tok_feats: List[Dict[str, Any]] = []
tok: FeatureToken
for tok in container.token_iter():
tfs: Dict[str, Any] = tok.get_features(self.feature_ids)
if len(tfs) > 0:
tok_feats.append(tfs)
return tok_feats
def _serialize(self) -> Dict[str, Any]:
dct = OrderedDict()
if Include.original in self.includes:
dct[Include.original.name] = self.container.text
if Include.normal in self.includes:
dct[Include.normal.name] = self.container.norm
if Include.tokens in self.includes:
dct[Include.tokens.name] = self._feature_tokens(self.container)
return dct
@dataclass
class SerializedFeatureDocument(Serialized):
"""A serializer for feature documents. The :obj:`container` has to be an
instance of a :class:`.FeatureDocument`.
"""
sentence_includes: Set[Include] = field()
"""The list of things to include in the sentences of the document."""
def _serialize(self) -> Dict[str, Any]:
doc = SerializedTokenContainer(
container=self.container,
includes=self.includes,
feature_ids=self.feature_ids)
dct = OrderedDict(doc=doc.asdict())
if Include.sentences in self.includes:
sents: List[Dict[str, Any]] = []
dct[Include.sentences.name] = sents
sent: FeatureSentence
for sent in self.container.sents:
ser = SerializedTokenContainer(
container=sent,
includes=self.sentence_includes,
feature_ids=self.feature_ids)
sents.append(ser.asdict())
return dct
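# Illustrative sketch (``doc`` is a stand-in for a parsed FeatureDocument):
# serialize a document to a dictionary keeping the normalized text at the
# document level and per-token features for each sentence.  The feature IDs
# used here ('norm', 'pos_') are examples, not a fixed set.
def _example_serialize(doc: FeatureDocument) -> Dict[str, Any]:
    ser = SerializedFeatureDocument(
        container=doc,
        includes={Include.normal, Include.sentences},
        sentence_includes={Include.tokens},
        feature_ids=('norm', 'pos_'))
    return ser.asdict()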
@dataclass
class SerializedTokenContainerFactory(Dictable):
"""Creates instances of :class:`.Serialized` from instances of
:class:`.TokenContainer`. These can then be used as
:class:`~zensols.config.dictable.Dictable` instances, specifically with the
``asdict`` and ``asjson`` methods.
"""
sentence_includes: Set[Union[Include, str]] = field()
"""The things to be included in sentences."""
document_includes: Set[Union[Include, str]] = field()
"""The things to be included in documents."""
feature_ids: Tuple[str, ...] = field(default=None)
"""The feature IDs used when serializing tokens."""
def __post_init__(self):
def map_thing(x):
if isinstance(x, str):
x = Include.__members__[x]
return x
# convert strings to enums for easy app configuration
for ai in 'sentence document'.split():
attr = f'{ai}_includes'
val = set(map(map_thing, getattr(self, attr)))
setattr(self, attr, val)
def create(self, container: TokenContainer) -> Serialized:
"""Create a serializer from ``container`` (see class docs).
        :param container: the container to be serialized
:return: an object that can be serialized using ``asdict`` and
                 ``asjson`` methods.
"""
serialized: Serialized
if isinstance(container, FeatureDocument):
serialized = SerializedFeatureDocument(
container=container,
includes=self.document_includes,
sentence_includes=self.sentence_includes,
feature_ids=self.feature_ids)
        elif isinstance(container, FeatureSpan):
            serialized = SerializedTokenContainer(
                container=container,
                includes=self.sentence_includes,
                feature_ids=self.feature_ids)
else:
raise NLPError(f'No serialization method for {type(container)}')
return serialized
def __call__(self, container: TokenContainer) -> Serialized:
"""See :meth:`create`."""
return self.create(container) | zensols.nlp | /zensols.nlp-1.8.0-py3-none-any.whl/zensols/nlp/serial.py | serial.py |
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import (
Tuple, Set, Dict, Iterable, List, ClassVar, Union, Optional, Type
)
from dataclasses import dataclass, field
from abc import ABCMeta, ABC, abstractmethod
import logging
import sys
from io import TextIOBase
import nltk.translate.bleu_score as bleu
import numpy as np
from zensols.introspect import ClassImporter
from zensols.config import Dictable
from zensols.persist import persisted
from zensols.nlp import TokenContainer
from . import NLPError
logger = logging.getLogger(__name__)
class ScorerError(NLPError):
"""Raised for any scoring errors (this module)."""
pass
@dataclass
class Score(Dictable, metaclass=ABCMeta):
"""Individual scores returned from :class:`.ScoreMethod`.
"""
def asrow(self, meth: str) -> Dict[str, float]:
return {f'{meth}_{x[0]}': x[1] for x in self.asdict().items()}
@dataclass(eq=False)
class ErrorScore(Score):
"""A replacement instance when scoring fails from a raised exception.
"""
method: str = field(repr=False)
"""The method of the :class:`.ScoreMethod` that raised the exception."""
exception: Exception = field()
"""The exception that was raised."""
replace_score: Score = field(default=None)
"""The score to use in place of this score. Otherwise :meth:`asrow` return
a single :obj:`numpy.nan` like :class:`.FloatScore`.
"""
def asrow(self, meth: str) -> Dict[str, float]:
if self.replace_score is not None:
return self.replace_score.asrow(self.method)
return {self.method: np.nan}
    def __eq__(self, other) -> bool:
        return self.method == other.method and \
            str(self.exception) == str(other.exception)
@dataclass
class FloatScore(Score):
"""Float container. This is needed to create the flat result container
    structure.  Object creation becomes less important since most clients will
    use :meth:`.ScoreSet.as_numpy`.
"""
NAN_INSTANCE: ClassVar[FloatScore] = None
"""Used to add to ErrorScore for harmonic means replacements.
"""
value: float = field()
"""The value of score."""
def asrow(self, meth: str) -> Dict[str, float]:
return {meth: self.value}
FloatScore.NAN_INSTANCE = FloatScore(np.nan)
@dataclass
class HarmonicMeanScore(Score):
"""A score having a precision, recall and the harmonic mean of the two,
    F-score.
"""
NAN_INSTANCE: ClassVar[HarmonicMeanScore] = None
"""Used to add to ErrorScore for harmonic means replacements.
"""
precision: float = field()
recall: float = field()
f_score: float = field()
HarmonicMeanScore.NAN_INSTANCE = HarmonicMeanScore(np.nan, np.nan, np.nan)
@dataclass
class ScoreResult(Dictable):
"""A result of scores created by a :class:`.ScoreMethod`.
"""
scores: Dict[str, Tuple[Score]] = field()
"""The scores by method name."""
correlation_id: Optional[str] = field(default=None)
"""An ID for correlating back to the :class:`.TokenContainer`."""
def __len__(self) -> int:
return len(self.scores)
def __getitem__(self, k: str) -> Dict[str, Tuple[Score]]:
return self.scores[k]
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
dct = super().asdict()
del dct['correlation_id']
if self.correlation_id is None:
self._write_dict(dct, depth, writer)
else:
self._write_line(f'correlation ID: {self.correlation_id}',
depth, writer)
self._write_dict(dct, depth + 1, writer)
@dataclass
class ScoreSet(Dictable):
"""All scores returned from :class:`.Scorer'.
"""
results: Tuple[ScoreResult] = field()
"""A tuple with each element having the results of the respective sentence
    pair in :obj:`.ScoreContext.pairs`.  Each element is a dictionary with the
    method names as keys and the results as values, as output by the
    :class:`.ScoreMethod`.  This is created in :class:`.Scorer`.
"""
correlation_id_col: str = field(default='id')
"""The column name for the :obj:`.ScoreResult.correlation_id` added to Numpy
    arrays and Pandas dataframes.  If ``None``, then the correlation IDs are
used as the index.
"""
def __len__(self) -> int:
return len(self.results)
def __iter__(self) -> Iterable[Dict[str, Tuple[Score]]]:
return iter(self.results)
def __getitem__(self, i: int) -> Dict[str, Tuple[Score]]:
return self.results[i]
@property
def has_correlation_id(self) -> bool:
"""Whether the results have correlation IDs."""
return len(self.results) > 0 and \
self.results[0].correlation_id is not None
def as_numpy(self, add_correlation: bool = True) -> \
Tuple[List[str], np.ndarray]:
"""Return the Numpy array with column descriptors of the results. Spacy
depends on Numpy, so this package will always be availale.
:param add_correlation: whether to add the correlation ID (if there is
one), using :obj:`correlation_id_col`
"""
cols: Set[str] = set()
rows: List[Dict[str, float]] = []
result: ScoreResult
for result in self.results:
row: Dict[str, float] = {}
rows.append(row)
meth: str
for meth, result in result.scores.items():
rdat: Dict[str, float] = result.asrow(meth)
row.update(rdat)
cols.update(rdat.keys())
cols: List[str] = sorted(cols)
nd_rows: List[np.ndarray] = []
for row in rows:
nd_rows.append(np.array(tuple(map(row.get, cols))))
arr = np.stack(nd_rows)
if add_correlation and self.has_correlation_id:
ids = np.array(tuple(map(lambda r: r.correlation_id, self.results)))
ids = np.expand_dims(ids, 1)
arr = np.append(arr, ids, axis=1)
cols.append(self.correlation_id_col)
return cols, arr
def as_dataframe(self, add_correlation: bool = True) -> 'pandas.DataFrame':
"""This gets data from :meth:`as_numpy` and returns it as a Pandas
dataframe.
:param add_correlation: whether to add the correlation ID (if there is
one), using :obj:`correlation_id_col`
:return: an instance of :class:`pandas.DataFrame` of the results
"""
import pandas as pd
cols, arr = self.as_numpy(add_correlation=False)
df = pd.DataFrame(arr, columns=cols)
if add_correlation and self.has_correlation_id:
# add as a dataframe, otherwise string correlation IDs cast the
# numpy array to a string
cid: str = self.correlation_id_col
cids: Tuple[Union[str, int]] = tuple(
map(lambda r: r.correlation_id, self.results))
if cid is None:
df.index = cids
else:
cols: List[str] = df.columns.tolist()
df[cid] = cids
cols.insert(0, cid)
df = df[cols]
return df
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_line('results:', depth, writer)
self._write_iterable(self.results, depth + 1, writer)
@dataclass
class ScoreContext(Dictable):
"""Input needed to create score(s) using :class:`.Scorer'.
"""
pairs: Tuple[Tuple[TokenContainer, TokenContainer]] = field()
"""Sentence, span or document pairs to score (order matters for some scoring
methods such as rouge). Depending on the scoring method the ordering of the
sentence pairs should be:
* ``(<summary>, <source>)``
* ``(<gold>, <prediction>)``
* ``(<references>, <candidates>)``
See :class:`.ScoreMethod` implementations for more information about pair
ordering.
"""
methods: Set[str] = field(default=None)
"""A set of strings, each indicating the :class:`.ScoreMethod` used to score
:obj:`pairs`.
"""
norm: bool = field(default=True)
"""Whether to use the normalized tokens, otherwise use the original text."""
correlation_ids: Tuple[Union[int, str]] = field(default=None)
"""The IDs to correlate with each sentence pair, or ``None`` to skip
correlating them. The length of this tuple must be that of :obj:`pairs`.
"""
def __post_init__(self):
self.validate()
def validate(self):
if self.correlation_ids is not None and \
len(self.pairs) != len(self.correlation_ids):
raise ScorerError(
'Expecting same length pairs to correlation IDs but got: ' +
f'{len(self.pairs)} != {len(self.correlation_ids)}')
@dataclass
class ScoreMethod(ABC):
"""An abstract base class for scoring methods (bleu, rouge, etc).
"""
reverse_sents: bool = field(default=False)
"""Whether to reverse the order of the sentences."""
@classmethod
def _get_external_modules(cls: Type) -> Tuple[str, ...]:
"""Return a list of external module names needed by this method."""
return ()
@classmethod
def missing_modules(cls: Type) -> Tuple[str]:
"""Return a list of missing modules neede by this score method."""
missing: List[str] = []
mod: str
for mod in cls._get_external_modules():
try:
ClassImporter.get_module(mod)
except ModuleNotFoundError:
missing.append(mod)
return missing
@classmethod
def is_available(cls: Type) -> bool:
"""Whether or not this method is available on this system."""
return len(cls.missing_modules()) == 0
@abstractmethod
def _score(self, meth: str, context: ScoreContext) -> Iterable[Score]:
"""See :meth:`score`"""
pass
def score(self, meth: str, context: ScoreContext) -> Iterable[Score]:
"""Score the sentences in ``context`` using method identifer ``meth``.
:param meth: the identifer such as ``bleu``
:param context: the context containing the data to score
:return: the results, which are usually :class:`float` or
:class:`.Score`
"""
scores: Iterable[Score]
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'scoring meth: {meth}, ' +
f'reverse: {self.reverse_sents}')
if not isinstance(context.pairs[0][0], TokenContainer):
raise ScorerError(f'Wrong type: {type(context.pairs[0][0])} ' +
f' for first item, expecting {TokenContainer}')
if not isinstance(context.pairs[0][1], TokenContainer):
            raise ScorerError(f'Wrong type: {type(context.pairs[0][1])} ' +
                              f' for second item, expecting {TokenContainer}')
try:
if self.reverse_sents:
prev_pairs = context.pairs
try:
context.pairs = tuple(map(
lambda x: (x[1], x[0]), context.pairs))
scores = self._score(meth, context)
finally:
context.pairs = prev_pairs
else:
scores = self._score(meth, context)
# force generators to realize scores and force any raised exceptions
scores = tuple(scores)
except Exception as e:
logger.info(e, exc_info=True)
scores = tuple([ErrorScore(meth, e)] * len(context.pairs))
return scores
def _tokenize(self, context: ScoreContext) -> \
Iterable[Tuple[Tuple[str], Tuple[str]]]:
s1: TokenContainer
s2: TokenContainer
for s1, s2 in context.pairs:
s1t: Tuple[str]
s2t: Tuple[str]
if context.norm:
s1t = tuple(map(lambda t: t.norm, s1.token_iter()))
s2t = tuple(map(lambda t: t.norm, s2.token_iter()))
else:
s1t = tuple(map(lambda t: t.text, s1.token_iter()))
s2t = tuple(map(lambda t: t.text, s2.token_iter()))
yield (s1t, s2t)
@dataclass
class ExactMatchScoreMethod(ScoreMethod):
"""A scoring method that return 1 for exact matches and 0 otherwise.
"""
equality_measure: str = field(default='norm')
"""The method by which to compare, which is one of:
* ``norm``: compare with :meth:`.TokenContainer.norm`
* ``text``: compare with :obj:`.TokenContainer.text`
* ``equal``: compare using a Python object ``__eq__`` equal compare,
which also compares the token values
"""
def _score(self, meth: str, context: ScoreContext) -> Iterable[FloatScore]:
        s1: TokenContainer
        s2: TokenContainer
        for s1, s2 in context.pairs:
            val: float
            if self.equality_measure == 'norm':
                val = 1. if s1.norm == s2.norm else 0.
            elif self.equality_measure == 'text':
                val = 1. if s1.text == s2.text else 0.
            elif self.equality_measure == 'equal':
                val = 1. if s1 == s2 else 0.
else:
raise ScorerError(
f"No equality measure: '{self.equality_measure}'")
yield FloatScore(val)
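# Illustrative sketch (``parser`` is a stand-in for a configured
# FeatureDocumentParser): score a gold/prediction pair for an exact
# normalized-text match by calling the method directly.
def _example_exact_match(parser, gold: str, pred: str) -> Tuple[Score, ...]:
    context = ScoreContext(pairs=((parser(gold), parser(pred)),))
    return tuple(ExactMatchScoreMethod().score('exact', context))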
@dataclass
class LevenshteinDistanceScoreMethod(ScoreMethod):
"""A scoring method that computes the Levenshtein distance.
"""
form: str = field(default='canon')
"""The form of the of the text used for the evaluation, which is one of:
* ``text``: the original text with :obj:`.TokenContainer.text`
* ``norm``: the normalized text using :meth:`.TokenContainer.norm`
* ``canon``: :obj:`.TokenContainer.canonical` to normalize out
whitespace for better comparisons
"""
normalize: bool = field(default=True)
"""Whether to normalize the return value as the *distince / the max length
of both sentences*.
"""
@classmethod
def _get_external_modules(cls: Type) -> Tuple[str, ...]:
return ('editdistance',)
def _score(self, meth: str, context: ScoreContext) -> Iterable[FloatScore]:
        import editdistance
        s1: TokenContainer
        s2: TokenContainer
        for s1, s2 in context.pairs:
            t1: str
            t2: str
            if self.form == 'text':
                # use the original text
                t1, t2 = s1.text, s2.text
            elif self.form == 'norm':
                # use the normalized form
                t1, t2 = s1.norm, s2.norm
            elif self.form == 'canon':
                # use the canonical form, which normalizes out whitespace
                t1, t2 = s1.canonical, s2.canonical
            else:
                raise ScorerError(f"No form: '{self.form}'")
            val: float = editdistance.eval(t1, t2)
            if self.normalize:
                text_len: int = max(len(t1), len(t2))
                val = 1. - (val / text_len)
yield FloatScore(val)
@dataclass
class BleuScoreMethod(ScoreMethod):
"""The BLEU scoring method using the :mod:`nltk` package. The first
sentences are the references and the second are the hypothesis.
"""
smoothing_function: bleu.SmoothingFunction = field(default=None)
"""This is an implementation of the smoothing techniques for segment-level
BLEU scores.
Citation:
.. code:: none
Boxing Chen and Collin Cherry (2014) A Systematic Comparison of Smoothing
Techniques for Sentence-Level BLEU. In WMT14.
http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf
"""
weights: Tuple[float, ...] = field(default=(0.25, 0.25, 0.25, 0.25))
"""Weights for each n-gram. For example: a tuple of float weights for
unigrams, bigrams, trigrams and so on can be given: ``weights = (0.1, 0.3,
0.5, 0.1)``.
"""
silence_warnings: bool = field(default=False)
"""Silence the BLEU warning of n-grams not matching ``The hypothesis
contains 0 counts of 3-gram overlaps...``
"""
def __post_init__(self):
if self.silence_warnings:
import warnings
# silence the BLEU warning of n-grams not matching
# The hypothesis contains 0 counts of 3-gram overlaps...
warnings.filterwarnings(
'ignore', message='[.\n]+The hypothesis contains 0 counts.*')
def _score(self, meth: str, context: ScoreContext) -> Iterable[FloatScore]:
        s1t: Tuple[str]
        s2t: Tuple[str]
for s1t, s2t in self._tokenize(context):
val: float = bleu.sentence_bleu(
[s1t], s2t,
weights=self.weights,
smoothing_function=self.smoothing_function)
yield FloatScore(val)
@dataclass
class RougeScoreMethod(ScoreMethod):
"""The ROUGE scoring method using the :mod:`rouge_score` package.
"""
feature_tokenizer: bool = field(default=True)
"""Whether to use the :class:`.TokenContainer` tokenization, otherwise use
the :mod:`rouge_score` package.
"""
@classmethod
def _get_external_modules(cls: Type) -> Tuple[str, ...]:
return ('rouge_score',)
def _score(self, meth: str, context: ScoreContext) -> \
Iterable[HarmonicMeanScore]:
from rouge_score import rouge_scorer
class Tokenizer(object):
@staticmethod
def tokenize(sent: TokenContainer) -> Tuple[str]:
return sents[id(sent)]
s1: TokenContainer
s2: TokenContainer
if self.feature_tokenizer:
scorer = rouge_scorer.RougeScorer([meth], tokenizer=Tokenizer)
pairs = zip(context.pairs, self._tokenize(context))
for (s1, s2), (s1t, s2t) in pairs:
sents = {id(s1): s1t, id(s2): s2t}
res: Dict[str, Score] = scorer.score(s1, s2)
yield HarmonicMeanScore(*res[meth])
else:
            scorer = rouge_scorer.RougeScorer([meth])
            for s1, s2 in context.pairs:
                res: Dict[str, Score] = scorer.score(s1.text, s2.text)
yield HarmonicMeanScore(*res[meth])
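# Illustrative sketch: wire several methods into the Scorer class defined just
# below (the name resolves when the function is called).  The keys ('exact',
# 'lev', 'bleu') are arbitrary names chosen for this example.
def _example_scorer(pairs) -> ScoreSet:
    scorer = Scorer(methods={
        'exact': ExactMatchScoreMethod(),
        'lev': LevenshteinDistanceScoreMethod(),
        'bleu': BleuScoreMethod()})
    return scorer(ScoreContext(pairs=pairs))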
@dataclass
class Scorer(object):
"""A class that scores sentences using a set of registered methods
(:obj:`methods`).
"""
methods: Dict[str, ScoreMethod] = field(default=None)
"""The registered scoring methods availale, which are accessed from
:obj:`.ScoreContext.meth`.
"""
default_methods: Set[str] = field(default=None)
"""Methods (keys from :obj:`methods`) to use when none are provided in the
    :obj:`.ScoreContext.methods` in the call to :meth:`score`.
"""
@persisted('_get_missing_modules_pw', cache_global=True)
def _get_missing_modules(self) -> Tuple[str]:
missing: List[str] = []
not_avail: List[str] = []
name: str
meth: ScoreMethod
for name, meth in self.methods.items():
missing_mods: Tuple[str] = meth.missing_modules()
if len(missing_mods) > 0:
logger.warning(f'method {meth} is not available: ' +
f'missing {missing_mods}')
not_avail.append(name)
missing.extend(missing_mods)
for name in not_avail:
del self.methods[name]
        return tuple(missing)
def score(self, context: ScoreContext) -> ScoreSet:
"""Score the sentences in ``context``.
:param context: the context containing the data to score
:return: the results for each method indicated in ``context``
"""
by_meth: Dict[str, Tuple[Score]] = {}
by_res: List[ScoreResult] = []
meths: Iterable[str] = context.methods
if meths is None:
if self.default_methods is None:
meths = self.methods.keys()
else:
meths = self.default_methods
self._get_missing_modules()
meth: str
for meth in meths:
smeth: ScoreMethod = self.methods.get(meth)
if smeth is None:
raise ScorerError(f"No scoring method: '{meth}'")
by_meth[meth] = tuple(smeth.score(meth, context))
for i in range(len(context.pairs)):
item_res: Dict[str, Score] = {}
corr_id: str = None
meth: str
if context.correlation_ids is not None:
corr_id = context.correlation_ids[i]
res_tup: Tuple[Score]
# for each scored pair
for meth, res_tup in by_meth.items():
item_res[meth] = res_tup[i]
by_res.append(ScoreResult(item_res, correlation_id=corr_id))
return ScoreSet(results=tuple(by_res))
def __call__(self, context: ScoreContext) -> ScoreSet:
"""See :meth:`score`."""
return self.score(context) | zensols.nlp | /zensols.nlp-1.8.0-py3-none-any.whl/zensols/nlp/score.py | score.py |
__author__ = 'Paul Landes'
from typing import List, Tuple, Dict, Union
import logging
import os
import re
import sys
import json
import inspect
from io import StringIO, TextIOWrapper
from pathlib import Path
import setuptools
from zensols.pybuild import Tag, RemoteSet
logger = logging.getLogger(__name__)
class SetupUtil(object):
"""This class is used in ``setup.py`` in place of the typical call to
``setuptools`` and provides a lot of the information contained in the git
repo as metadata, such as the version from the latest tag. It also helps
    with finding paths in the Zensols default Python project configuration.
The class also provides build information to client APIs (see ``source``).
"""
FIELDS = """
name packages package_data version description author author_email url
download_url long_description long_description_content_type install_requires
keywords classifiers
"""
DO_SETUP = True
DEFAULT_ROOT_CONTAINED_FILE = 'README.md'
def __init__(self, name: str, user: str, project: str,
setup_path: Path = None, package_names: List[str] = None,
root_contained_file=None, req_file: str = 'requirements.txt',
has_entry_points=True, **kwargs):
"""Initialize.
:param name: the full name of the package (i.e. ``zensols.zenpybuild``)
:param user: the user name of the author of the package
(i.e. ``plandes``)
:param project: the project name, usually the last project component
(i.e. ``zenpybuild``)
:param setup_path: the path to the ``setup.py`` file
(i.e. ``src/python/setup.py``)
:param package_names: a list of directories in the root that are to be
                              included in the package, which is typically the
source (``zensols``) and any directories to be
included in the wheel/egg distribution file
(i.e. ``resources``)
:param root_contained_file: a file used to help find the project root,
which default to ``README.md``
:param req_file: the requirements file, which defaults to
``requirements.txt``, which is found in the same
directory as the ``setup.py``.
"""
self.name = name
self.user = user
self.project = project
        if setup_path is None:
            setup_path = Path(__file__).parent.absolute()
        self.setup_path = setup_path
if package_names is None:
m = re.match(r'^(.+)\..*', name)
if m:
package_names = [m.group(1)]
else:
package_names = [name]
self.package_names = package_names
if root_contained_file is None:
self.root_contained_file = SetupUtil.DEFAULT_ROOT_CONTAINED_FILE
else:
self.root_contained_file = root_contained_file
self.req_file = req_file
self.has_entry_points = has_entry_points
self.__dict__.update(**kwargs)
@property
def root_path(self) -> Path:
"""Return root path to the project.
"""
return self.find_root(self.setup_path.parent, self.root_contained_file)
@classmethod
def find_root(cls, start_path: Path,
root_contained_file: Path = None) -> Path:
"""Find the root path by iterating to the root looking for the
``root_contained_file`` starting from directory ``start_path``.
"""
if root_contained_file is None:
root_contained_file = cls.DEFAULT_ROOT_CONTAINED_FILE
logger.debug(f'using setup path: {start_path}')
nname, dname = None, start_path
while nname != dname:
rm_file = dname.joinpath(root_contained_file)
            logger.debug(f'rm file: {rm_file}')
if rm_file.is_file():
logger.debug(f'found file: {rm_file}')
break
logger.debug(f'nname={nname}, dname={dname}')
nname, dname = dname, dname.parent
        logger.debug(f'found root dir: {dname}')
return dname
@property
def packages(self) -> List[str]:
"""Get a list of directories that contain package information to tbe included
with the distribution files.
"""
dirs = []
logger.debug(f'walking on {self.package_names}')
for dname in self.package_names:
for root, subdirs, files in os.walk(dname):
logger.debug(f'root: {root}')
root = os.path.relpath(root, dname)
if root != '.':
dirs.append(os.path.join(dname, root.replace(os.sep, '.')))
return dirs
@property
def long_description(self) -> str:
"""Return a long human readable description of the package, which is the
contents of the ``README.md`` file. This is added so the README shows
up on the pypi module page.
"""
path = Path(self.root_path, self.root_contained_file)
logger.debug(f'reading long desc from {path}')
with open(path, encoding='utf-8') as f:
return f.read()
@property
def short_description(self) -> str:
pat = re.compile(r'^\s*#\s*(.+)$', re.MULTILINE)
desc = self.long_description
m = pat.match(desc)
if m:
return m.group(1)
@property
def install_requires(self) -> List[str]:
"""Get a list of pip dependencies from the requirements file.
"""
path = Path(self.setup_path, self.req_file)
with open(path, encoding='utf-8') as f:
return [x.strip() for x in f.readlines()]
@property
def url(self) -> str:
"""Return the URL used to access the project on GitHub.
"""
return f'https://github.com/{self.user}/{self.project}'
@property
def download_url(self) -> str:
"""Return the download URL used to obtain the distribution wheel.
"""
params = {'url': self.url,
'name': self.name,
'version': self.version,
'path': 'releases/download',
'wheel': 'py3-none-any.whl'}
return '{url}/{path}/v{version}/{name}-{version}-{wheel}'.\
format(**params)
@property
def tag(self) -> Tag:
"""Return the tag for the project.
"""
return Tag(self.root_path)
@property
def remote_set(self) -> RemoteSet:
"""Return a remote set for the project.
"""
return RemoteSet(self.root_path)
@property
def author(self) -> str:
"""Return the author of the package.
"""
commit = self.tag.last_commit
if commit:
return commit.author.name
@property
def author_email(self) -> str:
"""Return the email address of the project.
"""
commit = self.tag.last_commit
if commit:
return commit.author.email
@property
def version(self) -> str:
"""Return the version of the last tag in the git repo.
"""
return self.tag.last_tag
@property
def entry_points(self):
"""Return the entry points (i.e. console application script), if any.
"""
if hasattr(self, 'console_script'):
script = self.console_script
else:
m = re.match(r'.*\.(.+?)$', self.name)
if m:
script = m.group(1)
else:
script = self.name
return {'console_scripts': ['{}={}:main'.format(script, self.name)]}
def get_properties(self, paths: bool = False) -> \
Tuple[List[str], Dict[str, str]]:
"""Return the properties used by ``setuptools``.
"""
fields = self.FIELDS.split()
if paths:
fields.extend('setup_path root_path'.split())
if self.has_entry_points:
fields.append('entry_points')
fset = set(fields)
logger.debug(f'fields: {fset}')
props = {'long_description_content_type': 'text/markdown'}
members = inspect.getmembers(self)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'all members: {members}')
for mem in filter(lambda x: x[0] in fset, members):
logger.debug(f'member: {mem}')
val = mem[1]
if val is not None:
props[mem[0]] = mem[1]
return fields, props
def write(self, depth: int = 0, writer: TextIOWrapper = sys.stdout):
sp = ' ' * (depth * 4)
fields, props = self.get_properties(False)
if 'long_description' in props:
props['long_description'] = props['long_description'][0:20] + '...'
for field in fields:
if field in props:
writer.write(f'{sp}{field}={props[field]}\n')
def get_info(self) -> Dict[str, Union[str, dict]]:
props = self.get_properties(True)[1]
for path in 'setup_path root_path'.split():
props[path] = str(props[path].absolute())
props['build'] = self.tag.build_info
short_description = self.short_description
if short_description:
props['short_description'] = short_description
props['user'] = self.user
props['project'] = self.project
props['remotes'] = tuple(self.remote_set)
return props
    def to_json(self, indent: int = 4, writer: TextIOWrapper = sys.stdout):
        json.dump(self.get_info(), writer, indent=indent)
def setup(self):
"""Called in the ``setup.py`` to invoke the Python ``setuptools`` package.
This assembles the information needed and calls ``setuptools.setup``.
:py:func:`setuptools:setup`
"""
if self.DO_SETUP:
_, props = self.get_properties()
sio = StringIO()
self.write(writer=sio)
logger.info('setting up with: ' + sio.getvalue())
setuptools.setup(**props)
else:
return self
@classmethod
def source(cls, start_path: Path = Path('.').absolute(),
rel_setup_path: Path = Path('src/python/setup.py'),
var: str = 'su'):
"""Source the ``setup.py`` ``setuptools`` file to get an instance of this class
to be used in other APIs that want to access build information. This
is done by using ``exec`` to evaluate the ``setup.py`` file and
skipping the call to ``setuptools.setup``.
:param rel_setup_path: the relative path to the ``setup.py`` file,
which defaults to ``src/python/setup.py`` per
standard Zensols build
:param start_path: the path to start looking for the ``rel_setup_path``
        :param var: the name of the variable that was set in ``setup.py`` for
                    the instantiation of this class
"""
logger.debug(f'sourcing: start={start_path}, ' +
f'rel_setup_path={rel_setup_path}')
do_setup = cls.DO_SETUP
try:
cls.DO_SETUP = False
root = cls.find_root(start_path)
setup_path = root / rel_setup_path
logger.debug(f'found root: {root}, setup path = {setup_path}')
setup_path = setup_path.absolute()
logger.debug(f'loading setup file from {setup_path}')
with open(setup_path) as f:
code = f.read()
locs = {'__file__': str(setup_path)}
exec(code, locs)
return locs[var]
finally:
cls.DO_SETUP = do_setup | zensols.pybuild | /zensols.pybuild-0.1.0-py3-none-any.whl/zensols/pybuild/setuputil.py | setuputil.py |
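# Illustrative sketch (hypothetical names): a minimal setup.py that delegates
# to SetupUtil as described in the class docstring above.  In the standard
# Zensols layout this file lives at src/python/setup.py with README.md at the
# repository root; the package, user and project names below are examples.
from pathlib import Path
from zensols.pybuild import SetupUtil

su = SetupUtil(
    name='zensols.example',
    user='exampleuser',
    project='example',
    setup_path=Path(__file__).parent.absolute())
su.setup()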
__author__ = 'Paul Landes'
from typing import Dict, List, Union
import logging
import sys
from io import TextIOWrapper
import json
from pathlib import Path
from datetime import datetime
from git import Repo, TagReference
from zensols.pybuild import Version
logger = logging.getLogger(__name__)
class Tag(object):
"""Represents a Git tag. It's main use is determining the last tag in a sorted
(by version) used to increment to the next version. However, it also
creates tags and provides additional information about existing tags.
All tags have an implicit format by sorting in decimal format
(i.e. ``<major>.<minor>.<version>``).
"""
def __init__(self, repo_dir: Path = Path('.'), message: str = 'none',
dry_run: bool = False):
"""Initialize.
:param repo_dir: the root Git repo directory
:param message: the message to use when creating new tags
:param dry_run: if ``True`` do not create new tags
"""
        logger.debug('creating tag with repo dir: {}'.format(repo_dir))
if isinstance(repo_dir, Path):
repo_dir = str(repo_dir.resolve())
self.repo = Repo(repo_dir)
assert not self.repo.bare
self.message = message
self.dry_run = dry_run
def get_entries(self) -> List[Dict[str, str]]:
"""Return a list of dicts, each with information about the tag.
Keys::
- name: the name of the tag
- ver: the version of the tag (in format ``v<major>.<minor>.<debug>``)
- date: date the tag was created
- tag: the tag without the prefix (i.e. sans ``v``)
- message: the comment given at tag creation
"""
tags = self.repo.tags
logger.debug('tags: {}'.format(tags))
tag_entries = []
for tag in tags:
logger.debug('{} ({})'.format(tag, type(tag)))
name = str(tag)
ver = Version.from_string(name)
date = None
if hasattr(tag.object, 'tagged_date'):
date = tag.object.tagged_date
if ver is not None:
tag_entries.append({'name': name,
'ver': ver,
'date': date,
'tag': tag,
'message': tag.object.message})
tag_entries = sorted(tag_entries, key=lambda t: t['ver'])
return tag_entries
@property
def last_tag_entry(self) -> Dict[str, str]:
"""Return the last entry given by ``get_entries``.
:py:meth:`Tag.get_entries`
"""
entries = self.get_entries()
logger.debug('entires: {}'.format(entries))
if (len(entries) > 0):
return entries[-1]
@property
def last_tag(self) -> str:
"""Return the last tag.
"""
entry = self.last_tag_entry
if entry:
return entry['ver'].format(prefix='')
@property
def last_commit(self):
"""Return rhe last commit ID (sha1).
"""
commits = list(self.repo.iter_commits('HEAD'))
if len(commits) > 0:
return commits[0]
@property
def build_info(self) -> Dict[str, Union[str, dict]]:
"""Return information about the last commit and a build time with the current
time.
"""
inf = {'build_date': datetime.now().isoformat()}
last_entry = self.last_tag_entry
if last_entry:
tag = last_entry['tag']
message = None
if hasattr(tag.object, 'message'):
message = tag.object.message
inf.update({'tag': last_entry['ver'].format(prefix=''),
'name': last_entry['name'],
'message': message})
c = self.last_commit
if c:
inf['commit'] = {'author': str(c.author),
'date': c.committed_datetime.isoformat(),
'sha': str(c),
'summary': c.summary}
return inf
    def to_json(self, indent: int = 4, writer: TextIOWrapper = sys.stdout):
        """Write build information in JSON format.
        """
        json.dump(self.build_info, writer, indent=indent)
def delete_last_tag(self):
"""Delete the last commit tag.
"""
entry = self.last_tag_entry
tag = entry['tag']
name = entry['name']
logger.info('deleting: {}'.format(name))
if not self.dry_run:
TagReference.delete(self.repo, tag)
def recreate_last_tag(self):
"""Delete the last tag and create a new one on the latest commit.
"""
entry = self.last_tag_entry
tag = entry['tag']
name = entry['name']
msg = entry['message']
logger.info('deleting: {}'.format(name))
if not self.dry_run:
TagReference.delete(self.repo, tag)
logger.info('creating {} with commit <{}>'.format(name, msg))
if not self.dry_run:
TagReference.create(self.repo, name, message=msg)
def create(self):
"""Create a new tag on the latest commit.
"""
entry = self.last_tag_entry
if entry is None:
ver = Version.from_string('v0.0.0')
else:
ver = entry['ver']
ver.increment('debug')
new_tag_name = str(ver)
logger.info('creating {} with commit <{}>'.format(
new_tag_name, self.message))
if not self.dry_run:
TagReference.create(self.repo, new_tag_name, message=self.message) | zensols.pybuild | /zensols.pybuild-0.1.0-py3-none-any.whl/zensols/pybuild/tag.py | tag.py |
__author__ = 'Paul Landes'
import re
class Version(object):
"""A container class for a tag version. All tags have an implicit format by
sorting in decimal format (i.e. ``<major>.<minor>.<version>``). This class
contains methods that make it sortable.
"""
def __init__(self, major=0, minor=0, debug=1):
self.major = major
self.minor = minor
self.debug = debug
@classmethod
def from_string(clz, s):
"""Create a version instance from a string formatted version.
:return: a new instance of ``Version``
"""
m = re.search(r'^v?(\d+)\.(\d+)\.(\d+)$', s)
if m is not None:
return Version(int(m.group(1)), int(m.group(2)), int(m.group(3)))
def format(self, prefix='v') -> str:
"""Return a formatted string version of the instance.
"""
return prefix + '{major}.{minor}.{debug}'.format(**self.__dict__)
def increment(self, decimal='debug', inc=1):
"""Increment the version in the instance. By default the debug portion of the
instance is incremented.
"""
if decimal == 'major':
self.major += inc
elif decimal == 'minor':
self.minor += inc
elif decimal == 'debug':
self.debug += inc
else:
            raise ValueError('unknown decimal type: {}'.format(decimal))
def __lt__(self, o):
if self.major < o.major:
return True
if self.major > o.major:
return False
if self.minor < o.minor:
return True
if self.minor > o.minor:
return False
if self.debug < o.debug:
return True
if self.debug > o.debug:
return False
# equal
return False
    def __le__(self, o):
        if self.major < o.major:
            return True
        if self.major > o.major:
            return False
        if self.minor < o.minor:
            return True
        if self.minor > o.minor:
            return False
        return self.debug <= o.debug
def __eq__(self, o):
return self.__dict__ == o.__dict__
def __str__(self):
return self.format()
def __repr__(self):
return self.__str__() | zensols.pybuild | /zensols.pybuild-0.1.0-py3-none-any.whl/zensols/pybuild/version.py | version.py |
__author__ = 'Paul Landes'
from pathlib import Path
from zensols.cli import OneConfPerActionOptionsCli
from zensols.pybuild import Tag, SetupUtil
# The version of the application
# *Important*: must also be updated in src/python/setup.py
VERSION = '0.1.0'
class Cli(object):
def __init__(self, setup_path: str = None, output_format: str = None):
self.setup_path = Path(setup_path)
self.output_format = output_format
def write(self):
sutil = SetupUtil.source(start_path=self.setup_path)
if self.output_format == 'json':
sutil.to_json()
else:
sutil.write()
# recommended app command line
class ConfAppCommandLine(OneConfPerActionOptionsCli):
def __init__(self):
repo_dir_op = ['-r', '--repodir', True,
{'dest': 'repo_dir',
'metavar': 'DIRECTORY',
'default': '.',
'help': 'path of the repository'}]
msg_op = ['-m', '--message', True,
{'dest': 'message',
'default': 'none',
'metavar': 'STRING',
'help': 'documentation for the new tag'}]
cnf = {'executors':
[{'name': 'tag',
'executor': lambda params: Tag(**params),
'actions': [{'name': 'last',
'meth': 'print_last_tag',
'doc': 'Print the last tag',
'opts': [repo_dir_op]},
{'name': 'info',
'meth': 'to_json',
'doc': 'give repo version information in JSON',
'opts': [repo_dir_op]},
{'name': 'create',
'doc': 'Create a new tag',
'opts': [repo_dir_op, msg_op]},
{'name': 'del',
'meth': 'delete_last_tag',
'doc': 'Delete the tag',
'opts': [repo_dir_op]},
                             {'name': 'recreate',
                              'meth': 'recreate_last_tag',
                              'doc': 'Recreate the tag (delete then add)',
                              'opts': [repo_dir_op]}]},
{'name': 'setup',
'executor': lambda params: Cli(**params),
'actions': [{'name': 'write',
'meth': 'write',
'doc': 'print the setup used for setuptools',
                              'opts': [['-s', '--setuppath', True,
{'metavar': 'DIRECTORY',
'dest': 'setup_path',
'default': '.',
'help': 'the path to the setup directory (setup.py)'}],
['-f', '--format', True,
{'metavar': 'flat|json',
'dest': 'output_format',
'default': 'flat',
'help': 'format used to write the data'}]
]}]}],
'whine': 1}
super(ConfAppCommandLine, self).__init__(cnf, version=VERSION)
def main():
cl = ConfAppCommandLine()
cl.invoke() | zensols.pybuild | /zensols.pybuild-0.1.0-py3-none-any.whl/zensols/pybuild/cli.py | cli.py |
import logging
import os
from zensols.actioncli import Executor
logger = logging.getLogger('zensols.rbak.target')
class Target(object):
"""Represents to where files are backed up. Sometimes this is an external
file system.
"""
def __init__(self, name, executor, config):
self.name = name
self.executor = executor
conf = {'name': name}
conf.update(config.get_options('default'))
conf.update(config.get_options(name))
self.info_file = conf['info_file']
self.mountable = config.get_option_boolean('mountable', name, expect=True)
mp_fmt = config.get_option('path', name, expect=True)
self.path = mp_fmt.format(**conf)
conf['path'] = self.path
self.mount_cmd = conf['mount_cmd'].format(**conf)
self.umount_cmd = conf['umount_cmd'].format(**conf)
self.backup_dir = conf['backup_dir']
@property
def info_path(self):
"""Return the path of the `info.conf` file to determine if this target is
mounted.
"""
return os.path.join(self.path, self.info_file)
@property
def backup_path(self):
"Return the path of where the target directory."
if len(self.backup_dir) > 0:
return os.path.join(self.path, self.backup_dir)
else:
return self.path
@property
def is_mounted(self):
"Return whether or not this target is an external mountable path."
return os.path.isfile(self.info_path)
def _assert_mountable(self):
"Raise an error if this target is mountable."
if not self.mountable:
raise ValueError('target {} is not mountable'.format(self))
def mount(self):
"Mount the target if not already. Raise error if it is not mountable."
self._assert_mountable()
if self.is_mounted:
logger.warning('{} is already mounted'.format(self.path))
else:
logger.info('mounting {}'.format(self))
self.executor.run(self.mount_cmd)
def umount(self):
"Unmount the target if not already. Raise error if it is not mountable."
self._assert_mountable()
if not self.is_mounted:
logger.warning('{} is not mounted'.format(self.path))
else:
logger.info('un-mounting {}'.format(self))
self.executor.run(self.umount_cmd)
def __str__(self):
mnt_str = ', mounted={}'.format(self.is_mounted) if self.mountable else ''
return '{} on {}{}'.format(self.name, self.path, mnt_str)
def __repr__(self):
return self.__str__()
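# Illustrative sketch (hypothetical configuration): create and mount a target.
# The section and option values assumed here follow what Target reads above,
# e.g.:
#
#   [default]
#   info_file = info.conf
#
#   [usbdrive]
#   mountable = true
#   path = /mnt/{name}
#   mount_cmd = mount {path}
#   umount_cmd = umount {path}
#   backup_dir = backups
def _example_mount_target(config):
    executor = Executor(logger)
    target = Target('usbdrive', executor, config)
    if not target.is_mounted:
        target.mount()
    return target.backup_path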
class Source(object):
"""
    Represents where files are backed up from.
"""
def __init__(self, name, config):
self.name = name
self.path = config.get_option('path', name, expect=True)
self.basename_dir = config.get_option('basename_dir', name)
@property
def basename(self):
"Return the basename (sans file name) of the source path."
return self.basename_dir or os.path.basename(self.path)
def __str__(self):
return self.path
def __repr__(self):
return self.__str__() | zensols.rbak | /zensols.rbak-0.4-py3-none-any.whl/zensols/rbak/domain.py | domain.py |
import logging
import os
from zensols.actioncli import OneConfPerActionOptionsCli
from zensols.actioncli import Config
from zensols.rbak import Backuper
logger = logging.getLogger('zensols.rbak.cli')
VERSION='0.2'
# recommended app command line
class ConfAppCommandLine(OneConfPerActionOptionsCli):
def __init__(self):
dry_run_op = ['-d', '--dryrun', False,
{'dest': 'dry_run',
'action': 'store_true', 'default': False,
'help': 'dry run to not actually connect, but act like it'}]
sources_op = ['-n', '--sources', False,
{'dest': 'source_names',
'help': 'override the sources property in the config'}]
cnf = {'executors':
[{'name': 'backup',
'executor': lambda params: Backuper(**params),
'actions':[{'name': 'info',
'doc': 'print backup configuration information'},
{'name': 'backup',
'meth': 'sync',
'doc': 'run the backup',
'opts': [dry_run_op, sources_op]},
{'name': 'mount',
'meth': 'mount_all',
'doc': 'mount all targets',
'opts': [dry_run_op]},
{'name': 'umount',
'meth': 'umount_all',
'doc': 'un-mount all targets',
'opts': [dry_run_op]}]}],
               # the configparse (ini format) configuration file option
'config_option': {'name': 'config',
'opt': ['-c', '--config', False,
{'dest': 'config', 'metavar': 'FILE',
'default': '/etc/rbak.conf',
'help': 'configuration file'}]},
'whine': 1}
super(ConfAppCommandLine, self).__init__(cnf, version=VERSION)
def main():
cl = ConfAppCommandLine()
cl.invoke() | zensols.rbak | /zensols.rbak-0.4-py3-none-any.whl/zensols/rbak/cli.py | cli.py |
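# A hypothetical dispatch sketch (not part of the package): in the action
# metadata above, each CLI action maps to a Backuper method via the 'meth'
# key (defaulting to the action name).  This stand-in shows the resolution
# idea without depending on zensols.actioncli.
ACTION_METHODS = {'info': 'info', 'backup': 'sync',
                  'mount': 'mount_all', 'umount': 'umount_all'}
def dispatch(executor, action: str):
    # look up the method named by the action and invoke it
    return getattr(executor, ACTION_METHODS[action])()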
import os
import pathlib
import sys
import logging
from zensols.actioncli import Executor
from zensols.rbak import Target, Source
logger = logging.getLogger('zensols.rbak.backup')
class Backuper(object):
"""
This class runs backups from sources to targets.
"""
def __init__(self, config, source_names=None, dry_run=True, executor=None):
self.config = config
self.source_names = source_names
self.executor = executor if executor else Executor(logger, dry_run)
self.backup_cmd = config.get_option('backup_cmd', expect=True)
@property
def dry_run(self):
return self.executor.dry_run
@property
def targets(self):
"Get all configured targets."
if not hasattr(self, '_targets'):
target_names = self.config.get_option_list('targets', expect=True)
            self._targets = [Target(name, self.executor, self.config)
                             for name in target_names]
return self._targets
@property
def sources(self):
"Get all configured sources."
if not hasattr(self, '_sources'):
if self.source_names is not None:
source_names = self.source_names.split(' ')
else:
source_names = self.config.get_option_list('sources', expect=True)
            self._sources = [Source(name, self.config)
                             for name in source_names]
return self._sources
def info(self):
"Print source and target configuration."
print('sources:')
for targ in self.sources:
print(' {}'.format(targ))
print('targets:')
for targ in self.targets:
print(' {}'.format(targ))
def mount_all(self):
"Mount all mountable targets."
for targ in self.targets:
logger.info('mounting: {}'.format(targ))
try:
targ.mount()
except OSError as e:
                logger.error("can't mount {}--skipping: {}".format(targ, e))
continue
def umount_all(self):
"Umount all mountable targets."
for targ in self.targets:
logger.info('un-mounting: {}'.format(targ))
try:
targ.umount()
except OSError as e:
                logger.error("can't un-mount {}--skipping: {}".format(targ, e))
continue
def sync(self):
"""
Use rsync to backup files.
"""
mounts = []
for targ in self.targets:
if targ.mountable and not targ.is_mounted:
try:
targ.mount()
mounts.append(targ)
except OSError as e:
                    logger.error("can't mount {}--skipping: {}".format(targ, e))
continue
if not os.path.isdir(targ.backup_path):
logger.info('creating path: {}'.format(targ.backup_path))
if not self.dry_run:
pathlib.Path(targ.backup_path).mkdir(parents=True, exist_ok=True)
for source in self.sources:
cmd_ctx = {'source': source, 'target': targ}
logger.info('{} -> {}'.format(source.path, targ.backup_path))
cmd = self.backup_cmd.format(**cmd_ctx)
self.executor.run(cmd)
for mnt in mounts:
mnt.umount() | zensols.rbak | /zensols.rbak-0.4-py3-none-any.whl/zensols/rbak/backup.py | backup.py |
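# A standalone sketch (not part of the package) of how sync() above renders
# the configured backup command: backup_cmd is a format template over the
# cmd_ctx keys 'source' and 'target'.  The rsync template is an assumption.
class _Source:  # stand-in exposing the attribute sync() interpolates
    path = '/home/user/docs'
class _Target:
    backup_path = '/mnt/usb/bak'
backup_cmd = 'rsync -a {source.path}/ {target.backup_path}/'
print(backup_cmd.format(source=_Source(), target=_Target()))
# -> rsync -a /home/user/docs/ /mnt/usb/bak/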
__author__ = 'Paul Landes'
from typing import Optional
from dataclasses import dataclass, field
import logging
from zensols.cli import ApplicationError
from . import (
ShowFileError, LocatorType, Extent, Location, Presentation, BrowserManager
)
logger = logging.getLogger(__name__)
@dataclass
class Application(object):
"""Probe screen, open and set the viewing application extends.
"""
browser_manager: BrowserManager = field()
"""Detects and controls the screen."""
width: int = field(default=None)
"""The width to set, or use the configuraiton if not set."""
height: int = field(default=None)
"""The height to set, or use the configuraiton if not set."""
def config(self):
"""Print the display configurations."""
dsps = sorted(self.browser_manager.displays.items(), key=lambda x: x[0])
for n, dsp in dsps:
print(f'{n}:')
dsp.write(1)
def _get_extent(self) -> Optional[Extent]:
extent: Extent
if self.width is None and self.height is None:
extent = None
elif self.width is None or self.height is None:
raise ApplicationError(
'Both width and height are expected when either is given')
else:
extent = Extent(self.width, self.height, 0, 0)
return extent
def show(self, locator: str, locator_type: LocatorType = None,
delimiter: str = ','):
"""Open and display a file with the application's extents set for the
display.
:param locator: the file or URL to display
:param locator_type: specify either a URL or file; determined by default
:param delimiter: the string used to split locator strings
"""
extent: Optional[Extent] = self._get_extent()
pres: Presentation = Presentation.from_str(locator, delimiter, extent)
if locator_type is not None:
loc: Location
for loc in pres.locators:
loc.coerce_type(locator_type)
try:
for loc in pres.locators:
loc.validate()
self.browser_manager.show(pres)
except ShowFileError as e:
raise ApplicationError(str(e)) from e
def __call__(self, *args, **kwargs):
"""See :meth:`show`."""
return self.show(*args, **kwargs) | zensols.showfile | /zensols.showfile-0.3.0-py3-none-any.whl/zensols/showfile/app.py | app.py |
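# A minimal sketch (not part of the package) of the width/height check in
# Application._get_extent above: both dimensions must be given together or
# not at all.
from typing import Optional, Tuple
def get_extent(width: Optional[int], height: Optional[int]) \
        -> Optional[Tuple[int, int, int, int]]:
    if width is None and height is None:
        return None
    if width is None or height is None:
        raise ValueError(
            'Both width and height are expected when either is given')
    return (width, height, 0, 0)  # mirrors Extent(width, height, 0, 0)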
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Sequence, Dict, Union
from dataclasses import dataclass, field
from abc import ABCMeta, abstractmethod
import logging
import platform
from pathlib import Path
from zensols.config import Dictable, ConfigFactory
from zensols.persist import persisted
from . import (
ShowFileError, Size, Extent, Display, LocatorType, Location, Presentation
)
logger = logging.getLogger(__name__)
@dataclass
class Browser(Dictable, metaclass=ABCMeta):
"""An abstract base class for browsers the can visually display files.
"""
@property
@persisted('_screen_size')
def screen_size(self) -> Size:
"""Get the screen size for the current display."""
return self._get_screen_size()
@abstractmethod
def _get_screen_size(self) -> Size:
"""Get the screen size for the current display."""
pass
@abstractmethod
def show(self, presentation: Presentation):
"""Display the content.
:param presentation: the file/PDF (or image) to display
"""
pass
@dataclass
class BrowserManager(object):
"""Manages configured browsers and invoking them to display files and URLs.
It also contains configuration for application extents based configured
displays.
"""
config_factory: ConfigFactory = field()
"""Set by the framework and used to get other configurations."""
browser: Browser = field(default=None)
"""The platform implementation of the file browser."""
display_names: Sequence[str] = field(default_factory=list)
"""The configured display names, used to fetch displays in the
configuration.
"""
def __post_init__(self):
if self.browser is None:
os_name = platform.system().lower()
sec_name = f'{os_name}_browser'
if sec_name not in self.config_factory.config.sections:
sec_name = 'default_browser'
self.browser: Browser = self.config_factory(sec_name)
@property
@persisted('_displays')
def displays(self) -> Dict[str, Size]:
"""The configured displays."""
def map_display(name: str) -> Display:
targ = Extent(**fac(f'{name}_target').asdict())
return Display(**fac(name).asdict() |
{'name': name, 'target': targ})
fac = self.config_factory
return {d.name: d for d in map(map_display, self.display_names)}
def _get_extent(self) -> Extent:
screen: Size = self.browser.screen_size
display: Display = self.displays_by_size.get(screen)
logger.debug(f'detected: {screen} -> {display}')
if display is None:
logger.warning(
f'no display entry for bounds: {screen}--using default')
extent = Extent(
x=0, y=0,
width=screen.width // 2,
height=screen.height)
else:
extent = display.target
return extent
@property
@persisted('_displays_by_size')
def displays_by_size(self) -> Dict[Size, Display]:
"""A dictionary of displays keyed by size."""
return {Size(d.width, d.height): d for d in self.displays.values()}
def locator_to_presentation(self, locator: Union[str, Path, Presentation],
extent: Extent = None) -> Presentation:
"""Create a presentation instance from a string, path, or other
presentation.
:param locator: the PDF (or image) file or URL to display
:param extent: the position and size of the window after browsing
"""
pres: Presentation
if isinstance(locator, (str, Path)):
loc_type: LocatorType = LocatorType.from_type(locator)
loc: Location = Location(source=locator, type=loc_type)
pres = Presentation(locators=(loc,))
elif isinstance(locator, Presentation):
pres = locator
else:
raise ShowFileError(f'Unsupported locator type: {type(locator)}')
pres.extent = self._get_extent() if extent is None else extent
return pres
def show(self, locator: Union[str, Path, Presentation],
extent: Extent = None) -> Presentation:
"""Display ``locator`` content on the screen and optionally resize the
window to ``extent``.
:param locator: the PDF (or image) file or URL to display
:param extent: the position and size of the window after browsing
"""
pres: Presentation = self.locator_to_presentation(locator, extent)
self.browser.show(pres)
return pres
def __call__(self, *args, **kwargs):
"""See :meth:`show`."""
return self.show(*args, **kwargs) | zensols.showfile | /zensols.showfile-0.3.0-py3-none-any.whl/zensols/showfile/browser.py | browser.py |
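# A standalone sketch (not part of the package) of the fallback in
# BrowserManager._get_extent above: when no configured display matches the
# detected screen size, the window gets the left half of the screen.  The
# screen size here is invented.
screen_width, screen_height = 1920, 1080
fallback = {'x': 0, 'y': 0,
            'width': screen_width // 2, 'height': screen_height}
print(fallback)  # -> {'x': 0, 'y': 0, 'width': 960, 'height': 1080}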
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Union, Tuple, Any, Set
from dataclasses import dataclass, field
from enum import Enum, auto
import urllib.parse as up
from pathlib import Path
from zensols.util import APIError
from zensols.config import Dictable
from zensols.persist import persisted, PersistedWork
class ShowFileError(APIError):
"""Raised for any :module:`zensols.showfile` API error.
"""
pass
class FileNotFoundError(ShowFileError):
"""Raised when a locator is a file, but the file isn't found."""
def __init__(self, path: Path):
super().__init__(f'File not found: {path}')
self.path = path
class LocatorType(Enum):
"""Identifies a URL or a file name.
"""
file = auto()
url = auto()
@staticmethod
def from_type(instance: Any) -> LocatorType:
type: LocatorType
if isinstance(instance, Path):
type = LocatorType.file
elif isinstance(instance, str):
type = LocatorType.url
else:
raise ShowFileError(f'Unknown type: {type(instance)}')
return type
@staticmethod
def from_str(s: str) -> Tuple[LocatorType, str]:
"""Return whether ``s`` looks like a file or a URL."""
st: LocatorType = None
path: str = None
try:
result: up.ParseResult = up.urlparse(s)
if result.scheme == 'file' and len(result.path) > 0:
st = LocatorType.url
path = result.path
elif all([result.scheme, result.netloc]):
st = LocatorType.url
except Exception:
pass
st = LocatorType.file if st is None else st
return st, path
@dataclass(eq=True, unsafe_hash=True)
class Size(Dictable):
"""A screen size configuration. This is created either for the current
display, or one that's configured.
"""
width: int = field()
height: int = field()
def __str__(self):
return f'{self.width} X {self.height}'
@dataclass(eq=True, unsafe_hash=True)
class Extent(Size):
"""The size (parent class) and the position of the screen.
"""
x: int = field(default=0)
y: int = field(default=0)
@dataclass(eq=True, unsafe_hash=True)
class Display(Size):
"""The screen display.
"""
_DICTABLE_WRITE_EXCLUDES = {'name'}
name: str = field()
"""The name of the display as the section name in the configuration."""
target: Extent = field()
"""The extends of the display or what to use for the Preview app."""
def __str__(self):
return super().__str__() + f' ({self.name})'
@dataclass
class Location(Dictable):
"""Has where to find the data and what it is to view.
"""
source: Union[str, Path] = field()
"""Where to find the data to display."""
type: LocatorType = field(default=None)
"""The type of resource (PDF or URL) to display."""
def __post_init__(self):
self._url = PersistedWork('_url', self)
self._path = PersistedWork('_path', self)
self._file_url_path = None
if self.type is None:
if isinstance(self.source, Path):
self.type = LocatorType.file
self.validate()
else:
self.type, path = LocatorType.from_str(self.source)
if self.type == LocatorType.url and path is not None:
path = Path(path)
self._file_url_path = path
if self.type == LocatorType.file and isinstance(self.source, str):
self.source = Path(self.source)
def validate(self):
if self.type == LocatorType.file or self.is_file_url:
path: Path = self.path
if not path.is_file():
raise FileNotFoundError(path)
    @property
    def is_file_url(self) -> bool:
        """Whether the source is a ``file://`` URL with a file path."""
        return self._file_url_path is not None
@property
@persisted('_url')
def url(self) -> str:
url: str = self.source
if isinstance(self.source, Path):
url = f'file://{self.source.absolute()}'
return url
@property
@persisted('_path')
def path(self) -> Path:
if isinstance(self.source, Path):
return self.source
else:
if self._file_url_path is None:
raise ShowFileError(f'Not a path or URL path: {self.source}')
return self._file_url_path
def coerce_type(self, locator_type: LocatorType):
if locator_type != self.type:
if locator_type == LocatorType.file:
type, path = LocatorType.from_str(self.source)
if path is not None:
self.type = LocatorType.file
self.source = Path(path)
else:
self.source = self.url
self.type = LocatorType.url
self._file_url_path = None
elif locator_type == LocatorType.url and self.is_file_url:
self._file_url_path = None
        # invalidate cached values since the source and/or type may change
        self._url.clear()
        self._path.clear()
@dataclass
class Presentation(Dictable):
"""Contains all the data to view all at once and where on the screen to
display it.
"""
locators: Tuple[Location] = field()
"""The locations of the content to display"""
extent: Extent = field(default=None)
"""Where to display the content."""
@staticmethod
def from_str(locator_defs: str, delimiter: str = ',',
extent: Extent = None) -> Presentation:
locs: Tuple[Location] = tuple(
map(Location, locator_defs.split(delimiter)))
return Presentation(locs, extent)
@property
@persisted('_loctypes')
def locator_type_set(self) -> Set[LocatorType]:
return frozenset(map(lambda loc: loc.type, self.locators)) | zensols.showfile | /zensols.showfile-0.3.0-py3-none-any.whl/zensols/showfile/domain.py | domain.py |
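# Illustrative checks (not part of the package) of the urlparse logic behind
# LocatorType.from_str above: a 'file://' URL keeps its path, a web URL has
# both a scheme and a netloc, and a bare path parses as neither.
import urllib.parse as up
assert up.urlparse('file:///tmp/a.pdf').scheme == 'file'
assert up.urlparse('file:///tmp/a.pdf').path == '/tmp/a.pdf'
assert all([up.urlparse('https://example.com').scheme,
            up.urlparse('https://example.com').netloc])
assert up.urlparse('/tmp/a.pdf').scheme == ''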
__author__ = 'Paul Landes'
from typing import Dict, Sequence, Set, Tuple, Union
from dataclasses import dataclass, field
from enum import Enum, auto
import logging
import textwrap
import re
from pathlib import Path
import applescript as aps
from applescript._result import Result
from zensols.config import ConfigFactory
from . import (
ShowFileError, LocatorType, Size, Extent, Location, Presentation, Browser
)
logger = logging.getLogger(__name__)
class ApplescriptError(ShowFileError):
"""Raised for macOS errors.
"""
pass
class ErrorType(Enum):
"""Types of errors raised by :class:`.ApplescriptError`.
"""
ignore = auto()
warning = auto()
error = auto()
@dataclass
class DarwinBrowser(Browser):
config_factory: ConfigFactory = field()
"""The configuration factory used to create a default :class:`.Browser`
instance for URL viewing.
"""
script_paths: Dict[str, Path] = field()
"""The applescript file paths used for managing show apps (``Preview.app``
and ``Safari.app``).
"""
web_extensions: Set[str] = field()
"""Extensions that indicate to use Safari.app rather than Preview.app."""
applescript_warns: Dict[str, str] = field()
"""A set of string warning messages to log instead raise as an
:class:`.ApplicationError`.
"""
update_page: Union[bool, int] = field(default=False)
"""How to update the page in Preview.app after the window displays. If
``True``, then record page before refresh, then go to the page after
rendered. This is helpful when the PDF has changed and preview goes back to
the first page. If this is a number, then go to that page number in
Preview.app.
"""
switch_back_app: str = field(default=None)
"""The application to activate (focus) after the resize is complete."""
mangle_url: bool = field(default=False)
"""Whether to add ending ``/`` neede by Safari on macOS."""
def _get_error_type(self, res: Result) -> ErrorType:
err: str = res.err
for warn, error_type in self.applescript_warns.items():
if err.find(warn) > -1:
return ErrorType[error_type]
return ErrorType.error
def _exec(self, cmd: str, app: str = None) -> str:
ret: aps.Result
if app is None:
ret = aps.run(cmd)
else:
ret = aps.tell.app(app, cmd)
if ret.code != 0:
err_type: ErrorType = self._get_error_type(ret)
cmd_str: str = textwrap.shorten(cmd, 40)
msg: str = f'Could not invoke <{cmd_str}>: {ret.err} ({ret.code})'
if err_type == ErrorType.warning:
logger.warning(msg)
elif err_type == ErrorType.error:
raise ApplescriptError(msg)
elif logger.isEnabledFor(logging.DEBUG):
logger.debug(f'script output: <{ret.err}>')
return ret.out
def get_show_script(self, name: str) -> str:
"""The applescript content used for managing app ``name``."""
with open(self.script_paths[name]) as f:
return f.read()
def _invoke_open_script(self, name: str, arg: str, extent: Extent,
func: str = None, add_quotes: bool = True):
"""Invoke applescript.
:param name: the key of the script in :obj:`script_paths`
:param arg: the first argument to pass to the applescript (URL or file
name)
        :param extent: the bounds to set on the raised window
        :param func: the applescript function to call; defaults to ``show``
                     plus the capitalized ``name``
        :param add_quotes: whether to quote ``arg`` in the generated call
"""
show_script: str = self.get_show_script(name)
qstr: str = '"' if add_quotes else ''
update_page: str
page_num: str = 'null'
if isinstance(self.update_page, bool):
update_page = str(self.update_page).lower()
page_num = 'null'
else:
update_page = 'true'
page_num = str(self.update_page)
func = f'show{name.capitalize()}' if func is None else func
fn = (f'{func}({qstr}{arg}{qstr}, {extent.x}, {extent.y}, ' +
f'{extent.width}, {extent.height}, {update_page}, {page_num})')
cmd = (show_script + '\n' + fn)
if logger.isEnabledFor(logging.DEBUG):
path: Path = self.script_paths[name]
logger.debug(f'invoking "{fn}" from {path}')
self._exec(cmd)
self._switch_back()
def _switch_back(self):
"""Optionally active an application after running the show-script, which
is usually the previous running application.
"""
if self.switch_back_app is not None:
self._exec(f'tell application "{self.switch_back_app}" to activate')
def _get_screen_size(self) -> Size:
bstr: str = self._exec('bounds of window of desktop', 'Finder')
bounds: Sequence[int] = tuple(map(int, re.split(r'\s*,\s*', bstr)))
width, height = bounds[2:]
return Size(width, height)
def _safari_compliant_url(self, url: str) -> str:
if self.mangle_url and not url.endswith('/'):
url = url + '/'
return url
def _show_file(self, path: Path, extent: Extent):
self._invoke_open_script('preview', str(path.absolute()), extent)
def _show_url(self, url: str, extent: Extent):
url = self._safari_compliant_url(url)
self._invoke_open_script('safari', url, extent)
def _show_urls(self, urls: Tuple[str], extent: Extent):
def map_url(url: str) -> str:
url = self._safari_compliant_url(url)
return f'"{url}"'
url_str: str = ','.join(map(map_url, urls))
url_str = "{" + url_str + "}"
self._invoke_open_script(
name='safari-multi',
arg=url_str,
func='showSafariMulti',
extent=extent,
add_quotes=False)
def show(self, presentation: Presentation):
def map_loc(loc: Location) -> Location:
if loc.is_file_url or loc.type == LocatorType.file:
path: Path = loc.path
if path.suffix[1:] in self.web_extensions:
loc.coerce_type(LocatorType.url)
return loc
extent: Extent = presentation.extent
urls: Tuple[str] = None
locs: Tuple[Location] = tuple(map(map_loc, presentation.locators))
if len(locs) > 1:
loc_set: Set[LocatorType] = set(map(lambda lc: lc.type, locs))
if len(loc_set) != 1 or next(iter(loc_set)) != LocatorType.file:
urls = tuple(map(lambda loc: loc.url, locs))
if urls is not None:
self._show_urls(urls, extent)
else:
loc: Location
for loc in presentation.locators:
if loc.type == LocatorType.file:
self._show_file(loc.path, extent)
else:
if loc.is_file_url:
self._show_file(loc.path, extent)
else:
self._show_url(loc.url, extent) | zensols.showfile | /zensols.showfile-0.3.0-py3-none-any.whl/zensols/showfile/darwin.py | darwin.py |
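# A sketch (not part of the package) of the applescript call string built by
# _invoke_open_script above, following the f-string in that method; the
# argument values here are invented.
name, arg = 'preview', '/tmp/a.pdf'
x, y, width, height = 0, 0, 1024, 768
update_page, page_num = 'false', 'null'
fn = (f'show{name.capitalize()}("{arg}", {x}, {y}, ' +
      f'{width}, {height}, {update_page}, {page_num})')
print(fn)  # -> showPreview("/tmp/a.pdf", 0, 0, 1024, 768, false, null)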
__author__ = 'Paul Landes'
from typing import Callable, List, Any, Dict, Tuple, ClassVar
from dataclasses import dataclass, field
from enum import Enum, auto
import sys
import logging
from pathlib import Path
import json
import yaml
from zensols.util import stdout
from zensols.cli import ApplicationError
from zensols.introspect import IntegerSelection
from zensols.nlp import FeatureDocument, FeatureDocumentParser
from zensols.datdesc import HyperparamModel
from . import Match, MatchResult, Matcher
logger = logging.getLogger(__name__)
class OutputFormat(Enum):
text = auto()
sphinx = auto()
json = auto()
yaml = auto()
@dataclass
class Application(object):
"""An API to match spans of semantically similar text across documents.
"""
doc_parser: FeatureDocumentParser = field()
"""The feature document that normalizes (whitespace) parsed documents."""
matcher: Matcher = field()
"""Used to match spans of text."""
    def _read_and_parse_file(self, path: Path) -> FeatureDocument:
with open(path) as f:
content: str = f.read()
return self.doc_parser(content)
def match(self, source_file: Path, target_file: Path,
output_format: OutputFormat = OutputFormat.text,
selection: IntegerSelection = IntegerSelection('0'),
output_file: Path = Path('-'),
detail: bool = False):
"""Match spans across two text files.
:param source_file: the source match file
:param target_file: the target match file
        :param output_format: the output format of the matches
:param selection: the matches to output
:param output_file: the output file or ``-`` for standard out
:param detail: whether to output more information
"""
        source: FeatureDocument = self._read_and_parse_file(source_file)
        target: FeatureDocument = self._read_and_parse_file(target_file)
res: MatchResult = self.matcher(source, target)
matches: List[Match] = selection(res.matches)
line: str = (('_' * 79) + '\n')
with stdout(output_file) as f:
if output_format == OutputFormat.text:
match: Match
for i, match in enumerate(matches):
if detail:
f.write(f'<{match.source_span.norm}> -> ' +
f'<{match.target_span.norm}>\n')
match.write(depth=1, writer=f)
else:
if i > 0:
f.write(line)
f.write(f'source: <{match.source_span.norm}>\n')
f.write(f'target: <{match.target_span.norm}>\n')
elif output_format in {OutputFormat.json, OutputFormat.yaml}:
                mdcts: List[Dict[str, Any]] = list(
map(lambda m: m.asflatdict(include_norm=detail), matches))
if OutputFormat.json == output_format:
json.dump({'matches': mdcts}, f, indent=4)
else:
yaml.dump({'matches': list(map(dict, mdcts))}, f)
else:
raise ApplicationError(
f'Unsupported format: {output_format.name}')
def write_hyperparam(self, output_format: OutputFormat = OutputFormat.text):
"""Write the matcher's hyperparameter documentation.
        :param output_format: the format to write the hyperparameters
"""
hyp: HyperparamModel = self.matcher.hyp
fn: Callable = {
OutputFormat.text: lambda: hyp.write(include_doc=True),
OutputFormat.sphinx: lambda: hyp.write_sphinx(),
OutputFormat.json: lambda: hyp.asjson(writer=sys.stdout, indent=4),
OutputFormat.yaml: lambda: hyp.asyaml(writer=sys.stdout),
}[output_format]
fn()
@dataclass
class ProtoApplication(object):
CLI_META: ClassVar[Dict[str, Any]] = {'is_usage_visible': False}
app: Application = field()
def _match(self):
self.app.match(
Path('test-resources/source.txt'),
Path('test-resources/summary.txt'),
output_format=OutputFormat.text,
selection=IntegerSelection('2:4'))
def proto(self):
"""Used for REPL prototyping."""
self._match() | zensols.spanmatch | /zensols.spanmatch-0.0.1-py3-none-any.whl/zensols/spanmatch/app.py | app.py |
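# A minimal stand-in (not part of the package) for the selection argument of
# match() above: an IntegerSelection such as '2:4' selects a slice of the
# matches and '0' a single index.  The real zensols.introspect API may
# differ; this only illustrates the idea.
def select(spec: str, items: list) -> list:
    if ':' in spec:
        lo, hi = (int(p) for p in spec.split(':'))
        return items[lo:hi]
    return [items[int(spec)]]
print(select('2:4', list('abcdef')))  # -> ['c', 'd']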
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import List, Dict, ClassVar, Set, Tuple, Any
from dataclasses import dataclass, field
import logging
from collections import OrderedDict
import sys
from io import TextIOBase
import numpy as np
from torch import Tensor
from zensols.util import APIError
from zensols.config import Dictable
from zensols.persist import persisted
from zensols.nlp import FeatureToken, LexicalSpan, FeatureSpan, FeatureDocument
logger = logging.getLogger(__name__)
class DocumentMatchError(APIError):
"""Thrown for any document matching errors."""
pass
@dataclass(eq=False)
class TokenPoint(object):
"""A token and its position in the document and in embedded space.
"""
    _CASED: ClassVar[bool] = True
"""Whether to treat tokens as case sensitive."""
token: FeatureToken = field()
"""The token used in document :obj:`doc` used for clustering."""
doc: FeatureDocument = field()
"""The document that contains :obj:`token`."""
def __post_init__(self):
self._hash = hash((self.token, self.doc))
@property
def key(self) -> str:
"""The key used by :class:`.Matcher` used to index :class:`.WordFlow`s.
"""
key = self.token.lemma_
if not self._CASED:
key = key.lower()
return key
@property
@persisted('_embedding')
def embedding(self) -> np.ndarray:
"""The token embedding."""
if not hasattr(self.token, 'embedding'):
raise DocumentMatchError(f'Missing embedding: {self.token}')
tensor: Tensor = self.token.embedding
arr: np.ndarray = tensor.cpu().detach().numpy()
return np.expand_dims(arr.mean(axis=0), 0)
@property
def position(self) -> float:
"""The position of the token in the document."""
return self.token.i / self.doc.token_len
def __eq__(self, other: TokenPoint) -> bool:
return self._hash == other._hash and self.token == other.token
def __hash__(self) -> int:
return self._hash
def __str__(self) -> str:
return f'{self.token.norm}:{self.token.i}'
def __repr__(self) -> str:
return self.__str__()
@dataclass(order=True)
class WordFlow(Dictable):
"""The flow of a word between two documents.
"""
value: float = field()
"""The value of flow."""
source_key: str = field()
"""The :obj:`.TokenPoint.key`. of the originating document."""
target_key: str = field()
"""The :obj:`.TokenPoint.key`. of the target document."""
source_tokens: Tuple[TokenPoint] = field(repr=False)
"""The originating tokens that map from :obj:`source_key`."""
target_tokens: Tuple[TokenPoint] = field(repr=False)
"""The target tokens that map from :obj:`target_key`."""
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_tokens: bool = False):
self._write_line(str(self), depth, writer)
if include_tokens:
params: Dict[str, Any] = dict(
depth=depth + 2,
writer=writer,
include_type=False,
feature_ids={'norm', 'i', 'sent_i', 'idx'},
inline=True)
self._write_line('source:', depth + 1, writer)
for tok in self.source_tokens:
tok.token.write_attributes(**params)
self._write_line('target:', depth + 1, writer)
for tok in self.target_tokens:
tok.token.write_attributes(**params)
def __str__(self) -> str:
return f'{self.source_key} -> {self.target_key}: {self.value:.3f}'
@dataclass(order=False)
class Match(Dictable):
"""A span of matching text between two documents.
"""
source_tokens: Set[TokenPoint] = field(default_factory=set)
"""The originating tokens from the document."""
target_tokens: Set[TokenPoint] = field(default_factory=set)
"""The target tokens from the document"""
flow_values: List[float] = field(default_factory=list)
"""The values of each word flow."""
def __post_init__(self):
self._hash = hash((tuple(sorted(self.source_tokens)),
tuple(sorted(self.target_tokens)),
tuple(self.flow_values)))
@property
def total_flow_value(self) -> float:
"""The sum of the :obj:`flow_values`."""
return sum(self.flow_values)
@property
def source_document(self) -> FeatureDocument:
"""The originating document."""
return next(iter(self.source_tokens)).doc
@property
def target_document(self) -> FeatureDocument:
"""The target document."""
return next(iter(self.target_tokens)).doc
def _get_lexspan(self, tokens: Set[TokenPoint], doc: FeatureDocument) -> \
            LexicalSpan:
return LexicalSpan.widen(map(lambda t: t.token.lexspan, tokens))
@property
def source_lexspan(self) -> LexicalSpan:
"""The originating document's lexical span."""
return self._get_lexspan(self.source_tokens, self.source_document)
@property
def target_lexspan(self) -> LexicalSpan:
"""The target document's lexical span."""
return self._get_lexspan(self.target_tokens, self.target_document)
@property
def source_span(self) -> FeatureSpan:
"""The originating document's span."""
return self.source_document.get_overlapping_span(
self.source_lexspan, inclusive=False).to_sentence()
@property
def target_span(self) -> FeatureSpan:
"""The target document's span."""
return self.target_document.get_overlapping_span(
self.target_lexspan, inclusive=False).to_sentence()
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_tokens: bool = False,
include_flow: bool = True,
char_limit: int = sys.maxsize):
if include_flow:
self._write_line(f'flow: {self.total_flow_value}', depth, writer)
if include_tokens:
self._write_line('source:', depth, writer)
for tok in self.source_tokens:
self._write_line(tok, depth + 1, writer)
self._write_line('target:', depth, writer)
for tok in self.target_tokens:
self._write_line(tok, depth + 1, writer)
else:
self._write_line(f'source {self.source_lexspan}:',
depth, writer)
self.source_span.write_text(depth + 1, writer, limit=char_limit)
self._write_line(f'target {self.target_lexspan}:',
depth, writer)
self.target_span.write_text(depth + 1, writer, limit=char_limit)
def asflatdict(self, *args,
include_norm: bool = False,
include_text: bool = False,
**kwargs):
dct = OrderedDict(
[['flow', float(self.total_flow_value)],
['source_span', self.source_lexspan.asflatdict()],
['target_span', self.target_lexspan.asflatdict()]])
if include_norm:
dct['source_norm'] = self.source_span.norm
dct['target_norm'] = self.target_span.norm
if include_text:
dct['source_text'] = self.source_span.text
dct['target_text'] = self.target_span.text
return dct
def to_str(self, tokens: bool = False, spans: bool = True,
flow: bool = True) -> str:
s: str
if tokens:
s = f'{self.source_tokens} -> {self.target_tokens}'
else:
if spans:
s = (f'{self.source_span}{self.source_lexspan} -> ' +
f'{self.target_span}{self.target_lexspan}')
else:
s = f'{self.source_span} -> {self.target_span}'
if flow:
s = s + f', flow={self.total_flow_value:.3e}'
return s
def __hash__(self) -> int:
return self._hash
def __eq__(self, other: Match) -> bool:
return self.flow_values == other.flow_values and \
self.source_tokens == other.source_tokens and \
self.target_tokens == other.target_tokens
def __lt__(self, other: Match) -> bool:
return self.source_lexspan < other.source_lexspan
def __str__(self) -> str:
return self.to_str()
def __repr__(self) -> str:
return self.__str__()
@dataclass
class MatchResult(Dictable):
"""Contains the lexical text match pairs from the first to the second
document given by :meth:`.Matcher.match`.
"""
_DICTABLE_WRITE_EXCLUDES: ClassVar[Set[str]] = set(
'keys cost dist'.split())
_DICTABLE_ATTRIBUTES: ClassVar[Set[str]] = set('flows'.split())
keys: List[str] = field(repr=False)
"""The :obj:`.TokenPoint.key`s to tokens used to normalize document
frequencies in the nBOW.
"""
source_points: List[TokenPoint] = field(repr=False)
"""The first document's token points."""
target_points: List[TokenPoint] = field(repr=False)
"""The second document's token points."""
source_tokens: Dict[str, List[TokenPoint]] = field(repr=False)
"""The first document's token points indexed by the :obj:`.TokenPoint.key`.
"""
target_tokens: Dict[str, List[TokenPoint]] = field(repr=False)
"""The first document's token points indexed by the :obj:`.TokenPoint.key`.
"""
cost: np.ndarray = field(repr=False)
"""The earth mover distance solution, which is the cost of transportation
from first to the second document.
"""
dist: np.ndarray = field(repr=False)
"""The distance matrix of all token's in the embedded space."""
matches: Tuple[Match] = field(default=None)
"""The matching passages between the documents."""
@property
@persisted('_transit', transient=True)
def transit(self) -> np.ndarray:
return self.cost * self.dist
@property
@persisted('_flows', transient=True)
def flows(self) -> Tuple[WordFlow]:
"""The Word Mover positional flows."""
trans: np.ndarray = self.transit
paths: np.ndarray = np.nonzero(trans)
wflows: List[WordFlow] = []
for r, c in zip(paths[0], paths[1]):
fr: str = self.keys[r]
to: str = self.keys[c]
wflows.append(WordFlow(
source_key=fr,
target_key=to,
source_tokens=self.source_tokens[fr],
target_tokens=self.target_tokens[to],
value=trans[r, c]))
wflows.sort(reverse=True)
return tuple(wflows)
@property
@persisted('_mapping', transient=True)
def mapping(self) -> Tuple[WordFlow]:
"""Like :obj:`flows` but do not duplicate sources"""
srcs: Set[str] = set()
flows: List[WordFlow] = []
flow: WordFlow
for flow in self.flows:
if flow.source_key not in srcs:
flows.append(flow)
srcs.add(flow.source_key)
return tuple(flows)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_source: bool = True, include_target: bool = True,
include_tokens: bool = False, include_mapping: bool = True,
match_detail: bool = False):
flow_val: float = sum(map(lambda f: f.value, self.flows))
mapping: float = sum(map(lambda f: f.value, self.mapping))
if include_source:
self._write_line('source:', depth, writer)
self.source_points[0].doc.write_text(depth + 1, writer)
if include_target:
self._write_line('target:', depth, writer)
self.target_points[0].doc.write_text(depth + 1, writer)
self._write_line('flow:', depth, writer)
self._write_line(f'mapped: {mapping}', depth + 1, writer)
self._write_line(f'total: {flow_val}', depth + 1, writer)
if include_mapping:
self._write_line('mapping:', depth, writer)
flow: WordFlow
for flow in self.mapping:
flow.write(depth + 1, writer, include_tokens=include_tokens)
self._write_line('matches:', depth, writer)
match: Match
for i, match in enumerate(self.matches):
if match_detail:
self._write_line(f'{i}:', depth + 1, writer)
match.write(depth + 2, writer)
else:
self._write_line(match, depth + 1, writer) | zensols.spanmatch | /zensols.spanmatch-0.0.1-py3-none-any.whl/zensols/spanmatch/domain.py | domain.py |
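# A standalone sketch (not part of the package) of the ordering used by
# MatchResult.flows above: WordFlow is a dataclass with order=True and
# `value` as its first field, so sort(reverse=True) puts the highest-valued
# flows first.  The compare=False on `key` is a simplification here.
from dataclasses import dataclass, field
@dataclass(order=True)
class _Flow:
    value: float
    key: str = field(compare=False)
flows = [_Flow(0.1, 'a'), _Flow(0.7, 'b'), _Flow(0.3, 'c')]
flows.sort(reverse=True)
print([f.key for f in flows])  # -> ['b', 'c', 'a']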
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import List, Dict, Type, Tuple, Iterable, Set
from dataclasses import dataclass, field
import logging
import collections
import numpy as np
from sklearn.cluster import AgglomerativeClustering
import ot
from scipy.spatial import distance
from zensols.nlp import FeatureToken, FeatureDocument
from zensols.datdesc import HyperparamModel
from . import DocumentMatchError, TokenPoint, MatchResult, Match
logger = logging.getLogger(__name__)
@dataclass(order=True, frozen=True)
class _BiMatch(object):
flow: float
forward: Match
reverse: Match
@dataclass
class Matcher(object):
"""Creates matching spans of text between two documents by first using the
word mover algorithm and then clustering by tokens' positions in their
respective documents.
"""
dtype: Type = field(default=np.float64)
"""The floating point type used for word mover and clustering."""
hyp: HyperparamModel = field(default=None)
"""The model's hyperparameters.
Hyperparameters::
:param cased: whether or not to treat text as cased
:type cased: bool
:param distance_metric: the default distance metric for
calculating the distance from each
                            embedded :class:`.TokenPoint`. :see:
                            :func:`scipy.spatial.distance.cdist`
    :type distance_metric: str; a metric name accepted by
                           :func:`scipy.spatial.distance.cdist` (e.g.
                           ``euclidean``)
    :param bidirect_match: how (or whether) to order matches by a
                           bidirectional flow
:type bidirect_match: str; one of: none, norm, sum
:param source_distance_threshold: the source document clustering
threshold distance
:type source_distance_threshold: float
:param target_distance_threshold: the target document clustering
threshold distance
:type target_distance_threshold: float
:param source_position_scale: used to scale the source document
positional embedding component
:type source_position_scale: float
:param target_position_scale: used to scale the target document
positional embedding component
:type target_position_scale: float
:param min_flow_value: the minimum match flow; any matches that
fall below this value are filtered
:type min_flow_value: float
:param min_source_token_span: the minimum source span length in
                                  tokens to be considered for matches
:type min_source_token_span: int
:param min_target_token_span: the minimum target span length in
                                  tokens to be considered for matches
:type min_target_token_span: int
"""
def __post_init__(self):
TokenPoint._CASED = self.hyp.cased
def _nbow(self, doc: FeatureDocument) -> \
Tuple[List[TokenPoint], Dict[str, List[TokenPoint]]]:
"""Create the nBOW (bag of words) used for document frequencies."""
def filter_toks(t: FeatureToken) -> bool:
return not t.is_stop and not t.is_punctuation and not t.is_space
toks: List[TokenPoint] = []
by_key: Dict[str, List[TokenPoint]] = collections.defaultdict(list)
ftoks: Tuple[FeatureToken] = tuple(
filter(filter_toks, doc.token_iter()))
if len(ftoks) == 0:
ftoks = doc.tokens
for tok in ftoks:
tp = TokenPoint(tok, doc)
tp_key: str = tp.key
toks.append(tp)
by_key[tp_key].append(tp)
return toks, dict(by_key)
@staticmethod
def _tok_agg(toks: List[TokenPoint], lix: int, dist: List[np.ndarray],
distix: List[int]):
"""Aggregate tokens' embeddings by taking the mean."""
if toks is not None:
emb: np.ndarray = np.concatenate(
tuple(map(lambda t: t.embedding, toks)))
emb: np.ndarray = np.mean(emb, axis=0)
dist.append(emb)
distix.append(lix)
def _wmd(self, a: FeatureDocument, b: FeatureDocument) -> MatchResult:
"""Use the word mover algorithm to create a token to token matches.
:param a: the source document
:param b: the target document
"""
aps: Tuple[List[TokenPoint]]
bps: Tuple[List[TokenPoint]]
atoks: Dict[str, List[TokenPoint]]
btoks: Dict[str, List[TokenPoint]]
aps, atoks = self._nbow(a)
bps, btoks = self._nbow(b)
tp_keys: List[str] = sorted(set(atoks.keys()) | set(btoks.keys()))
n_words: int = len(tp_keys)
hist = np.zeros((2, n_words), dtype=self.dtype)
adist: List[np.ndarray] = []
adistix: List[int] = []
bdist: List[np.ndarray] = []
bdistix: List[int] = []
for lix, tp_key in enumerate(tp_keys):
ats: List[TokenPoint] = atoks.get(tp_key)
bts: List[TokenPoint] = btoks.get(tp_key)
self._tok_agg(ats, lix, adist, adistix)
self._tok_agg(bts, lix, bdist, bdistix)
if ats is not None:
hist[0, lix] = len(ats)
if bts is not None:
hist[1, lix] = len(bts)
adist: np.ndarray = np.stack(adist)
adist = adist / np.linalg.norm(adist, axis=1, keepdims=True)
bdist: np.ndarray = np.stack(bdist)
bdist = bdist / np.linalg.norm(bdist, axis=1, keepdims=True)
dist_mat = distance.cdist(adist, bdist, metric=self.hyp.distance_metric)
dist_arr: np.ndarray = np.zeros((n_words, n_words), dtype=self.dtype)
dist_arr[np.ix_(adistix, bdistix)] = dist_mat
if logger.isEnabledFor(logging.DEBUG):
cnthist = hist.copy()
hist[0] = hist[0] / hist[0].sum()
hist[1] = hist[1] / hist[1].sum()
if logger.isEnabledFor(logging.DEBUG):
for i in range(hist.shape[1]):
logger.debug(
f'{tp_keys[i]}: a={int(cnthist[0][i])}/{hist[0][i]}, ' +
f'b={int(cnthist[1][i])}/{hist[1][i]}')
cost: np.ndarray = ot.emd(hist[0], hist[1], dist_arr)
return MatchResult(
source_points=aps,
target_points=bps,
keys=tp_keys,
source_tokens=atoks,
target_tokens=btoks,
cost=cost,
dist=dist_arr)
def _pos_cluster(self, points: List[TokenPoint], distance_threshold: float,
position_scale: float) -> Dict[TokenPoint, int]:
"""Cluster a document (the source or target document) token embeddings
using their positions.
        :param points: the token points to cluster
:return: the mapping from each point to their cluster
"""
if len(points) == 1:
return {points[0]: 0}
model = AgglomerativeClustering(
n_clusters=None,
distance_threshold=distance_threshold)
emb: np.ndarray = np.concatenate(
tuple(map(lambda p: p.embedding, points)))
# normalize the embeddings to unit length
emb = emb / np.linalg.norm(emb, axis=1, keepdims=True)
        # add the position dimension, scale it, and concatenate to the word
# embeddings
pos: np.ndarray = np.array(tuple(map(lambda p: p.position, points)))
# scale with the hyperparameter
pos = pos * position_scale
emb = np.concatenate((emb, np.expand_dims(pos, axis=1)), axis=1)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'embedding shape: {emb.shape}, points: {len(points)}')
# cluster the tokens across the embedded space and their doc position
model.fit(emb)
by_cluster: Dict[TokenPoint, int] = {}
pix: int
cid: int
for pix, cid in enumerate(model.labels_):
tp: TokenPoint = points[pix]
by_cluster[tp] = cid
return by_cluster
def _cluster_by_position(self, res: MatchResult, fwd: bool) -> Tuple[Match]:
"""Cluster points using their posisiton in the document.
:return: the matched document spans from the source to the target
document
"""
def filter_matches(match: Match) -> bool:
return len(match.source_tokens) >= min_src_ts and \
len(match.target_tokens) >= min_targ_ts and \
match.total_flow_value > h.min_flow_value
h: HyperparamModel = self.hyp
min_src_ts: int = h.min_source_token_span \
if fwd else h.min_target_token_span
min_targ_ts: int = h.min_target_token_span \
if fwd else h.min_source_token_span
aclusts: Dict[TokenPoint, int] = self._pos_cluster(
res.source_points,
h.source_distance_threshold if fwd else h.target_distance_threshold,
h.source_position_scale if fwd else h.target_position_scale)
bclusts: Dict[TokenPoint, int] = self._pos_cluster(
res.target_points,
h.target_distance_threshold if fwd else h.source_distance_threshold,
h.target_position_scale if fwd else h.source_position_scale)
clusts: Dict[Tuple[int, int], Match] = collections.defaultdict(Match)
for flow in res.mapping:
ap: TokenPoint
for ap in flow.source_tokens:
aclust: int = aclusts[ap]
bp: TokenPoint
for bp in flow.target_tokens:
bclust: int = bclusts[bp]
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f' {ap}({aclust}) -> {bp} {bclust}')
match: Match = clusts[(aclust, bclust)]
match.source_tokens.add(ap)
match.target_tokens.add(bp)
match.flow_values.append(flow.value)
matches: Iterable[Match] = filter(filter_matches, clusts.values())
return tuple(sorted(matches))
def _reorder_bimatch(self, forward_res: MatchResult,
reverse_res: MatchResult) -> Tuple[Match]:
def filter_match(m: Match) -> bool:
if m in seen:
return False
seen.add(m)
return True
seen: Set[Match] = set()
bd_match: str = self.hyp.bidirect_match
bims: List[_BiMatch] = []
forward_flows: np.ndarray = np.array(tuple(map(
lambda m: m.total_flow_value, forward_res.matches)))
reverse_flows: np.ndarray = np.array(tuple(map(
lambda m: m.total_flow_value, reverse_res.matches)))
if bd_match == 'norm':
forward_flows = forward_flows / forward_flows.sum()
reverse_flows = reverse_flows / reverse_flows.sum()
elif bd_match == 'sum':
pass
else:
raise DocumentMatchError(
                f'Unknown bidirectional match type: {bd_match}')
for fix, fwd in enumerate(forward_res.matches):
for rix, rev in enumerate(reverse_res.matches):
                # the trailing 'or True' disables the overlap test below,
                # so every forward/reverse match pair is considered
                if fwd.source_lexspan.overlaps_with(rev.target_lexspan) and \
                        fwd.target_lexspan.overlaps_with(rev.source_lexspan) or True:
ff: float = forward_flows[fix]
rf: float = reverse_flows[rix]
bims.append(_BiMatch(
flow=ff + rf,
forward=fwd,
reverse=rev))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'{len(bims)} bidirectional matches found')
if len(bims) > 0:
bims.sort(reverse=True)
return tuple(filter(filter_match, map(lambda bm: bm.forward, bims)))
def match(self, source_doc: FeatureDocument,
target_doc: FeatureDocument) -> MatchResult:
"""Match lexical spans of text from one document to the other.
:param source_doc: the source document from where words flow
:param target_doc: the target document to where words flow
:return: the matched document spans from the source to the target
document
"""
res: MatchResult = self._wmd(source_doc, target_doc)
res.matches = self._cluster_by_position(res, True)
if self.hyp.bidirect_match != 'none':
rr_res: MatchResult = self._wmd(target_doc, source_doc)
rr_res.matches = self._cluster_by_position(rr_res, False)
res.matches = self._reorder_bimatch(res, rr_res)
else:
            # no bidirectional reorder: sort by highest total flow first
res.matches = sorted(
res.matches,
key=lambda m: m.total_flow_value,
reverse=True)
if logger.isEnabledFor(logging.INFO):
logger.info(f'{len(res.matches)} matches found')
return res
def __call__(self, source_doc: FeatureDocument,
target_doc: FeatureDocument) -> MatchResult:
"""See :meth:`match`."""
return self.match(source_doc, target_doc) | zensols.spanmatch | /zensols.spanmatch-0.0.1-py3-none-any.whl/zensols/spanmatch/match.py | match.py |
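# A small numeric sketch (not part of the package) of the optimal transport
# step in _wmd above: normalized nBOW histograms and a token distance matrix
# go to ot.emd, which returns the transport (cost) matrix.  The histograms
# and distances here are invented; requires the POT package (pip install pot).
import numpy as np
import ot
a = np.array([0.5, 0.5, 0.0])       # source nBOW over a shared vocabulary
b = np.array([0.0, 0.5, 0.5])       # target nBOW over the same vocabulary
dist = np.ones((3, 3)) - np.eye(3)  # zero distance between identical words
flow = ot.emd(a, b, dist)
print(flow)  # flow[i, j]: mass moved from source word i to target word j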
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Any, Type, Optional, Tuple, Dict
from abc import ABC, abstractmethod
from enum import Enum
import logging
import inspect
import copy as cp
from pathlib import Path
import textwrap
from time import time
from zensols.util import APIError
from zensols.introspect import (
ClassImporter, ClassResolver, DictionaryClassResolver
)
from zensols.config import Configurable
logger = logging.getLogger(__name__)
class FactoryError(APIError):
"""Raised when an object can not be instantianted by a :class:`.ConfigFactory`.
"""
def __init__(self, msg: str, factory: ConfigFactory = None):
if factory is not None:
config = factory.config
if config is not None and hasattr(config, 'config_file') and \
isinstance(config.config_file, (str, Path)):
cf = config.config_file
if isinstance(cf, Path):
cf = cf.absolute()
msg += f', in file: {cf}'
super().__init__(msg)
class FactoryState(Enum):
"""The state updated from an instance of :class:`.ConfigFactory`. Currently
the only state is that an object has finished being created.
    Future states might include when an :class:`.ImportConfigFactory` has
    created all objects from a shared configuration session.
"""
CREATED = 1
class FactoryStateObserver(ABC):
"""An interface that recieves notifications that the factory has created this
instance. This is useful for classes such as :class:`.Writeback`.
:see: :class:`.Writeback`
"""
@abstractmethod
def _notify_state(self, state: FactoryState):
pass
class FactoryClassImporter(ClassImporter):
"""Just like the super class, but if instances of type
:class:`.FactoryStateObserver` are notified with a
:class:`.FactoryState.CREATED`.
"""
def _bless(self, inst: Any) -> Any:
if isinstance(inst, FactoryStateObserver):
inst._notify_state(FactoryState.CREATED)
return super()._bless(inst)
class ImportClassResolver(ClassResolver):
"""Resolve a class name from a list of registered class names without the
module part. This is used with the ``register`` method on
:class:`.ConfigFactory`.
:see: :meth:`.ConfigFactory.register`
"""
def __init__(self, reload: bool = False):
self.reload = reload
def create_class_importer(self, class_name: str):
return FactoryClassImporter(class_name, reload=self.reload)
def find_class(self, class_name: str):
class_importer = self.create_class_importer(class_name)
return class_importer.get_module_class()[1]
class ConfigFactory(object):
"""Creates new instances of classes and configures them given data in a
configuration :class:`.Configurable` instance.
"""
NAME_ATTRIBUTE = 'name'
"""The *name* of the parameter given to ``__init__``. If a parameter of this
name is on the instance being created it will be set from the name of the
section.
"""
CONFIG_ATTRIBUTE = 'config'
"""The *configuration* of the parameter given to ``__init__``. If a parameter
of this name is on the instance being created it will be set as the
instance of the configuration given to the initializer of this factory
instance.
"""
CONFIG_FACTORY_ATTRIBUTE = 'config_factory'
"""The *configuration factory* of the parameter given to ``__init__``. If a
parameter of this name is on the instance being created it will be set as
the instance of this configuration factory.
"""
CLASS_NAME = 'class_name'
"""The class name attribute in the section that identifies the fully qualified
instance to create.
"""
def __init__(self, config: Configurable, pattern: str = '{name}',
default_name: str = 'default',
class_resolver: ClassResolver = None):
"""Initialize a new factory instance.
:param config: the configuration used to create the instance; all data
from the corresponding section is given to the
``__init__`` method
:param pattern: section pattern used to find the values given to the
``__init__`` method
        :param default_name: the section name to use when no name is given
                             to :meth:`instance`
        :param class_resolver: resolves class names to classes; defaults to
                               a :class:`.DictionaryClassResolver` created
                               from the factory's registered instance
                               classes
"""
self.config = config
self.pattern = pattern
self.default_name = default_name
if class_resolver is None:
self.class_resolver = DictionaryClassResolver(
self.INSTANCE_CLASSES)
else:
self.class_resolver = class_resolver
@classmethod
def register(cls, instance_class: Type, name: str = None):
"""Register a class with the factory. This method assumes the factory instance
was created with a (default) :class:`.DictionaryClassResolver`.
:param instance_class: the class to register with the factory (not a
string)
:param name: the name to use as the key for instance class lookups;
defaults to the name of the class
"""
if name is None:
name = instance_class.__name__
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'registering: {instance_class} for {cls} -> {name}')
cls.INSTANCE_CLASSES[name] = instance_class
def _find_class(self, class_name: str) -> Type:
"""Resolve the class from the name."""
return self.class_resolver.find_class(class_name)
def _class_name_params(self, name: str) -> Tuple[str, Dict[str, Any]]:
"""Get the class name and parameters to use to create an instance.
:param name: the configuration section name, which is the object name
:return: a tuple of the fully qualified class name and the parameters
used as arguments to the class initializer; if a class is not
provided it defaults to :class:`.Settings`
"""
sec = self.pattern.format(**{'name': name})
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'section: {sec}')
params: Dict[str, Any] = {}
try:
params.update(self.config.populate({}, section=sec))
except Exception as e:
raise FactoryError(
f'Can not populate from section {sec}', self) from e
class_name = params.get(self.CLASS_NAME)
if class_name is None:
if len(params) == 0:
raise FactoryError(f"No such entry: '{name}'", self)
else:
class_name = 'zensols.config.Settings'
else:
del params[self.CLASS_NAME]
return class_name, params
def _has_init_parameter(self, cls: Type, param_name: str):
args = inspect.signature(cls.__init__)
return param_name in args.parameters
def _instance(self, cls_desc: str, cls: Type, *args, **kwargs):
"""Return the instance.
:param cls_desc: a description of the class (i.e. section name)
:param cls: the class to create the instance from
:param args: given to the ``__init__`` method
:param kwargs: given to the ``__init__`` method
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'args: {args}, kwargs: {kwargs}')
try:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'config factory creating instance of {cls}')
inst = cls(*args, **kwargs)
if isinstance(inst, FactoryStateObserver):
inst._notify_state(FactoryState.CREATED)
except Exception as e:
llen = 200
kwstr = str(kwargs)
if len(kwstr) > llen:
kwstr = 'keys: ' + (', '.join(kwargs.keys()))
kwstr = textwrap.shorten(kwstr, llen)
raise FactoryError(f'Can not create \'{cls_desc}\' for class ' +
f'{cls}({args})({kwstr}): {e}', self) from e
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'inst: {inst.__class__}')
return inst
def instance(self, name: Optional[str] = None, *args, **kwargs):
"""Create a new instance using key ``name``.
:param name: the name of the class (by default) or the key name of the
class used to find the class; this is the section name for
the :class:`.ImportConfigFactory`
:param args: given to the ``__init__`` method
:param kwargs: given to the ``__init__`` method
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'new instance of {name}')
t0 = time()
name = self.default_name if name is None else name
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating instance of {name}')
class_name, params = self._class_name_params(name)
if self.CLASS_NAME in kwargs:
class_name = kwargs.pop(self.CLASS_NAME)
cls = self._find_class(class_name)
params.update(kwargs)
if self._has_init_parameter(cls, self.CONFIG_ATTRIBUTE) \
and self.CONFIG_ATTRIBUTE not in params:
logger.debug('setting config parameter')
params['config'] = self.config
if self._has_init_parameter(cls, self.NAME_ATTRIBUTE) \
and self.NAME_ATTRIBUTE not in params:
logger.debug('setting name parameter')
params['name'] = name
if self._has_init_parameter(cls, self.CONFIG_FACTORY_ATTRIBUTE) \
and self.CONFIG_FACTORY_ATTRIBUTE not in params:
logger.debug('setting config factory parameter')
params['config_factory'] = self
if logger.isEnabledFor(logging.DEBUG):
for k, v in params.items():
logger.debug(f'populating {k} -> {v} ({type(v)})')
inst = self._instance(name, cls, *args, **params)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'created {name} instance of {cls.__name__} ' +
f'in {(time() - t0):.2f}s')
return inst
def get_class(self, name: str) -> Type:
"""Return a class by name.
:param name: the name of the class (by default) or the key name of the
class used to find the class; this is the section name for
the :class:`.ImportConfigFactory`
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'new instance of {name}')
name = self.default_name if name is None else name
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating instance of {name}')
class_name, params = self._class_name_params(name)
return self._find_class(class_name)
def from_config_string(self, v: str) -> Any:
"""Create an instance from a string used as option values in the configuration.
"""
try:
v = eval(v)
except Exception:
pass
return self.instance(v)
def clone(self) -> Any:
"""Return a copy of this configuration factory that functionally works the
same.
"""
return cp.copy(self)
def __call__(self, *args, **kwargs):
"""Calls ``instance``.
"""
return self.instance(*args, **kwargs) | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/config/facbase.py | facbase.py |
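# A standalone sketch (not part of the module) of the class resolution idea
# behind ImportClassResolver/FactoryClassImporter above: a fully qualified
# name is split into module and class, the module imported, and the class
# attribute returned.  Error handling is omitted for brevity.
import importlib
def find_class(class_name: str) -> type:
    module_name, _, cls_name = class_name.rpartition('.')
    return getattr(importlib.import_module(module_name), cls_name)
print(find_class('pathlib.PurePath'))  # -> <class 'pathlib.PurePath'>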
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Iterable, Tuple, List, Dict, Any, Set, Sequence, Union
import logging
from itertools import chain
from collections import ChainMap
from pathlib import Path
from configparser import (
ConfigParser, ExtendedInterpolation, InterpolationMissingOptionError
)
from zensols.introspect import ClassImporterError
from . import (
ConfigurableError, ConfigurableFileNotFoundError,
Configurable, ConfigurableFactory, IniConfig, ImportYamlConfig, rawconfig,
)
logger = logging.getLogger(__name__)
class _ParserAdapter(object):
"""Adapts a :class:`~configparser.ConfigParser` to a :class:`.Configurable`.
"""
def __init__(self, conf: Configurable, defs: Dict[str, str]):
self.conf = conf
self.defs = defs
def get(self, section: str, option: str, *args, **kwags):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
f'get ({type(self.conf).__name__}): {section}:{option}')
if self.conf.has_option(option, section):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('contains option')
val = self.conf.get_option(option, section)
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'option not found, trying defs: {self.defs}')
val = self.defs.get(f'{section}:{option}')
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'using defaults value: {val}')
if val is None:
# raise an InterpolationMissingOptionError
try:
self.conf.get_option(option, section)
except ConfigurableError as e:
raise ConfigurableError(
f'Can not get option {section}:{option}') from e
return val
def optionxform(self, option: str) -> str:
return option.lower()
def items(self, section: str, raw: bool = False):
return self.conf.get_options(section)
def __str__(self) -> str:
return str(self.conf.__class__.__name__)
def __repr__(self) -> str:
return self.__str__()
class _SharedExtendedInterpolation(ExtendedInterpolation):
"""Adds other :class:`Configurable` instances to available parameter to
substitute.
"""
def __init__(self, children: Tuple[Configurable, ...],
robust: bool = False):
super().__init__()
defs = {}
for child in children:
with rawconfig(child):
for sec in child.sections:
for k, v in child.get_options(sec).items():
defs[f'{sec}:{k}'] = v
self.children = tuple(map(lambda c: _ParserAdapter(c, defs), children))
self.robust = robust
def before_get(self, parser: ConfigParser, section: str, option: str,
value: str, defaults: ChainMap):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'before_get: section: {section}:{option}: {value}')
res = value
last_ex = None
parsers = tuple(chain.from_iterable([[parser], self.children]))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'defaults: {defaults}')
for pa in parsers:
try:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'inter: {pa}: {section}:{option} = {value}')
res = super().before_get(pa, section, option, value, defaults)
last_ex = None
break
except InterpolationMissingOptionError as e:
last_ex = e
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'missing option: {e}')
if (not self.robust) and (last_ex is not None):
msg = f'can not set {section}:{option} = {value}: {last_ex}'
raise ConfigurableError(msg)
return res
class _BootstrapConfig(IniConfig):
"""Configuration class extends using advanced interpolation with
:class:`~configparser.ExtendedInterpolation`. One of these is created
every time an instance of :class:`.ImportIniConfig` is created, which
includes nested configruation imports when we *descend* recursively.
"""
def __init__(self, parent: IniConfig, children: Tuple[Configurable, ...]):
"""Initialize.
:param parent: the initial config having only the import, load and
reference sections
:param children: the children initialized with
:class:`.ImportIniConfig`, which are later used to
copy forward configuration as configurations are
loaded
"""
super().__init__(parent, parent.default_section)
self.children = [parent] + list(children)
def append_child(self, child: Configurable):
self.children.append(child)
for c in self.children:
with rawconfig(c):
c.copy_sections(self)
def _create_config_parser(self) -> ConfigParser:
parser = ConfigParser(
interpolation=_SharedExtendedInterpolation(self.children))
with rawconfig(self.config_file):
for sec in self.config_file.sections:
parser.add_section(sec)
for k, v in self.config_file.get_options(sec).items():
parser.set(sec, k, v)
return parser
def _create_and_load_parser(self, parser: ConfigParser):
# skip reloading, as that was done when the parser was created
pass
class ImportIniConfig(IniConfig):
"""A configuration that uses other :class:`.Configurable` classes to load other
sections. A special ``import`` section is given that indicates what other
sections to load as children configuration. Each of those indicated to
import are processed in order by:
1. Creating the delegate child :class:`Configurable` given in the
section.
2. Copying all sections from child instance to the parent.
3. Variable interpolation as a function of
:class:`~configparser.ConfigParser` using
:class:`~configparser.ExtendedInterpolation`.
    The ``import`` section has a ``sections`` entry as a list of sections to
    load, a ``references`` entry indicating which sections to provide as
    children sections in child loaders, and ``config_file`` and
    ``config_files`` entries to load as children directly.
For example::
[import]
references = list: default, package, env
sections = list: imp_obj
[imp_obj]
type = importini
config_file = resource: resources/obj.conf
This configuration loads a resource import INI, which is an implementation
of this class, and provides sections ``default``, ``package`` and ``env``
for any property string interpolation while loading ``obj.conf``.
See the `API documentation
<https://plandes.github.io/util/doc/config.html#import-ini-configuration>`_
for more information.
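    A minimal programmatic usage sketch (the file name is hypothetical)::

        config = ImportIniConfig('app.conf')
        for sec in config.sections:
            print(sec, config.get_options(sec))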
"""
IMPORT_SECTION = 'import'
SECTIONS_SECTION = 'sections'
SINGLE_CONFIG_FILE = ConfigurableFactory.SINGLE_CONFIG_FILE
CONFIG_FILES = 'config_files'
REFS_NAME = 'references'
CLEANUPS_NAME = 'cleanups'
TYPE_NAME = ConfigurableFactory.TYPE_NAME
_IMPORT_SECTION_FIELDS = {SECTIONS_SECTION, SINGLE_CONFIG_FILE,
CONFIG_FILES, REFS_NAME, CLEANUPS_NAME}
def __init__(self, *args,
config_section: str = IMPORT_SECTION,
exclude_config_sections: bool = True,
children: Tuple[Configurable, ...] = (),
use_interpolation: bool = True,
**kwargs):
"""Initialize.
:param config_file: the configuration file path to read from
:param default_section: default section (defaults to `default`)
:param robust: if `True`, then don't raise an error when the
configuration file is missing
:param config_section: the name of the section that has the
configuration (i.e. the ``sections`` entry)
:param exclude_config_sections:
if ``True``, the import and other configuration sections are
removed
:param children: additional configurations used both before and after
bootstrapping
:param use_interpolation: if ``True``, interpolate variables using
:class:`~configparser.ExtendedInterpolation`
"""
super().__init__(*args, use_interpolation=use_interpolation, **kwargs)
self.config_section = config_section
self.exclude_config_sections = exclude_config_sections
if children is None:
self._raise('Missing importini children')
self.children = children
if exclude_config_sections and \
(self.default_section == self.config_section):
self._raise('You must set exclude_config_sections to False ' +
'when the import and config section are the same')
def _get_bootstrap_config(self) -> _BootstrapConfig:
"""Create the config that is used to read only the sections needed to
import/load other configuration. This adds the import section, any
sections it *refers* to, and the sections it indicates to load.
        References are those needed to continue parsing the rest of the
        bootstrap configuration for this instance. This usually includes a
        ``default`` section that might have a ``resources`` property used to
        populate load section paths.
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug('creating bootstrap parser')
conf_sec = self.config_section
bs_config = IniConfig(self.config_file)
cparser = bs_config.parser
has_secs = bs_config.has_option(self.SECTIONS_SECTION, conf_sec)
has_refs = bs_config.has_option(self.REFS_NAME, conf_sec)
# add sections and references to the temporary config
if has_secs or has_refs:
secs = set()
# add load sections
if has_secs:
sec_lst: List[Union[str, Path]] = self.serializer.parse_object(
bs_config.get_option(self.SECTIONS_SECTION, conf_sec))
secs.update(set(sec_lst))
# add references
if has_refs:
refs: List[Union[str, Path]] = self.serializer.parse_object(
bs_config.get_option(self.REFS_NAME, conf_sec))
secs.update(set(refs))
# add the import section itself, used later to load children config
secs.add(conf_sec)
# remove all sections but import, load and reference from the
# parser
to_remove = set(bs_config.sections) - secs
for r in to_remove:
cparser.remove_section(r)
return _BootstrapConfig(bs_config, self.children)
def _validate_bootstrap_config(self, config: Configurable):
"""Validate that the import section doesn't have bad configuration."""
conf_sec: str = self.config_section
if conf_sec in config.sections:
import_sec: Dict[str, str] = config.populate({}, conf_sec)
import_props: Set[str] = set(import_sec.keys())
refs: List[str] = import_sec.get(self.REFS_NAME)
file_props: Set[str] = {self.SINGLE_CONFIG_FILE, self.CONFIG_FILES}
aliens = import_props - self._IMPORT_SECTION_FIELDS
if len(aliens) > 0:
props = ', '.join(map(lambda p: f"'{p}'", aliens))
self._raise(f"Invalid options in section '{conf_sec}'" +
f": {props}")
if len(file_props & import_props) == 2:
self._raise(
f"Cannot have both '{self.SINGLE_CONFIG_FILE}' " +
f"and '{self.CONFIG_FILES}' in section '{conf_sec}'")
if refs is not None:
for ref in refs:
if ref not in config.sections:
self._raise(
f"Reference '{ref}' in section '{conf_sec}' not " +
f"found, got: {set(config.sections)}")
def _create_config(self, section: str,
params: Dict[str, Any]) -> Configurable:
"""Create a config from a section."""
return ConfigurableFactory.from_section(params, section)
def _create_configs(self, section: str, params: Dict[str, Any],
bs_config: _BootstrapConfig) -> List[Configurable]:
"""Create one or more :class:`~zensols.config.Configuration` instance depending
on if one or more configuration files are given. Configurations are
created with using a :class:`~zensols.config.ConfigurationFactory` in
:meth:`_create_config`. This method is called once to create all
configuration files for obj:`CONFIG_FILES` and again for each section
for :obj:`SECTIONS_SECTION`.
:param section: the import ini section to load
:param params: the section options/properties
:param bs_config: the bootstrap loader created in
:meth:`_get_bootstrap_config`
"""
configs: List[Configurable] = []
conf_files: List[str] = params.get(self.CONFIG_FILES)
if logger.isEnabledFor(logging.INFO):
logger.info(f'creating configs from section: [{section}]')
if conf_files is None:
try:
# create a configuration from the section as a section load
configs.append(self._create_config(section, params))
except ClassImporterError as e:
raise ConfigurableError(
f"Could not import section '{section}': {e}") from e
else:
# otherwise, synthesize a section load for each given config file
sparams = dict(params)
del sparams[self.CONFIG_FILES]
try:
for cf in conf_files:
parsed_cf = self.serializer.parse_object(cf)
                    # skip Nones substituted by interpolation (like when
                    # ConfigurationImporter substitutes a missing config file)
if parsed_cf is not None:
sparams[self.SINGLE_CONFIG_FILE] = parsed_cf
conf = self._create_config(section, sparams)
configs.append(conf)
except ClassImporterError as e:
raise ConfigurableError(
f"Could not import '{cf}' in section '{section}': {e}") \
from e
# add configurations as children to the bootstrap config
for config in configs:
# recursively create new import ini configs and add the children
# we've created thus far for forward interpolation capability
if isinstance(config, (ImportIniConfig, ImportYamlConfig)):
if logger.isEnabledFor(logging.INFO):
logger.info(f'descending: {config}')
if logger.isEnabledFor(logging.INFO):
logger.info(f'adding bootstrap {bs_config.children} + ' +
f'self {self.children} to {config}')
                # add bootstrap config children that aren't duplicates of
                # children created with this instance
ids: Set[int] = set(map(lambda c: id(c), bs_config.children))
new_children = list(bs_config.children)
new_children.extend(
tuple(filter(lambda c: id(c) not in ids, self.children)))
config.children = tuple(new_children)
# add the configurable to the bootstrap config
bs_config.append_child(config)
return configs
def _get_children(self) -> Tuple[List[str], Iterable[Configurable]]:
""""Get children used for this config instance. This is done by import each
import section and files by delayed loaded for each.
Order is important as each configuration can refer to previously loaded
configurations. For this reason, the :class:`_ConfigLoader` is needed
to defer loading: one for loading sections, and one for loading file.
"""
# guard on OS level config file since the super class allows different
# types such as directory; we only deal with files in this class
if isinstance(self.config_file, Path) and \
not self.config_file.is_file():
raise ConfigurableFileNotFoundError(self.config_file)
# create the bootstrap config used to start the import process
bs_config: _BootstrapConfig = self._get_bootstrap_config()
conf_sec: str = self.config_section
conf_secs: Set[str] = {conf_sec}
if logger.isEnabledFor(logging.INFO):
logger.info(f'parsing section: {conf_sec}')
# look for bad configuration in the import section
self._validate_bootstrap_config(bs_config)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating children for: {conf_sec}')
# first load files given in the import section
if bs_config.has_option(self.SINGLE_CONFIG_FILE, conf_sec):
fname: Union[Path, str] = self.serializer.parse_object(
bs_config.get_option(self.SINGLE_CONFIG_FILE, conf_sec))
params = {self.SINGLE_CONFIG_FILE: fname}
self._create_configs('<no section>', params, bs_config)
elif bs_config.has_option(self.CONFIG_FILES, conf_sec):
sec = bs_config.populate(section=conf_sec)
fnames: List[str] = self.serializer.parse_object(
bs_config.get_option(self.CONFIG_FILES, conf_sec))
for fname in fnames:
# enable resource descriptors
fname: Any = self.serializer.parse_object(fname)
params = {self.SINGLE_CONFIG_FILE: fname}
self._create_configs('<no section>', params, bs_config)
# load each import section, again in order
if bs_config.has_option(self.SECTIONS_SECTION, conf_sec):
secs: List[Union[Path, str]] = self.serializer.parse_object(
bs_config.get_option(self.SECTIONS_SECTION, conf_sec))
for sec in secs:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
f"populating section '{sec}', {bs_config.children}")
conf_secs.add(sec)
params = bs_config.populate({}, section=sec)
self._create_configs(sec, params, bs_config)
# allow the user to remove more sections after import
if bs_config.has_option(self.CLEANUPS_NAME, conf_sec):
cleanups: Sequence[str] = self.serializer.parse_object(
bs_config.get_option(self.CLEANUPS_NAME, conf_sec))
conf_secs.update(cleanups)
return conf_secs, bs_config.children
def _load_imports(self, parser: ConfigParser):
if logger.isEnabledFor(logging.INFO):
logger.info(f'importing {self._get_container_desc()}, ' +
f'children={self.children}')
csecs, children = self._get_children()
overwrites: Set = set()
# copy each configuration added to the bootstrap loader in the order we
# added them.
c: Configurable
for c in children:
if logger.isEnabledFor(logging.INFO):
logger.info(f'loading configuration {c} -> {self}')
par_secs: List[str] = parser.sections()
sec: str
# copy every section from the child to target our new parser
for sec in c.sections:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'importing section {c}:[{sec}]')
if sec not in par_secs:
parser.add_section(sec)
# assume everything is resolvable as this is the last step in
# the loading of this instance
try:
opts = c.get_options(sec)
except InterpolationMissingOptionError as e:
msg = f'Could not populate {c}:[{sec}]: {e}'
self._raise(msg, e)
for k, v in opts.items():
key = f'{sec}:{k}'
has = parser.has_option(sec, k)
fv = self._format_option(k, v, sec)
                    # overwrite the option/property when not yet set or it has
                    # already been overwritten by a previous child; however,
                    # don't set it when it is new per this instance's import
                    # iteration
if not has or key in overwrites:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'overwriting {sec}:{k}: {v} -> {fv}')
parser.set(sec, k, fv)
overwrites.add(key)
if logger.isEnabledFor(logging.INFO):
logger.info(f'imported {len(children)} children to {self}')
if self.exclude_config_sections:
self._config_sections = csecs
def _create_and_load_parser(self, parser: ConfigParser):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('creating and loading parser')
super()._create_and_load_parser(parser)
self._load_imports(parser)
if hasattr(self, '_config_sections'):
for sec in self._config_sections:
parser.remove_section(sec)
del self._config_sections
del self.children
        return parser
__author__ = 'Paul Landes'
from typing import Union, Dict, Any
from dataclasses import dataclass
import logging
from pathlib import Path
from io import TextIOBase
import json
from zensols.persist import persisted, PersistedWork
from . import (
ConfigurableError, ConfigurableFileNotFoundError, DictionaryConfig
)
logger = logging.getLogger(__name__)
@dataclass
class JsonConfig(DictionaryConfig):
"""A configurator that reads JSON as a two level dictionary. The top level
keys are the section and the values are a single depth dictionary with
string keys and values.
    A caveat is when all the values are terminal, in which case the top level
    singleton section is the ``default_section`` given in the initializer and
    the section content is the single dictionary.
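    For example, a hypothetical ``app.json`` containing::

        {"default": {"temporary_dir": "/tmp"},
         "app": {"port": "8080"}}

    yields sections ``default`` and ``app``::

        config = JsonConfig('app.json')
        port = config.get_option('port', section='app')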
"""
def __init__(self, config_file: Union[Path, TextIOBase],
default_section: str = None, deep: bool = False):
"""Initialize.
:param config_file: the configuration file path to read from; if the
type is an instance of :class:`io.TextIOBase`, then
read it as a file object
        :param default_section: used as the default section when none is given
                                on the get methods such as :meth:`get_option`
"""
if isinstance(config_file, str):
self.config_file = Path(config_file).expanduser()
else:
self.config_file = config_file
self._parsed_config = PersistedWork('_parsed_config', self)
super().__init__(config=None,
default_section=default_section,
deep=deep)
def _narrow_root(self, conf: Dict[str, Any]) -> Dict[str, str]:
if not isinstance(conf, dict):
raise ConfigurableError(
f'Expecting a root level dict: {self.config_file}')
return conf
@persisted('_parsed_config')
def _get_config(self) -> Dict[str, Dict[str, Any]]:
if hasattr(self, '_ext_config'):
return self._ext_config
if logger.isEnabledFor(logging.INFO):
logger.info(f'loading config: {self.config_file}')
if isinstance(self.config_file, TextIOBase):
conf = json.load(self.config_file)
self.config_file.seek(0)
else:
if not self.config_file.is_file():
raise ConfigurableFileNotFoundError(self.config_file)
with open(self.config_file) as f:
conf = json.load(f)
conf = self._narrow_root(conf)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'raw json: {conf}')
has_terminals = True
for k, v in conf.items():
if isinstance(v, dict):
has_terminals = False
break
if has_terminals:
conf = {self.default_section: conf}
return conf
def _set_config(self, source: Dict[str, Any]):
self._ext_config = source
self._parsed_config.clear()
        self.invalidate()
__author__ = 'Paul Landes'
from dataclasses import dataclass, field
import logging
import os
from frozendict import frozendict
from . import DictionaryConfig
logger = logging.getLogger(__name__)
@dataclass
class Keychain(object):
"""A wrapper to macOS's Keychain service using binary ``/usr/bin/security``.
This provides a cleartext password for the given service and account.
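    A minimal usage sketch (assumes macOS and that a matching entry exists in
    Keychain.app)::

        password = Keychain.getpassword('user@example.com', 'python-passwords')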
"""
account: str = field()
"""The account, which is usually an email address."""
service: str = field(default='python-passwords')
"""the service (grouping in Keychain.app)"""
@staticmethod
def getpassword(account: str, service: str) -> str:
"""Get the password for the account and service (see class docs).
"""
cmd = ('/usr/bin/security find-generic-password ' +
f'-w -s {service} -a {account}')
with os.popen(cmd) as p:
s = p.read().strip()
return s
@property
def password(self):
"""Get the password for the account and service provided as member variables
(see class docs).
"""
logger.debug(f'getting password for service={self.service}, ' +
f'account={self.account}')
return self.getpassword(self.account, self.service)
class KeychainConfig(DictionaryConfig):
"""A configuration that adds a user and password based on a macOS Keychain.app
entry. The account (user name) and service (a grouping in Keychain.app) is
provided and the password is fetched.
Example::
[import]
sections = list: keychain_imp
[keychain_imp]
type = keychain
account = my-user-name
default_section = login
"""
def __init__(self, account: str, user: str = None,
service: str = 'python-passwords',
default_section: str = 'keychain'):
"""Initialize.
:param account: the account (usually an email address) used to fetch in
Keychain.app
        :param user: the name of the user to use in the generated entry, which
                     defaults to ``account``
        :param service: the service (grouping in Keychain.app)
        :param default_section: used as the default section when none is given
                                on the get methods such as :meth:`get_option`
"""
super().__init__(default_section=default_section)
keychain = Keychain(account, service)
conf = {self.default_section:
{'user': account if user is None else user,
'password': keychain.password}}
        self._dict_config = frozendict(conf)
__author__ = 'Paul Landes'
from typing import Dict, Any, Iterable, Tuple, Optional
from dataclasses import dataclass, fields, asdict, field
import dataclasses
import sys
import logging
from collections import OrderedDict
from itertools import chain
import inspect
import json
import yaml
from io import TextIOBase, StringIO
from zensols.introspect import ClassResolver
from . import ConfigurationError, Writable
logger = logging.getLogger(__name__)
@dataclass
class Dictable(Writable):
"""A class that that generates a dictionary recursively from data classes and
primitive data structures.
To override the default behavior of creating a dict from a
:class:`dataclass`, override the :meth:`_from_dictable` method.
In addition to the fields from the dataclass, if the attribute
``_DICTABLE_ATTRIBUTES`` is set, those are added as well (see
:meth:`_get_dictable_attributes`).
    See :meth:`write` for how a dictable writes itself as a subclass of
:class:`.Writable` and usage of class attributes
``_DICTABLE_WRITABLE_DESCENDANTS`` and ``_DICTABLE_WRITE_EXCLUDES``.
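    A minimal usage sketch (``Person`` is a hypothetical dataclass)::

        @dataclass
        class Person(Dictable):
            name: str
            age: int

        Person('Bea', 21).asjson()  # -> '{"name": "Bea", "age": 21}'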
.. document private functions
.. automethod:: _get_dictable_attributes
.. automethod:: _from_dictable
.. automethod:: _write_descendants
.. automethod:: _writable_dict
:see: :meth:`write`
"""
def _get_dictable_attributes(self) -> Iterable[Tuple[str, str]]:
"""Return human readable and attribute names.
:return: tuples of (<human readable name>, <attribute name>)
"""
attrs = map(lambda f: (f.name, f.name),
filter(lambda f: f.repr, fields(self)))
if hasattr(self, '_DICTABLE_ATTRIBUTES'):
add_attrs = getattr(self, '_DICTABLE_ATTRIBUTES')
attrs = chain.from_iterable(
[attrs, map(lambda a: (a, a), add_attrs)])
return attrs
def _split_str_to_attributes(self, attrs: str) -> \
Iterable[Tuple[str, str]]:
return map(lambda s: (s, s), attrs.split())
def _add_class_name_param(self, class_name_param: str,
dct: Dict[str, Any]):
if class_name_param is not None:
cls = self.__class__
dct[class_name_param] = ClassResolver.full_classname(cls)
def _from_dictable(self, recurse: bool, readable: bool,
class_name_param: str = None) -> Dict[str, Any]:
"""A subclass can override this method to give create a custom specific
dictionary to be returned from the :meth:`asjson` client access method.
:param recurse: if ``True``, recursively create dictionary so some
values might be dictionaries themselves
:param readable: use human readable and attribute keys when available
:param class_name_param: if set, add a ``class_name_param`` key with
the class's fully qualified name (includes
module name)
:return: a JSON'able tree of dictionaries with primitive data
:see: :meth:`asjson`
:see: :meth:`asdict`
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'from dictable: {type(self)}')
dct = OrderedDict()
self._add_class_name_param(class_name_param, dct)
for readable_name, name in self._get_dictable_attributes():
v = getattr(self, name)
if readable:
k = readable_name
else:
k = name
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'dict: <{k}> -> {type(v)}')
dct[k] = self._from_object(v, recurse, readable)
return dct
def _from_dict(self, obj: dict, recurse: bool, readable: bool) -> \
Dict[str, Any]:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'from dict: {type(obj)}')
dct = {}
for k, v in obj.items():
dct[str(k)] = self._from_object(v, recurse, readable)
return dct
def _from_dataclass(self, obj: Any, recurse: bool, readable: bool) -> \
Dict[str, Any]:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'from dataclass: {type(obj)}')
if not dataclasses.is_dataclass(obj):
raise ConfigurationError(f'Not a dataclass: {obj.__class__}')
return self._from_dict(asdict(obj), recurse, readable)
def _format_dictable(self, obj: Any) -> Optional[str]:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'format dictable: {type(obj)}')
v = None
if hasattr(self, '_DICTABLE_FORMATS'):
fmt_str = self._DICTABLE_FORMATS.get(type(obj))
if fmt_str is not None:
v = fmt_str.format(obj)
return v
def _format(self, obj: Any) -> str:
v = None
if obj is not None:
if isinstance(obj, _DICTABLE_CLASS):
v = obj._format_dictable(obj)
else:
v = self._format_dictable(obj)
if v is None:
if isinstance(obj, (int, float, bool, str)):
v = obj
else:
v = str(obj)
return v
def _from_object(self, obj: Any, recurse: bool, readable: bool) -> Any:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'from object: {type(obj)}')
if recurse:
if inspect.isclass(obj):
                # leave classes intact and use JSONEncoder to convert in
                # ``asjson`` if necessary
ret = obj
elif isinstance(obj, _DICTABLE_CLASS):
ret = obj._from_dictable(recurse, readable)
elif dataclasses.is_dataclass(obj):
ret = self._from_dataclass(obj, recurse, readable)
elif isinstance(obj, dict):
ret = self._from_dict(obj, recurse, readable)
elif isinstance(obj, (tuple, list, set)):
ret = map(lambda o: self._from_object(o, recurse, readable),
obj)
ret = list(ret)
else:
ret = self._format(obj)
return ret
def asdict(self, recurse: bool = True, readable: bool = True,
class_name_param: str = None) -> Dict[str, Any]:
"""Return the content of the object as a dictionary.
:param recurse: if ``True``, recursively create dictionary so some
values might be dictionaries themselves
:param readable: use human readable and attribute keys when available
:param class_name_param: if set, add a ``class_name_param`` key with
the class's fully qualified name (includes
module name)
:return: a JSON'able tree of dictionaries with primitive data
:see: :meth:`asjson`
:see: :meth:`_from_dictable`
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'asdict: {type(self)}')
return self._from_dictable(recurse, readable, class_name_param)
def asflatdict(self, *args, **kwargs) -> Dict[str, Any]:
"""Like :meth:`asdict` but flatten in to a data structure suitable for
writing to JSON or YAML.
"""
dct: Dict[str, Any] = self.asdict(*args, **kwargs)
io = StringIO()
json.dump(dct, io)
io.seek(0)
return json.load(io)
def asjson(self, writer: TextIOBase = None,
recurse: bool = True, readable: bool = True, **kwargs) -> str:
"""Return a JSON string representing the data in this instance.
"""
dct: Dict[str, Any] = self.asdict(recurse=recurse, readable=readable)
if writer is None:
return json.dumps(dct, **kwargs)
else:
return json.dump(dct, writer, **kwargs)
def asyaml(self, writer: TextIOBase = None,
recurse: bool = True, readable: bool = True, **kwargs) -> str:
"""Return a YAML string representing the data in this instance.
"""
dct: Dict[str, Any] = self.asflatdict(
recurse=recurse, readable=readable)
if writer is None:
writer = StringIO()
yaml.dump(dct, writer, **kwargs)
return writer.getvalue()
else:
return yaml.dump(dct, writer, **kwargs)
def _get_description(self, include_type: bool = False) -> str:
def fmap(desc: str, name: str) -> str:
v = getattr(self, name)
if isinstance(v, str):
v = "'" + v + "'"
else:
v = self._format(v)
return f'{desc}={v}'
v = ', '.join(map(lambda x: fmap(*x), self._get_dictable_attributes()))
if include_type:
v = f'{type(self).__name__}({v})'
return v
def _write_descendants(self, depth: int = 0,
writer: TextIOBase = sys.stdout):
"""Write this instance by using the :meth:`write` method on children instead of
writing the generated dictionary.
"""
for readable_name, name in self._get_dictable_attributes():
v = getattr(self, name)
if self._is_container(v):
self._write_line(f'{readable_name}:', depth, writer)
self._write_object(v, depth + 1, writer)
elif dataclasses.is_dataclass(v):
self._write_line(f'{readable_name}:', depth, writer)
self._write_dict(self._from_dataclass(v, True, True),
depth + 1, writer)
else:
self._write_key_value(readable_name, v, depth, writer)
def _writable_dict(self) -> Dict[str, Any]:
"""Return a :class:`dict` that contains what will be output when :meth:`write`
is called.
"""
dct = self._from_dictable(True, True)
if hasattr(self, '_DICTABLE_WRITE_EXCLUDES'):
for attr in getattr(self, '_DICTABLE_WRITE_EXCLUDES'):
del dct[attr]
return dct
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
"""Write this instance as either a :class:`Writable` or as a :class:`Dictable`.
If class attribute ``_DICTABLE_WRITABLE_DESCENDANTS`` is set as
``True``, then use the :meth:`write` method on children instead of
writing the generated dictionary. Otherwise, write this instance by
first creating a ``dict`` recursively using :meth:`asdict`, then
formatting the output.
If the attribute ``_DICTABLE_WRITE_EXCLUDES`` is set, those attributes
are removed from what is written in the :meth:`write` method.
Note that this attribute will need to be set in all descendants in the
instance hierarchy since writing the object instance graph is done
recursively.
:param depth: the starting indentation depth
:param writer: the writer to dump the content of this writable
"""
name = '_DICTABLE_WRITABLE_DESCENDANTS'
if hasattr(self, name) and (getattr(self, name) is True):
self._write_descendants(depth, writer)
else:
self._write_dict(self._writable_dict(), depth, writer)
def _write_key_value(self, k: Any, v: Any, depth: int, writer: TextIOBase):
sp = self._sp(depth)
v = self._format(v)
writer.write(f'{sp}{k}: {v}\n')
def __str__(self) -> str:
return self._get_description(True)
_DICTABLE_CLASS = Dictable
@dataclass
class DefaultDictable(Dictable):
"""A convenience utility class that provides access to methods such as
:meth:`write` and :meth:`asjson` without needing inheritance.
"""
data: Dict[str, Any] = field()
"""The data used to be JSON or written."""
def _from_dictable(self, recurse: bool, readable: bool,
class_name_param: str = None) -> Dict[str, Any]:
        return self.data
from __future__ import annotations
__author__ = 'Paul Landes'
import typing
from typing import (
    Tuple, List, Dict, Optional, Union, Any, Type, Iterable, Callable,
    ClassVar, Set
)
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass, field
import dataclasses
import logging
import types
import re
from frozendict import frozendict
from zensols.introspect import ClassResolver, ClassImporter
from zensols.persist import persisted, PersistedWork, Deallocatable
from . import (
Settings, Dictable, FactoryError, ImportClassResolver, ConfigFactory
)
logger = logging.getLogger(__name__)
class RedefinedInjectionError(FactoryError):
"""Raised when any attempt to redefine or reuse injections for a class.
"""
pass
@dataclass
class ModulePrototype(Dictable):
"""Contains the prototype information necessary to create an object instance
using :class:`.ImportConfigFactoryModule.
"""
_DICTABLE_ATTRIBUTES: ClassVar[Set[str]] = {'params', 'config'}
_CHILD_PARAM_DIRECTIVES: ClassVar[Set[str]] = frozenset(
'param reload type share'.split())
"""The set of allowed directives (i.e. ``instance``) entries parsed by
:meth:`_parse`.
"""
factory: ImportConfigFactory = field()
"""The factory that created this prototype."""
name: str = field()
"""The name of the instance to create, which is usually the application
config section name.
"""
config_str: str = field()
"""The string parsed from the parethesis in the prototype string."""
@persisted('_parse_pw', allocation_track=False)
def _parse(self) -> Tuple[Dict[str, Any], Dict[str, Any]]:
conf: str = self.config_str
instance_params: Dict[str, Any] = {}
inst_conf: Dict[str, Any] = None
reload: bool = False
try:
if conf is not None:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'parsing param config: {conf}')
inst_conf = eval(conf)
unknown: Set[str] = set(inst_conf.keys()) - \
self._CHILD_PARAM_DIRECTIVES
if len(unknown) > 0:
raise FactoryError(f'Unknown directive(s): {unknown}',
self.factory)
if 'param' in inst_conf:
cparams = inst_conf['param']
cparams = self.factory.config.serializer.populate_state(
cparams, {})
instance_params.update(cparams)
if 'reload' in inst_conf:
reload = inst_conf['reload']
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'setting reload: {reload}')
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'applying param config: {inst_conf}')
finally:
self.factory._set_reload(reload)
return instance_params, inst_conf
@property
def params(self) -> Dict[str, Any]:
return self._parse()[0]
@property
def config(self) -> Any:
return self._parse()[1]
class ImportConfigFactory(ConfigFactory, Deallocatable):
"""Import a class by the fully qualified class name (includes the module).
This is a convenience class for setting the parent class ``class_resolver``
parameter.
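    A minimal usage sketch (the file and section names are hypothetical)::

        config = ImportIniConfig('app.conf')
        factory = ImportConfigFactory(config)
        parser = factory.instance('doc_parser')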
"""
    _MODULES: ClassVar[List[Type[ImportConfigFactoryModule]]] = []
_MODULE_REGEXP: ClassVar[str] = r'(?:\((.+)\))?:\s*(.+)'
"""The ``instance`` regular expression used to identify children attributes to
    set on the object. The process of creation can chain from parent to
children recursively.
"""
_INJECTS: ClassVar[Dict[str, str]] = {}
"""Track injections to fail on any attempts to redefine."""
def __init__(self, *args, reload: Optional[bool] = False,
shared: Optional[bool] = True,
reload_pattern: Optional[Union[re.Pattern, str]] = None,
**kwargs):
"""Initialize the configuration factory.
:param reload: whether or not to reload the module when resolving the
class, which is useful for debugging in a REPL
:param shared: when ``True`` instances are shared and only created
once across sections for the life of this
``ImportConfigFactory`` instance
:param reload_pattern: if set, reload classes that have a fully
                               qualified name that matches the regular
                               expression regardless of the setting ``reload``
:param kwargs: the key word arguments given to the super class
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating import config factory, reload: {reload}')
super().__init__(*args, **kwargs, class_resolver=ImportClassResolver())
self._set_reload(reload)
if shared:
self._shared = {}
else:
self._shared = None
self.shared = shared
if isinstance(reload_pattern, str):
self.reload_pattern = re.compile(reload_pattern)
else:
self.reload_pattern = reload_pattern
self._init_modules()
@classmethod
def register_module(cls: Type, mod: ImportConfigFactoryModule):
if cls not in cls._MODULES:
cls._MODULES.append(mod)
def _init_modules(self):
mod_type: Type[ImportConfigFactoryModule]
modules: Tuple[ImportConfigFactoryModule] = tuple(
map(lambda t: t(self), self._MODULES))
mod_names: str = '|'.join(map(lambda m: m.name, modules))
self._module_regexes: re.Pattern = re.compile(
'^(' + mod_names + ')' + self._MODULE_REGEXP + '$', re.DOTALL)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'mod regex: {self._module_regexes}')
self._modules: Dict[str, ImportConfigFactoryModule] = {
m.name: m for m in modules}
def __getstate__(self):
state = dict(self.__dict__)
state['_shared'] = None if self._shared is None else {}
del state['class_resolver']
del state['_modules']
del state['_module_regexes']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.class_resolver = ImportClassResolver()
self._init_modules()
def clear(self):
"""Clear any shared instances.
"""
if self._shared is not None:
self._shared.clear()
def clear_instance(self, name: str) -> Any:
"""Remove a shared (cached) object instance.
:param name: the section name of the instance to evict and the same
string used to create with :meth:`instance` or
:meth:`new_instance`
:return: the instance that was removed (if present), otherwise ``None``
"""
if self._shared is not None:
return self._shared.pop(name, None)
def clone(self) -> Any:
"""Return a copy of this configuration factory that functionally works the
same. However, it does not copy over any resources generated during
the life of the factory.
"""
clone = super().clone()
clone.clear()
return clone
def deallocate(self):
super().deallocate()
if self._shared is not None:
for v in self._shared.values():
if isinstance(v, Deallocatable):
v.deallocate()
self._shared.clear()
def instance(self, name: Optional[str] = None, *args, **kwargs):
if self._shared is None:
inst = super().instance(name, *args, **kwargs)
else:
inst = self._shared.get(name)
if inst is None:
inst = super().instance(name, *args, **kwargs)
self._shared[name] = inst
return inst
def new_instance(self, name: str = None, *args, **kwargs):
"""Create a new instance without it being shared. This is done by
evicting the existing instance from the shared cache when it is created
next time the contained instances are shared.
:param name: the name of the class (by default) or the key name of the
class used to find the class
:param args: given to the ``__init__`` method
:param kwargs: given to the ``__init__`` method
:see: :meth:`instance`
:see: :meth:`new_deep_instance`
"""
inst = self.instance(name, *args, **kwargs)
self.clear_instance(name)
return inst
def new_deep_instance(self, name: str = None, *args, **kwargs):
"""Like :meth:`new_instance` but copy all recursive instances as new objects as
well.
"""
prev_shared = self._shared
self._shared = None
try:
inst = self.instance(name, *args, **kwargs)
finally:
self._shared = prev_shared
return inst
def _set_reload(self, reload: bool):
self.reload = reload
self.class_resolver.reload = reload
def _attach_persistent(self, inst: Any, name: str, kwargs: Dict[str, str]):
persist = persisted(**kwargs)
new_meth = persist(lambda self: getattr(inst, name))
new_meth = types.MethodType(new_meth, inst)
setattr(inst, name, new_meth)
def from_config_string(self, v: str) -> Any:
"""Create an instance from a string used as option values in the
configuration.
"""
m: re.Match = self._module_regexes.match(v)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'match: <{v}> -> {m}')
if m is not None:
name, config, section = m.groups()
mod: ImportConfigFactoryModule = self._modules.get(name)
if mod is not None:
mod_inst = ModulePrototype(self, section, config)
v = mod.instance(mod_inst)
return v
def _class_name_params(self, name: str) -> Tuple[str, Dict[str, Any]]:
class_name: str
params: Dict[str, Any]
class_name, params = super()._class_name_params(name)
insts = {}
initial_reload = self.reload
try:
for k, v in params.items():
if isinstance(v, str):
insts[k] = self.from_config_string(v)
finally:
self._set_reload(initial_reload)
params.update(insts)
return class_name, params
def _instance(self, sec_name: str, cls: Type, *args, **kwargs):
reset_props = False
class_name = ClassResolver.full_classname(cls)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'import instance: section name: {sec_name}, ' +
f'cls={class_name}, args={args}, kwargs={kwargs}')
pw_injects = self._process_injects(sec_name, kwargs)
prev_defined_sec = self._INJECTS.get(class_name)
if prev_defined_sec is not None and prev_defined_sec != sec_name:
# fail when redefining injections, and thus class metadata,
# configuration
msg = ('Attempt redefine or reuse injection for class ' +
f'{class_name} in section {sec_name} previously ' +
f'defined in section {prev_defined_sec}')
raise RedefinedInjectionError(msg, self)
if len(pw_injects) > 0 and class_name not in self._INJECTS:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'sec assign {sec_name} = {class_name}')
self._INJECTS[class_name] = sec_name
initial_reload = self.reload
reload = self.reload
if self.reload_pattern is not None:
m = self.reload_pattern.match(class_name)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'class {class_name} matches reload pattern ' +
f'{self.reload_pattern}: {m}')
reload = m is not None
try:
self._set_reload(reload)
if reload:
# we still have to reload at the top level (root in the
# instance graph)
cresolver: ClassResolver = self.class_resolver
class_importer = cresolver.create_class_importer(class_name)
inst = class_importer.instance(*args, **kwargs)
reset_props = True
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'base call instance: {sec_name}')
inst = super()._instance(sec_name, cls, *args, **kwargs)
finally:
self._set_reload(initial_reload)
self._add_injects(inst, pw_injects, reset_props)
mod: ImportConfigFactoryModule
for mod in self._modules.values():
inst = mod.post_populate(inst)
return inst
def _process_injects(self, sec_name, kwargs):
pname = 'injects'
pw_param_set = kwargs.get(pname)
props = []
if pw_param_set is not None:
del kwargs[pname]
for params in eval(pw_param_set):
params = dict(params)
prop_name = params['name']
del params['name']
pw_name = f'_{prop_name}_pw'
params['path'] = pw_name
if prop_name not in kwargs:
raise FactoryError(f"No property '{prop_name}' found '" +
f"in section '{sec_name}'", self)
params['initial_value'] = kwargs[prop_name]
# don't delete the key here so that the type can be defined for
# dataclasses, effectively as documentation
#
# del kwargs[prop_name]
props.append((pw_name, prop_name, params))
return props
def _add_injects(self, inst: Any, pw_injects, reset_props: bool):
cls: Type = inst.__class__
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'adding injects: {len(pw_injects)}')
for pw_name, prop_name, inject in pw_injects:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'inject: {pw_name}, {prop_name}, {inject}')
init_val = inject.pop('initial_value')
pw = PersistedWork(owner=inst, **inject)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'set: {pw.is_set()}: {pw}')
if not pw.is_set():
pw.set(init_val)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'setting member {pw_name}={pw} on {cls}')
setattr(inst, pw_name, pw)
if reset_props or not hasattr(cls, prop_name):
logger.debug(f'setting property {prop_name}={pw_name}')
getter = eval(f"lambda s: getattr(s, '{pw_name}')()")
setter = eval(f"lambda s, v: hasattr(s, '{pw_name}') " +
f"and getattr(s, '{pw_name}').set(v)")
prop = property(getter, setter)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'set property: {prop}')
setattr(cls, prop_name, prop)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'create instance {cls}')
@dataclass
class ImportConfigFactoryModule(metaclass=ABCMeta):
"""A module used by :class:`.ImportConfigFactory` to create instances using
    a specially formatted string (i.e. ``instance:``). Subclasses implement the
object creation based on the formatting of the string.
"""
_EMPTY_CHILD_PARAMS: ClassVar[Dict[str, Any]] = frozendict()
"""Constant used to create object instances with initializers that have no
parameters.
"""
factory: ImportConfigFactory = field()
"""The parent/owning configuration factory instance."""
@abstractmethod
def _instance(self, proto: ModulePrototype) -> Any:
pass
def post_populate(self, inst: Any) -> Any:
"""Called to populate or replace the created instance after being
generated by :class:`.ImportConfigFactory`.
"""
return inst
@property
def name(self) -> str:
"""The name of the module and prefix used in the instance formatted
string.
"""
return self._NAME
def instance(self, proto: ModulePrototype) -> Any:
"""Return a new instance from the a prototype input."""
return self._instance(proto)
def _create_instance(self, section: str, config_params: Dict[str, str],
params: Dict[str, Any]) -> Any:
"""Create the instance using of an object using :obj:`factory`.
:param section: the name of the section in the app config
:param config_params: configuration based parameters to indicate (i.e.
whether to share the instance, create a deep copy
etc)
:param params: the parameters given to the class initializer
"""
fac: ImportConfigFactory = self.factory
secs = fac.config.serializer.parse_object(section)
if isinstance(secs, (tuple, list)):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'list instance: {type(secs)}')
inst = list(map(lambda s: fac.instance(s, **params), secs))
if isinstance(secs, tuple):
inst = tuple(inst)
elif isinstance(secs, dict):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'dict instance: {type(secs)}')
inst = {}
for k, v in secs.items():
v = fac.instance(v, **params)
inst[k] = v
elif isinstance(secs, str):
create_type: str = None
try:
if config_params is not None:
create_type: str = config_params.get('share')
meth: Callable = {
None: fac.instance,
'default': fac.instance,
'evict': fac.new_instance,
'deep': fac.new_deep_instance,
}.get(create_type)
if meth is None:
                    raise FactoryError(f'Unknown create type: {create_type}')
inst = meth(secs, **params)
except Exception as e:
raise FactoryError(
f"Could not create instance from section '{section}'",
fac) from e
else:
raise FactoryError(
f'Unknown instance type {type(secs)}: {secs}', fac)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating instance in section {section} ' +
f'with {params}, config: {config_params}')
return inst
@dataclass
class _InstanceImportConfigFactoryModule(ImportConfigFactoryModule):
"""A module that uses the :obj:`factory` to create the instance from a
section.
The configuration string prototype has the form::
instance[(<parameters>)]: <instance section name>
    Parameters are optional, but when included are used as parameters to the
    new instance's initializer.
instance's initializer.
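    For example (hypothetical sections)::

        [window]
        class_name = app.Window

        [engine]
        class_name = app.Engine
        window = instance: window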
"""
_NAME: ClassVar[str] = 'instance'
def _instance(self, proto: ModulePrototype) -> Any:
return self._create_instance(proto.name, proto.config, proto.params)
ImportConfigFactory.register_module(_InstanceImportConfigFactoryModule)
@dataclass
class _ObjectImportConfigFactoryModule(ImportConfigFactoryModule):
"""A module that creates an instance from a fully qualified class name.
The configuration string prototype has the form::
object[(<parameters>)]: <fully qualified class name>
    Parameters are optional, but when included are used as parameters to the
    new instance's initializer.
instance's initializer.
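    For example (a hypothetical section)::

        [app]
        counter = object: collections.Counter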
"""
_NAME: ClassVar[str] = 'object'
def _instance(self, proto: ModulePrototype) -> Any:
cls: Type = self.factory._find_class(proto.name)
desc = f'object instance {proto.name}'
return ConfigFactory._instance(
self.factory, desc, cls, **proto.params)
ImportConfigFactory.register_module(_ObjectImportConfigFactoryModule)
@dataclass
class _DataClassImportConfigFactoryModule(ImportConfigFactoryModule):
"""A module that creates an instance of a dataclass using the class's
metadata.
The configuration string prototype has the form::
dataclass(<fully qualified class name>): <instance section name>
    This is most useful in YAML for nested, composite dataclass
    configurations.
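    For example (``app.domain.Person`` is a hypothetical dataclass)::

        [adult]
        name = Bea
        age = 21

        [app]
        person = dataclass(app.domain.Person): adult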
"""
_NAME: ClassVar[str] = 'dataclass'
def _dataclass_from_dict(self, cls: Type, data: Any):
if isinstance(data, str):
data = self.factory.from_config_string(data)
if isinstance(data, str):
data = self.factory.config.serializer.parse_object(data)
if dataclasses.is_dataclass(cls) and isinstance(data, dict):
fieldtypes = {f.name: f.type for f in dataclasses.fields(cls)}
try:
param = {f: self._dataclass_from_dict(fieldtypes[f], data[f])
for f in data}
except KeyError as e:
                raise FactoryError(
                    f"No dataclass field {e} in '{cls}', data: {data}")
data = cls(**param)
elif isinstance(data, (tuple, list)):
origin: Type = typing.get_origin(cls)
cls: Type = typing.get_args(cls)
if isinstance(cls, (tuple, list, set)) and len(cls) == 1:
cls = next(iter(cls))
data: Iterable[Any] = map(
lambda x: self._dataclass_from_dict(cls, x), data)
data = origin(data)
return data
def _instance(self, proto: ModulePrototype) -> Any:
class_name: str = proto.config_str
if not ClassImporter.is_valid_class_name(class_name):
raise FactoryError(f'Not a valid class name: {class_name}')
from_dict: Callable = self._dataclass_from_dict
cls: Type = self.factory._find_class(class_name)
ep: Dict[str, Any] = self._EMPTY_CHILD_PARAMS
inst: Settings = self._create_instance(proto.name, ep, ep)
if isinstance(inst, (tuple, list)):
elems = map(lambda x: from_dict(cls, x.asdict()), inst)
inst = inst.__class__(elems)
else:
inst = from_dict(cls, inst.asdict())
return inst
def post_populate(self, inst: Any) -> Any:
if isinstance(inst, Settings) and len(inst) == 1:
inst_dict = inst.asdict()
k = next(iter(inst_dict.keys()))
v = inst_dict[k]
if isinstance(v, dict):
cls: Optional[str] = v.pop(self._NAME, None)
if cls is not None:
cls: Type = self.factory._find_class(cls)
dc: Any = self._dataclass_from_dict(cls, v)
inst_dict[k] = dc
return inst
ImportConfigFactory.register_module(_DataClassImportConfigFactoryModule)
@dataclass
class _CallImportConfigFactoryModule(ImportConfigFactoryModule):
"""A module that calls a method of another instance in the application
context.
The configuration string prototype has the form::
call[(<parameters>)]: <instance section name>
    Parameters may have a ``method`` key with the name of the method. The
    remainder of the parameters are used in the method call.
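    For example (hypothetical sections and method name)::

        [app]
        version = call({'param': {'method': 'get_version'}}): engine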
"""
_NAME: ClassVar[str] = 'call'
def _instance(self, proto: ModulePrototype) -> Any:
cble: Any = self.factory.instance(proto.name)
params: Dict[str, Any] = proto.params
method: Optional[str] = params.pop('method', None)
if method is None:
return cble(**params)
else:
meth = getattr(cble, method)
return meth(**params)
ImportConfigFactory.register_module(_CallImportConfigFactoryModule)
__author__ = 'Paul Landes'
from typing import Dict, Set
import logging
import collections
import os
from zensols.persist import persisted
from . import ConfigurableError, Configurable
logger = logging.getLogger(__name__)
class EnvironmentConfig(Configurable):
"""An implementation configuration class that holds environment variables.
    This config will need to be added to the children of
    :class:`.ImportIniConfig` if used in the configuration or import sections.
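    A minimal usage sketch::

        conf = EnvironmentConfig(includes={'HOME', 'USER'})
        home = conf.get_option('HOME', section='env')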
"""
def __init__(self, section_name: str = 'env', map_delimiter: str = None,
skip_delimiter: bool = False, includes: Set[str] = None):
"""Initialize with a string given as described in the class docs.
The string ``<DOLLAR>`` used with ``map_delimiter`` is the same as
``$`` since adding the dollar in some configuration scenarios has
parsing issues. One example is when ``$$`` failes on copying section
to an :class:`.IniConfig`.
:param section_name: the name of the created section with the
environment variables
:param map_delimiter: when given, all environment values are replaced
with a duplicate; set this to ``$`` when using
:class:`configparser.ExtendedInterpolation` for
environment variables such as ``PS1``
        :param skip_delimiter: if ``True``, skip environment variables whose
                               values contain the delimiter; this is useful
                               for environment variables that cause
                               interpolation errors
:param includes: if given, the set of environment variables to set
excluding the rest; include all if ``None``
"""
super().__init__(section_name)
if map_delimiter == '<DOLLAR>':
map_delimiter = '$'
self.map_delimiter = map_delimiter
self.skip_delimiter = skip_delimiter
self.includes = includes
@persisted('_parsed_config')
def _get_parsed_config(self) -> Dict[str, str]:
"""Parse the configuration string given in the initializer (see class docs).
"""
conf = collections.defaultdict(lambda: {})
for kv in self.config_str.split(self.option_sep):
m = self.KEY_VAL_REGEX.match(kv)
if m is None:
raise ConfigurableError(f'unexpected format: {kv}')
sec, name, value = m.groups()
sec = self.default_section if sec is None else sec
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'section={sec}, name={name}, value={value}')
conf[sec][name] = value
return conf
@persisted('_keys')
def _get_keys(self) -> Dict[str, str]:
return self._get_parsed_config().keys()
@property
@persisted('_sections')
def sections(self) -> Set[str]:
return frozenset([self.default_section])
def has_option(self, name: str, section: str = None) -> bool:
keys = self._get_keys()
return self.default_section == section and name in keys
@persisted('_env_section')
def _get_env_section(self) -> Dict[str, str]:
opts = {}
delim = self.map_delimiter
if delim is not None:
repl = f'{delim}{delim}'
for k, v in os.environ.items():
if ((self.includes is not None) and (k not in self.includes)) or \
(self.skip_delimiter and v.find(delim) >= 0):
continue
if delim is None:
val = v
else:
val = v.replace(delim, repl)
opts[k] = val
return opts
def get_options(self, section: str = None) -> Dict[str, str]:
if section == self.default_section:
opts = self._get_env_section()
else:
opts = {}
        return opts
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Dict, Set, Type, Any
import logging
from collections import OrderedDict
from . import ConfigurableError, Configurable, TreeConfigurable, Dictable
logger = logging.getLogger(__name__)
class DictionaryConfig(TreeConfigurable, Dictable):
"""This is a simple implementation of a dictionary backing configuration. The
provided configuration is just a two level dictionary. The top level keys
are the section and the values are a single depth dictionary with string
keys and values.
You can override :meth:`_get_config` to restructure the dictionary for
application specific use cases. One such example is
:meth:`.JsonConfig._get_config`.
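    A minimal usage sketch::

        config = DictionaryConfig({'app': {'port': '8080'}})
        port = config.get_option('port', section='app')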
.. document private functions
.. automethod:: _get_config
"""
def __init__(self, config: Dict[str, Dict[str, Any]] = None,
default_section: str = None, deep: bool = False):
"""Initialize.
:param config: configures this instance (see class docs)
        :param default_section: used as the default section when none is given
                                on the get methods such as :meth:`get_option`
"""
super().__init__(default_section=default_section)
if config is None:
self._dict_config = {}
else:
self._dict_config = config
self._deep = deep
self.invalidate()
@classmethod
def from_config(cls: Type, source: Configurable,
**kwargs: Dict[str, Any]) -> DictionaryConfig:
"""Create an instance from another configurable.
:param source: contains the source data from which to copy
:param kwargs: initializer arguments for the new instance
:return: a new instance of this class with the data copied from
``source``
"""
secs: Dict[str, Any] = OrderedDict()
params: Dict[str, Any] = dict(kwargs)
if 'default_section' not in params and \
source.default_section is not None:
params['default_section'] = source.default_section
if isinstance(source, DictionaryConfig):
params['deep'] = source.deep
for sec in sorted(source.sections):
svs = OrderedDict()
secs[sec] = svs
source.populate(svs, sec)
        return cls(secs, **params)
def _from_dictable(self, *args, **kwargs) -> Dict[str, Any]:
return self._get_config()
def _get_config(self) -> Dict[str, Any]:
return self._dict_config
def _set_config(self, source: Dict[str, Any]):
self._dict_config = source
self.invalidate()
@property
def options(self) -> Dict[str, Any]:
if self._deep:
return super().options
else:
return Configurable.options.fget(self)
def get_options(self, section: str = None) -> Dict[str, str]:
if self._deep:
return super().get_options(section)
else:
conf = self._get_config()
sec = conf.get(section)
if sec is None:
raise ConfigurableError(f'no section: {section}')
return sec
def get_option(self, name: str, section: str = None) -> str:
if self._deep:
return super().get_option(name, section)
else:
return Configurable.get_option(self, name, section)
def has_option(self, name: str, section: str = None) -> bool:
if self._deep:
return super().has_option(name, section)
else:
conf = self._get_config()
sec = conf.get(section)
if sec is not None:
                return name in sec
return False
@property
def sections(self) -> Set[str]:
"""Return the top level keys of the dictionary as sections (see class
doc).
"""
if self._deep:
return super().sections
else:
return set(self._get_config().keys())
@sections.setter
def sections(self, sections: Set[str]):
raise RuntimeError('Can not set sections')
def set_option(self, name: str, value: str, section: str = None):
section = self.default_section if section is None else section
if section not in self.sections:
dct = {}
self._dict_config[section] = dct
else:
dct = self._dict_config[section]
dct[name] = value
def remove_section(self, section: str):
self._get_config().pop(section)
def __repr__(self):
        return super().__repr__()
__author__ = 'Paul Landes'
from typing import Set, Dict, List, Union
from abc import ABCMeta, abstractmethod
import logging
import os
from io import TextIOBase, StringIO
from pathlib import Path
from copy import deepcopy
from configparser import ConfigParser, ExtendedInterpolation
from ..persist.domain import Primeable
from . import ConfigurableFileNotFoundError, ConfigurableError, Configurable
logger = logging.getLogger(__name__)
class IniConfig(Configurable, Primeable):
"""Application configuration utility. This reads from a configuration and
returns sets or subsets of options.
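    A minimal usage sketch (the file name is hypothetical)::

        config = IniConfig('app.conf')
        opts = config.get_options('default')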
"""
def __init__(self, config_file: Union[Path, TextIOBase] = None,
default_section: str = None, use_interpolation: bool = False):
"""Create with a configuration file path.
:param config_file: the configuration file path to read from; if the
type is an instance of :class:`io.TextIOBase`, then
read it as a file object
:param default_section: default section (defaults to `default`)
:param use_interpolation: if ``True``, interpolate variables using
:class:`~configparser.ExtendedInterpolation`
:param robust: if `True`, then don't raise an error when the
configuration file is missing
"""
super().__init__(default_section)
if isinstance(config_file, str):
self.config_file = Path(config_file).expanduser()
else:
self.config_file = config_file
self.use_interpolation = use_interpolation
self.nascent = deepcopy(self.__dict__)
self._cached_sections = {}
self._raw = False
self._conf = None
def _create_config_parser(self) -> ConfigParser:
"Factory method to create the ConfigParser."
if self.use_interpolation:
parser = ConfigParser(interpolation=ExtendedInterpolation())
else:
parser = ConfigParser()
return parser
def _read_config_content(self, cpath: Path, parser: ConfigParser):
if cpath.is_file():
with open(cpath) as f:
parser.read_file(f)
elif cpath.is_dir():
writer = StringIO()
for fpath in cpath.iterdir():
if fpath.is_file():
with open(fpath) as f:
writer.write(f.read())
writer.write('\n')
writer.seek(0)
parser.read_file(writer)
def _create_and_load_parser_from_file(self, cpath: Path,
parser: ConfigParser):
if logger.isEnabledFor(logging.INFO):
logger.info(f'{self.__class__.__name__}: loading config: {cpath}')
if not cpath.exists():
raise ConfigurableFileNotFoundError(cpath)
elif cpath.is_file() or cpath.is_dir():
self._read_config_content(cpath, parser)
else:
raise ConfigurableError(f'Unknown file type: {cpath}')
return parser
def _create_and_load_parser(self, parser: ConfigParser):
if isinstance(self.config_file, (str, Path)):
self._create_and_load_parser_from_file(self.config_file, parser)
elif isinstance(self.config_file, TextIOBase):
writer = self.config_file
writer.seek(0)
parser.read_file(writer)
writer.seek(0)
elif isinstance(self.config_file, Configurable):
is_ini = isinstance(self.config_file, IniConfig)
src: Configurable = self.config_file
sec: str = None
if is_ini:
self.config_file._raw = True
try:
for sec in src.sections:
parser.add_section(sec)
for k, v in src.get_options(sec).items():
parser.set(sec, k, v)
finally:
if is_ini:
self.config_file._raw = False
elif self.config_file is None:
pass
else:
raise ConfigurableError(
f'Unknown create type: {type(self.config_file)}')
@property
def parser(self) -> ConfigParser:
"""Load the configuration file.
"""
if self._conf is None:
parser: ConfigParser = self._create_config_parser()
self._create_and_load_parser(parser)
self._conf = parser
return self._conf
def reload(self):
self._conf = None
def has_option(self, name: str, section: str = None) -> bool:
section = self.default_section if section is None else section
conf = self.parser
if conf.has_section(section):
return conf.has_option(section, name)
else:
return False
def get_options(self, section: str = None) -> Dict[str, str]:
opts = None
section = self.default_section if section is None else section
conf: ConfigParser = self.parser
if conf is None:
if not self.robust:
                self._raise('No configuration given')
elif conf.has_section(section):
opts = dict(conf.items(section, raw=self._raw))
if opts is None:
self._raise(f"No section: '{section}'")
return opts
def get_option(self, name: str, section: str = None) -> str:
opt = None
section = self.default_section if section is None else section
conf: ConfigParser = self.parser
if conf is None:
if not self.robust:
self._raise('No configuration given')
elif conf.has_option(section, name):
opt = conf.get(section, name, raw=self._raw)
if opt is None:
if not conf.has_section(section):
self._raise(f"No section: '{section}'")
self._raise(f"No option: '{section}:{name}'")
return opt
@property
def sections(self) -> Set[str]:
"""All sections of the INI file.
"""
return frozenset(self.parser.sections() or ())
def _format_option(self, name: str, value: str, section: str) -> str:
try:
value = self.serializer.format_option(value)
except TypeError as e:
raise ConfigurableError(
f'Can not serialize {section}:{name}: {e}') from e
return value
def set_option(self, name: str, value: str, section: str = None):
section = self.default_section if section is None else section
value = self._format_option(name, value, section)
if logger.isEnabledFor(logging.DEBUG):
            logger.debug(f'setting option {section}:{name} = {value}')
if not self.parser.has_section(section):
self.parser.add_section(section)
try:
self.parser.set(section, name, value)
except Exception as e:
raise ConfigurableError(
f'Cannot set {section}:{name} = {value}: {e}') from e
def remove_section(self, section: str):
self.parser.remove_section(section)
def get_raw_str(self) -> str:
""""Return the contents of the configuration parser with no interpolated
values.
"""
sio = StringIO()
self.parser.write(sio)
return sio.getvalue()
def derive_from_resource(self, path: str, copy_sections=()) -> \
Configurable:
"""Derive a new configuration from the resource file name ``path``.
:param path: a resource file (i.e. ``resources/app.conf``)
:param copy_sections: a list of sections to copy from this to the
derived configuration
"""
kwargs = deepcopy(self.nascent)
kwargs['config_file'] = path
conf = self.__class__(**kwargs)
self.copy_sections(conf, copy_sections)
return conf
def prime(self):
self.parser
def _get_container_desc(self, include_type: bool = True,
max_path_len: int = 3) -> str:
mod = ''
if isinstance(self.config_file, (str, Path)):
parts = self.config_file.parts
path = Path(*parts[max(0, len(parts)-max_path_len):])
tpe = 'f=' if include_type else ''
mod = f'{tpe}{path}'
elif isinstance(self.config_file, Configurable):
tpe = 'c=' if include_type else ''
mod = f'{tpe}[{self.config_file}]'
return mod
def _get_section_short_str(self):
if self._conf is None:
# getting sections invokes parsing, which causes issues if used in
# a debugging statement when we're not yet ready to parse
return ''
secs = tuple(self.parser.sections())
if len(secs) > 0:
return secs[0]
return ''
def _get_short_str(self) -> str:
sec = self._get_section_short_str()
return f'{self.__class__.__name__}({self._get_container_desc()}){{{sec}}}'
class rawconfig(object):
"""Treat all option fetching on ``config`` as raw, or without interpolation.
This is usually used when ``config`` is the target of section copying with
    :meth:`.Configurable.copy_sections`.
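
    For example (``src`` and ``dst`` are hypothetical configurables)::

        with rawconfig(src):
            src.copy_sections(dst)
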
"""
def __init__(self, config: Configurable):
self.config = config if isinstance(config, IniConfig) else None
def __enter__(self):
if self.config is not None:
self.config._raw = True
def __exit__(self, type, value, traceback):
if self.config is not None:
self.config._raw = False
class ExtendedInterpolationConfig(IniConfig):
"""Configuration class extends using advanced interpolation with
:class:`~configparser.ExtendedInterpolation`.
"""
def __init__(self, *args, **kwargs):
kwargs['use_interpolation'] = True
super().__init__(*args, **kwargs)
class ExtendedInterpolationEnvConfig(ExtendedInterpolationConfig):
"""An :class:`.IniConfig` implementation that creates a section called ``env``
    populated from the passed ``env`` dict or, by default, the process
    environment variables.
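
    Configuration example (a sketch; assumes a ``HOME`` environment
    variable)::

        [paths]
        data_dir = ${env:HOME}/data
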
"""
def __init__(self, *args, remove_vars: List[str] = None,
env: dict = None, env_sec: str = 'env', **kwargs):
self.remove_vars = remove_vars
if env is None:
env = {}
for k, v in os.environ.items():
env[k] = v.replace('$', '$$')
self.env = env
else:
self.env = env
self.env_sec = env_sec
super().__init__(*args, **kwargs)
def _munge_default_vars(self, vars):
if vars is not None and self.remove_vars is not None:
for n in self.remove_vars:
if n in vars:
del vars[n]
return vars
def _create_config_parser(self) -> ConfigParser:
parser = super()._create_config_parser()
sec = self.env_sec
parser.add_section(sec)
for k, v in self.env.items():
logger.debug(f'adding env section {sec}: {k} -> {v}')
v = self._format_option(k, v, sec)
parser.set(sec, k, v)
return parser
class CommandLineConfig(IniConfig, metaclass=ABCMeta):
"""A configuration object that allows creation by using command line arguments
as defaults when the configuration file is missing.
    Subclasses must implement the ``set_defaults`` method.  All defaults set
    in this method are then created in the default section of the configuration
    when created with the class method ``from_args``, which is called with the
    parsed command line arguments (usually from an instance of a subclass of
    :class:`.SimpleActionCli`).
"""
def set_default(self, name: str, value: str, clobber: bool = None):
"""Set a default value in the ``default`` section of the configuration.
"""
if clobber is not None:
self.set_option(name, clobber, self.default_section)
elif name not in self.options and value is not None:
self.set_option(name, value, self.default_section)
@abstractmethod
def set_defaults(self, *args, **kwargs):
pass
@classmethod
def from_args(cls, config=None, *args, **kwargs):
if config is None:
self = cls()
self._conf = self._create_config_parser()
self.parser.add_section(self.default_section)
else:
self = config
self.set_defaults(*args, **kwargs)
return self | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/config/iniconfig.py | iniconfig.py |
__author__ = 'Paul Landes'
from typing import Union, Any, Iterable, ClassVar, Dict
from abc import ABC, abstractmethod
import sys
import logging
from logging import Logger
import textwrap as tw
from collections import OrderedDict
import itertools as it
from io import TextIOBase, StringIO
from functools import lru_cache
from . import ConfigurationError
@lru_cache(maxsize=50)
def _get_str_space(n_spaces: int) -> str:
return ' ' * n_spaces
class Writable(ABC):
"""An interface for classes that have multi-line debuging capability.
.. document private functions
.. automethod:: _trunc
.. automethod:: _sp
.. automethod:: _set_indent
.. automethod:: _write_line
.. automethod:: _write_block
.. automethod:: _write_wrap
.. automethod:: _write_object
.. automethod:: _write_iterable
.. automethod:: _write_dict
"""
WRITABLE_INDENT_SPACE: ClassVar[int] = 4
"""The default number of spaces to indent each level."""
WRITABLE_MAX_COL: ClassVar[int] = 80
"""The default maximum column size before wrapping text."""
WRITABLE_INCLUDE_INDEX: ClassVar[bool] = False
"""Whether to include index numbers with levels in sequences."""
@classmethod
def _trunc(cls, s: str, max_len: int = None) -> str:
max_len = cls.WRITABLE_MAX_COL if max_len is None else max_len
sl = len(s)
if sl >= max_len:
ml = max_len - 3
s = s[:ml] + '...'
return s
    def _get_str_space(self, n_spaces: int) -> str:
return _get_str_space(n_spaces)
def _sp(self, depth: int):
"""Utility method to create a space string.
"""
indent = getattr(self, '_indent', None)
indent = self.WRITABLE_INDENT_SPACE if indent is None else indent
return self._get_str_space(depth * indent)
def _set_indent(self, indent: int = None):
"""Set the indentation for the instance. By default, this value is 4.
:param indent: the value to set as the indent for this instance, or
``None`` to unset it
"""
self._indent = indent
_get_str_space.cache_clear()
def _write_empty(self, writer: TextIOBase, count: int = 1):
"""Write an empty line(s).
:param count: the number of newlines to add
"""
writer.write('\n'.join([''] * (count + 1)))
def _write_line(self, line: str, depth: int, writer: TextIOBase,
max_len: Union[bool, int] = False,
repl_newlines: bool = False):
"""Write a line of text ``line`` with the correct indentation per ``depth`` to
``writer``.
        :param max_len: truncate to the given length if an :class:`int` or
                        :obj:`WRITABLE_MAX_COL` if ``True``
        :param repl_newlines: whether to replace newlines with spaces
"""
s = f'{self._sp(depth)}{line}'
if repl_newlines:
s = s.replace('\n', ' ')
if max_len is True:
s = self._trunc(s)
elif max_len is False:
pass
elif isinstance(max_len, int):
s = self._trunc(s, max_len)
else:
raise ConfigurationError(
"Parameter 'max_len' must either be a boolean or integer")
writer.write(s)
self._write_empty(writer)
def _write_divider(self, depth: int, writer: TextIOBase, char: str = '_',
width: int = None, header: str = None):
"""Write a text based dividing line (like <hr></hr> in html).
"""
width = self.WRITABLE_MAX_COL if width is None else width
width = width - (depth * self.WRITABLE_INDENT_SPACE)
if header is None:
line = self._sp(depth) + (char * width)
else:
sp = self._sp(depth)
htext = self._trunc(header, width)
bar = ('-' * int((width - len(htext)) / 2))
line = sp + bar + htext + bar
if (len(htext) % 2) != 0:
line += '-'
writer.write(line)
self._write_empty(writer)
def _write_wrap(self, text: str, depth: int, writer: TextIOBase,
width: int = None, **kwargs):
"""Like :meth:`_write_line` but wrap text per ``width``.
:param text: the text to word wrap
:param depth: the starting indentation depth
:param writer: the writer to dump the content of this writable
:param width: the width of the text before wrapping, which defaults to
:obj:`WRITABLE_MAX_COL`
        :param kwargs: the keyword arguments given to :func:`textwrap.wrap`
"""
width = self.WRITABLE_MAX_COL if width is None else width
lines = tw.wrap(text, width=width, **kwargs)
self._write_block(lines, depth, writer)
def _write_block(self, lines: Union[str, Iterable[str]], depth: int,
writer: TextIOBase, limit: int = None):
"""Write a block of text with indentation.
:param limit: the max number of lines in the block to write
"""
add_ellipses = False
sp = self._sp(depth)
if isinstance(lines, str):
lines = lines.split('\n')
if limit is not None:
all_lines = tuple(lines)
if len(all_lines) > limit:
add_ellipses = True
limit -= 1
lines = it.islice(all_lines, limit)
for line in lines:
writer.write(sp)
writer.write(line)
self._write_empty(writer)
if add_ellipses:
writer.write(sp)
writer.write('...')
self._write_empty(writer)
def _write_object(self, obj: Any, depth: int, writer: TextIOBase):
"""Write an object based on the class of the instance.
"""
if isinstance(obj, dict):
self._write_dict(obj, depth, writer)
elif isinstance(obj, (list, tuple, set)):
self._write_iterable(obj, depth, writer)
elif isinstance(obj, _WRITABLE_CLASS):
obj.write(depth, writer)
else:
self._write_line(str(obj), depth, writer)
def _write_key_value(self, k: Any, v: Any, depth: int, writer: TextIOBase):
"""Write a key value pair from a dictionary.
"""
sp = self._sp(depth)
writer.write(f'{sp}{k}: {v}\n')
def _write_iterable(self, data: Iterable[Any], depth: int,
writer: TextIOBase, include_index: bool = None):
"""Write list ``data`` with the correct indentation per ``depth`` to
``writer``.
:param include_index: if ``True``, add an incrementing index for each
element in the output
"""
if include_index is None:
include_index = self.WRITABLE_INCLUDE_INDEX
for i, v in enumerate(data):
if include_index:
self._write_line(f'i: {i}', depth, writer)
self._write_object(v, depth + (1 if include_index else 0), writer)
def _is_container(self, v: Any) -> bool:
"""Return whether or not ``v`` is a container object: ``dict``, ``list``,
        ``tuple``, or this class.
"""
return isinstance(v, (dict, list, tuple, _WRITABLE_CLASS))
def _write_dict(self, data: Dict, depth: int, writer: TextIOBase,
inline: bool = False, one_line: bool = False):
"""Write dictionary ``data`` with the correct indentation per ``depth``
to ``writer``.
        :param data: the data to write
:param inline: whether to write values in one line (separate from key)
:param one_line: whether to print all of ``data`` on one line
"""
sp = self._sp(depth)
keys = data.keys()
if not isinstance(data, OrderedDict):
keys = sorted(keys)
if one_line:
kvs: str = ', '.join(map(lambda t: f'{t[0]}={t[1]}', data.items()))
writer.write(f'{sp}{kvs}\n')
else:
for k in keys:
v = data[k]
if not inline and self._is_container(v):
writer.write(f'{sp}{k}:\n')
self._write_object(v, depth + 1, writer)
else:
self._write_key_value(k, v, depth, writer)
@abstractmethod
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
"""Write the contents of this instance to ``writer`` using indention ``depth``.
:param depth: the starting indentation depth
:param writer: the writer to dump the content of this writable
"""
pass
def write_to_log(self, logger: Logger, level: int = logging.INFO,
depth: int = 0, split_lines: bool = True):
"""Just like :meth:`write` but write the content to a log message.
:param logger: the logger to write the message containing content of
this writable
:param level: the logging level given in the :mod:`logging` module
:param depth: the starting indentation depth
:param split_lines: if ``True`` each line is logged separately
"""
sio = StringIO()
self.write(depth, sio)
lines = (sio.getvalue(),)
if split_lines:
lines = lines[0].strip().split('\n')
for line in lines:
logger.log(level, line)
_WRITABLE_CLASS = Writable | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/config/writable.py | writable.py |
__author__ = 'Paul Landes'
import dataclasses
from typing import Set, Type, Any
import logging
import sys
import collections
from io import TextIOBase
from zensols.config import ClassResolver, Writable
logger = logging.getLogger(__name__)
class ClassExplorer(Writable):
"""A utility class that recursively reports class metadata in an object graph.
"""
ATTR_META_NAME = 'ATTR_EXP_META'
"""The attribute name set on classes to find to report their fields. When the
value of this is set as a class attribute, each of that object instances'
members are pretty printed. The value is a tuple of string attribute
names.
"""
def __init__(self, include_classes: Set[Type],
exclude_classes: Set[Type] = None,
indent: int = 4, attr_truncate_len: int = 80,
include_dicts: bool = False,
include_private: bool = False,
dictify_dataclasses: bool = False):
self.include_classes = include_classes
if exclude_classes is None:
self.exclude_classes = set()
else:
self.exclude_classes = exclude_classes
self.indent = indent
self.attr_truncate_len = attr_truncate_len
self.include_dicts = include_dicts
self.include_private = include_private
self.dictify_dataclasses = dictify_dataclasses
def get_metadata(self, inst: Any) -> dict:
self.visited = set()
try:
include_classes = set(self.include_classes | set([inst.__class__]))
meta = self._get_metadata(
inst, tuple(include_classes), tuple(self.exclude_classes))
finally:
del self.visited
return meta
def _get_dict(self, inst: dict, include_classes: Set[Type],
exclude_classes: Set[Type]) -> dict:
oid = id(inst)
if oid not in self.visited:
children = []
self.visited.add(oid)
for k, v in inst.items():
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'desc {k} -> {type(v)}')
v = self._get_metadata(v, include_classes, exclude_classes)
if v is not None:
children.append({'attr': k, 'child': v})
return {'class_name': '<dict>', 'children': children}
def _is_traversable(self, inst: Any, include_classes: Set[Type],
exclude_classes: Set[Type]) -> bool:
return isinstance(inst, include_classes) and \
not isinstance(inst, exclude_classes)
def _get_metadata(self, inst: Any, include_classes: Set[Type],
exclude_classes: Set[Type]) -> dict:
oid = id(inst)
if oid in self.visited:
return None
self.visited.add(oid)
dat = None
if self.include_dicts and isinstance(inst, dict):
dat = self._get_dict(inst, include_classes, exclude_classes)
elif self._is_traversable(inst, include_classes, exclude_classes):
dat = collections.OrderedDict()
cls = inst.__class__
class_name = ClassResolver.full_classname(cls)
children = []
dat['class_name'] = class_name
is_dataclass = self.dictify_dataclasses and \
dataclasses.is_dataclass(inst)
has_attr_meta = hasattr(cls, self.ATTR_META_NAME)
if hasattr(inst, 'name'):
dat['name'] = getattr(inst, 'name')
if has_attr_meta or is_dataclass:
attrs = {}
dat['attrs'] = attrs
if not has_attr_meta and is_dataclass:
try:
attr_names = dataclasses.asdict(inst)
except Exception as e:
logger.info(
f'can not get attr names for {type(inst)}: {e}')
attr_names = ()
elif has_attr_meta:
attr_names = getattr(cls, self.ATTR_META_NAME)
# TODO: skip attributes that will or have already been
# traversed as a "traversable" object on a recursion
for attr in attr_names:
v = getattr(inst, attr)
if isinstance(v, dict):
v = self._get_dict(v, include_classes, exclude_classes)
if v is not None:
children.append({'attr': attr, 'child': v})
else:
attrs[attr] = v
for attr in inst.__dir__():
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'desc meta: {type(inst)}.{attr}')
if self.include_private or not attr.startswith('_'):
try:
child_inst = getattr(inst, attr)
except Exception as e:
msg = f'error: can not traverse attribute {attr}: {e}'
logger.info(msg)
child_inst = msg
if isinstance(child_inst, dict):
child = self._get_dict(
child_inst, include_classes, exclude_classes)
else:
child = self._get_metadata(
child_inst, include_classes, exclude_classes)
if child is not None:
children.append({'attr': attr, 'child': child})
if len(children) > 0:
dat['children'] = children
return dat
def write(self, inst: Any, depth: int = 0,
writer: TextIOBase = sys.stdout):
meta = self.get_metadata(inst)
self._write(meta, depth, None, writer)
def write_metadata(self, depth: int = 0,
writer: TextIOBase = sys.stdout,
metadata: dict = None):
self._write(metadata, depth, None, writer)
def _write(self, metadata: dict, depth: int, attr: str, writer):
cn = f'{attr}: ' if attr is not None else ''
name = f" ({metadata['name']})" if 'name' in metadata else ''
sp = self._sp(depth)
sp2 = self._sp(depth + 1)
writer.write(f"{sp}{cn}{metadata['class_name']}{name}\n")
if 'attrs' in metadata:
for k, v in metadata['attrs'].items():
v = self._trunc(str(v), max_len=self.attr_truncate_len)
writer.write(f'{sp2}{k}: {v}\n')
if 'children' in metadata:
for c in metadata['children']:
self._write(c['child'], depth + 1, c['attr'], writer) | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/config/meta.py | meta.py |
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Dict, Set, Iterable, List, Any, Union, Optional, Type, Tuple
from abc import ABCMeta, abstractmethod
import sys
import logging
from collections import OrderedDict
import inspect
from pathlib import Path
from io import TextIOBase
from . import ConfigurationError, Serializer, Dictable, Settings
logger = logging.getLogger(__name__)
class ConfigurableError(ConfigurationError):
"""Base class raised for any configuration based errors."""
pass
class ConfigurableFileNotFoundError(ConfigurableError):
"""Raised when a configuration file is not found for those file based
instances of :class:`.Configurable`.
"""
def __init__(self, path: Path, source: Union[Path, Any] = None):
msg = f"No such file: '{path}'"
if isinstance(source, Path):
msg += f' loading from {source}'
super().__init__(msg)
self.path = path
self.source = source
class Configurable(Dictable, metaclass=ABCMeta):
"""An abstract base class that represents an application specific
configuration.
Note that many of the getters are implemented in ``configparser``.
    However, they are reimplemented here for consistency among parsers.
"""
def __init__(self, default_section: str = None):
"""Initialize.
        :param default_section: used as the default section when none is given
                                to getters such as :meth:`get_option`; defaults
                                to ``default``
"""
if default_section is None:
self.default_section = 'default'
else:
self.default_section = default_section
self.serializer = self._create_serializer()
def _create_serializer(self) -> Serializer:
return Serializer()
@abstractmethod
def get_options(self, section: str = None) -> Dict[str, str]:
"""Get all options for a section. If ``opt_keys`` is given return only
options with those keys.
:param section: section in the ini file to fetch the value; defaults to
constructor's ``default_section``
"""
pass
@abstractmethod
def has_option(self, name: str, section: str = None) -> bool:
pass
def get_option(self, name: str, section: str = None) -> str:
"""Return an option from ``section`` with ``name``.
:param section: section in the ini file to fetch the value; defaults to
constructor's ``default_section``
"""
val = None
opts = self.get_options(section or self.default_section)
if opts is not None:
val = opts.get(name)
if val is None:
raise ConfigurableError(
f"No option '{name}' found in section: {section}")
return val
def reload(self):
"""Reload the configuration from the backing store.
"""
pass
def get_option_list(self, name: str, section: str = None) -> List[str]:
"""Just like :meth:`get_option` but parse as a list using ``split``.
:param section: section in the ini file to fetch the value; defaults to
constructor's ``default_section``
"""
val = self.get_option(name, section)
return self.serializer.parse_list(val)
def get_option_boolean(self, name: str, section: str = None) -> bool:
"""Just like :meth:`get_option` but parse as a boolean (any case `true`).
:param section: section in the ini file to fetch the value; defaults to
constructor's ``default_section``
"""
val = self.get_option(name, section)
val = val.lower() if val else 'false'
return val == 'true'
def get_option_int(self, name: str, section: str = None):
"""Just like :meth:`get_option` but parse as an integer.
:param section: section in the ini file to fetch the value; defaults to
constructor's ``default_section``
"""
val = self.get_option(name, section)
if val:
return int(val)
def get_option_float(self, name: str, section: str = None):
"""Just like :meth:`get_option` but parse as a float.
"""
val = self.get_option(name, section)
if val:
return float(val)
def get_option_path(self, name: str, section: str = None):
"""Just like :meth:`get_option` but return a ``pathlib.Path`` object of
the string.
"""
val = self.get_option(name, section)
path = None
if val is not None:
path = Path(val)
return path
def get_option_object(self, name: str, section: str = None):
"""Just like :meth:`get_option` but parse as an object per object syntax
rules.
:see: :meth:`.Serializer.parse_object`
"""
val = self.get_option(name, section)
if val:
return self.serializer.parse_object(val)
@property
def options(self) -> Dict[str, Any]:
"""All options from the default section.
"""
return self.get_options()
def populate(self, obj: Any = None, section: str = None,
parse_types: bool = True) -> Union[dict, Settings]:
"""Set attributes in ``obj`` with ``setattr`` from the all values in
``section``.
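
        For example (the section and option names are hypothetical)::

            settings: Settings = config.populate(section='app')
            settings.model_name  # the parsed value of ``app:model_name``
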
"""
section = self.default_section if section is None else section
sec = self.get_options(section)
if sec is None:
# needed for the YamlConfig class
raise ConfigurableError(
f"No section from which to populate: '{section}'")
return self.serializer.populate_state(sec, obj, parse_types)
def __getitem__(self, section: str = None) -> Settings:
return self.populate(section=section)
@property
def sections(self) -> Set[str]:
"""All sections of the configuration file.
"""
return frozenset()
def set_option(self, name: str, value: str, section: str = None):
"""Set an option on this configurable.
:param name: the name of the option
:param value: the value to set
:param section: the section (if applies) to add the option
:raises NotImplementedError: if this class does not support this
operation
"""
raise NotImplementedError()
def copy_sections(self, to_populate: Configurable,
sections: Iterable[str] = None,
robust: bool = False) -> Exception:
"""Copy all sections from this configuruable to ``to_populate``.
:param to_populate: the target configuration object
:param sections: the sections to populate or ``None`` to copy allow
:param robust: if ``True``, when any exception occurs (namely
interplation exceptions), don't copy and remove the
section in the target configuraiton
:return: the last exception that occured while trying to copy the
properties
"""
last_ex = None
if sections is None:
sections = self.sections
for sec in sections:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'copying section {sec}')
try:
opts: Dict[str, Any] = self.get_options(sec)
if opts is None:
raise ConfigurableError(f"No such section: '{sec}'")
for k, v in opts.items():
to_populate.set_option(k, v, sec)
# robust is needed by lib.ConfigurationImporter._load(); but deal
# only with interpolation errors
except ConfigurableError as e:
raise e
except Exception as e:
if not robust:
raise e
else:
to_populate.remove_section(sec)
last_ex = e
return last_ex
def remove_section(self, section: str):
"""Remove a seciton with the given name."""
raise NotImplementedError()
def merge(self, to_populate: Configurable):
"""Copy all data from this configuruable to ``to_populate``, and clobber
any overlapping properties in the process.
:param to_populate: the target configuration object
"""
to_populate.copy_sections(self, to_populate.sections)
def _get_calling_module(self, depth: int = 0):
"""Get the last module in the call stack that is not this module or
``None`` if the call originated from this module.
"""
for frame in inspect.stack():
mod = inspect.getmodule(frame[depth])
logger.debug(f'calling module: {mod}')
if mod is not None:
mod_name = mod.__name__
if mod_name != __name__:
return mod
def resource_filename(self, resource_name: str, module_name: str = None):
"""Return a resource based on a file name. This uses the
``pkg_resources`` package first to find the resources. If it doesn't
find it, it returns a path on the file system.
        :param resource_name: the file name of the resource to obtain (or name
                              if obtained from an installed module)
:param module_name: the name of the module to obtain the data, which
defaults to ``__name__``
:return: a path on the file system or resource of the installed module
"""
if module_name is None:
mod = self._get_calling_module()
logger.debug(f'calling module: {mod}')
if mod is not None:
module_name = mod.__name__
return self.serializer.resource_filename(resource_name, module_name)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
for sec in sorted(self.sections):
self._write_line(sec, depth, writer)
opts: Dict[str, str] = self.get_options(sec)
if opts is None:
raise ConfigurationError(f'No such section: {sec}')
if not isinstance(opts, dict):
raise ConfigurationError(
f"Expecting dict but got {type(opts)} in section '{sec}'")
for k in sorted(opts.keys()):
v = opts[k]
self._write_line(f'{k}: {v}', depth + 1, writer)
def asdict(self, *args, **kwargs) -> Dict[str, Any]:
secs = OrderedDict()
for sec in sorted(self.sections):
svs = OrderedDict()
secs[sec] = svs
opts = self.get_options(sec)
for k in sorted(opts.keys()):
svs[k] = opts[k]
return secs
def as_deep_dict(self) -> Dict[str, Any]:
"""Return a deep :class:`builtins.dict` with the top level with section
names as keys and deep (i.e. ``json:``) values as nested dictionaries.
"""
secs = OrderedDict()
for sec in sorted(self.sections):
svs = OrderedDict()
secs[sec] = svs
self.populate(svs, sec)
return secs
def as_one_tier_dict(self, *args, **kwargs) -> Dict[str, Any]:
"""Return a flat one-tier :class:`builtins.dict` with keys in
``<section>:<option>`` format.
"""
flat: Dict[str, Any] = {}
for sec, opts in self.asdict(False).items():
for k, v in opts.items():
flat[f'{sec}:{k}'] = v
return flat
def _get_section_short_str(self):
try:
return next(iter(self.sections))
except StopIteration:
return ''
def _get_short_str(self) -> str:
sec = self._get_section_short_str()
return f'{self.__class__.__name__}{{{sec}}}'
def _get_container_desc(self, include_type: bool = True,
max_path_len: int = 3) -> str:
return self.__class__.__name__
def _raise(self, msg: str, err: Exception = None):
config_file: Optional[Union[Path, str]] = None
if hasattr(self, 'config_file'):
config_file = self.config_file
if isinstance(config_file, str):
msg = f'{msg} in file {config_file}'
elif isinstance(config_file, Path):
msg = f'{msg} in file {config_file.absolute()}'
else:
msg = f'{msg} in {self._get_container_desc()}'
if err is None:
raise ConfigurableError(msg)
else:
raise ConfigurableError(msg) from err
def __str__(self):
return f'<{self._get_short_str()}>'
def __repr__(self):
return self.__str__()
class TreeConfigurable(Configurable, metaclass=ABCMeta):
"""A hierarchical configuration. The sections are the root nodes, but each
section's values can be nested :class:`~builtins.dict` instances. These
values are traversable with a string dot path notation.
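
    For example (a hypothetical nested configuration)::

        # given the tree {'app': {'db': {'host': 'localhost'}}}
        config.get_option('app.db.host')  # -> 'localhost'
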
"""
def __init__(self, default_section: str = None,
default_vars: Dict[str, Any] = None,
sections_name: str = 'sections',
sections: Set[str] = None):
"""Initialize.
        :param default_section: used as the default section when none is given
                                to getters such as :meth:`get_option`; defaults
                                to ``default``
        :param default_vars: used in place of missing variables during value
                             interpolation; **deprecated**: this will go away
                             in a future release
:param sections_name: the dot notated path to the variable that has a
list of sections
:param sections: used as the set of sections for this instance
"""
super().__init__(default_section=default_section)
self.sections_name = sections_name
self.default_vars = default_vars if default_vars else {}
self._sections = sections
self._options = None
@abstractmethod
def _get_config(self) -> Dict[str, Any]:
pass
@abstractmethod
def _set_config(self, source: Dict[str, Any]):
pass
@property
def config(self) -> Dict[str, Any]:
"""The configuration as a nested set of :class:`~builtins.dict`.
:see: :meth:`invalidate`
"""
return self._get_config()
@config.setter
def config(self, source: Dict[str, Any]):
"""The configuration as a nested set of :class:`~builtins.dict`.
:see: :meth:`invalidate`
"""
self._set_config(source)
@property
def root(self) -> Optional[str]:
"""The root name of the configuration file, if one exists. If more than
one root exists, return the first.
"""
if not hasattr(self, '_root'):
root_keys: Iterable[str] = self.config.keys()
if len(root_keys) > 0:
self._root = next(iter(root_keys))
else:
self._root = None
return self._root
@classmethod
def _is_primitive(cls, obj) -> bool:
return isinstance(obj, (float, int, bool, str, set,
list, tuple, Type, Path))
def _flatten(self, context: Dict[str, Any], path: str,
n: Dict[str, Any], sep: str = '.'):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'path: {path}, n: <{n}>, context: <{context}>')
if n is None:
context[path] = None
elif self._is_primitive(n):
context[path] = n
elif isinstance(n, (dict, Settings)):
for k, v in n.items():
k = path + sep + k if len(path) else k
self._flatten(context, k, v, sep)
else:
self._raise(f'Unknown yaml type {type(n)}: {n}')
def invalidate(self):
"""This should be called when the underlying :obj:`config` object graph
changes *under the nose* of this instance.
"""
context = {}
context.update(self.default_vars)
self._flatten(context, '', self.config)
self._all_keys = set(context.keys())
self._sections = None
self._options = None
if hasattr(self, '_root'):
del self._root
def _find_node(self, n: Union[Dict, Any], path: str, name: str):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'search: n={n}, path={path}, name={name}')
if path == name:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'found: <{n}>')
return n
elif isinstance(n, dict):
for k, v in n.items():
k = path + '.' + k if len(path) else k
v = self._find_node(v, k, name)
if v is not None:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'found {name} -> {v}')
return v
if logger.isEnabledFor(logging.DEBUG):
logger.debug('not found: {}'.format(name))
def get_tree(self, name: Optional[str] = None) -> Dict[str, Any]:
"""Get the node in the configuration, which is a nested set
:class:`~builtins.dict` instances as an object graph.
:param name: the doted notation indicating which node in the tree to
retrieve
"""
if name is None:
return self.config
return self._find_node(self.config, '', name)
def _get_option(self, name: str) -> str:
node = self.get_tree(name)
if self._is_primitive(node):
return node
elif self.default_vars is not None and name in self.default_vars:
return self.default_vars[name]
elif node is None:
# values in YAML can be set to ``null``
return None
else:
self._raise(f'Unknown type or state: {name} ({type(node)})')
@property
def options(self) -> Dict[str, Any]:
if self._options is None:
self.config
self._options = {}
for k in self._all_keys:
self._options[k] = self._get_option(k)
return self._options
def has_option(self, name: str, section: str = None) -> bool:
opts = self.options
return name in opts
def get_option(self, name: str, section: str = None) -> str:
"""Return an option using a dot encoded path.
:param section: ignored
"""
if self.default_vars is not None and name in self.default_vars:
return self.default_vars[name]
else:
ops = self.options
if name in ops:
return ops[name]
else:
self._raise(f'No such option: {name}')
def get_options(self, name: str = None) -> Dict[str, Any]:
name = self.default_section if name is None else name
if self.default_vars is not None and name in self.default_vars:
return self.default_vars[name]
else:
node = self.get_tree(name)
if not isinstance(node, str) or isinstance(node, list):
return node
elif name in self.default_vars:
return self.default_vars[name]
else:
self._raise(f'No such option: {name}')
def _get_at_depth(self, node: Any, s_level: int, level: int,
path: List[str]) -> Set[str]:
def map_node(x: Tuple[str, Any]) -> str:
k, v = x
if isinstance(v, dict):
if len(path) > 0:
k = '.'.join(path) + '.' + k
else:
k = None
return k
nodes: Set[str] = set()
if isinstance(node, dict):
if level < s_level:
for k, child in node.items():
path.append(k)
ns = self._get_at_depth(child, s_level, level + 1, path)
path.pop()
nodes.update(ns)
elif level == s_level:
return set(filter(lambda x: x is not None,
map(map_node, node.items())))
return nodes
def _find_sections(self) -> Set[str]:
secs: Set[str]
sec_key = f'{self.root}.{self.sections_name}'
if self.has_option(sec_key):
secs: Dict[str, Any] = self.get_tree(sec_key)
if isinstance(secs, str):
secs = self.get_option_list(sec_key)
elif isinstance(secs, int):
secs = self._get_at_depth(self.get_tree(None), secs, 0, [])
secs = frozenset(secs)
else:
secs = self._get_at_depth(self.get_tree(None), 0, 0, [])
return secs
@property
def sections(self) -> Set[str]:
"""The sections by finding the :obj:`section_name` based from the
:obj:`root`.
"""
if self._sections is None:
self._sections = self._find_sections()
return self._sections
@sections.setter
def sections(self, sections: Set[str]):
self._sections = sections
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_dict(self.config, depth, writer)
def asdict(self, *args, **kwargs) -> Dict[str, Any]:
return self.config | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/config/configbase.py | configbase.py |
__author__ = 'Paul Landes'
from typing import Dict, Any, Optional, List, Union
from dataclasses import dataclass, field
import logging
import re
from zensols.persist import persisted
from . import ImportYamlConfig, Serializer
logger = logging.getLogger(__name__)
@dataclass
class _Condition(object):
"""Contains data needed to branch at the level of node replacement.
"""
serializer: Serializer = field(repr=False)
name: str
ifn: Any
thn: Dict[str, Any]
eln: Dict[str, Any]
parent: Dict[str, Any] = field(default=None, repr=False)
@property
@persisted('_child')
def child(self) -> Dict[str, Any]:
truthy = self.ifn
if isinstance(truthy, str):
truthy = self.serializer.parse_object(truthy)
if logger.isEnabledFor(logging.DEBUG):
            logger.debug(f'truth value: {self.ifn} ({type(self.ifn)}) -> '
                         f'{truthy} ({type(truthy)})')
if truthy:
node = None if self.thn is None else self.thn
else:
node = None if self.eln is None else self.eln
return node[self.name]
class ConditionalYamlConfig(ImportYamlConfig):
"""Conditionally includes configuration based on very basic if/then logic.
YAML nodes (defaulting to name ``condition``) are replaced with a single
node either under a ``then`` node or an ``else`` node.
    For the ``then`` node to be used, the ``if`` value must evaluate to true in
    Python.  For this reason, it is recommended to use boolean constants or
    ``eval:`` syntax.
For example::
condition:
if: ${default:testvar}
then:
classify_net_settings:
embedding_layer: 'glove_50_embedding_layer'
recurrent_settings: 'recurrent_settings'
else:
classify_net_settings:
embedding_layer: 'transformer_embedding_layer'
recurrent_settings: 'recurrent_settings'
"""
_CONDITION_REGEX = re.compile(r'^(?:[a-zA-Z0-9-_.]+)?condition$')
_IF_NODE = 'if'
_THEN_NODE = 'then'
_ELSE_NODE = 'else'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._evals: Dict[str, Dict[str, Any]] = {}
def _find_node(self, n: Union[Dict, Any], path: str, name: str):
if isinstance(n, _Condition):
node = n.child
else:
node = super()._find_node(n, path, name)
return node
def _eval_tree(self, node: Dict[str, Any]):
repls = {}
dels = set()
for cn, cv in node.items():
if self._CONDITION_REGEX.match(cn) is not None:
dels.add(cn)
if isinstance(cv, _Condition):
cond = cv
repls[cond.name] = cond.child
elif isinstance(cv, dict):
self._eval_tree(cv)
node.update(repls)
for n in dels:
del node[n]
def get_tree(self, name: Optional[str] = None) -> Dict[str, Any]:
node = self._evals.get(name)
if node is None:
node: Union[_Condition, Dict[str, Any]] = super().get_tree(name)
if isinstance(node, dict):
# we have to conditionally update recursively when a client
# descends this tree without get_tree('(grand)child')
self._eval_tree(node)
self._evals[name] = node
return node
def _create_condition(self, node: Dict[str, Any]) -> _Condition:
ifn = node.get(self._IF_NODE)
thn = node.get(self._THEN_NODE)
eln = node.get(self._ELSE_NODE)
if ifn is None:
self._raise(
f"Missing '{self._IF_NODE}' in condition: {node}")
if thn is None and eln is None:
self._raise(
f"Either '{self._THEN_NODE}' or " +
f"'{self._ELSE_NODE}' must follow an if in: {node}")
for cn, name in ((thn, self._IF_NODE), (eln, self._ELSE_NODE)):
if cn is not None and len(cn) > 1:
self._raise(
'Conditionals can have only one child, ' +
f"but got {len(cn)}: {cn}'")
thn_k = None if thn is None else next(iter(thn.keys()))
eln_k = None if eln is None else next(iter(eln.keys()))
if thn_k is not None and eln_k is not None and thn_k != eln_k:
self._raise(
f"Conditionals must have the same child root, got: '{node}'")
return _Condition(self.serializer, thn_k or eln_k, ifn, thn, eln, node)
def _map_conditions(self, par: Dict[str, Any], path: List[str]):
add_conds = {}
for cn, cv in par.items():
if isinstance(cv, dict):
if self._CONDITION_REGEX.match(cn) is not None:
cond = self._create_condition(cv)
if cond.name in add_conds:
self._raise(f'Duplicate cond: {cond}')
add_conds[cond.name] = cond
else:
path.append(cn)
self._map_conditions(cv, path)
path.pop()
par.update(add_conds)
def _compile(self) -> Dict[str, Any]:
root = super()._compile()
self._map_conditions(self._config, [])
return root | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/config/condyaml.py | condyaml.py |
__author__ = 'Paul Landes'
from typing import Dict, Tuple, Set, Any, Union
import logging
from pathlib import Path
from io import TextIOBase, StringIO
import yaml
from zensols.config import ConfigurableFileNotFoundError, TreeConfigurable
logger = logging.getLogger(__name__)
class YamlConfig(TreeConfigurable):
"""Just like :class:`.IniConfig` but parse configuration from YAML files.
Variable substitution works just like ini files, but you can set what
delimiter to use and keys are the paths of the data in the hierarchy
separated by dots.
See the test cases for examples.
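
    A usage sketch (the file name and key are hypothetical)::

        config = YamlConfig('app.yml')
        config.get_option('project.name')
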
"""
CLASS_VER = 0
def __init__(self, config_file: Union[str, Path, TextIOBase] = None,
default_section: str = None,
default_vars: Dict[str, Any] = None, delimiter: str = '$',
sections_name: str = 'sections', sections: Set[str] = None):
"""Initialize this instance. When sections are not set, and the sections are
not given in configuration file at location :obj:`sections_name` the
root is made a singleton section.
:param config_file: the configuration file path to read from; if the
type is an instance of :class:`io.TextIOBase`, then
read it as a file object
        :param default_vars: used in place of missing variables during value
                             interpolation; **deprecated**: this will go away
                             in a future release
        :param default_section: used as the default section when none is given
                                to getters such as :meth:`get_option`; defaults
                                to ``default``
:param delimiter: the delimiter used for template replacement with dot
syntax, or ``None`` for no template replacement
:param sections_name: the dot notated path to the variable that has a
list of sections
:param sections: used as the set of sections for this instance
"""
if isinstance(config_file, str):
self.config_file = Path(config_file)
else:
self.config_file = config_file
self.delimiter = delimiter
self._config = None
super().__init__(default_section=default_section,
default_vars=default_vars,
sections_name=sections_name,
sections=sections)
def _parse(self) -> Tuple[str, Dict[str, str], Dict[str, str]]:
if logger.isEnabledFor(logging.INFO):
logger.info(f'parsing: {self.config_file}')
cfile = self.config_file
if isinstance(cfile, Path) and not cfile.is_file():
raise ConfigurableFileNotFoundError(cfile)
elif isinstance(cfile, TextIOBase):
content = cfile.read()
self.config_file = StringIO(content)
else:
with open(cfile) as f:
content = f.read()
struct = yaml.load(content, yaml.FullLoader)
        # struct is None if the file was empty
if struct is None:
struct = {}
context = {}
context.update(self.default_vars)
self._flatten(context, '', struct)
self._all_keys = set(context.keys())
return content, struct, context
def _make_class(self) -> type:
class_name = 'YamlTemplate{}'.format(self.CLASS_VER)
self.CLASS_VER += 1
# note we have to give the option of different delimiters since the
# default '$$' (use case=OS env vars) is always resolved to '$' given
# the iterative variable substitution method
#
# Yes, this really is necessary. From the string.Template
# documentation: Advanced usage: you can derive subclasses of Template
# to customize the placeholder syntax, delimiter character, or the
# entire regular expression used to parse template strings. To do this,
# you can override these class attributes:
code = """\
from string import Template
class """ + class_name + """(Template):
idpattern = r'[a-z][_a-z0-9.]*'
delimiter = '""" + self.delimiter + '\''
exec(code)
cls = eval(class_name)
return cls
def _compile(self) -> Dict[str, Any]:
content, struct, context = self._parse()
prev = None
if self.delimiter is not None:
cls = self._make_class()
while prev != content:
prev = content
# TODO: raise here for missing keys embedded in the file rather
# than KeyError
content = cls(content).substitute(context)
conf: Dict[str, Any] = yaml.load(content, yaml.FullLoader)
if conf is None:
conf = {}
return conf
def _get_config(self) -> Dict[str, Any]:
if self._config is None:
self._config = self._compile()
return self._config
def _set_config(self, source: Dict[str, Any]):
self._config = source
super().invalidate()
self.config_file = None
self._get_config() | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/config/yaml.py | yaml.py |
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Dict, Any, Type, Union, ClassVar
from dataclasses import dataclass, field
import sys
import logging
from pathlib import Path
from zensols.introspect import ClassImporter
from zensols.persist import persisted
from . import ConfigurableError, Configurable, IniConfig, DictionaryConfig
logger = logging.getLogger(__name__)
@dataclass
class ConfigurableFactory(object):
"""Create instances of :class:`.Configurable` with factory methods. The
    parameters in :obj:`kwargs` are given to the initializer on instantiation.
This class often is used to create a factory from just a path, which then
uses the extension with the :obj:`EXTENSION_TO_TYPE` mapping to select the
class. Top level/entry point configuration should use ``conf`` as the
    extension allowing the :class:`.ImportIniConfig` to import other
    configuration.
An example of this is the :class:`.ConfigurationImporter` loading user
specific configuration.
    If the class uses type ``import``, the type is prepended with
``import`` and then mapped using :obj:`EXTENSION_TO_TYPE`. This allows
mixing of different files in one ``config_files`` entry and avoids multiple
import sections.
    :see: :class:`.ImportIniConfig`
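
    A factory sketch (the file name is hypothetical)::

        config: Configurable = ConfigurableFactory().from_path(
            Path('app.conf'))
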
"""
EXTENSION_TO_TYPE: ClassVar[Dict[str, str]] = {
'conf': 'ini',
'ini': 'ini',
'yml': 'yaml',
'json': 'json'}
"""The configuration factory extension to clas name."""
TYPE_TO_CLASS_PREFIX: ClassVar[Dict[str, str]] = {
'import': 'ImportIni',
'importini': 'ImportIni',
'importyaml': 'ImportYaml',
'condyaml': 'ConditionalYaml'}
"""Mapping from :obj:`TYPE_NAME` option to class prefix."""
TYPE_NAME: ClassVar[str] = 'type'
"""The section entry for the configurable type (eg ``ini`` vs ``yaml``)."""
SINGLE_CONFIG_FILE: ClassVar[str] = 'config_file'
"""The section entry for the configuration file."""
CLASS_NAME: ClassVar[str] = 'class_name'
"""The section entry for the class to use."""
kwargs: Dict[str, Any] = field(default_factory=dict)
"""The keyword arguments given to the factory on creation."""
type_map: Dict[str, str] = field(default_factory=dict)
"""Adds more mappings from extension to configuration factory types.
:see: :obj:`EXTENSION_TO_TYPE`
"""
def _mod_name(self) -> str:
"""Return the ``config`` (parent) module name."""
mname = sys.modules[__name__].__name__
parts = mname.split('.')
if len(parts) > 1:
mname = '.'.join(parts[:-1])
return mname
@property
@persisted('_extension_to_type')
def extension_to_type(self) -> Dict[str, str]:
ext = dict(self.EXTENSION_TO_TYPE)
ext.update(self.type_map)
return ext
def from_class_name(self, class_name: str) -> Configurable:
"""Create a configurable from the class name given.
:param class_name: a fully qualified class name
(i.e. ``zensols.config.IniConfig``)
:return: a new instance of a configurable identified by ``class_name``
and created with :obj:`kwargs`
"""
return ClassImporter(class_name, False).instance(**self.kwargs)
def from_type(self, config_type: str) -> Configurable:
"""Create a configurable from the configuration type.
:param config_type: one of the values in :obj:`EXTENSION_TO_TYPE`
(i.e. `importini`)
:return: a new instance of a configurable identified by ``class_name``
and created with :obj:`kwargs`
"""
mod_name: str = self._mod_name()
extension_to_type = self.extension_to_type
if config_type in extension_to_type:
config_type = extension_to_type[config_type].capitalize()
elif config_type in self.TYPE_TO_CLASS_PREFIX:
config_type = self.TYPE_TO_CLASS_PREFIX[config_type]
else:
config_type = config_type.capitalize()
class_name = f'{mod_name}.{config_type}Config'
return self.from_class_name(class_name)
def _path_to_type(self, path: Path) -> str:
"""Map a path to a ``config type``. See :meth:`from_type`.
"""
ext = path.suffix
ext = None if len(ext) == 0 else ext[1:]
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f"using extension to map: '{ext}'")
class_type = self.extension_to_type.get(ext)
if class_type is None:
class_type = 'importini'
return class_type
def from_path(self, path: Path) -> Configurable:
"""Create a configurable from a path. This updates the :obj:`kwargs` to set
``config_file`` to the given path for the duration of this method.
"""
if path.is_dir():
inst = IniConfig(path, **self.kwargs)
else:
class_type = self._path_to_type(path)
old_kwargs = self.kwargs
self.kwargs = dict(self.kwargs)
self.kwargs[self.SINGLE_CONFIG_FILE] = path
try:
inst = self.from_type(class_type)
finally:
self.kwargs = old_kwargs
return inst
@classmethod
def from_section(cls: Type[ConfigurableFactory], kwargs: Dict[str, Any],
section: str) -> Configurable:
params = dict(kwargs)
class_name: str = params.get(cls.CLASS_NAME)
type_map: Dict[str, str] = params.pop('type_map', {})
self: ConfigurableFactory = cls(
**{'type_map': type_map, 'kwargs': params})
tpe: str = params.get(self.TYPE_NAME)
config_file: Union[str, Dict[str, str]] = params.get(
self.SINGLE_CONFIG_FILE)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'class: {class_name}, type: {tpe}, ' +
f'config: {config_file}, params: {params}')
config: Configurable
if class_name is not None:
del params[self.CLASS_NAME]
config = self.from_class_name(class_name)
elif isinstance(config_file, dict):
config = DictionaryConfig(config_file)
elif tpe is not None:
del params[self.TYPE_NAME]
if tpe == 'import' and config_file is not None:
ext = Path(config_file).suffix[1:]
etype = self.extension_to_type.get(ext)
if etype is not None:
tpe = f'import{etype}'
config = self.from_type(tpe)
elif config_file is not None:
del params[self.SINGLE_CONFIG_FILE]
config = self.from_path(Path(config_file))
else:
self._raise(f"No loader information for '{section}': {params}")
if logger.isEnabledFor(logging.INFO):
logger.info(f'created config: {config}')
return config | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/config/configfac.py | configfac.py |
__author__ = 'Paul Landes'
from typing import Set, Any, Dict, ClassVar
from dataclasses import dataclass, field
import logging
from collections import OrderedDict
from . import (
ConfigFactory,
FactoryState,
FactoryStateObserver,
Dictable,
)
logger = logging.getLogger(__name__)
@dataclass
class Writeback(FactoryStateObserver, Dictable):
"""Subclass for classes that want to write attribute changes back to a
:class:`.Configurable`. This uses an observer pattern that write updates
back to the configuration.
When an attribute is set on an instance of this class, it is first set
using the normal Python attribute setting. After that, based on a set of
criteria, the attribute and value are set on the backing configuration
``config``. The value is clobbered with a string version based on the
``config``'s :class:`.Serializer` instance (either a primitive value string
or JSON string).
**Implementation Note:** During initialization, the :meth:`__setattr__`
is called by the Python interpreter, and before the instance is completely
being populated.
**Important:** The :meth:`_is_settable` implementation checks for a
state (any such as ``CREATED``) to be set on the instance. To change
    this behavior, you will need to override this method.
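
    A subclass sketch (the ``Model`` class and its field are hypothetical)::

        @dataclass
        class Model(Writeback):
            learning_rate: float = 0.001

        # on a factory-created instance, the following would also write the
        # value back to the backing configuration:
        #
        #   model.learning_rate = 0.01
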
"""
DEFAULT_SKIP_ATTRIBUTES: ClassVar[Set[str]] = set(
[ConfigFactory.NAME_ATTRIBUTE,
ConfigFactory.CONFIG_ATTRIBUTE,
ConfigFactory.CONFIG_FACTORY_ATTRIBUTE])
"""Default set of attributes to skip when writing back to the configuration
on attribute sets.
"""
name: str = field()
"""The name of the section given in the configuration.
"""
config_factory: ConfigFactory = field(repr=False)
"""The configuration factory that created this instance and used for
serialization functions.
"""
def _notify_state(self, state: FactoryState):
"""Called to update the object of a new state. This is currently only called
by instances of :class:`.ConfigFactory`.
This is useful when overridding :meth:`is_settable` to disallow
setting of the configuraiton while the object is being initialized (see
class docs).
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'notify state: {state}')
self._state = state
def _is_created(self) -> bool:
return hasattr(self, '_state')
def _get_skip_attributes(self) -> Set[str]:
"""Return a set of attributes to not update based on attribute name.
"""
return self.DEFAULT_SKIP_ATTRIBUTES
def _get_keep_attribute(self) -> Set[str]:
"""Return a list of attribute names to allow update. This is an exclusive
list, so those not in this set are not updated. If ``None``, always
update all.
"""
return None
def _is_allowed_type(self, value: Any) -> bool:
"""Return whether or not to allow updating of the type of value. This
implementation is delegated to the :class:`Serializer` instance in the
backing ``config``.
"""
return self.config_factory.config.serializer.is_allowed_type(value)
def _allow_config_adds(self) -> bool:
"""Whether or not to allow new entries to be made in the configuration if they
do not already exist.
"""
return False
def _is_settable(self, name: str, value: Any) -> bool:
"""Return whether or not to allow setting attribute ``name`` with ``value`` on
the current instance. This also checks to make sure this instance has
        completed initialization by checking for the existence of the
        ``_state`` attribute set in :meth:`_notify_state`.
:param name: the name of the attribute
:param value: the Python object value to be set on the configuration
"""
keeps = self._get_keep_attribute()
is_created = self._is_created()
is_skip = keeps is None or name in keeps
is_skip = is_skip and name in self._get_skip_attributes()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'{name}: is created: {is_created}, skip: {is_skip}')
return is_created and not is_skip and self._is_allowed_type(value)
def _set_option(self, name: str, value: Any):
"""Called by :meth:`__setattr__` to set the value on the backing ``config``.
The backing ``config`` handles the string serialization.
:param name: the name of the attribute
:param value: the Python object value to be set on the configuration
"""
config = self.config_factory.config
has_option = config.has_option(name, section=self.name)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'set option {self.name}:{name} ' +
f'{value}: {has_option}')
if self._allow_config_adds() or has_option:
config.set_option(name, value, section=self.name)
def _attribute_to_object(self, name: Any, value: Any) -> Any:
svalue = value
obj = value
is_created = self._is_created()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'attrib to obj: {name}: is created: ' +
f'{is_created}: {value}')
if is_created and isinstance(value, str):
factory = self.config_factory
config = factory.config
svalue = config.serializer.parse_object(value)
obj = factory.from_config_string(svalue)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'attrib to obj: {name}: {value} -> <{obj}>')
return svalue, obj
def __setattr__(self, name: str, value: Any):
"""Set an attribute, which is overloaded from the ``builtin object``.
"""
value, obj = self._attribute_to_object(name, value)
try:
super().__setattr__(name, obj)
except AttributeError as e:
raise AttributeError(
f'can\'t set attribute \'{name}\' = {value.__class__}: {e}')
if self._is_settable(name, value):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'settings option {name} = {value}')
self._set_option(name, value)
def _from_dictable(self, recurse: bool, readable: bool,
class_name_param: str = None) -> Dict[str, Any]:
"""This is overridden because this class operates on a per attribute basis very
close at the class/interpreter level. Instead of using
:class:`dataclasses.dataclass` mechanisms to inform of how to create
the dictionary, it introspects the attributes and types of those
attributes of the object.
"""
dct = OrderedDict()
self._add_class_name_param(class_name_param, dct)
for k, v in self.__dict__.items():
if isinstance(v, Dictable):
dct[k] = v._from_dictable(
recurse, readable, class_name_param)
elif self._is_allowed_type(v):
dct[k] = v
return dct | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/config/writeback.py | writeback.py |
__author__ = 'Paul Landes'
from typing import Dict, Union, Any, Set, Tuple, List, Iterable, Type
from dataclasses import dataclass, field
import logging
import json
from json import JSONEncoder
from itertools import chain
import re
import pkg_resources
from pathlib import Path
from zensols.introspect import ClassImporter
from . import ConfigurationError, Dictable
logger = logging.getLogger(__name__)
OBJECT_KEYS = {'_type', '_data'}
class PythonObjectEncoder(JSONEncoder):
def default(self, obj: Any):
if isinstance(obj, set):
return {'_type': obj.__class__.__name__, '_data': tuple(obj)}
elif isinstance(obj, Path):
return {'_type': 'pathlib.Path', '_data': str(obj)}
return JSONEncoder.default(self, obj)
def as_python_object(dct: Dict[str, str]):
if set(dct.keys()) == OBJECT_KEYS:
if dct['_type'] == 'pathlib.Path':
cls = Path
else:
cls = eval(dct['_type'])
return cls(dct['_data'])
return dct
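# A minimal round-trip sketch of the two JSON hooks above (kept as a comment
# so importing this module stays side effect free):
#
#   s = json.dumps({'p': Path('/tmp')}, cls=PythonObjectEncoder)
#   json.loads(s, object_hook=as_python_object)  # -> {'p': PosixPath('/tmp')}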
class Settings(Dictable):
"""A default object used to populate in :meth:`.Configurable.populate` and
:meth:`.ConfigFactory.instance`.
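
Example (a minimal sketch; the attribute names are hypothetical)::

    s = Settings(color='red', size=10)
    s.color         # -> 'red'
    s['size']       # -> 10
    'color' in s    # -> True
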
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def keys(self) -> Iterable[Any]:
return self.__dict__.keys()
def get(self, name: str, default: Any = None) -> Any:
return self.__dict__.get(name, default)
def items(self) -> Tuple[Tuple[Any, Any], ...]:
return self.__dict__.items()
def _from_dictable(self, recurse: bool, readable: bool,
class_name_param: str = None) -> Dict[str, Any]:
return self.__dict__
def __contains__(self, name: str) -> bool:
return name in self.__dict__
def __getitem__(self, name: str) -> str:
return self.__dict__[name]
def __len__(self) -> int:
return len(self.__dict__)
def __eq__(self, other) -> bool:
return self.__dict__ == other.__dict__
def __str__(self) -> str:
return str(self.__dict__)
def __repr__(self) -> str:
return self.__str__()
@dataclass
class Serializer(object):
"""This class is used to parse values in to Python literals and object
instances in configuration files.
"""
FLOAT_REGEXP = re.compile(r'^[-+]?\d*\.\d+$')
SCI_REGEXP = re.compile(r'^([+-]?(?:0|[1-9]\d*)(?:\.\d*)?(?:[eE][+\-]?\d+))$')
INT_REGEXP = re.compile(r'^[-+]?[0-9]+$')
BOOL_REGEXP = re.compile(r'^(?:True|False)$')
PATH_REGEXP = re.compile(r'^path:\s*(.+)$')
RESOURCE_REGEXP = re.compile(r'^resource(?:\((.+)\))?:\s*(.+)$', re.DOTALL)
STRING_REGEXP = re.compile(r'^str:\s*(.+)$', re.DOTALL)
LIST_REGEXP = re.compile(r'^(list|set|tuple)(?:\((.+)\))?:\s*(.+)$', re.DOTALL)
EVAL_REGEXP = re.compile(r'^(?:eval|dict)(?:\((.+)\))?:\s*(.+)$', re.DOTALL)
JSON_REGEXP = re.compile(r'^json:\s*(.+)$', re.DOTALL)
CLASS_REGEXP = re.compile(r'^class:\s*(.+)$')
PRIMITIVES = set([bool, float, int, None.__class__])
DEFAULT_RESOURCE_MODULE = None
_EVAL_KEYS = frozenset('resolve import'.split())
allow_types: Set[type] = field(
default_factory=lambda:
set([str, int, float, bool, list, tuple, dict]))
allow_classes: Tuple[type, ...] = field(default_factory=lambda: (Path,))
def is_allowed_type(self, value: Any) -> bool:
if isinstance(value, (tuple, list, set)):
for i in value:
if not self.is_allowed_type(i):
return False
return True
elif isinstance(value, dict):
for i in chain.from_iterable(value.items()):
if not self.is_allowed_type(i):
return False
return True
return value.__class__ in self.allow_types or \
isinstance(value, self.allow_classes)
def _parse_eval(self, pconfig: str, evalstr: str = None) -> str:
if pconfig is not None:
pconfig = eval(pconfig)
bad_keys = set(pconfig.keys()) - self._EVAL_KEYS
if len(bad_keys) > 0:
raise ConfigurationError(
f'Unknown evaluation keys: {", ".join(bad_keys)}')
if 'import' in pconfig:
imports = pconfig['import']
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'imports: {imports}')
for i in imports:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'importing: {i}')
if i.startswith('from'):
exec(i)
else:
exec(f'import {i}')
if 'resolve' in pconfig:
for k, v in pconfig['resolve'].items():
v = self.parse_object(v)
locals()[k] = v
if evalstr is not None:
return eval(evalstr)
def parse_list(self, v: str) -> List[str]:
"""Parse a comma separated list in to a string list.
Any whitespace is trimmed around the commas.
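
For example, ``parse_list('a, b,c')`` returns ``['a', 'b', 'c']``.
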
"""
if v is None:
return []
else:
return re.split(r'\s*,\s*', v)
def parse_object(self, v: str) -> Any:
"""Parse as a string in to a Python object. The following is done to parse the
string in order:
1. Primitive (i.e. ``1.23`` is a float, ``True`` is a boolean).
2. A :class:`pathlib.Path` object when prefixed with ``path:``.
3. Evaluate using the Python parser when prefixed ``eval:``.
4. Evaluate as JSON when prefixed with ``json:``.
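
Example (a minimal sketch)::

    ser = Serializer()
    ser.parse_object('1.23')            # -> 1.23 (a float)
    ser.parse_object('path: ~/tmp')     # -> Path('~/tmp').expanduser()
    ser.parse_object('list: a, b, c')   # -> ['a', 'b', 'c']
    ser.parse_object('json: {"a": 1}')  # -> {'a': 1}
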
"""
if v == 'None':
v = None
elif self.FLOAT_REGEXP.match(v):
v = float(v)
elif self.SCI_REGEXP.match(v):
v = float(eval(v))
elif self.INT_REGEXP.match(v):
v = int(v)
elif self.BOOL_REGEXP.match(v):
v = v == 'True'
else:
parsed = None
m = self.STRING_REGEXP.match(v)
if m:
parsed = m.group(1)
if parsed is None:
m = self.PATH_REGEXP.match(v)
if m:
parsed = Path(m.group(1)).expanduser()
if parsed is None:
m = self.LIST_REGEXP.match(v)
if m:
ctype, pconfig, lst = m.groups()
parsed = self.parse_list(lst)
if pconfig is not None:
pconfig = eval(pconfig)
tpe = pconfig.get('type')
if tpe is not None:
tpe = eval(tpe)
tpe = self.parse_object if tpe == object else tpe
parsed = list(map(tpe, parsed))
if ctype == 'tuple':
parsed = tuple(parsed)
elif ctype == 'list':
parsed = list(parsed)
elif ctype == 'set':
parsed = set(parsed)
else:
raise ConfigurationError(
f'Unknown sequence type: {ctype}')
if parsed is None:
m = self.RESOURCE_REGEXP.match(v)
if m:
mod, pathstr = m.groups()
if mod is None:
if self.DEFAULT_RESOURCE_MODULE is None:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'no module path: {pathstr}')
parsed = Path(pathstr)
if parsed is None:
parsed = self.resource_filename(pathstr, mod)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'found resource path: {parsed}')
parsed = Path(parsed)
if parsed is None:
m = self.EVAL_REGEXP.match(v)
if m:
pconfig, evalstr = m.groups()
parsed = self._parse_eval(pconfig, evalstr)
if parsed is None:
m = self.CLASS_REGEXP.match(v)
if m:
class_name = m.group(1)
parsed = ClassImporter(class_name, False).get_class()
if parsed is None:
m = self.JSON_REGEXP.match(v)
if m:
parsed = self._json_load(m.group(1))
if parsed is not None:
v = parsed
return v
def populate_state(self, state: Dict[str, str],
obj: Union[dict, object] = None,
parse_types: bool = True) -> Union[dict, object]:
"""Populate an object with a string dictionary. The keys are used for the
output, and the values are parsed in to Python objects using
:meth:`parse_object`. The keys in the input are used as the same keys
if ``obj`` is a ``dict``. Otherwise, set data as attributes on the
object with :py:func:`setattr`.
:param state: the data to parse
:param obj: the object to populate
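
Example (a minimal sketch)::

    ser = Serializer()
    obj = ser.populate_state({'a': '1', 'b': 'path: /tmp'})
    # obj is a Settings instance with obj.a == 1 and obj.b == Path('/tmp')
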
"""
obj = Settings() if obj is None else obj
is_dict = isinstance(obj, dict)
for k, v in state.items():
if parse_types and isinstance(v, str):
v = self.parse_object(v)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'setting {k} => {v} on {obj}')
if is_dict:
obj[k] = v
else:
setattr(obj, k, v)
return obj
def _json_dump(self, data: Any) -> str:
return json.dumps(data, cls=PythonObjectEncoder)
def _json_load(self, json_str: str) -> Any:
return json.loads(json_str, object_hook=as_python_object)
def format_option(self, obj: Any) -> str:
"""Format a Python object in to the string represetation per object syntax
rules.
:see: :meth:`parse_object`
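
Example (a minimal sketch)::

    ser = Serializer()
    ser.format_option(Path('/tmp'))  # -> 'path: /tmp'
    ser.format_option([1, 2])        # -> 'json: [1, 2]'
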
"""
v = None
cls = obj.__class__
if cls == str:
v = obj
elif cls in self.PRIMITIVES:
v = str(obj)
elif isinstance(obj, Type):
cname = ClassImporter.full_classname(obj)
v = f'class: {cname}'
elif isinstance(obj, Path):
return f'path: {obj}'
else:
v = 'json: ' + self._json_dump(obj)
return v
def resource_filename(self, resource_name: str, module_name: str = None) \
-> Path:
"""Return a resource based on a file name. This uses the ``pkg_resources``
package first to find the resources. If the resource module does not
exist, it defaults to the relateve file given in ``module_name``. If it
finds it, it returns a path on the file system.
Note that when a package is not installed, the ``resources`` directory
must be in the module system path. This happens automatically when
installed, otherwise symbolic links are needed.
:param resource_name: the file name of the resource to obtain (or name
if obtained from an installed module)
:param module_name: the name of the module to obtain the data, which
defaults to :obj:`DEFAULT_RESOURCE_MODULE`, which
is set by
:class:`zensols.cli.simple.SimpleActionCli`
:return: a path on the file system or resource of the installed module
"""
if module_name is None:
module_name = self.DEFAULT_RESOURCE_MODULE
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
f'looking up resource mod={module_name} ({type(module_name)}), ' +
f'resource={resource_name} ({type(resource_name)})')
res = None
try:
if module_name is not None and \
pkg_resources.resource_exists(module_name, resource_name):
res = pkg_resources.resource_filename(module_name, resource_name)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'resource exists: {res}')
except ModuleNotFoundError as e:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'defaulting to module name: {resource_name}')
if res is None:
raise ConfigurationError(f'Missing resource: {e}') from e
if not res.exists():
raise ConfigurationError(
f'Could not find path: {resource_name}') from e
except TypeError as e:
# if the package is missing, a None is given to something raised
# from the pkg_resources module
raise ConfigurationError(
f'Could not find module and/or resource {module_name}: {e}') \
from e
if res is None:
res = resource_name
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'resolved resource to {res}')
if not isinstance(res, Path):
res = Path(res)
return res | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/config/serial.py | serial.py |
__author__ = 'Paul Landes'
from typing import Dict, Set
import logging
import re
import collections
from zensols.persist import persisted
from . import Configurable, ConfigurableError
logger = logging.getLogger(__name__)
class StringConfig(Configurable):
"""A simple string based configuration. This takes a single comma delimited
key/value pair string in the format:
``<section>.<name>=<value>[,<section>.<name>=<value>,...]``
A dot (``.``) is used to separate the section from the option instead of a
colon (``:``), as used in the more sophisticated interpolation of
:class:`configparser.ExtendedInterpolation`. The dot is used for this
reason to make other section interpolation easier.
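
Example (a minimal sketch; the section and option names are hypothetical)::

    conf = StringConfig('sec1.opt1=a,sec1.opt2=b')
    conf.get_options('sec1')  # -> {'opt1': 'a', 'opt2': 'b'}
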
"""
KEY_VAL_REGEX = re.compile(r'^(?:([^.]+?)\.)?([^=]+?)=(.+)$')
def __init__(self, config_str: str, option_sep: str = ',',
default_section: str = None):
"""Initialize with a string given as described in the class docs.
:param config_str: the configuration
:param option_sep: the string used to delimit each key/value pair
:param default_section: used as the default section when none is given on
the get methods such as :meth:`get_option`
"""
super().__init__(default_section)
self.config_str = config_str
self.option_sep = option_sep
@persisted('_parsed_config')
def _get_parsed_config(self) -> Dict[str, str]:
"""Parse the configuration string given in the initializer (see class docs).
"""
conf = collections.defaultdict(lambda: {})
for kv in self.config_str.split(self.option_sep):
m = self.KEY_VAL_REGEX.match(kv)
if m is None:
raise ConfigurableError(f'unexpected format: {kv}')
sec, name, value = m.groups()
sec = self.default_section if sec is None else sec
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'section={sec}, name={name}, value={value}')
conf[sec][name] = value
return conf
@property
@persisted('_sections')
def sections(self) -> Set[str]:
return set(self._get_parsed_config().keys())
def has_option(self, name: str, section: str = None) -> bool:
section = self.default_section if section is None else section
return name in self._get_parsed_config()[section]
def get_options(self, section: str = None) -> Dict[str, str]:
section = self.default_section if section is None else section
opts = self._get_parsed_config().get(section)
if opts is None:
raise ConfigurableError(f'no section: {section}')
return opts
def __str__(self) -> str:
return self.__class__.__name__ + ': config=' + self.config_str
def __repr__(self) -> str:
return f'<{self.__str__()}>' | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/config/strconfig.py | strconfig.py |
__author__ = 'Paul Landes'
from typing import Dict, Union, Any, Set, Tuple
from pathlib import Path
import logging
from string import Template
from io import TextIOBase
from . import (
Serializer, Configurable, ConfigurableFactory,
DictionaryConfig, YamlConfig,
)
logger = logging.getLogger(__name__)
class _Template(Template):
idpattern = r'[a-z0-9_:]+'
class ImportYamlConfig(YamlConfig):
"""Like :class:`.YamlConfig` but supports configuration importation like
:class:`.ImportIniConfig`. The list of imports is given at
:obj:`import_name` (see initializer), and contains the same information as
import sections documented in :class:`.ImportIniConfig`.
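
For example (a hypothetical YAML file; the section name and imported file
are made up, and the keys under each import entry follow the import
sections of :class:`.ImportIniConfig`)::

    import:
      other_conf:
        config_file: 'path: other-config.yml'
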
"""
def __init__(self, config_file: Union[Path, TextIOBase] = None,
default_section: str = None, sections_name: str = 'sections',
sections: Set[str] = None, import_name: str = 'import',
parse_values: bool = False,
children: Tuple[Configurable, ...] = ()):
"""Initialize with importation configuration. The usage of ``default_vars`` in
the super class is disabled since this implementation uses a mix of dot
and colon (configparser) variable substitution (the later used when
imported from an :class:`.ImportIniConfig`.
:param config_file: the configuration file path to read from; if the
type is an instance of :class:`io.TextIOBase`, then
read it as a file object
:param default_section: used as the default section when none is given on
the get methods such as :meth:`get_option`,
which defaults to ``default``
:param sections_name: the dot notated path to the variable that has a
list of sections
:param sections: used as the set of sections for this instance
:param import_name: the dot notated path to the variable that has the
import entries (see class docs); defaults to
``import``
:param parse_values: whether to invoke the :class:`.Serializer` to
create in-memory Python data, which defaults to
``False`` to keep data as strings for configuration
merging
"""
super().__init__(config_file, default_section, default_vars=None,
delimiter=None, sections_name=sections_name,
sections=sections)
self.import_name = import_name
self.serializer = Serializer()
self._parse_values = parse_values
self.children = children
def _import_parse(self):
def repl_node(par: Dict[str, Any]):
repl = {}
for k, c in par.items():
if isinstance(c, dict):
repl_node(c)
elif isinstance(c, list):
repl[k] = tuple(c)
elif isinstance(c, str):
template = _Template(c)
rc = template.safe_substitute(tpl_context)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'subs: {c} -> {rc}')
repl[k] = rc
par.update(repl)
import_def: Dict[str, Any] = self.get_options(
f'{self.root}.{self.import_name}')
cnf: Dict[str, Any] = {}
context: Dict[str, str] = {}
tpl_context = {}
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'import defs: {import_def}')
if import_def is not None:
for sec_name, params in import_def.items():
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'import sec: {sec_name}')
config = ConfigurableFactory.from_section(params, sec_name)
for sec in config.sections:
cnf[sec] = config.get_options(section=sec)
self._config.update(cnf)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'updated config: {self._config}')
self._flatten(context, '', self._config, ':')
if len(self.children) > 0:
dconf = DictionaryConfig()
for child in self.children:
child.copy_sections(dconf)
tpl_context.update(dconf.as_one_tier_dict())
tpl_context.update(context)
new_keys = set(map(lambda k: k.replace(':', '.'), context.keys()))
self._all_keys.update(new_keys)
repl_node(self._config)
def _serialize(self, par: Dict[str, Any]):
repl = {}
for k, c in par.items():
if isinstance(c, dict):
self._serialize(c)
elif isinstance(c, str):
repl[k] = self.serializer.parse_object(c)
par.update(repl)
def _compile(self) -> Dict[str, Any]:
self._config = super()._compile()
self._import_parse()
if self._parse_values:
self._serialize(self._config)
return self._config | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/config/importyaml.py | importyaml.py |
__author__ = 'Paul Landes'
from typing import Iterable, List, Any, Tuple, Callable, Union, Type
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass, field, InitVar
import sys
import os
import logging
import math
from multiprocessing import Pool
from zensols.util.time import time
from zensols.config import Configurable, ConfigFactory, ImportConfigFactory
from zensols.persist import (
Stash, PrimablePreemptiveStash, PreemptiveStash, PrimeableStash,
chunks, Deallocatable,
)
from zensols.cli import LogConfigurator
logger = logging.getLogger(__name__)
@dataclass
class ChunkProcessor(object):
"""Represents a chunk of work created by the parent and processed on the
child.
"""
config: Configurable = field()
"""The application context configuration used to create the parent stash.
"""
name: str = field()
"""The name of the parent stash used to create the chunk, and subsequently
process this chunk.
"""
chunk_id: int = field()
"""The nth chunk."""
data: object = field()
"""The data created by the parent to be processed."""
def _create_stash(self) -> Tuple[ImportConfigFactory, Any]:
fac = ImportConfigFactory(self.config)
with time(f'factory inst {self.name} for chunk {self.chunk_id}',
logging.INFO):
inst = fac.instance(self.name)
inst.is_child = True
return fac, inst
def process(self) -> int:
"""Create the stash used to process the data, then persisted in the
stash.
"""
factory, stash = self._create_stash()
cnt = 0
self.config_factory = factory
stash._init_child(self)
if logger.isEnabledFor(logging.INFO):
logger.info(f'processing chunk {self.chunk_id} ' +
f'with stash {stash.__class__}')
with time('processed {cnt} items for chunk {self.chunk_id}'):
for i, (id, inst) in enumerate(stash._process(self.data)):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'dumping {id} -> {inst.__class__}')
stash.delegate.dump(id, inst)
Deallocatable._try_deallocate(inst)
cnt += 1
Deallocatable._try_deallocate(stash)
Deallocatable._try_deallocate(factory)
return cnt
def __str__(self):
data = self.data
if data is not None:
if isinstance(data, list) and len(data) > 0:
data = data[0]
dtype = data.__class__.__name__
else:
dtype = 'None'
return f'{self.name} ({self.chunk_id}): data: {dtype}'
@dataclass
class MultiProcessor(object, metaclass=ABCMeta):
"""A base class used by :class:`.MultiProcessStash` to divid the work up
into chunks. This should be subclassed if the behavior of how divided work
is to be processed is needed.
.. automethod:: _process_work
"""
name: str = field()
"""The name of the multi-processor."""
@staticmethod
def _process_work(processor: ChunkProcessor) -> int:
"""Process a chunk of data in the child process that was created by the parent
process.
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'processing processor {processor}')
with time(f'processed processor {processor}'):
return processor.process()
def invoke_work(self, workers: int, chunk_size: int,
data: Iterable[Any]) -> int:
fn: Callable = self.__class__._process_work
if logger.isEnabledFor(logging.INFO):
logger.info(f'{self.name}: spawning work in {type(self)} with ' +
f'chunk size {chunk_size} across {workers} workers')
return self._invoke_work(workers, chunk_size, data, fn)
@abstractmethod
def _invoke_work(self, workers: int, chunk_size: int,
data: Iterable[Any], fn: Callable) -> int:
pass
class PoolMultiProcessor(MultiProcessor):
"""Uses :class:`multiprocessing.Pool` to fork/exec processes to do the work.
"""
def _invoke_pool(self, pool: Pool, fn: Callable, data: iter) -> List[int]:
if pool is None:
return tuple(map(fn, data))
else:
return pool.map(fn, data)
def _invoke_work(self, workers: int, chunk_size: int,
data: Iterable[Any], fn: Callable) -> int:
if workers == 1:
with time('processed singleton chunk'):
cnt = self._invoke_pool(None, fn, data)
else:
with Pool(workers) as p:
with time('processed chunks'):
cnt = self._invoke_pool(p, fn, data)
return cnt
class SingleMultiProcessor(PoolMultiProcessor):
"""Does all work in the current process.
"""
def _invoke_work(self, workers: int, chunk_size: int,
data: Iterable[Any], fn: Callable) -> int:
return super()._invoke_work(1, chunk_size, data, fn)
@dataclass
class MultiProcessStash(PrimablePreemptiveStash, metaclass=ABCMeta):
"""A stash that forks processes to process data in a distributed fashion.
The stash is typically created by a :class:`.ImportConfigFactory` in the
child process. Work is chunked (grouped) and then sent to child processes.
In each, a new instance of this same stash is created using
:class:`.ImportConfigFactory` and then an abstract method is called to dump
the data.
Implementation details:
* The :obj:`delegate` stash is used to manage the actual persistence of
the data.
* This implementation of :meth:`prime` is to fork processes to accomplish
the work.
* The ``process_class`` attribute is not set directly on this class since
subclasses already have non-default fields. However it is set to
:class:`.PoolMultiProcessor` by default.
The :meth:`_create_data` and :meth:`_process` methods must be
implemented.
.. document private functions
.. automethod:: _create_data
.. automethod:: _process
.. automethod:: _create_chunk_processor
:see: :class:`zensols.config.factory.ImportConfigFactory`
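
A minimal subclass sketch (the class name and the data it generates are
hypothetical)::

    @dataclass
    class SquareStash(MultiProcessDefaultStash):
        def _create_data(self) -> Iterable[Any]:
            return range(10)

        def _process(self, chunk: List[Any]) -> Iterable[Tuple[str, Any]]:
            return ((str(i), i * i) for i in chunk)
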
"""
ATTR_EXP_META = ('chunk_size', 'workers')
LOG_CONFIG_SECTION = 'multiprocess_log_config'
"""The name of the section to use to configure the log system. This section
should be an instance definition of a :class:`.LogConfigurator`.
"""
config: Configurable = field()
"""The application configuration meant to be populated by
:class:`zensols.config.factory.ImportClassFactory`."""
name: str = field()
"""The name of the instance in the configuration."""
chunk_size: int = field()
"""The size of each group of data sent to the child process to be handled;
in some cases the child process will get a chunk of data smaller than this
(the last) but never more; if this number is 0, then evenly divide the work
so that each worker takes the largets amount of work to minimize the number
of chunks (in this case the data is tupleized).
"""
workers: Union[int, float] = field()
"""The number of processes spawned to accomplish the work or 0 to use all
CPU cores. If this is a negative number, add the number of CPU processors
with this number, so -1 would result in one fewer works utilized than the
number of CPUs, which is a good policy for a busy server.
If the number is a float, then it is taken to be the percentage of the
number of processes. If it is a float, the value must be in range (0, 1].
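
For example, on an 8 core machine: ``0`` yields 8 workers, ``-1`` yields 7,
and ``0.5`` yields 4.
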
"""
processor_class: Type[MultiProcessor] = field(init=False)
"""The class of the processor to use for the handling of the work."""
def __post_init__(self):
super().__post_init__()
self.is_child = False
if not hasattr(self, 'processor_class'):
# sub classes like `MultiProcessDefaultStash` add this as a field,
# which will already be set by the time this is called
self.processor_class: Type[MultiProcessor] = None
@abstractmethod
def _create_data(self) -> Iterable[Any]:
"""Create data in the parent process to be processed in the child
process(es) in chunks. The returned data is grouped into sublists and
passed to :meth:`_process`.
:return: an iterable of data to be processed
"""
pass
@abstractmethod
def _process(self, chunk: List[Any]) -> Iterable[Tuple[str, Any]]:
"""Process a chunk of data, each created by ``_create_data`` as a group
in a subprocess.
:param chunk: a list of data generated by :meth:`_create_data` to be
processed in this method
:return: an iterable of ``(key, data)`` tuples, where ``key`` is used
as the string key for the stash and the return value is the
data returned by methods like :meth:`load`
"""
pass
def _init_child(self, processor: ChunkProcessor):
"""Initialize the child process.
:param processor: the chunk processor that created this stash in the
child process
"""
self._config_child_logging(processor.config_factory)
def _config_child_logging(self, factory: ConfigFactory):
"""Initalize the logging system in the child process.
:param factory: the factory that was used to create this stash and
child app configi environment
"""
warn = None
config = factory.config
if config.has_option('section', self.LOG_CONFIG_SECTION):
conf_sec = config.get_option('section', self.LOG_CONFIG_SECTION)
if isinstance(factory, ImportConfigFactory):
log_conf = factory.instance(conf_sec)
if isinstance(log_conf, LogConfigurator):
log_conf.config()
else:
warn = f'unknown configuration object: {type(log_conf)}'
else:
warn = f'with unknown factory type: {type(factory)}'
if warn is not None:
print(f'warning: can not configure child process logging: {warn}',
file=sys.stderr)
def _create_chunk_processor(self, chunk_id: int, data: Any) -> \
ChunkProcessor:
"""Factory method to create the ``ChunkProcessor`` instance.
"""
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'creating chunk processor for id {chunk_id}')
return ChunkProcessor(self.config, self.name, chunk_id, data)
def _spawn_work(self) -> int:
"""Chunks and invokes a multiprocessing pool to invokes processing on
the children.
"""
multi_proc: MultiProcessor
if self.processor_class is None:
multi_proc = PoolMultiProcessor(self.name)
else:
multi_proc = self.processor_class(self.name)
chunk_size, workers = self.chunk_size, self.workers
if workers <= 0:
workers = os.cpu_count() + workers
elif isinstance(workers, float):
percent = workers
avail = os.cpu_count()
workers = math.ceil(percent * avail)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'calculating as {percent} of ' +
f'total {avail}: {workers}')
data = self._create_data()
if chunk_size == 0:
data = tuple(data)
chunk_size = math.ceil(len(data) / workers)
data = map(lambda x: self._create_chunk_processor(*x),
enumerate(chunks(data, chunk_size)))
return multi_proc.invoke_work(workers, chunk_size, data)
def prime(self):
"""If the delegate stash data does not exist, use this implementation to
generate the data and process in children processes.
"""
super().prime()
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'multi prime, is child: {self.is_child}')
has_data = self.has_data
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'has data: {has_data}')
if not has_data:
with time('completed work in {self.__class__.__name__}'):
self._spawn_work()
self._reset_has_data()
@dataclass
class MultiProcessDefaultStash(MultiProcessStash):
"""Just like :class:`.MultiProcessStash`, but provide defaults as a
convenience.
"""
chunk_size: int = field(default=0)
"""The size of each group of data sent to the child process to be handled;
in some cases the child process will get a chunk of data smaller than this
(the last) but never more; if this number is 0, then evenly divide the work
so that each worker takes the largets amount of work to minimize the number
of chunks (in this case the data is tupleized).
"""
workers: Union[int, float] = field(default=1)
"""The number of processes spawned to accomplish the work or 0 to use all
CPU cores. If this is a negative number, add the number of CPU processors
with this number, so -1 would result in one fewer works utilized than the
number of CPUs, which is a good policy for a busy server.
If the number is a float, then it is taken to be the percentage of the
number of processes. If it is a float, the value must be in range (0, 1].
"""
processor_class: Type[MultiProcessor] = field(default=PoolMultiProcessor)
"""The class of the processor to use for the handling of the work."""
@dataclass(init=False)
class MultiProcessFactoryStash(MultiProcessDefaultStash):
"""Like :class:`~zensols.persist.FactoryStash`, but uses a subordinate
factory stash to generate the data in a subprocess(es) in the same manner as
the super class :class:`.MultiProcessStash`.
Attributes :obj:`chunk_size` and :obj:`workers` both default to ``0``.
"""
factory: Stash = field()
"""The stash that creates the data, which is not to be confused with the
:obj:`delegate`, which persists the data.
"""
enable_preemptive: bool = field()
"""If ``False``, do not invoke the :obj:`factory` instance's data
calculation. If the value is ``always``, then always assume the data is not
calcuated, which forces the factory prime. Otherwise, if ``None``, then
call the super class data calculation falling back on the :obj:`factory` if
the super returns ``False``.
"""
def __init__(self, config: Configurable, name: str, factory: Stash,
enable_preemptive: bool = False, **kwargs):
"""Initialize with attributes :obj:`chunk_size` and :obj:`workers` both
defaulting to ``0``.
:param config: the application configuration meant to be populated by
:class:`zensols.config.factory.ImportClassFactory`
:param name: the name of the parent stash used to create the chunk, and
subsequently process this chunk
"""
if 'chunk_size' not in kwargs:
kwargs['chunk_size'] = 0
if 'workers' not in kwargs:
kwargs['workers'] = 0
super().__init__(config=config, name=name, **kwargs)
self.factory = factory
self.enable_preemptive = enable_preemptive
def _calculate_has_data(self) -> bool:
has_data = False
if self.enable_preemptive != 'always':
has_data = super()._calculate_has_data()
if not has_data and \
self.enable_preemptive and \
isinstance(self.factory, PreemptiveStash):
has_data = self.factory._calculate_has_data()
return has_data
def prime(self):
if isinstance(self.factory, PrimeableStash):
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'priming factory: {self.factory}')
self.factory.prime()
super().prime()
def _create_data(self) -> Iterable[Any]:
return self.factory.keys()
def _process(self, chunk: List[Any]) -> Iterable[Tuple[str, Any]]:
k: str
for k in chunk:
if logger.isEnabledFor(logging.INFO):
pid: int = os.getpid()
logger.info(f'processing key {k} in process {pid}')
val: Any = self.factory.load(k)
yield (k, val) | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/multi/stash.py | stash.py |
__author__ = 'Paul Landes'
import logging
import sys
import os
from optparse import OptionParser
from pkg_resources import get_distribution, DistributionNotFound
from zensols.util import APIError
from zensols.config import Serializer
logger = logging.getLogger(__name__)
class ActionCliError(APIError):
"""Thrown for all command line interface errors."""
pass
class SimpleActionCli(object):
"""A simple action based command line interface.
.. document private functions
.. automethod:: _config_logging
.. automethod:: _config_log_level
"""
def __init__(self, executors, invokes, config=None, version='none',
pkg_dist=None, opts=None, manditory_opts=None,
environ_opts=None, default_action=None):
"""Construct.
:param dict executors:
keys are executor names and values are
functions that create the executor handler instance
:param dict invokes:
keys are action names and values are
sequences of the form: ``[<executor name>, <method name>, <usage doc>]``
:param config: an instance of `zensols.config.Config`
:param str version: the default version of this command line module,
which is overridden by the package's version if it
exists
:param pkg_dist: the name of the module (i.e. zensols.actioncli)
:param set opts: options to be parsed
:param set manditory_opts: options that must be supplied in the command
:param set environ_opts:
options to add from environment variables; each is upcased to
match and retrieve it from the environment, but lowercased in
the resulting parameter set
:param str default_action: the action to use if none is
specified (if any)
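
Example (a minimal, hypothetical wiring; ``MainApp`` is a made-up class
whose initializer takes the parsed options as keyword arguments and that
has a ``run`` method)::

    cli = SimpleActionCli(
        executors={'main': lambda params: MainApp(**params)},
        invokes={'run': ['main', 'run', 'run the application']},
        opts={'verbose'})
    cli.invoke(['run'])
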
"""
opts = opts if opts else set([])
manditory_opts = manditory_opts if manditory_opts else set([])
environ_opts = environ_opts if environ_opts else set([])
self.executors = executors
self.invokes = invokes
self.opts = opts
self.manditory_opts = manditory_opts
self.environ_opts = environ_opts
self.version = version
self.add_logging = False
self.config = config
self.default_action = default_action
self.pkg = None
self.pkg_dist = pkg_dist
if pkg_dist is not None:
try:
self.pkg = get_distribution(pkg_dist)
self.version = self.pkg.version
Serializer.DEFAULT_RESOURCE_MODULE = pkg_dist
except DistributionNotFound:
pass
if config is not None:
config.pkg = self.pkg
def _config_logging(self, level: int):
"""Configure logging by calling :meth:`_config_log_level`, which in turns
invokes :meth:`.logging.basicConfig`.
"""
if level == 0:
levelno = logging.WARNING
elif level == 1:
levelno = logging.INFO
elif level == 2:
levelno = logging.DEBUG
if level <= 1:
fmt = '%(message)s'
else:
fmt = '%(levelname)s:%(asctime)-15s %(name)s: %(message)s'
self._config_log_level(fmt, levelno)
def _config_log_level(self, fmt: str, levelno: int):
"""Configure logging by calling :meth:`.logging.basicConfig` with the base
package set at ``levelno`` if available. In this case, the default
logging level is set to :obj:`logging.WARNING`. Otherwise, reconfigure
logging using a ``levelno`` across all loggers.
"""
if self.pkg is not None:
logging.basicConfig(format=fmt, level=logging.WARNING)
logging.getLogger(self.pkg.project_name).setLevel(level=levelno)
elif self.pkg_dist is not None:
logging.basicConfig(format=fmt, level=logging.WARNING)
logging.getLogger(self.pkg_dist).setLevel(level=levelno)
else:
root = logging.getLogger()
for handler in root.handlers[:]:
root.removeHandler(handler)
logging.basicConfig(format=fmt, level=levelno)
root.handlers[0].setFormatter(logging.Formatter(fmt))
root.setLevel(levelno)
def print_actions(self, short):
if short:
for (name, action) in self.invokes.items():
print(name)
else:
pad = max(map(lambda x: len(x), self.invokes.keys())) + 2
fmt = '%%-%ds %%s' % pad
for (name, action) in self.invokes.items():
print(fmt % (name, action[2]))
def _add_whine_option(self, parser, default=0):
parser.add_option('-w', '--whine', dest='whine', metavar='NUMBER',
type='int', default=default,
help='add verbosity to logging')
self.add_logging = True
def _add_short_option(self, parser):
parser.add_option('-s', '--short', dest='short',
help='short output for list', action='store_true')
def _parser_error(self, msg):
self.parser.error(msg)
def _default_environ_opts(self):
opts = {}
for opt in self.environ_opts:
opt_env = opt.upper()
if opt_env in os.environ:
opts[opt] = os.environ[opt_env]
logger.debug('default environment options: %s' % opts)
return opts
def _init_executor(self, executor, config, args):
pass
def get_config(self, params):
return self.config
def _config_parser_for_action(self, args, parser):
pass
def config_parser(self):
pass
def _init_config(self, config):
if config is not None and \
self.pkg is not None \
and hasattr(self, 'pkg'):
config.pkg = self.pkg
def _create_parser(self, usage):
return OptionParser(usage=usage, version='%prog ' + str(self.version))
def create_executor(self, args=sys.argv[1:]):
usage = '%prog <list|...> [options]'
parser = self._create_parser(usage)
self.parser = parser
self.config_parser()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'configured parser: {parser}')
if len(args) > 0 and args[0] in self.invokes:
logger.debug('configuring parser on action: %s' % args[0])
self._config_parser_for_action(args, parser)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'parsing arguments: {args}')
(options, args) = parser.parse_args(args)
if logger.isEnabledFor(logging.DEBUG):
logger.debug('options: <%s>, args: <%s>' % (options, args))
self.parsed_options = options
self.parsed_args = args
if len(args) > 0:
action = args[0]
else:
if self.default_action is None:
self._parser_error('missing action mnemonic')
else:
logger.debug('using default action: %s' % self.default_action)
action = self.default_action
if logger.isEnabledFor(logging.DEBUG):
logger.debug('adding logging')
if self.add_logging:
self._config_logging(options.whine)
if action == 'list':
short = hasattr(options, 'short') and options.short
self.print_actions(short)
return None, None
else:
if action not in self.invokes:
self._parser_error("no such action: '%s'" % action)
(exec_name, meth, _) = self.invokes[action]
logger.debug('exec_name: %s, meth: %s' % (exec_name, meth))
params = vars(options)
config = self.get_config(params)
self._init_config(config)
def_params = config.options if config else {}
def_params.update(self._default_environ_opts())
for k, v in params.items():
if v is None and k in def_params:
params[k] = def_params[k]
if logger.isEnabledFor(logging.DEBUG):
logger.debug('before filter: %s' % params)
params = {k: params[k] for k in params.keys() & self.opts}
for opt in self.manditory_opts:
if opt not in params or params[opt] is None:
self._parser_error('missing option: %s' % opt)
if config:
params['config'] = config
try:
exec_obj = self.executors[exec_name](params)
self._init_executor(exec_obj, config, args[1:])
return meth, exec_obj
except ActionCliError as err:
self._parser_error(format(err))
def invoke(self, args=sys.argv[1:]):
"""Entry point invoked, usually called from ``main``.
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'invoking with args: {args}')
meth, exec_obj = self.create_executor(args)
if exec_obj is not None:
try:
logger.debug('invoking: %s.%s' % (exec_obj, meth))
getattr(exec_obj, meth)()
except ActionCliError as err:
self._parser_error(format(err)) | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/cli/simple.py | simple.py |
__author__ = 'Paul Landes'
from typing import Tuple, List, Any, Dict, Set, Iterable, Sequence
from dataclasses import dataclass, field
from enum import Enum
import logging
import sys
from itertools import chain
from pathlib import Path
from io import TextIOBase
from optparse import OptionParser
from frozendict import frozendict
from zensols.persist import persisted, PersistableContainer, Deallocatable
from zensols.config import Dictable
from . import (
ApplicationError, OptionMetaData, PositionalMetaData, ActionMetaData,
UsageConfig, UsageActionOptionParser,
)
logger = logging.getLogger(__name__)
class CommandLineError(ApplicationError):
"""Raised when command line parameters can not be parsed.
"""
pass
class CommandLineConfigError(Exception):
"""Programmer error for command line parser configuration errors.
"""
pass
@dataclass
class CommandAction(Dictable):
"""The output of the :class:`.CommandLineParser` for each parsed action.
"""
_DICTABLE_WRITABLE_DESCENDANTS = True
meta_data: ActionMetaData = field()
"""The action parsed from the command line."""
options: Dict[str, Any] = field()
"""The options given as switches."""
positional: Tuple[str, ...] = field()
"""The positional arguments parsed."""
@property
def name(self) -> str:
"""The name of the action."""
return self.meta_data.name
def __str__(self) -> str:
return f'{self.meta_data.name}: {self.options}/{self.positional}'
@dataclass
class CommandActionSet(Deallocatable, Dictable):
"""The actions that are parsed by :class:`.CommandLineParser` as the output of
the parse phase. This is indexable by command action name and iterable
across all actions. Properties :obj:`first_pass_actions` and
:obj:`second_pass_action` give access to the respective types of actions.
"""
_DICTABLE_WRITABLE_DESCENDANTS = True
actions: Tuple[CommandAction, ...] = field()
"""The actions parsed. The first N actions are first pass where as the last is
the second pass action.
"""
@property
def first_pass_actions(self) -> Iterable[CommandAction]:
"""All first pass actions."""
return self.actions[0:-1]
@property
def second_pass_action(self) -> CommandAction:
"""The single second pass action."""
return self.actions[-1]
@property
def by_name(self) -> Dict[str, CommandAction]:
"""Command actions by name keys."""
return {a.name: a for a in self.actions}
def deallocate(self):
super().deallocate()
self._try_deallocate(self.actions)
def __getitem__(self, name: str) -> CommandAction:
return self.by_name[name]
def __iter__(self) -> Iterable[CommandAction]:
return iter(self.actions)
def __len__(self) -> int:
return len(self.actions)
@dataclass
class CommandLineConfig(PersistableContainer, Dictable):
"""Given to configure the :class:`.CommandLineParser`.
"""
actions: Tuple[ActionMetaData, ...] = field()
"""The action meta data used to parse and print help."""
@property
@persisted('_first_pass_actions')
def first_pass_actions(self) -> Tuple[ActionMetaData, ...]:
return tuple(filter(lambda a: a.first_pass, self.actions))
@property
@persisted('_second_pass_actions')
def second_pass_actions(self) -> Tuple[ActionMetaData, ...]:
return tuple(filter(lambda a: not a.first_pass, self.actions))
@property
@persisted('_actions_by_name')
def actions_by_name(self) -> Dict[str, ActionMetaData]:
return frozendict({a.name: a for a in self.actions})
@property
@persisted('_first_pass_options')
def first_pass_options(self) -> Tuple[OptionMetaData, ...]:
return tuple(chain.from_iterable(
map(lambda a: a.options, self.first_pass_actions)))
@property
@persisted('_first_pass_by_option')
def first_pass_by_option(self) -> Dict[str, ActionMetaData]:
actions = {}
for action in self.first_pass_actions:
for k, v in action.options_by_dest.items():
if k in actions:
raise CommandLineConfigError(
f"First pass duplicate option in '{action.name}': {k}")
actions[k] = action
return actions
def deallocate(self):
super().deallocate()
self._try_deallocate(self.actions)
@dataclass
class CommandLineParser(Deallocatable, Dictable):
"""Parse the command line. The parser iterates twice over the command line:
1. The first pass parses only *first pass* actions
(:obj:`.ActionMetaData.first_pass`). This step also is used to
discover the mnemonic/name of the single second pass action.
2. The second pass parses only the single action that is given on
the command line.
The name is given as a mnemonic of the action, unless there is only one
*second pass action* given, in which case all options and usage are given
at the top level and a mnemonic is not needed nor parsed.
:see: :obj:`.ActionMetaData.first_pass`
"""
config: CommandLineConfig = field()
"""Configures the command line parser with the action meta data."""
version: str = field(default='v0')
"""The version of the application, which is used in the help and the
``--version`` switch.
"""
default_action: str = field(default=None)
"""The default mnemonic use when the user does not supply one."""
application_doc: str = field(default=None)
"""The program documentation to use when it can not be deduced from the
action.
"""
usage_config: UsageConfig = field(default_factory=UsageConfig)
"""Configuraiton information for the command line help."""
def __post_init__(self):
if len(self.config.actions) == 0:
raise CommandLineConfigError(
'Must create parser with at least one action')
def _create_parser(self, actions: Tuple[ActionMetaData, ...]) -> \
OptionParser:
return UsageActionOptionParser(
actions=actions,
options=self.config.first_pass_options,
doc=self.application_doc,
default_action=self.default_action,
usage_config=self.usage_config,
version=('%prog ' + str(self.version)))
def _configure_parser(self, parser: OptionParser,
options: Iterable[OptionMetaData]):
opt_names = {}
for opt in options:
prev: OptionMetaData = opt_names.get(opt.long_name)
if prev is not None:
raise CommandLineConfigError(
f"Duplicate option: '{prev.long_name}': " +
f"<{prev}> -> <{opt}>")
opt_names[opt.long_name] = opt
op_opt = opt.create_option()
parser.add_option(op_opt)
def _get_first_pass_parser(self, add_all_opts: bool) -> \
UsageActionOptionParser:
opts = list(self.config.first_pass_options)
sp_actions = self.config.second_pass_actions
if len(sp_actions) == 1:
sp_actions = (sp_actions[0],)
opts.extend(sp_actions[0].options)
elif add_all_opts:
opts.extend(chain.from_iterable(
map(lambda a: a.options, sp_actions)))
parser = self._create_parser(sp_actions)
self._configure_parser(parser, set(opts))
return parser
def _get_second_pass_parser(self, action_meta: ActionMetaData) -> \
UsageActionOptionParser:
opts = list(self.config.first_pass_options)
opts.extend(action_meta.options)
parser = self._create_parser(self.config.second_pass_actions)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f"creating parser for action: '{action_meta.name}' " +
f'opts: {action_meta.options}')
self._configure_parser(parser, opts)
return parser
def write_help(self, writer: TextIOBase = None,
include_actions: bool = True,
action_metas: Sequence[ActionMetaData] = None):
"""Write the usage information and help text.
:param writer: the data sync, or :obj:`sys.stdout` if ``None``
:param include_actions: if ``True`` write each actions' usage as well
:param action_metas: the list of actions to output, or ``None`` for all
"""
writer = sys.stdout if writer is None else writer
parser = self._get_first_pass_parser(False)
parser.print_help(writer, include_actions, action_metas)
def error(self, msg: str):
"""Print a usage with the error message and exit the program as fail.
"""
parser = self._get_first_pass_parser(False)
parser.error(msg)
def _parse_type(self, s: str, t: type, name: str) -> Any:
tpe = None
if issubclass(t, Enum):
tpe = t.__members__.get(s)
choices = ', '.join(map(lambda e: f"'{e.name}'", t))
if tpe is None:
raise CommandLineError(
f"No choice '{s}' for '{name}' (choose from {choices})")
else:
if not isinstance(s, (str, int, float, bool, Path)):
raise CommandLineConfigError(f'Unknown parse type: {s}: {t}')
try:
tpe = t(s)
except ValueError as e:
raise CommandLineError(f'Expecting type {t.__name__}: {e}')
return tpe
def _parse_options(self, action_meta: ActionMetaData,
op_args: Dict[str, Any]):
opts: Dict[str, OptionMetaData] = action_meta.options_by_dest
parsed = {}
for k, v in op_args.items():
opt: OptionMetaData = opts.get(k)
if v is not None and opt is not None:
v = self._parse_type(v, opt.dtype, opt.long_option)
parsed[k] = v
return parsed
def _parse_positional(self, metas: List[PositionalMetaData],
vals: List[str]) -> Tuple[Any, ...]:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'parsing positional args: {metas} <--> {vals}')
return tuple(
map(lambda x: self._parse_type(x[0], x[1].dtype, x[1].name),
zip(vals, metas)))
def _get_help_action(self, args: List[str]) -> \
Tuple[List[ActionMetaData], List[str]]:
goods: List[ActionMetaData] = []
bads: List[str] = []
arg: str
for arg in args:
action_meta: ActionMetaData = self.config.actions_by_name.get(arg)
if action_meta is None:
bads.append(arg)
else:
goods.append(action_meta)
return goods, bads
def _parse_first_pass(self, args: List[str],
actions: List[CommandAction]) -> \
Tuple[bool, str, Set[str], Dict[str, Any], List[str], List[str]]:
second_pass = False
fp_opts = set(map(lambda o: o.dest, self.config.first_pass_options))
# first fish out the action name (if given) as a positional parameter
parser: OptionParser = self._get_first_pass_parser(True)
(options, op_args) = parser.parse_args(args)
# convert the parsed options object into a dict
options = vars(options)
if options['help'] is True:
goods, bads = self._get_help_action(op_args)
if len(bads) > 0:
raise CommandLineError(
f"No such action{'s' if len(bads) > 1 else ''}: " +
', '.join(bads))
elif len(goods) > 0:
self.write_help(include_actions=True, action_metas=goods)
else:
self.write_help(include_actions=True)
sys.exit(0)
else:
del options['help']
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'first pass: {options}:{op_args}')
# find first pass actions (i.e. whine log level '-w' settings)
added_first_pass = set()
fp_ops: Dict[str, ActionMetaData] = self.config.first_pass_by_option
for k, v in options.items():
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'looking for first pass option: {k} ' +
f'in {tuple(fp_ops.keys())}')
fp_action_meta = fp_ops.get(k)
if (fp_action_meta is not None) and \
(fp_action_meta.name not in added_first_pass):
aos = {k: options[k] for k in (set(options.keys()) & fp_opts)}
aos = self._parse_options(fp_action_meta, aos)
action = CommandAction(fp_action_meta, aos, ())
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'adding first pass action: {action}')
actions.append(action)
added_first_pass.add(fp_action_meta.name)
# if only one second pass action is given, the user need not give the
# action mnemonic/name; instead, all of its options were already added
# at the top level
if len(self.config.second_pass_actions) == 1:
action_name = self.config.second_pass_actions[0].name
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'using singleton sp action: {action_name} ' +
f'with options {options}')
elif len(op_args) == 0:
if self.default_action is None:
# no positional arguments mean we don't know which action to
# use
raise CommandLineError('No action given')
else:
action_name = self.default_action
op_args = []
args = [action_name] + args
second_pass = True
else:
# otherwise, use the first positional parameter as the mnemonic and
# the remainder as positional parameters for that action
action_name = op_args[0]
op_args = op_args[1:]
second_pass = True
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'need second pass for {action_name}, ' +
f'option args: {op_args}')
return second_pass, action_name, fp_opts, options, op_args, args
def _parse_second_pass(self, action_name: str, second_pass: bool,
args: List[str], options: Dict[str, Any],
op_args: Tuple[str, ...]):
# now that we have parsed the action name, get the meta data
action_meta: ActionMetaData = \
self.config.actions_by_name.get(action_name)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f"action '{action_name}' found: {action_meta}")
if action_meta is None:
raise CommandLineError(f'No such action: {action_name}')
pos_arg_diff = len(op_args) - len(action_meta.positional)
single_sp = None
if len(self.config.second_pass_actions) == 1:
single_sp = self.config.second_pass_actions[0].name
unnecessary_mnemonic = pos_arg_diff == 1 and single_sp == op_args[0]
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'positional arg difference: {pos_arg_diff}, ' +
f'single second pass mnemonic: {single_sp}, ' +
f'unnecessary_mnemonic: {unnecessary_mnemonic}')
if unnecessary_mnemonic:
raise CommandLineError(
f"Action '{action_meta.name}' expects " +
f"{len(action_meta.positional)} argument(s), but " +
f"'{single_sp}' is counted as a positional argument " +
'and should be omitted')
if pos_arg_diff != 0:
raise CommandLineError(
f"Action '{action_meta.name}' expects " +
f"{len(action_meta.positional)} " +
f"argument(s) but got {len(op_args)}: {', '.join(op_args)}")
# if there is more than one second pass action, we must re-parse using
# the specific options and positional argument for that action
if second_pass:
parser: OptionParser = self._get_second_pass_parser(action_meta)
(options, op_args) = parser.parse_args(args)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'second pass opts: {options}:{op_args}')
# sanity check to match parsed mnemonic and action name
assert(op_args[0] == action_meta.name)
# remove the action name
op_args = op_args[1:]
options = vars(options)
del options['help']
options = self._parse_options(action_meta, options)
return action_meta, options, op_args
def _validate_setup(self):
"""Make sure we don't have a default action with positional args."""
if self.default_action is not None:
action_meta: ActionMetaData
for action_meta in self.config.second_pass_actions:
if len(action_meta.positional) > 0:
raise CommandLineConfigError(
'No positional arguments allowed when default ' +
f"action '{self.default_action}' " +
f'given for method {action_meta.name}')
def deallocate(self):
super().deallocate()
self._try_deallocate(self.config)
def parse(self, args: List[str]) -> CommandActionSet:
"""Parse command line arguments.
:param args: the arguments given on the command line, which is usually
``sys.argv[1:]``
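
Example (a minimal sketch; ``my_actions`` is a hypothetical tuple of
:class:`.ActionMetaData` instances)::

    parser = CommandLineParser(CommandLineConfig(actions=my_actions))
    action_set: CommandActionSet = parser.parse(sys.argv[1:])
    action: CommandAction = action_set.second_pass_action
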
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'parsing: {args}')
# action instances
actions: List[CommandAction] = []
# some top level sanity checks
self._validate_setup()
# first pass parse
second_pass, action_name, fp_opts, options, op_args, args = \
self._parse_first_pass(args, actions)
# second pass parse
action_meta, options, op_args = self._parse_second_pass(
action_name, second_pass, args, options, op_args)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'removing first pass options: {fp_opts} ' +
f'from {options}')
# the second pass action should _not_ get the first pass options
options = {k: options[k] for k in (set(options.keys()) - fp_opts)}
# parse positional arguments much like the OptionParser did options
pos_args = self._parse_positional(action_meta.positional, op_args)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating action with {options} {pos_args}')
# create and add the second pass action
action_inst = CommandAction(action_meta, options, pos_args)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'adding action: {action_inst}')
actions.append(action_inst)
return CommandActionSet(tuple(actions)) | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/cli/command.py | command.py |
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import (
Tuple, List, Dict, Iterable, Any, Callable, Optional, Union, Type
)
from dataclasses import dataclass, field
from abc import ABC, abstractmethod
import logging
import sys
import re
from io import TextIOBase
from itertools import chain
from pathlib import Path
from frozendict import frozendict
from zensols.introspect import (
Class, ClassMethod, ClassField, ClassMethodArg, ClassDoc, ClassImporter
)
from zensols.persist import (
persisted, PersistedWork, PersistableContainer, Deallocatable
)
from zensols.util import PackageResource
from zensols.config import (
ConfigurableFileNotFoundError, Serializer, Dictable,
Configurable, ConfigFactory, ImportIniConfig, ImportConfigFactory,
)
from . import (
ActionCliError, ApplicationError, ApplicationFailure, DocUtil,
ActionCliManager, ActionCli, ActionCliMethod, ActionMetaData,
CommandAction, CommandActionSet, CommandLineConfig, CommandLineParser
)
logger = logging.getLogger(__name__)
@dataclass
class Action(Deallocatable, Dictable):
"""An invokable action from the command line the :class:`.Application`
class. This class combines the user input from the command line with the
meta data from the Python classes given in the configuration.
Combined, these two data sources provide a means to execute an action,
which is conceptually one functionality of the application and literally a
Python class method.
The class is also somewhat of a facade, allowing a client to access data
from both sources via the class's properties without needing to know
where it comes from.
"""
_DICTABLE_WRITABLE_DESCENDANTS = True
command_action: CommandAction = field()
"""The result of the command line parsing of the action. It contains the
data parsed on a per action level.
"""
cli: ActionCli = field()
"""Command line interface of the action meta data."""
meta_data: ActionMetaData = field()
"""An action represents a link between a command line mnemonic *action* and
a method on a class to invoke.
"""
method_meta: ClassMethod = field()
"""The metadata of the method to use for the invocation of the action.
"""
@property
@persisted('_name')
def name(self) -> str:
"""The name of the action, which is the form:
``<action's section name>.<meta data's name>``
"""
return f'{self.cli.section}.{self.meta_data.name}'
@property
def section(self) -> str:
"""The section from which the :class:`.ActionCli` was created."""
return self.cli.section
@property
def class_meta(self) -> Class:
"""The meta data of the action, which comes from :class:`.ActionCli`.
"""
return self.cli.class_meta
@property
def class_name(self) -> str:
"""Return the class name of the target application instance.
"""
return self.class_meta.name
@property
def method_name(self) -> str:
"""The method to invoke on the target application instance class.
"""
return self.method_meta.name
def deallocate(self):
super().deallocate()
self._try_deallocate((self.command_action, self.cli))
def _get_dictable_attributes(self) -> Iterable[Tuple[str, str]]:
return map(lambda f: (f, f),
'section class_name method_name command_action'.split())
def __str__(self):
return (f'{self.section} ({self.class_name}.{self.method_name}): ' +
f'<{self.command_action}>')
def __repr__(self):
return self.__str__()
@dataclass
class ActionResult(Dictable):
"""The results of a single method call to an :class:`.Action` instance.
There is one of these per action (both first and second pass) provided in
:class:`.ApplicationResult`.
"""
action: Action = field()
"""The action that was used to generate the result."""
instance: Any = field()
"""The application instance."""
result: Any = field()
"""The results returned from the invocation on the application instance."""
@property
def name(self) -> str:
return self.action.name
def __call__(self):
return self.result
@dataclass
class ApplicationResult(Dictable):
"""A container class of the results of an application invocation with
:meth:`.Application.invoke`. This is keyed by the index of the actions
given in :obj:`action_results`.
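
Example of a minimal sketch of accessing results (assuming ``app`` is an
:class:`.Application`)::

    app_res: ApplicationResult = app.invoke()
    act_res: ActionResult = app_res()  # the second pass action's result
    same_res: ActionResult = app_res.by_name[act_res.name]
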
"""
action_results: Tuple[ActionResult, ...] = field()
"""Both first and second pass action results. These are provided in the
same order for which was executed when the class:`.Application` ran, which
is that same order provided to the :class:`.ActionCliManager`.
"""
@property
@persisted('_by_name')
def by_name(self) -> Dict[str, ActionResult]:
"""Per action results keyed by action name (obj:`.Action.name`)."""
return frozendict({a.name: a for a in self})
@property
def second_pass_result(self) -> ActionResult:
"""The single second pass result of that action indicated to invoke on
the command line by the user.
"""
sec_pass = tuple(filter(lambda r: not r.action.meta_data.first_pass,
self.action_results))
assert len(sec_pass) == 1
return sec_pass[0]
def __call__(self) -> ActionResult:
return self.second_pass_result
def __getitem__(self, index: int) -> ActionResult:
return self.action_results[index]
def __len__(self) -> int:
return len(self.action_results)
class ApplicationObserver(ABC):
"""Extended by application targets to get call backs and information from
the controlling :class:`.Application`. Method :meth:`_application_created`
is invoked for each call back.
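
Example of a minimal observer sketch (a hypothetical application
target)::

    @dataclass
    class App(ApplicationObserver):
        def _application_created(self, app: Application, action: Action):
            # keep a reference to the controlling application
            self._app = app
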
.. document private functions
.. automethod:: _application_created
"""
@abstractmethod
def _application_created(self, app: Application, action: Action):
"""Called just after the application target is created.
:param app: the application that created the application target
"""
pass
@dataclass
class Invokable(object):
"""A callable that invokes an :class:`.Action`. This is used by
:class:`.Application` to invoke the entire CLI application.
"""
action: Action = field()
"""The action used to create this instance."""
instance: Any = field()
"""The instantiated object generated from :obj:`action`."""
method: Callable = field()
"""The object method bound to :obj:`instance` to be called."""
args: Tuple[Any, ...] = field()
"""The arguments used when calling :obj:`method`."""
kwargs: Dict[str, Any] = field()
"""The keyword arguments used when calling :obj:`method`."""
def __call__(self):
"""Call :obj:`method` with :obj:`args` and :obj:`kwargs`."""
return self.method(*self.args, **self.kwargs)
@dataclass
class Application(Dictable):
"""An invokable application created using command line and application
context data. This class creates an instance of the *target application
instance*, then invokes the corresponding action method.
The application has all the first pass actions configured to run and/or
given options indicated by the user to run (see :obj:`first_pass_actions`).
It also has the second pass action given as a mnemonic, or the single
second pass action if there is only one (see :obj:`second_pass_action`).
"""
_DICTABLE_WRITABLE_DESCENDANTS = True
config_factory: ConfigFactory = field(repr=False)
"""The factory used to create the application and its components."""
factory: ApplicationFactory = field(repr=False)
"""The factory that created this application."""
actions: Tuple[Action, ...] = field()
"""The list of actions to invoke in order."""
def _create_instance(self, action: Action) -> Any:
"""Instantiate the in memory application using the CLI input gathered
from the user and the configuration.
"""
cmd_opts: Dict[str, Any] = action.command_action.options
const_params: Dict[str, Any] = {}
sec = action.section
# gather fields
field: ClassField
for f in action.class_meta.fields.values():
val: str = cmd_opts.get(f.name)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'setting CLI parameter {f.name} -> {val}')
# set the field used to create the app target instance if given by
# the user on the command line
if val is None:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
f'no param for action <{action}>: {f.name}')
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
f'field map: {sec}:{f.name} -> {val} ({f.dtype})')
const_params[f.name] = val
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating {sec} with {const_params}')
# create the instance using the configuration factory
inst = self.config_factory.instance(sec, **const_params)
if isinstance(inst, ApplicationObserver):
inst._application_created(self, action)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'created instance {inst}')
return inst
def _get_meth_params(self, action: Action, meth_meta: ClassMethod) -> \
Tuple[Tuple[Any, ...], Dict[str, Any]]:
"""Get the method argument and keyword arguments gathered from the user
input and configuration.
:return: a tuple of the positional arguments (think ``*args``) followed
by the keyword arguments map (think ``**kwargs`)
"""
cmd_opts: Dict[str, Any] = action.command_action.options
meth_params: Dict[str, Any] = {}
pos_args = action.command_action.positional
pos_arg_count: int = 0
arg: ClassMethodArg
for arg in meth_meta.args:
if arg.is_positional:
pos_arg_count += 1
else:
name: str = arg.name
if name not in cmd_opts:
raise ActionCliError(
f'No such option {name} parsed from CLI for ' +
f'method from {cmd_opts}: {meth_meta.name}')
val: str = cmd_opts.get(name)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'meth map: {meth_meta.name}.{name} -> {val}')
meth_params[name] = val
if pos_arg_count != len(pos_args):
raise ActionCliError(
f'Method {meth_meta.name} expects {pos_arg_count} positional ' +
f'arguments but got {len(pos_args)} in {action.name}.{meth_meta.name}')
return pos_args, meth_params
def _pre_process(self, action: Action, instance: Any):
if not action.cli.first_pass:
config = self.config_factory.config
cli_manager: ActionCliManager = self.factory.cli_manager
if cli_manager.cleanups is not None:
for sec in cli_manager.cleanups:
if sec not in config.sections:
raise ActionCliError(f'No section to remove: {sec}')
config.remove_section(sec)
def _create_invokable(self, action: Action) -> Invokable:
inst: Any = self._create_instance(action)
self._pre_process(action, inst)
meth_meta: ClassMethod = action.method_meta
pos_args, meth_params = self._get_meth_params(action, meth_meta)
meth = getattr(inst, meth_meta.name)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'invoking {meth}')
return Invokable(action, inst, meth, pos_args, meth_params)
def get_invokable(self, action_name: str) -> Invokable:
"""Create an invokable.
:param action_name: the name of the action, which is also the section
name in the configuration
"""
action: Action = self.actions_by_name[action_name]
return self._create_invokable(action)
@property
@persisted('_actions_by_name')
def actions_by_name(self) -> Dict[str, Action]:
"""A dictionary of actions by their name."""
return frozendict({a.name: a for a in self.actions})
@property
def first_pass_actions(self) -> Iterable[Action]:
"""All first pass actions registered in the application and/or indicated
by the user to run via the command line.
"""
return filter(lambda a: a.meta_data.first_pass, self.actions)
@property
def second_pass_action(self) -> Action:
"""The second pass action registered in the application and indicated to
execute by the command line input.
"""
acts = filter(lambda a: not a.meta_data.first_pass, self.actions)
acts = tuple(acts)
assert len(acts) == 1
return acts[0]
def _invoke_first_pass(self) -> Tuple[ActionResult, ...]:
"""Invokes only the first pass actions and returns the results.
"""
results: List[ActionResult] = []
action: Action
for action in self.first_pass_actions:
invokable: Invokable = self._create_invokable(action)
res: Any = invokable()
results.append(ActionResult(action, invokable.instance, res))
return tuple(results)
def invoke_but_second_pass(self) -> \
Tuple[Tuple[ActionResult, ...], Invokable]:
"""Invoke first pass actions but not the second pass action.
:return: the results from the first pass actions and an invokable for
the second pass action
"""
results: List[ActionResult] = list(self._invoke_first_pass())
action: Action = self.second_pass_action
invokable: Invokable = self._create_invokable(action)
return results, invokable
def invoke(self) -> ApplicationResult:
"""Invoke the application and return the results.
"""
results, invokable = self.invoke_but_second_pass()
res: Any = invokable()
sp_res = ActionResult(invokable.action, invokable.instance, res)
results.append(sp_res)
return ApplicationResult(tuple(results))
@dataclass
class ApplicationFactory(PersistableContainer):
"""Boots the application context from the command line. This first loads
resource ``resources/app.conf`` from this package, then adds
:obj:`app_config_resource` from the application package of the client.
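
Example of a minimal programmatic use (the package ``zensols.someapp``
and the ``doit`` mnemonic are hypothetical)::

    fac = ApplicationFactory('zensols.someapp')
    res: ActionResult = fac.invoke('doit --verbose')
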
"""
package_resource: Union[str, PackageResource] = field()
"""The application package resource (i.e. ``zensols.someappname``). This
field is converted to a package if given as a string during post
initialization.
"""
app_config_resource: Union[str, TextIOBase] = field(
default='resources/app.conf')
"""The relative resource path to the application's context if :class:`str`.
If the type is an instance of :class:`io.TextIOBase`, then read it as a file
object.
"""
children_configs: Tuple[Configurable, ...] = field(default=None)
"""Any children configurations added to the root level configuration."""
reload_factory: bool = field(default=False)
"""If ``True``, reload classes in :class:`.ImportConfigFactory`.
:see: :meth:`_create_config_factory`
"""
reload_pattern: Union[re.Pattern, str] = field(default=None)
"""If set, reload classes that have a fully qualified name that match the
regular expression regarless of the setting ``reload`` in
:class:`.ImportConfigFactory`.
:see: :meth:`_create_config_factory`
.. document private functions
.. automethod:: _handle_error
"""
error_handler: Callable = field(default=None)
"""A callable that takes an :class:`Exception` and this instance as a
paramters to handle the error. This can be set to
:class:`..ApplicationFailure` for programatic entry to this class (see
:class:`.CliHarness`).
"""
def __post_init__(self):
if self.package_resource is None:
raise ActionCliError('Missing package resource')
if isinstance(self.package_resource, str):
self.package_resource = PackageResource(self.package_resource)
self._configure_serializer()
self._resources = PersistedWork(
'_resources', self, deallocate_recursive=True)
def _configure_serializer(self):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'configuring serializer: {self.package_resource}')
dist_name = self.package_resource.name
Serializer.DEFAULT_RESOURCE_MODULE = dist_name
def _create_application_context(self, app_context: Path) -> Configurable:
"""Factory method to create the application context from the :mod:`cli`
resource (parent) context and a path to the application specific
(child) context.
:param app_context: the application child context path
"""
children = []
if self.children_configs is not None:
children.extend(self.children_configs)
return ImportIniConfig(app_context, children=children)
def _create_config_factory(self, config: Configurable) -> ConfigFactory:
"""Factory method to create the configuration factory from the
application context created in :meth:`_get_app_context`.
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'reload: {self.reload_factory}')
return ImportConfigFactory(config,
reload=self.reload_factory,
reload_pattern=self.reload_pattern)
def _find_app_doc(self, cli_mng: ActionCliManager) -> str:
"""Try to find documentation suitable for the program as a fallback if
the command line parser can't find anything.
This returns the class level documentation if there is only one class
across all second pass actions that does not originate from this module's
parent (i.e. those that come from :mod:`zensols.cli`).
"""
def filter_action(action: ActionCli) -> bool:
"""Filter documentation action candidates."""
name = action.class_meta.name
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
f'name: {name}, single pass: {not action.first_pass}, ' +
f'CLI lib: {not name.startswith(mod_pattern)}')
return not action.first_pass and \
not name.startswith(mod_pattern) and \
action.is_usage_visible
def filter_doc(action: ActionCli) -> bool:
"""Filter private classes."""
doc: ClassDoc = action.class_meta.doc
if doc is None:
return False
else:
return not action.class_meta.doc.text.startswith('_')
mod_name: str = DocUtil.module_name()
mod_pattern: str = mod_name + '.'
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'module name: {mod_name}')
ac_clis: Tuple[ActionCli, ...] = tuple(cli_mng.actions.values())
sp_actions = tuple(filter(filter_action, ac_clis))
sp_metas: Tuple[ActionMetaData, ...] = tuple(chain.from_iterable(
map(lambda ac: ac.meta_datas, sp_actions)))
doc = None
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'single pass actions: {sp_actions}, ' +
f'single pass metas: {len(sp_metas)}')
if len(sp_metas) == 1:
doc = sp_metas[0].doc
doc = DocUtil.unnormalize(doc)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'using second pass doc: {doc}')
else:
# filter to application classes that are public
sp_actions: Tuple[ActionCli, ...] = \
tuple(filter(filter_doc, sp_actions))
actions: Dict[str, ActionCli] = \
{c.class_meta.name: c for c in sp_actions}
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'actions: {actions} in ' +
f'sec pass actions: {sp_actions}')
if len(actions) == 1:
doc = next(iter(actions.values())).class_meta.doc.text
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'using class: {doc}')
return doc
def _get_app_doc(self, cli_mng: ActionCliManager) -> Optional[str]:
"""Return the application documentation, or ``None`` if it is
unavailable.
:see: :meth:`_find_app_doc`
"""
doc = cli_mng.doc
if doc is None:
doc = self._find_app_doc(cli_mng)
return doc
def _get_config_path(self) -> Path:
path: Path = self.package_resource.get_path(self.app_config_resource)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'path to app specific context: {path}')
if not path.exists():
raise ActionCliError(
f"Application context resource '{self.app_config_resource}' " +
f'not found in {self.package_resource} at {path}')
return path
@persisted('_resources')
def _create_resources(self) -> \
Tuple[ConfigFactory, ActionCliManager, CommandLineParser]:
"""Create the config factory, the command action line manager, and
command line parser resources. The data is cached and use in property
getters.
"""
cl_name: str = ClassImporter.full_classname(ActionCliManager)
cli_sec: str = ActionCliManager.SECTION
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'create resources for: {type(self)}')
if isinstance(self.app_config_resource, str):
path: Path = self._get_config_path()
config: Configurable = self._create_application_context(path)
else:
file_obj = self.app_config_resource
config: Configurable = self._create_application_context(file_obj)
# create a default CLI ActionCliManager section when it doesn't exist
if cli_sec not in config.sections:
ser: Serializer = config.serializer
apps: str = ser.format_option(['app'])
config.set_option('apps', apps, section=cli_sec)
config.set_option('class_name', cl_name, section=cli_sec)
fac: ConfigFactory = self._create_config_factory(config)
# add class name to relax missing class_name
cli_mng: ActionCliManager = fac(cli_sec, class_name=cl_name)
actions: Tuple[ActionMetaData, ...] = tuple(chain.from_iterable(
map(lambda a: a.meta_datas, cli_mng.actions.values())))
config = CommandLineConfig(actions)
parser = CommandLineParser(config, self.package_resource.version,
default_action=cli_mng.default_action,
application_doc=self._get_app_doc(cli_mng),
usage_config=cli_mng.usage_config)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'created factory: {fac}')
return fac, cli_mng, parser
@property
def config_factory(self) -> ConfigFactory:
"""The configuration factory used to create the application."""
return self._create_resources()[0]
@property
def cli_manager(self) -> ActionCliManager:
"""The manager that creates the action based CLIs.
"""
return self._create_resources()[1]
@property
def parser(self) -> CommandLineParser:
"""Used to parse the command line.
"""
return self._create_resources()[2]
def _parse(self, args: List[str]) -> Tuple[Action, ...]:
"""Parse the command line.
"""
fac, cli_mng, parser = self._create_resources()
actions: List[Action] = []
action_set: CommandActionSet = parser.parse(args)
cmd_actions: Dict[str, CommandAction] = action_set.by_name
action_cli: ActionCli
for action_cli in cli_mng.actions_ordered:
acli_meth: ActionCliMethod
for acli_meth in action_cli.methods.values():
name: str = acli_meth.action_meta_data.name
caction: CommandAction = cmd_actions.get(name)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'action name: {name} -> {caction}')
if caction is None and action_cli.always_invoke:
caction = CommandAction(acli_meth.action_meta_data, {}, ())
if caction is not None:
action: Action = Action(
caction, action_cli,
acli_meth.action_meta_data, acli_meth.method)
actions.append(action)
return actions
def _get_default_args(self) -> List[str]:
"""Return the arguments to parse when none are given. This defaults to
the system arguments skipping the first (program) argument.
"""
return sys.argv[1:]
def create(self, args: List[str] = None) -> Application:
"""Create the action CLI application.
:param args: the arguments to the application; if this is a string, it
will be converted to a list by splitting on whitespace;
this defaults to the output of :meth:`_get_default_args`
:raises ActionCliError: for any missing data or misconfigurations
"""
# we have to clear previously created resources for multiple calls to
# this method for this instance
self._resources.clear()
fac, cli_mng, parser = self._create_resources()
if args is None:
args = self._get_default_args()
if logger.isEnabledFor(logging.INFO):
logger.info(f'application arguments: {args}')
actions: Tuple[Action, ...] = self._parse(args)
return Application(fac, self, actions)
def _error_to_str(self, ex: Exception) -> str:
"""Create a command line friendly error message fromt he exception."""
s = str(ex)
s = s[0].lower() + s[1:]
return s
def _dump_error(self, ex: Exception, add_usage: bool = True,
exit_err: bool = True):
"""Output an exception message using the parser error API.
:param ex: the exception raised to be written to standard error
:param add_usage: whether to add the short usage (one line)
:param exit_err: whether to exit the interpreter
"""
msg = self._error_to_str(ex)
if add_usage:
self.parser.error(msg)
else:
prog = Path(sys.argv[0]).name
print(f'{prog}: error: {msg}', file=sys.stderr)
if exit_err:
sys.exit(1)
def _handle_error(self, ex: Exception):
"""Handle errors raised during the execution of the application.
:see: :meth:`invoke`
"""
if self.error_handler is not None:
return self.error_handler(ex, self)
else:
if isinstance(ex, ConfigurableFileNotFoundError):
self._dump_error(ex, False)
elif isinstance(ex, ApplicationError):
self._dump_error(ex)
else:
raise ex
def invoke(self, args: Union[List[str], str] = None) -> ActionResult:
"""Creates and invokes the entire application returning the result of
the second pass action.
:param args: the arguments to the application; if this is a string, it
will be converted to a list by splitting on whitespace;
this defaults to the output of :meth:`_get_default_args`
:raises ActionCliError: for any missing data or misconfigurations
:return: the result of the second pass action
"""
if isinstance(args, str):
args = args.split()
try:
app: Application = self.create(args)
app_res: ApplicationResult = app.invoke()
act_res: ActionResult = app_res()
return act_res
except Exception as e:
return self._handle_error(e)
def invoke_protect(self, args: Union[List[str], str] = None) -> \
Union[ActionResult, ApplicationFailure]:
"""Same as :meth:`invoke`, but protect against :class:`Exception` and
:class:`SystemExit`. If an error is raised while invoking, it is
logged and returned.
:param args: the arguments to the application; if this is a string, it
will be converted to a list by splitting on whitespace;
this defaults to the output of :meth:`_get_default_args`
:return: the result of the second pass action or an
:class:`.ApplicationFailure` if :class:`Exception` or
:class:`SystemExit` is raised
"""
try:
return self.invoke(args)
except (Exception, SystemExit) as e:
return ApplicationFailure(e, self)
def get_instance(self, args: Union[List[str], str] = None) -> Any:
"""Create the invokable instance of the application.
:param args: the arguments to the application; if this is a string, it
will be converted to a list by splitting on whitespace;
this defaults to the output of :meth:`_get_default_args`
:raises ActionCliError: for any missing data or misconfigurations
:return: the invokable instance of the application
"""
if isinstance(args, str):
args = args.split()
try:
app: Application = self.create(args)
app_res: ApplicationResult
invokable: Invokable
app_res, invokable = app.invoke_but_second_pass()
return invokable.instance
except Exception as e:
return self._handle_error(e)
@classmethod
def create_harness(cls: Type, **kwargs):
"""Create and return a :class:`.CliHarness`.
:param kwargs: the keyword arguments given to the harness initializer
"""
from . import CliHarness
return CliHarness(app_factory_class=cls, **kwargs) | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/cli/app.py | app.py |
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Dict, Tuple, Iterable, Set, List, Any, Type
from dataclasses import dataclass, field, InitVar
import dataclasses
import logging
import copy as cp
from itertools import chain
from zensols.persist import persisted, PersistableContainer
from zensols.introspect import (
Class, ClassField, ClassParam, ClassMethod, ClassMethodArg,
ClassInspector, ClassImporter,
)
from zensols.config import Configurable, Dictable, ConfigFactory
from . import (
DocUtil, ActionCliError, PositionalMetaData,
OptionMetaData, ActionMetaData, UsageConfig,
)
logger = logging.getLogger(__name__)
class ActionCliManagerError(ActionCliError):
"""Raised by :class:`.ActionCliManager` for any problems creating
:class:`.ActionCli` instances.
"""
pass
@dataclass
class ActionCliMethod(Dictable):
"""A "married" action meta data / class method pair. This is a pair of action
meta data that describes how to interpret it as a CLI action and the Python
class meta data method, which is used later to invoke the action (really
command).
"""
action_meta_data: ActionMetaData = field()
"""The action meta data for ``method``."""
method: ClassMethod = field(repr=False)
"""The method containing information about the source class method to invoke
later.
"""
@dataclass
class ActionCli(PersistableContainer, Dictable):
"""A set of commands that is invokeable on the command line, one for each
registered method of a class (usually a :class:`dataclasses.dataclass`.
This contains meta data necesary to create a full usage command line
documentation and parse the user's input.
"""
section: str = field()
"""The application section to introspect."""
class_meta: Class = field(repr=False)
"""The target class meta data parsed by :class:`.ClassInspector`
"""
options: Dict[str, OptionMetaData] = field(default=None)
"""Options added by :class:`.ActionCliManager`, which are those options parsed
by the entire class metadata.
"""
mnemonic_includes: Set[str] = field(default=None)
"""A list of mnemonicss to include, or all if ``None``."""
mnemonic_excludes: Set[str] = field(default_factory=set)
"""A list of mnemonicss to exclude, or none if ``None``."""
mnemonic_overrides: Dict[str, str] = field(default=None)
"""The name of the action given on the command line, which defaults to the name
of the action.
"""
option_includes: Set[str] = field(default=None)
"""A list of options to include, or all if ``None``."""
option_excludes: Set[str] = field(default_factory=set)
"""A list of options to exclude, or none if ``None``."""
option_overrides: Dict[str, Dict[str, str]] = field(default=None)
"""Overrides when creating new :class:`.OptionMetaData` where the keys are the
option names (field or method parameter) and the values are the dict that
clobbers respective keys.
:see: :meth:`.ActionCliManager._create_op_meta_data`
"""
first_pass: bool = field(default=False)
"""Whether or not this is a first pass action (i.e. such as setting the level
in :class:`~zensols.cli.LogConfigurator`).
"""
always_invoke: bool = field(default=False)
"""If ``True``, always invoke all methods for the action regardless if an
action mnemonic and options pertaining to the action are not given by the
user/command line. This is useful for configuration first pass type
classes like :class:`.PackageInfoImporter` to force the CLI API to invoke
it, as otherwise there's no indication to the CLI that it needs to be
called.
"""
is_usage_visible: bool = field(default=True)
"""Whether the action CLI is included in the usage help."""
def _is_option_enabled(self, name: str) -> bool:
"""Return ``True`` if the option is enabled and eligible to be added to the
command line.
"""
incs = self.option_includes
excs = self.option_excludes
enabled = ((incs is None) or (name in incs)) and (name not in excs)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'option {name} is enabled: {enabled}')
return enabled
def _is_mnemonic_enabled(self, name: str) -> bool:
"""Return ``True`` if the action for the mnemonic is enabled and eligible to be
added to the command line.
"""
incs = self.mnemonic_includes
excs = self.mnemonic_excludes
enabled = ((incs is None) or (name in incs)) and (name not in excs)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'mnemonic {self.section}:{name} is enabled: {enabled} for ' +
f'{self.class_meta.name}: [inc={incs},exc={excs}]')
return enabled
def _add_option(self, name: str, omds: Set[OptionMetaData]):
"""Add an :class:`.OptionMetaData` from the previously collected options.
:param name: the name of the option
:param omds: the set to populate from :obj:`options`
"""
if self._is_option_enabled(name):
opt: OptionMetaData = self.options[name]
omds.add(opt)
def _normalize_name(self, s: str) -> str:
"""Normalize text of mneomincs and positional arguments."""
return s.replace('_', '')
@property
@persisted('_methods')
def methods(self) -> Dict[str, ActionCliMethod]:
"""Return the methods for this action CLI with method name keys.
"""
meths: Dict[str, ActionCliMethod] = {}
field_params: Set[OptionMetaData] = set()
f: ClassField
# add the dataclass fields that will populate the CLI as options
for f in self.class_meta.fields.values():
self._add_option(f.name, field_params)
# create an action from each method
for name in sorted(self.class_meta.methods.keys()):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating method {name}')
meth: ClassMethod = self.class_meta.methods[name]
meth_params: Set[OptionMetaData] = set(field_params)
pos_args: List[PositionalMetaData] = []
arg: ClassMethodArg
# add positional arguments from the class meta data
for arg in meth.args:
if arg.is_positional:
opt: Dict[str, str] = None
pdoc: str = None if arg.doc is None else arg.doc.text
if self.option_overrides is not None:
opt = self.option_overrides.get(arg.name)
# first try to get it from any mapping from the long name
if opt is not None and 'long_name' in opt:
pname = opt['long_name']
else:
# use the argument name in the method but normalize it
# to make it appear in CLI parlance
pname = self._normalize_name(arg.name)
pmeta = PositionalMetaData(pname, arg.dtype, pdoc)
if opt is not None:
poverrides = dict(opt)
poverrides.pop('long_name', None)
pmeta.__dict__.update(poverrides)
pos_args.append(pmeta)
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'adding option: {name}:{arg.name}')
self._add_option(arg.name, meth_params)
# skip disabled mnemonics (using mnemonic_includes)
if not self._is_mnemonic_enabled(name):
continue
# customize mnemonic/action data if given (either string names, or
# dictionaries with more information)
if self.mnemonic_overrides is not None and \
name in self.mnemonic_overrides:
override: Any = self.mnemonic_overrides[name]
if isinstance(override, str):
name = override
elif isinstance(override, dict):
o_name: str = override.get('name')
option_includes: Set[str] = override.get('option_includes')
option_excludes: Set[str] = override.get('option_excludes')
if o_name is not None:
name = o_name
if option_includes is not None:
meth_params: Set[OptionMetaData] = set(
filter(lambda o: o.dest in option_includes,
meth_params))
if option_excludes is not None:
meth_params: Set[OptionMetaData] = set(
filter(lambda o: o.dest not in option_excludes,
meth_params))
else:
raise ActionCliManagerError(
f'unknown override: {override} ({type(override)})')
else:
# no underscores in the CLI action names
name = self._normalize_name(name)
# get the action help from the method if available, then class
if meth.doc is None:
doc = self.class_meta.doc
else:
doc = meth.doc
if doc is not None:
if doc.text is None:
doc = ''
else:
doc = DocUtil.normalize(doc.text)
# add the meta data
meta = ActionMetaData(
name=name,
doc=doc,
options=tuple(sorted(meth_params)),
positional=tuple(pos_args),
first_pass=self.first_pass,
is_usage_visible=self.is_usage_visible)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'adding metadata: {meta}')
meths[name] = ActionCliMethod(meta, meth)
self.options = None
return meths
@property
@persisted('_meta_datas', deallocate_recursive=True)
def meta_datas(self) -> Tuple[ActionMetaData, ...]:
"""Return action meta data across all methods.
"""
return tuple(map(lambda m: m.action_meta_data, self.methods.values()))
@dataclass
class ActionCliManager(PersistableContainer, Dictable):
"""Manages instances of :class:`.ActionCli`. An :class:`.ActionCli` is created
from the configuration given by the section. Optionally, another section
using :obj:`decorator_section_format` will be read to add additional
metadata and configuration to the instantiated object. The decorated
information is used to help bridge between the class given to be
instantiated and the CLI.
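
Example of a hypothetical decorator section in an INI application context
(the section name follows the default :obj:`decorator_section_format`)::

    [app_decorator]
    option_excludes = set: config_factory
    mnemonic_overrides = dict: {'do_it': 'doit'}
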
:see: :obj:`actions`
:see: :obj:`actions_by_meta_data_name`
"""
SECTION = 'cli'
"""The application context section."""
CLASS_META_ATTRIBUTE = 'CLI_META'
"""The class level attribute on application classes containing a stand in
(otherwise missing section configuration :class:`.ActionCli`.
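
Example of a sketch of in-class CLI metadata (names are hypothetical)::

    class App(object):
        CLI_META = {'mnemonic_overrides': {'do_it': 'doit'},
                    'option_excludes': {'config_factory'}}
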
"""
_CLI_META_ATTRIBUTE_NAMES = frozenset(
('mnemonic_includes mnemonic_excludes mnemonic_overrides ' +
'option_includes option_excludes option_overrides').split())
"""A list of keys used in the static class metadata variable named
:obj:`CLASS_META_ATTRIBUTE`, which is used to merge static class CLI
metadata.
:see: :meth:`combine_meta`
"""
_CLASS_IMPORTERS = {}
"""Resolved class cache (see :meth:`_resolve_class`).
"""
config_factory: ConfigFactory = field()
"""The configuration factory used to create :class:`.ActionCli` instances.
"""
apps: Tuple[str, ...] = field()
"""The application section names."""
cleanups: Tuple[str, ...] = field(default=None)
"""The sections to remove after the application is built."""
app_removes: InitVar[Set[str]] = field(default=None)
"""Removes apps from :obj:`apps, which is helpful when a single section to
remove is needed when importing from other files.
"""
cleanup_removes: InitVar[Set[str]] = field(default=None)
"""Clean ups to remove from :obj:`cleanups`, which is helpful when a single
section to remove is needed when importing from other files.
"""
decorator_section_format: str = field(default='{section}_decorator')
"""Format of :class:`.ActionCli` configuration classes."""
doc: str = field(default=None)
"""The application documentation."""
default_action: str = field(default=None)
"""The default mnemonic use when the user does not supply one."""
usage_config: UsageConfig = field(default_factory=UsageConfig)
"""Configuraiton information for the command line help."""
def __post_init__(self, app_removes: Set[str], cleanup_removes: Set[str]):
super().__init__()
if app_removes is not None and self.apps is not None:
self.apps = tuple(
filter(lambda s: s not in app_removes, self.apps))
if cleanup_removes is not None and self.cleanups is not None:
self.cleanups = tuple(
filter(lambda s: s not in cleanup_removes, self.cleanups))
@classmethod
def _combine_meta(self: Type, source: Dict[str, Any],
target: Dict[str, Any], keys: Set[str] = None):
if keys is None:
keys = self._CLI_META_ATTRIBUTE_NAMES & source.keys()
for attr in keys:
src_val = source.get(attr)
targ_val = target.get(attr)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'attr: {attr} {src_val} -> {targ_val}')
if src_val is not None and targ_val is not None:
if isinstance(src_val, dict):
both_keys = src_val.keys() | targ_val.keys()
for k in both_keys:
sv = src_val.get(k)
tv = targ_val.get(k)
if sv is not None and tv is not None and\
isinstance(sv, dict) and isinstance(tv, dict):
targ_val[k] = tv | sv
src_val[k] = tv | sv
target[attr] = targ_val | src_val
elif src_val is not None:
target[attr] = cp.deepcopy(src_val)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'result: {target[attr]}')
@classmethod
def combine_meta(self: Type, parent: Type, cli_meta: Dict[str, Any]):
"""Merge static class CLI metadata of the variable named
:obj:`CLASS_META_ATTRIBUTE`.
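
Example of a sketch of a subclass merging its parent's CLI metadata
(``ParentApp`` is hypothetical)::

    @dataclass
    class App(ParentApp):
        CLI_META = ActionCliManager.combine_meta(
            ParentApp, {'option_excludes': {'verbose'}})
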
:param self: this class
:param parent: the parent class of the caller, which is used to get the
parent classes CLI metadata to merge
:param cli_meta: the metadata identified by the
:obj:`CLASS_META_ATTRIBUTE`
"""
classes: List[Type] = [parent]
classes.extend(parent.__bases__)
cli_meta = cp.deepcopy(cli_meta)
for ans in classes:
if hasattr(ans, self.CLASS_META_ATTRIBUTE):
meta: Dict[str, Any] = getattr(ans, self.CLASS_META_ATTRIBUTE)
self._combine_meta(meta, cli_meta)
return cli_meta
@property
def config(self) -> Configurable:
return self.config_factory.config
def _create_short_name(self, long_name: str) -> str:
"""Auto generate a single letter short option name.
:param long_name: the name from which to pick a letter
"""
for c in long_name:
if c not in self._short_names:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'adding short name for {long_name}: {c}')
self._short_names.add(c)
return c
def _create_op_meta_data(self, pmeta: ClassParam, meth: ClassMethod,
action_cli: ActionCli) -> OptionMetaData:
"""Creates an option meta data used in the CLI from a method parsed from the
class's Python source code.
"""
meta = None
if action_cli._is_option_enabled(pmeta.name):
long_name = pmeta.name.replace('_', '')
short_name = self._create_short_name(long_name)
dest = pmeta.name
dtype = pmeta.dtype
doc = pmeta.doc
if doc is None:
if (meth is not None) and (meth.doc is not None):
doc = meth.doc.params.get(long_name)
else:
doc = doc.text
if doc is not None:
doc = DocUtil.normalize(doc)
params = {
'long_name': long_name,
'short_name': short_name,
'dest': dest,
'dtype': dtype,
'default': pmeta.default,
'doc': doc
}
if action_cli.option_overrides is not None:
overrides = action_cli.option_overrides.get(pmeta.name)
if overrides is not None:
params.update(overrides)
meta = OptionMetaData(**params)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'created option meta: {meta}')
return meta
def _add_field(self, section: str, name: str, omd: OptionMetaData):
"""Adds the field by name that will later be used in a :class:`.ActionCli`.
:raises ActionCliManagerError: if ``name`` has already been
*registered*
"""
prexist = self._fields.get(name)
if prexist is not None:
# we have to skip the short option compare since
# ``_create_op_meta_data`` reassigns a new letter for all created
# options
prexist = cp.deepcopy(prexist)
prexist.short_name = omd.short_name
if omd != prexist:
raise ActionCliManagerError(
f'duplicate field {name} -> {omd} in ' +
f'{section} but not equal to {prexist}')
self._fields[name] = omd
def _add_action(self, action: ActionCli):
"""Adds add an action for each method parsed from the action cli Python source
code.
"""
if action.section in self._actions:
raise ActionCliError(
f'Duplicate action for section: {action.section}')
# for each dataclass field used to create OptionMetaData's
for name, fmd in action.class_meta.fields.items():
omd = self._create_op_meta_data(fmd, None, action)
if omd is not None:
self._add_field(action.section, fmd.name, omd)
meth: ClassMethod
# add a field for the arguments of each method
for meth in action.class_meta.methods.values():
arg: ClassMethodArg
for arg in meth.args:
# positional arguments are only referenced in the
# ClassInspector parsed source code
if not arg.is_positional:
omd = self._create_op_meta_data(arg, meth, action)
if omd is not None:
self._add_field(action.section, arg.name, omd)
self._actions[action.section] = action
def _resolve_class(self, class_name: str) -> type:
"""Resolve a class using the caching those already dynamically resolved.
"""
cls_imp: ClassImporter = self._CLASS_IMPORTERS.get(class_name)
if cls_imp is None:
# resolve the string fully qualified class name to a Python class
# type
cls_imp = ClassImporter(class_name, reload=False)
cls = cls_imp.get_class()
self._CLASS_IMPORTERS[class_name] = cls_imp
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'storing cached {class_name}')
else:
cls = cls_imp.get_class()
return cls
def _create_action_from_section(self, conf_sec: str,
params: Dict[str, Any]) -> ActionCli:
"""Create an action from a section in the configuration. If both the class
``CLI_META`` and the decorator section exists, then this will replace
all options (properties) defined.
:param conf_sec: the section name in the configuration that has the
action to create/overwrite the data
:param params: the parameters used to create the :class:`.ActionCli`
from the decorator
:return: an instance of :class:`.ActionCli` that represents what is
given in the configuration section
"""
sec: Dict[str, Any] = self.config_factory.config.populate(
{}, section=conf_sec)
cn_attr: str = ConfigFactory.CLASS_NAME
sec.pop(cn_attr, None)
if cn_attr not in params:
params[cn_attr] = ClassImporter.full_classname(ActionCli)
self._combine_meta(sec, params)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating action from section {conf_sec} -> {sec}')
action = self.config_factory.instance(conf_sec, **params)
if not isinstance(action, ActionCli):
raise ActionCliManagerError(
f'Section instance {conf_sec} is not a class of ' +
f'type ActionCli, but {type(action)}')
return action
def _add_app(self, section: str):
"""Add an :class:`.ActionCli` instanced from the configuration given by a
section. The application is added to :obj:`._actions`. The section is
parsed and use to instantiate an object using
:class:`~zensols.config.factory.ImportConfigFactory`.
Optionally, another section using :obj:`decorator_section_format` will
be read to add additional metadata and configuration to instantiated
object. See the class docs.
:param section: indicates which section to use with config factory
"""
config = self.config
class_name: str = config.get_option('class_name', section)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'building CLI on class: {class_name}')
# resolve the string fully qualified class name to a Python class type
cls = self._resolve_class(class_name)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'resolved to class: {cls}')
if not dataclasses.is_dataclass(cls):
raise ActionCliError('application CLI app must be a dataclass')
# parse the source Python code for the class
inspector = ClassInspector(cls)
meta: Class = inspector.get_class()
# parameters to create the application with the config factory
params = {'section': section,
'class_meta': meta,
'options': self._fields}
conf_sec = self.decorator_section_format.format(**{'section': section})
# start with class level meta data, allowing it to be overriden at the
# application configuration level; note: tested with
# `mnemonic_includes`, which appears to merge dictionaries, which is
# probably the new 3.9 dictionary set union operations working by
# default
if hasattr(cls, self.CLASS_META_ATTRIBUTE):
cmconf = getattr(cls, self.CLASS_META_ATTRIBUTE)
params.update(cmconf)
# if we found a decorator action cli config section, use it to set the
# configuration of the CLI interactions
if conf_sec in self.config.sections:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'found configuration section: {conf_sec}')
action = self._create_action_from_section(conf_sec, params)
else:
# use a default with parameters collected
action = ActionCli(**params)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'created action: {action}')
self._add_action(action)
@property
@persisted('_actions_pw')
def actions(self) -> Dict[str, ActionCli]:
"""Get a list of action CLIs that is used in :class:`.CommandLineParser` to
create instances of the application. Each action CLI has a collection
of :class:`.ActionMetaData` instances.
:return: keys are the configuration sections with the action CLIs as
values
"""
self._short_names: Set[str] = {'h', 'v'}
self._fields: Dict[str, OptionMetaData] = {}
self._actions: Dict[str, ActionCli] = {}
try:
for app in self.apps:
self._add_app(app)
actions = self._actions
finally:
del self._actions
del self._short_names
del self._fields
return actions
@property
@persisted('_actions_ordered', deallocate_recursive=True)
def actions_ordered(self) -> Tuple[ActionCli, ...]:
"""Return all actions in the order they were given in the configuration.
"""
acts = self.actions
fp = filter(lambda a: a.first_pass, acts.values())
sp = filter(lambda a: not a.first_pass, acts.values())
return tuple(chain.from_iterable([fp, sp]))
@property
@persisted('_actions_by_meta_data_name_pw')
def actions_by_meta_data_name(self) -> Dict[str, ActionCli]:
"""Return a dict of :class:`.ActionMetaData` instances, each of which is each
mnemonic by name and the meta data by values.
"""
actions = {}
action: ActionCli
for action in self.actions.values():
meta: Tuple[ActionMetaData, ...]
for meta in action.meta_datas:
if meta.name in actions:
raise ActionCliError(f'Duplicate meta data: {meta.name}')
actions[meta.name] = action
return actions
def _get_dictable_attributes(self) -> Iterable[Tuple[str, str]]:
return map(lambda f: (f, f), 'actions'.split()) | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/cli/action.py | action.py |
__author__ = 'Paul Landes'
from typing import Any, List, Union
from dataclasses import dataclass, field
from pathlib import Path
import logging
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.axes import SubplotBase
import seaborn as sns
import pandas as pd
import IPython.display as ip
from zensols.config import ConfigFactory
from zensols.cli import NotebookHarness
logger = logging.getLogger()
@dataclass
class NotebookManager(object):
"""Bootstrap and import libraries to automate notebook testing. It also
contains utility methods for rendering data. This class integrates with
:class:`~zensols.cli.harness.NotebookHarness` to access Zensols applications
via Jupyter.
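
Example of a sketch for a Jupyter cell (the package ``zensols.someapp``
is hypothetical)::

    mng = NotebookManager('zensols.someapp', config=Path('../etc/app.conf'))
    app = mng()
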
"""
package_resource: str = field()
"""See :obj:`~zensols.cli.harness.CliHarness.package_resource`."""
app_root_dir: Path = field(default=Path('..'))
"""The application root directory."""
src_dir_name: str = field(default='src/python')
"""See :obj:`~zensols.cli.harness.CliHarness.src_dir_name`."""
default_section_name: str = field(default=None)
"""The name of the default section in the application config."""
config: Path = field(default=None)
"""The configuration file name (no suffix)."""
image_dir: Path = field(default=Path('../image'))
"""Where the images are stored."""
def __post_init__(self):
pass
def __call__(self) -> NotebookHarness:
"""Return the application."""
return self.get_application()
def get_application(self) -> NotebookHarness:
"""Return the application."""
return self['app']
def __getitem__(self, name: str) -> Any:
if not Path('resources').exists():
logger.warning('no resources parent directory symbolic link found')
return self.get_config_factory()(name)
def _map_cli_arguments(self, **kwargs):
"""Convert args to override string.
:param kwargs: arguments include: ``lang``, ``name``
"""
args: List[str] = []
sec: str = self.default_section_name
if len(kwargs) > 0:
ostr = ','.join(map(lambda kv: f'{sec}.{kv[0]}={kv[1]}',
kwargs.items()))
args.extend(['--override', ostr])
if self.config is not None:
args.extend(['--config', str(self.config)])
return args
def get_config_factory(self) -> ConfigFactory:
"""Return the application."""
return self.get_harness().get_config_factory(self._map_cli_arguments())
def get_harness(self) -> NotebookHarness:
"""Create a new ``NotebookManager`` instance and return it."""
return NotebookHarness(
src_dir_name=self.src_dir_name,
package_resource=self.package_resource,
root_dir=self.app_root_dir,
proto_factory_kwargs={
'reload_pattern': f'^{self.package_resource}'})
def display(self, *args, **kwargs):
"""Display an object in the notebook.
:param args: arguments passed to :func:`IPython.display`
:param kwargs: keyword arguments passed to :func:`IPython.display`
"""
ip.display(*args, **kwargs)
@staticmethod
def subplots(rows: int = 1, cols: int = 1, pad: float = 5.,
height: int = None, width: int = 20, add_height: int = 0,
**kwargs) -> Union[SubplotBase, np.ndarray]:
"""Create the matplotlib plot axes using a tight layout.
:param rows: the number of rows (each renders as a subpane)
:param cols: the number of columns (each renders as a subpane)
:param pad: the padding to add around the layout
:param height: the row height; defaults to ``5 * (rows + add_height)``
:param width: the column width
:param add_height: the height to add as a unit of the row height
:param kwargs: additional keyword arguments to pass to
:func:`matplotlib.pyplot.subplots`
:return: an ``ax`` subplot, or an array of subplots if ``rows`` or
``cols`` > 1
"""
if height is None:
height = 5 * (rows + add_height)
fig, axs = plt.subplots(
ncols=cols,
nrows=rows,
sharex=False,
figsize=(width, height),
**kwargs)
fig.tight_layout(pad=pad)
return axs
def save_fig(self, ax: SubplotBase, name: str, image_format: str = 'svg'):
"""Save a plot to the reports directory in the provided format.
:param ax: the (sub)plots that has the figure
:param name: the name of the plot used in the file name
"""
if self.image_dir is None:
logger.info(f'no image directory set--skipping save of {name}')
else:
path: Path = self.image_dir / f'{name}.{image_format}'
path.parent.mkdir(parents=True, exist_ok=True)
fig = ax.get_figure()
fig.savefig(path, format=image_format, bbox_inches='tight')
logger.info(f'saved {path}')
def heatmap(self, df: pd.DataFrame, pad: float = 9., add_height: float = 0,
fmt: str = '.2f', title: str = None):
"""Create an annotation heat map for all windows and optionally normalize.
"""
ax = self.subplots(1, 1, pad=pad, add_height=add_height)
if title is not None:
ax.set_title(title)
return sns.heatmap(df, annot=True, fmt=fmt, ax=ax) | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/cli/notebook.py | notebook.py |
__author__ = 'Paul Landes'
import re
import os
import sys
import logging
import inspect
from pathlib import Path
from functools import reduce
import optparse
from optparse import OptionParser
from configparser import ExtendedInterpolation
from zensols.config import IniConfig
from . import SimpleActionCli
logger = logging.getLogger(__name__)
class PrintActionsOptionParser(OptionParser):
"""Implements a human readable implementation of print_help for action based
command line handlers (i.e. OneConfPerActionOptionsCli).
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def action_options(self):
return self._action_options
@property
def action_names(self):
return sorted(self.action_options.keys())
@action_options.setter
def action_options(self, opts):
self._action_options = opts
self.usage = '%prog <list|{}> [options]'.\
format('|'.join(self.action_names))
def print_help(self, file=sys.stdout):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('print help: %s' % self.invokes)
logger.debug('action options: %s' % self.action_options)
OptionParser.print_help(self, file)
action_name_len = reduce(lambda x, y: max(x, y),
map(lambda x: len(x), self.action_names))
action_fmt_str = ' {:<' + str(action_name_len) + '} {}'
action_help = []
opt_str_len = 0
def_str_len = 0
# format text for each action and respective options
for action_name in self.action_names:
if action_name in self.invokes:
action_doc = self.invokes[action_name][2].capitalize()
opts = map(lambda x: x['opt_obj'],
self.action_options[action_name])
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'{action_name} -> {action_doc}, {opts}')
opt_strs = []
for opt in opts:
short_opt, long_opt, sep, default = '', '', '', ''
if opt._short_opts and len(opt._short_opts) > 0:
short_opt = opt._short_opts[0]
if opt._long_opts and len(opt._long_opts) > 0:
long_opt = opt._long_opts[0]
if opt.metavar is not None:
otype = f' <{opt.metavar}>'
elif opt.type is not None:
otype = f' <{opt.type.upper()}>'
else:
otype = ''
if len(short_opt) > 0 and len(long_opt) > 0:
sep = ', '
opt_str = f' {short_opt}{sep}{long_opt}{otype}'
if opt.default and opt.default != ('NO', 'DEFAULT'):
default = str(opt.default)
opt_strs.append({'str': opt_str,
'default': default,
'help': opt.help})
opt_str_len = max(opt_str_len, len(opt_str))
def_str_len = max(def_str_len, len(default))
action_help.append(
{'doc': action_fmt_str.format(action_name, action_doc),
'opts': opt_strs})
opt_str_fmt = '{:<' + str(opt_str_len) + '} {:<' +\
str(def_str_len) + '} {}\n'
file.write('Actions:\n')
for i, ah in enumerate(action_help):
file.write(ah['doc'] + '\n')
for op in ah['opts']:
file.write(opt_str_fmt.format(
op['str'], op['default'], op['help']))
if i < len(action_help) - 1:
file.write('\n')
class PerActionOptionsCli(SimpleActionCli):
def __init__(self, *args, **kwargs):
self.action_options = {}
super().__init__(*args, **kwargs)
def _init_executor(self, executor, config, args):
mems = inspect.getmembers(executor, predicate=inspect.ismethod)
if 'set_args' in (set(map(lambda x: x[0], mems))):
executor.set_args(args)
def _log_config(self):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('executors: %s' % self.executors)
logger.debug('invokes: %s' % self.invokes)
logger.debug('action options: %s' % self.action_options)
logger.debug('opts: %s' % self.opts)
logger.debug('manditory opts: %s' % self.manditory_opts)
def make_option(self, *args, **kwargs):
return optparse.make_option(*args, **kwargs)
def _create_parser(self, usage):
return PrintActionsOptionParser(
usage=usage, version='%prog ' + str(self.version))
def _config_parser_for_action(self, args, parser):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('config parser for action: %s' % args)
action = args[0]
if action in self.action_options:
for opt_cfg in self.action_options[action]:
opt_obj = opt_cfg['opt_obj']
parser.add_option(opt_obj)
self.opts.add(opt_obj.dest)
if logger.isEnabledFor(logging.DEBUG):
logger.debug('manditory: %s' % opt_cfg['manditory'])
if opt_cfg['manditory']:
self.manditory_opts.add(opt_obj.dest)
self._log_config()
class OneConfPerActionOptionsCli(PerActionOptionsCli):
"""Convenience action handler that allows a definition on a per action
basis. See the test cases for examples of how to use this as the detail is
all in the configuration pased to the init method.
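
Example of a minimal sketch of the option configuration inferred from the
parsing code (the executor factory and names are hypothetical)::

    opt_config = {
        'executors': [
            {'name': 'app',
             'executor': lambda params: MainApp(**params),
             'actions': [
                 {'name': 'doit',
                  'opts': [['-d', '--dryrun', False,
                            {'dest': 'dry_run',
                             'action': 'store_true'}]]}]}]}
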
:param opt_config: the option configuration (see project documentation)
:param config_type: the class used for the configuration, which defaults
to :class:`zensols.config.IniConfig`
"""
def __init__(self, opt_config, config_type=IniConfig, **kwargs):
self.opt_config = opt_config
self.config_type = config_type
super().__init__({}, {}, **kwargs)
def _config_global(self, oc):
parser = self.parser
if logger.isEnabledFor(logging.DEBUG):
logger.debug('global opt config: %s' % oc)
if 'whine' in oc and oc['whine'] is not None:
logger.debug('configuring whine option')
self._add_whine_option(parser, default=oc['whine'])
if 'short' in oc and oc['short']:
logger.debug('configuring short option')
self._add_short_option(parser)
if 'config_option' in oc:
conf = oc['config_option']
self.config_opt_conf = conf
opt = conf['opt']
logger.debug('config opt: %s', opt)
opt_obj = self.make_option(opt[0], opt[1], **opt[3])
parser.add_option(opt_obj)
if opt[2]:
self.manditory_opts.add(opt_obj.dest)
if 'global_options' in oc:
for opt in oc['global_options']:
logger.debug('global opt: %s', opt)
opt_obj = self.make_option(opt[0], opt[1], **opt[3])
logger.debug('parser opt: %s', opt_obj)
parser.add_option(opt_obj)
self.opts.add(opt_obj.dest)
if opt[2]:
self.manditory_opts.add(opt_obj.dest)
def _config_executor(self, oc):
exec_name = oc['name']
gaopts = self.action_options
if logger.isEnabledFor(logging.DEBUG):
logger.debug('config opt config: %s' % oc)
for action in oc['actions']:
action_name = action['name']
meth = action['meth'] if 'meth' in action else re.sub(r'[- ]', '_', action_name)
doc = action['doc'] if 'doc' in action else re.sub(r'[-_]', ' ', meth)
inv = [exec_name, meth, doc]
logger.debug('inferred action: %s: %s' % (action, inv))
self.invokes[action_name] = inv
if 'opts' not in action:
action['opts'] = ()
aopts = gaopts[action_name] if action_name in gaopts else []
gaopts[action_name] = aopts
for opt in action['opts']:
logger.debug('action opt: %s' % opt)
opt_obj = self.make_option(opt[0], opt[1], **opt[3])
logger.debug('action opt obj: %s' % opt_obj)
aopts.append({'opt_obj': opt_obj, 'manditory': opt[2]})
self.executors[exec_name] = oc['executor']
def config_parser(self):
super().config_parser()
parser = self.parser
self._config_global(self.opt_config)
for oc in self.opt_config['executors']:
self._config_executor(oc)
parser.action_options = self.action_options
parser.invokes = self.invokes
self._log_config()
if logger.isEnabledFor(logging.DEBUG):
logger.debug('finished config parser')
def _create_config(self, conf_file):
return self.config_type(config_file=conf_file)
def _get_default_config(self, params):
return super().get_config(params)
def _find_conf_file(self, conf, params):
conf_name = conf['name']
conf_file = Path(params[conf_name])
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'config configuration: {conf}, name: {conf_name}, ' +
f'params: {params}')
if conf_file is not None:
if not conf_file.exists() and \
('expect' not in conf or conf['expect']):
raise IOError('no such configuration file: %s' % conf_file)
return conf_file
def get_config(self, params):
if not hasattr(self, 'config_opt_conf'):
conf = self._get_default_config(params)
else:
conf_def = self.config_opt_conf
conf_file = self._find_conf_file(conf_def, params)
if conf_file is None:
conf = None
else:
good_keys = filter(lambda x: params[x] is not None,
params.keys())
defaults = {k: str(params[k]) for k in good_keys}
conf = self._create_config(conf_file)
for k, v in defaults.items():
conf.set_option(k, v)
if conf is None:
conf = self._get_default_config(params)
logger.debug('returning config: %s' % conf)
return conf
class OneConfPerActionOptionsCliEnv(OneConfPerActionOptionsCli):
"""A command line option parser that first parses an ini file and passes that
configuration on to the rest of the CLI action processing in the super
class.
"""
def __init__(self, opt_config, config_env_name=None, no_os_environ=False,
*args, **kwargs):
"""Initialize.
:param opt_config: the option configuration (see project documentation)
:param config_env_name:
the name of the environment variable that holds the resource like
name (i.e. ~/.<program name>rc); this will be used as the
configuration file if it is given and found; otherwise a
``ValueError`` is raised if not found
:param no_os_environ:
if ``True`` do not add environment variables to the configuration
environment
"""
super().__init__(opt_config, *args, **kwargs)
if config_env_name is None:
self.default_config_file = None
else:
conf_env_var = config_env_name.upper()
if conf_env_var in os.environ:
cfile = os.environ[conf_env_var]
else:
cfile = f'~/.{config_env_name}'
cfile = Path(cfile).expanduser()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'configured default config file: {cfile}')
self.default_config_file = cfile
self.no_os_environ = no_os_environ
def _create_config(self, conf_file):
conf = super()._create_config(conf_file)
defs = {}
if not self.no_os_environ:
logger.debug(f'adding environment to config: {os.environ}')
if isinstance(conf, IniConfig) and \
isinstance(conf.parser._interpolation, ExtendedInterpolation):
env = {}
for k, v in os.environ.items():
env[k] = v.replace('$', '$$')
else:
env = os.environ
defs.update(env)
logger.debug('creating with conf_file: {}'.format(conf_file))
for k, v in defs.items():
conf.set_option(k, v)
return conf
def _find_conf_file(self, conf, params):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('finding config: {}'.format(self.default_config_file))
if self.default_config_file is None:
conf_file = super().\
_find_conf_file(conf, params)
else:
conf_name = conf['name']
conf_file = params[conf_name]
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'config: {conf}, name: {conf_name}, ' +
f'params: {params}, default_config_file: ' +
f'{self.default_config_file}')
if conf_file is None:
if os.path.isfile(self.default_config_file):
conf_file = self.default_config_file
                elif 'expect' in conf and conf['expect']:
                    # the option was not given and the default file does
                    # not exist, so there is no configuration to load
                    raise IOError('no configuration file defined in: %s or %s' %
                                  (conf['name'], self.default_config_file))
        return conf_file


# end of file: zensols/cli/peraction.py
__author__ = 'Paul Landes'
from typing import List, Dict, Any, Union, Type, Optional, Tuple, ClassVar
from dataclasses import dataclass, field
import sys
import os
import logging
import inspect
from io import TextIOBase
from pathlib import Path
from zensols.util import PackageResource, APIError
from zensols.config import DictionaryConfig, ConfigFactory
from zensols.config import (
ModulePrototype, ImportConfigFactoryModule, ImportConfigFactory
)
from zensols.introspect import ClassImporter
from zensols.cli import (
ApplicationError, ApplicationFailure, Action, ActionResult, OptionMetaData,
Application, ApplicationResult, ApplicationFactory, ConfigurationImporter
)
from . import LogConfigurator
from .lib.support import ListActions
logger = logging.getLogger(__name__)
@dataclass
class ConfigFactoryAccessor(object):
"""Return an instance from a :class:`.ConfigFactory`, which is useful for
creating harnesses and accessing application context configured instances.
    *Important*: if configured to access an application class in the
    application context, you will need to remove the entry from the
    ``removes`` list in the ``cli`` section of the ``app.conf`` file.
"""
LONG_NAME = ListActions.LONG_NAME_SKIP
"""The long name of the first pass option to denote the application is to be
returned in a pragmatic context as a client to :class:`.CliHarness`.
:see: :class:`.ListActions`
"""
CLI_META = {'option_includes': {},
'mnemonic_overrides': {'access': LONG_NAME},
'is_usage_visible': False}
config_factory: ConfigFactory = field()
"""The parent configuration factory that is returned when accessed."""
def access(self) -> ConfigFactory:
"""Return the configuration factory."""
return self.config_factory
@dataclass
class _HarnessEnviron(object):
"""A context to help the harness do things like relocate the environment
(i.e. root directory that has data and resource files).
"""
args: List[str]
src_path: Path
root_dir: Path
app_config_resource: Union[str, TextIOBase]
@dataclass
class CliHarness(object):
"""A utility class to automate the creation of execution of the
:class:`.Application` from either the command line or a Python REPL.
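
    Example of a typical entry point script (a minimal sketch; the package
    name ``myapp`` and the paths are assumptions)::

        from zensols.cli import CliHarness

        if __name__ == '__main__':
            CliHarness(
                package_resource='myapp',
                src_dir_name='src',
                proto_args='proto -c etc/myapp.conf',
            ).run()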
"""
src_dir_name: str = field(default=None)
"""The directory (relative to :obj:`root_dir` to add to the Python path
containing the source files.
"""
package_resource: Union[str, PackageResource] = field(default='app')
"""The application package resource.
:see: :obj:`.ApplicationFactory.package_resource`
"""
app_config_resource: Union[str, TextIOBase] = field(
default='resources/app.conf')
"""The relative resource path to the application's context. If set as an
instance of :class:`io.TextIOBase` then read from that resource instead of
trying to find a resource file.
:see: :obj:`.ApplicationFactory.app_config_resource`
"""
app_config_context: Dict[str, Dict[str, str]] = field(default_factory=dict)
"""More context given to the application context on app creation."""
root_dir: Path = field(default=None)
"""The entry point directory where to make all files relative. If not given,
it is resolved from the parent of the entry point program path in the
(i.e. :obj:`sys.argv`) arguments.
"""
app_factory_class: Union[str, Type[ApplicationFactory]] = field(
default=ApplicationFactory)
"""The application factory used to create thye application."""
relocate: bool = field(default=True)
"""Whether or not to make :obj:`source_dir_name` and :obj:`app_config_resource`
relative to :obj:`root_dir` (when non-``None``). This should be set to
``False`` when used to create an application that is installed (i.e. with
pip).
"""
proto_args: Union[str, List[str]] = field(default_factory=list)
"""The command line arguments."""
proto_factory_kwargs: Dict[str, Any] = field(default_factory=dict)
"""Factory keyword arguments given to the :class:`.ApplicationFactory`."""
proto_header: str = field(default=None)
"""Printed for each invocation of the prototype command line. This is handy
when running in environments such as Emacs REPL to clarify the invocation
method.
"""
log_format: str = field(default='%(asctime)-15s [%(name)s] %(message)s')
"""The log formatting used in :meth:`configure_logging`."""
no_exit: bool = field(default=False)
"""If ``True`` do not exist the program when :class:`SystemExit` is raised.
"""
def __post_init__(self):
if self.root_dir is not None:
if not self.root_dir.is_dir():
raise ApplicationError(
f'No such root directory: {self.root_dir}')
self.add_sys_path(self.root_dir)
@staticmethod
def add_sys_path(to_add: Path):
"""Add to the Python system path if not already.
:param to_add: the path to test and add
"""
def canon(p: Path):
return p.expanduser().resolve().absolute()
spath: str
to_add = canon(to_add)
if not any(map(lambda p: canon(Path(p)) == to_add, sys.path)):
sys.path.append(str(to_add))
@property
def invoke_method(self) -> str:
"""Return how the program was invoked.
:return: one of ``eval`` for re-evaluating the file, ``repl`` from the
REPL or ``main`` for invocation from the main command line
"""
meth = None
finf: inspect.FrameInfo = inspect.stack()[-1]
mod_name = finf.frame.f_globals['__name__']
if mod_name == '__main__':
if finf.filename == '<stdin>':
meth = 'repl'
elif finf.filename == '<string>':
meth = 'eval'
meth = 'main' if meth is None else meth
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'module name: {mod_name}, file: {finf.filename}, ' +
f'method: {meth}')
return meth
def _handle_exit(self, se: SystemExit):
"""Handle attempts to exit the Python interpreter. This default implementation
simplly prints the error if :obj:`no_exit` is ``True``.
:param se: the error caught
:raises: SystemExit if :obj:`no_exit` is ``False``
"""
if self.no_exit:
print(f'exit: {se}')
else:
raise se
def configure_logging(self, *args, **kwargs):
"""Convenience method to configure the logging package system for early stage
(bootstrap) debugging. However, the "right" way to configure logging
is in the application configuration.
The arguments provided are given to the initializer of
:class:`.LogConfigurator`, which is then used to configure the logging
system.
"""
log_conf = LogConfigurator(*args, **kwargs)
log_conf.config()
def _create_context(self, env: _HarnessEnviron) -> \
Dict[str, Dict[str, str]]:
ctx = dict(self.app_config_context)
appenv = ctx.get('appenv')
if appenv is None:
appenv = {}
ctx['appenv'] = appenv
appenv['root_dir'] = str(env.root_dir)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating initial context with {ctx}')
return ctx
def _get_app_factory_class(self) -> Type[ApplicationFactory]:
if isinstance(self.app_factory_class, str):
ci = ClassImporter(self.app_factory_class, False)
cls = ci.get_class()
else:
cls = self.app_factory_class
return cls
def _relocate_harness_environ(self, args: List[str]) -> _HarnessEnviron:
"""Create a relocated harness environment.
:param args: all command line arguments as given from :obj:`sys.argv`,
including the program name
"""
entry_path: Path = None
cur_path = Path('.')
src_path = None
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'args: {args}')
if len(args) > 0:
entry_path = Path(args[0]).parents[0]
args = args[1:]
if entry_path is None:
entry_path = cur_path
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'entry path: {entry_path}')
if self.root_dir is None:
root_dir = entry_path
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'no root dir, using entry path: {entry_path}')
else:
root_dir = entry_path / self.root_dir
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'root dir: {root_dir}')
if self.src_dir_name is not None:
src_path = root_dir / self.src_dir_name
if logger.isEnabledFor(logging.INFO):
logger.info(f'adding source path: {src_path} to python path')
self.add_sys_path(src_path)
app_conf_res: str = self.app_config_resource
if isinstance(app_conf_res, str):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
f'app conf res: {app_conf_res}, entry path: {entry_path}' +
f', root_dir: {root_dir}, cur_path: {cur_path}')
if root_dir != cur_path:
app_conf_res: Path = root_dir / app_conf_res
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'relative app conf res: {app_conf_res}')
# absolute paths do not work with package_resource as it
# removes the leading slash when resolving resource paths
app_conf_res = Path(os.path.relpath(
app_conf_res.resolve(), Path.cwd()))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'update app config resource: {app_conf_res}')
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'args={args}, src_path={src_path}, ' +
f'root_dir={root_dir}, app_conf_res={app_conf_res}')
return _HarnessEnviron(args, src_path, root_dir, app_conf_res)
def _create_harness_environ(self, args: List[str]) -> _HarnessEnviron:
"""Process paths and configure the Python path necessary to execute the
application.
:param args: all command line arguments as given from :obj:`sys.argv`,
including the program name
"""
if self.relocate:
return self._relocate_harness_environ(args)
else:
if len(args) > 0:
args = args[1:]
return _HarnessEnviron(
args, None, Path('.'), self.app_config_resource)
def _create_app_fac(self, env: _HarnessEnviron,
factory_kwargs: Dict[str, Any]) -> ApplicationFactory:
ctx = self._create_context(env)
dconf = DictionaryConfig(ctx)
cls = self._get_app_factory_class()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating {cls}: ' +
f'package resource: {self.package_resource}, ' +
f'config resource: {env.app_config_resource}, ' +
f'context: {ctx}')
return cls(
package_resource=self.package_resource,
app_config_resource=env.app_config_resource,
children_configs=(dconf,),
**factory_kwargs)
def create_application_factory(self, args: List[str] = (),
**factory_kwargs: Dict[str, Any]) -> \
ApplicationFactory:
"""Create and return the application factory.
:param args: all command line arguments as given from :obj:`sys.argv`,
including the program name
:param factory_kwargs: arguments passed to :class:`.ApplicationFactory`
:return: the application factory on which to call
:meth:`.ApplicationFactory.invoke`
"""
env: _HarnessEnviron = self._create_harness_environ(args)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'environ: {env}')
return self._create_app_fac(env, factory_kwargs)
def invoke(self, args: List[str] = sys.argv,
**factory_kwargs: Dict[str, Any]) -> ApplicationResult:
"""Invoke the application using the standard command line arguments.
This is called from command line entry points. To invoke from Python
use :meth:`execute`.
:param args: all command line arguments including the program name
:param factory_kwargs: arguments given to the command line factory
:return: the application results
"""
cli: ApplicationFactory = self.create_application_factory(
args, **factory_kwargs)
try:
return cli.invoke(args[1:])
except SystemExit as e:
self._handle_exit(e)
return e
def get_instance(self, args: Union[List[str], str] = '',
**factory_kwargs: Dict[str, Any]) -> \
Union[Any, ApplicationFailure]:
"""Create the invokable instance of the application.
:param args: the arguments to the application not including the program
name (as it makes no sense in the context of this call);
if this is a string, it will be converted to a list by
splitting on whitespace; this defaults to the output of
:meth:`_get_default_args`
:param factory_kwargs: arguments passed to :class:`.ApplicationFactory`
:see: :meth:`.ApplicationFactory.get_instance`
"""
self.no_exit = True
if isinstance(args, str):
args = f'_ {args}'
else:
args = ['_'] + args
cli: ApplicationFactory = self.create_application_factory(
args, **factory_kwargs)
if cli.error_handler is None:
cli.error_handler = ApplicationFailure
try:
return cli.get_instance(args[1:])
except SystemExit as e:
return self._handle_exit(e)
def _normalize_args(self, args: Optional[Union[List[str], str]],
create_if_none: bool = True) -> List[str]:
if args is None:
if create_if_none:
args = []
else:
if isinstance(args, str):
args = args.split()
elif not isinstance(args, list):
raise APIError(
f'Expecting argument list of str but got: {args}')
return args
def get_config_factory(self, args: Union[List[str], str] = None,
throw: bool = True) -> \
Union[ConfigFactory, ApplicationFailure]:
"""The application configuration factory.
:param args: additional argument to give to the pseudo command line
(i.e. ``-c <configuration file>``)
:param throw: whether to throw exceptions raised during executing the
application; if ``False`` then return an
:class:`.ApplicationFailure`
:return: the configuration factory used to create the application
environment and application config or the
:class:`.ApplicationFailure` if ``throw`` is ``True``
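
        Example (a sketch; ``myapp`` and ``doc_parser`` are assumptions)::

            fac = CliHarness(package_resource='myapp').get_config_factory()
            parser = fac('doc_parser')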
"""
inst_args: List[str] = [ConfigFactoryAccessor.LONG_NAME]
if args is not None:
args = self._normalize_args(args)
inst_args.extend(args)
accessor: ConfigFactoryAccessor = self.get_instance(inst_args)
if isinstance(accessor, ApplicationFailure):
if throw:
raise accessor.exception
else:
return accessor
else:
return accessor.access()
def get_application(self, args: Union[List[str], str] = None,
throw: bool = True,
app_section: str = 'app') -> \
Union[Application, ApplicationFailure]:
"""Get the application from the configuration factory. For this to
work, the ``app`` section most not be in the cleanups. Otherwise the
framework will remove the section before the call to the configuration
factory to create it.
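
        Example (a sketch; ``myapp`` is an assumption)::

            app = CliHarness(package_resource='myapp').get_application()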
"""
config_factory: Union[ConfigFactory, ApplicationFailure] = \
self.get_config_factory(args, throw)
if isinstance(config_factory, ConfigFactory):
if app_section not in config_factory.config.sections:
raise ApplicationError(
f"No '{app_section}' section in configuration. " +
"Remove it from cleanups?")
return config_factory(app_section)
def __getitem__(self, section_name: str) -> Optional[Any]:
"""Index by section name binded application configuration instances.
:param section_name: the section used to create the instance
"""
fac: ConfigFactory = self.get_config_factory()
return fac(section_name)
def _proto(self, args: Union[List[str], str],
**factory_kwargs: Dict[str, Any]) -> ApplicationResult:
"""Invoke the prototype.
:param args: the command line arguments without the first argument (the
program name)
:param factory_kwargs: arguments given to the command line factory
:return: the application results
"""
args = ['_'] + self._normalize_args(args)
self.no_exit = True
return self.invoke(args, **factory_kwargs)
def proto(self, args: Union[List[str], str] = None) -> \
Optional[ApplicationResult]:
"""Invoke the prototype using :obj:`proto_args` and
:obj:`proto_factory_kwargs`.
:param args: the command line arguments without the first argument (the
program name)
:return: the application results if it did not try to exit
"""
if self.proto_header is not None:
print(self.proto_header)
args = self.proto_args if args is None else args
try:
return self._proto(args, **self.proto_factory_kwargs)
except SystemExit as e:
self._handle_exit(e)
def run(self) -> Optional[ActionResult]:
"""The command line script (i.e. model harness scripts) entry point.
:return: the application results if it did not exit
"""
invoke_method = self.invoke_method
if invoke_method == 'main':
# when running from a shell, run the CLI entry point
return self.invoke()
elif invoke_method == 'repl':
# otherwise, assume a Python REPL and run the prototyping method
return self.proto()
else:
logger.debug('skipping re-entry from interpreter re-evaluation')
def execute(self, args: Union[List[str], str] = None) -> \
Optional[ApplicationResult]:
"""Invoke the application with command line with arguments from other
Python programs or the REPL.
:param args: the command line arguments without the first argument (the
program name)
:return: the application results if it did not try to exit
"""
self.proto_header = None
self.no_exit = True
try:
return self.proto(args)
except SystemExit as e:
self._handle_exit(e)
def __call__(self, args: Union[List[str], str] = None) -> \
Optional[ApplicationResult]:
"""Invoke using :meth:`execute`."""
        return self.execute(args)
@dataclass
class ConfigurationImporterCliHarness(CliHarness):
"""A harness that adds command line arguments for the configuration file
when they are available. It does this by finding an instance of
:class:`.ConfigurationImporter` in the command line metadata. When it finds
it, if not set from the given set of arguments it:
1. Uses :obj:`config_path`
2. Gets the path from the environment variable set using
:class:`.ConfigurationImporter`
**Implementation note**: One disadvantage to using this over
:class:`.CliHarness` is that it has to parse the application configuration
and create the application twice. This can slow the start of the
    application and is noticeable for larger configurations.
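
    Example (a sketch; the package and path are assumptions)::

        harness = ConfigurationImporterCliHarness(
            package_resource='myapp',
            config_path='etc/myapp.conf')
        harness.run()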
"""
config_path: Union[str, Path] = field(default=None)
def __post_init__(self):
super().__post_init__()
if isinstance(self.config_path, str):
self.config_path = Path(self.config_path)
def _get_config_path_args(self, env: _HarnessEnviron,
app: Application) -> List[str]:
args: List[str] = []
        capps: Tuple[Action, ...] = tuple(filter(
            lambda a: a.class_meta.class_type == ConfigurationImporter,
            app.first_pass_actions))
        capp: Optional[Action] = capps[0] if len(capps) > 0 else None
if capp is not None:
ci: ConfigurationImporter = app.get_invokable(capp.name).instance
if ci.config_path is None:
op: OptionMetaData = capp.command_action.meta_data.options[0]
lnop: str = f'--{op.long_name}'
envvar: str = ci.get_environ_var_from_app()
envval: str = os.environ.get(envvar)
if logger.isEnabledFor(logging.DEBUG):
                    logger.debug('harness loading config from environment ' +
                                 f"variable '{envvar}' = {envval}")
config_path: Path = None
if self.config_path is not None:
config_path = self.config_path
elif envval is not None:
config_path = Path(envval)
if config_path is not None:
config_path = env.root_dir / config_path
args.extend((lnop, str(config_path)))
return args
def _update_args(self, args: List[str],
**factory_kwargs: Dict[str, Any]) -> \
Tuple[str, ApplicationFactory]:
env: _HarnessEnviron = self._create_harness_environ(args)
app_fac: ApplicationFactory = self._create_app_fac(env, factory_kwargs)
try:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'creating application {app_fac} with {env.args}')
app: Application = app_fac.create(env.args)
args = list(env.args)
args.extend(self._get_config_path_args(env, app))
return app_fac, args
        except ApplicationError as ex:
            app_fac._dump_error(ex)
            # signal to callers that the application could not be created
            return None, None
def invoke(self, args: List[str] = sys.argv,
**factory_kwargs: Dict[str, Any]) -> Any:
app_fac, args = self._update_args(args, **factory_kwargs)
if app_fac is not None:
return app_fac.invoke(args)
def get_instance(self, args: Union[List[str], str] = None,
**factory_kwargs: Dict[str, Any]) -> Any:
args = self._normalize_args(args)
args.insert(0, '_')
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'get inst: {args}, factory: {factory_kwargs}')
app_fac, args = self._update_args(args, **factory_kwargs)
if app_fac is not None:
return app_fac.get_instance(args)
@dataclass
class NotebookHarness(CliHarness):
"""A harness used in Jupyter notebooks. This class has default configuration
useful to having a single directory with one or more notebooks off the
project root ditectory.
For this reason :obj:`root_dir` is the parent directory, which is used to
add :obj:`src_dir_name` to the Python path.
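
    Example notebook cell (a sketch; ``myapp`` is an assumption)::

        from pathlib import Path

        harness = NotebookHarness(
            package_resource='myapp',
            root_dir=Path('..'),
            src_dir_name='src')
        app = harness('proto')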
"""
factory_kwargs: Dict[str, Any] = field(default_factory=dict)
"""Arguments given to the factory when creating new application instances with
:meth:`__call__`.
"""
def __post_init__(self):
super().__post_init__()
self._app_factory = None
self.reset()
def reset(self):
"""Reset the notebook and recreate all resources.
"""
self.set_browser_width()
if self._app_factory is not None:
self._app_factory.deallocate()
self._app_factory = self.create_application_factory(
**self.factory_kwargs)
@staticmethod
def set_browser_width(width: int = 95):
"""Use the entire width of the browser to create more real estate.
:param width: the width as a percent (``[0, 100]``) to use as the width
in the notebook
"""
        from IPython.display import display, HTML
html = f'<style>.container {{ width:{width}% !important; }}</style>'
display(HTML(html))
def __call__(self, args: str) -> Any:
"""Return the invokable instance."""
return self._app_factory.get_instance(args)
@dataclass
class _ApplicationImportConfigFactoryModule(ImportConfigFactoryModule):
"""A module that creates instance from the context of a *different*
application.
The configuration string prototype has the form::
application(<package name>): <instance section name>
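
    For example, a client might reference another application's configured
    instance with (a sketch; the names and surrounding syntax are
    illustrative)::

        parser = application(myapp): doc_parser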
"""
_NAME: ClassVar[str] = 'application'
def __post_init__(self):
self._factories: Dict[str, ConfigFactory] = {}
def _get_factory(self, proto: ModulePrototype) -> ConfigFactory:
pkg: str = proto.config_str
fac: ConfigFactory = self._factories.get(pkg)
if fac is None:
harness: CliHarness = CliHarness(package_resource=pkg)
fac = harness.get_config_factory()
self._factories[pkg] = fac
return fac
def _instance(self, proto: ModulePrototype) -> Any:
fac: ConfigFactory = self._get_factory(proto)
return fac(proto.name)
ImportConfigFactory.register_module(_ApplicationImportConfigFactoryModule)

# end of file: zensols/cli/harness.py
__author__ = 'Paul Landes'
from typing import Tuple, Dict, Any, Type, Callable
from dataclasses import dataclass, field
from enum import Enum
import logging
import sys
from io import TextIOBase
from pathlib import Path
import optparse
from frozendict import frozendict
from zensols.util import Failure
from zensols.introspect import TypeMapper, IntegerSelection
from zensols.persist import persisted, PersistableContainer
from zensols.config import Dictable
from . import ActionCliError
logger = logging.getLogger(__name__)
class ApplicationError(ActionCliError):
"""Thrown for any application error that should result in a user error
rather than produce a full stack trace.
"""
pass
@dataclass
class ApplicationFailure(Failure, Dictable):
"""Contains information for application invocation failures used by
    programmatic methods.
"""
def raise_exception(self):
"""Raise the contained exception. The exception will include
:obj:`message` if available.
        :throws ApplicationError: always, wrapping the contained exception
"""
raise ApplicationError(str(self)) from self.exception
def __str__(self):
return self.message if self.message is not None else str(self.exception)
def apperror(method: Callable = None, *,
exception: Type[Exception] = Exception):
"""A decorator that rethrows any method's exception as an
:class:`.ApplicationError`. This is helpful for application classes that
should print a usage rather than an exception stack trace.
An optional exception parameter can be provided so the exception is rethrown
for only certain caught exceptions.
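
    Example (a sketch)::

        class Application(object):
            @apperror
            def run(self):
                ...

            @apperror(exception=OSError)
            def read(self, path):
                ...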
"""
def no_args(*args, **kwargs):
if method is not None:
ref = args[0]
try:
return method(ref, *args[1:], **kwargs)
except exception as e:
raise ApplicationError(str(e)) from e
else:
def with_args(*wargs, **wkwargs):
try:
return args[0](wargs[0], *wargs[1:], **wkwargs)
except exception as e:
raise ApplicationError(str(e)) from e
return with_args
return no_args
@dataclass
class _MetavarFormatter(object):
"""Formats the meta variable string for options. This is the data type or
example printed next to the argument or option in the usage help.
"""
def __post_init__(self):
is_enum: bool
try:
is_enum = issubclass(self.dtype, Enum) and self.choices is None
except Exception:
is_enum = False
if is_enum:
self.choices = tuple(sorted(self.dtype.__members__.keys()))
        elif self.choices is None:
            self.choices = ()
if self.metavar is None:
            self._set_metavar()
@property
def is_choice(self) -> bool:
"""Whether or not this option represents string combinations that map to
a :class:`enum.Enum` Python class.
"""
return len(self.choices) > 0
    def _set_metavar(self):
if self.is_choice:
metavar = f"<{'|'.join(self.choices)}>"
elif self.dtype == Path:
metavar = 'FILE'
elif self.dtype == IntegerSelection:
metavar = f'INT[,INT|{IntegerSelection.INTERVAL_DELIM}INT]'
elif self.dtype == bool:
metavar = None
elif self.dtype == str:
metavar = 'STRING'
else:
metavar = self.dtype.__name__.upper()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'metavar recompute using {self.dtype}: ' +
f'{metavar}, {self.choices}')
self.metavar = metavar
@dataclass(eq=True, order=True, unsafe_hash=True)
class OptionMetaData(PersistableContainer, Dictable, _MetavarFormatter):
"""A command line option."""
DATA_TYPES = frozenset(TypeMapper.DEFAULT_DATA_TYPES.values())
"""Supported data types."""
long_name: str = field()
"""The long name of the option (i.e. ``--config``)."""
short_name: str = field(default=None)
"""The short name of the option (i.e. ``-c``)."""
dest: str = field(default=None, repr=False)
"""The the field/parameter name used to on the target class."""
dtype: type = field(default=str)
"""The data type of the option (i.e. :class:`str`).
    Other types include: :class:`int`, :class:`float`, :class:`bool`,
    :class:`list` (for choice), or :class:`pathlib.Path` for files and
directories.
"""
choices: Tuple[str, ...] = field(default=None)
"""The constant list of choices when :obj:`dtype` is :class:`list`. Note
that this class is a tuple so instances are hashable in :class:`.ActionCli`.
"""
default: str = field(default=None)
"""The default value of the option."""
doc: str = field(default=None)
"""The document string used in the command line help."""
metavar: str = field(default=None, repr=False)
"""Used in the command line help for the type of the option."""
def __post_init__(self):
if self.dest is None:
self.dest = self.long_name
_MetavarFormatter.__post_init__(self)
def _str_vals(self) -> Tuple[str, str, str]:
default = self.default
choices = None
tpe = {str: 'string',
int: 'int',
float: 'float',
bool: None,
Path: None,
list: 'choice'}.get(self.dtype)
if tpe is None and self.is_choice:
tpe = 'choice'
choices = self.choices
# use the string value of the default if set from the enum
if isinstance(default, Enum):
default = default.name
elif (default is not None) and (self.dtype != bool):
default = str(default)
return tpe, default, choices
@property
def default_str(self) -> str:
"""Get the default as a string usable in printing help and as a default
using the :class:`optparse.OptionParser` class.
"""
return self._str_vals()[1]
@property
def long_option(self) -> str:
"""The long option string with dashes."""
return f'--{self.long_name}'
@property
def short_option(self) -> str:
"""The short option string with dash."""
return None if self.short_name is None else f'-{self.short_name}'
def create_option(self) -> optparse.Option:
"""Add the option to an option parser.
:param parser: the parser to populate
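
        Example (a sketch; ``parser`` is a hypothetical option parser)::

            opt = OptionMetaData('level', 'l', dtype=int, default=5,
                                 doc='the logging level')
            parser.add_option(opt.create_option())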
"""
params = {}
tpe, default, choices = self._str_vals()
if choices is not None:
params['choices'] = choices
# only set the default if given, as default=None is not the same as a
# missing default when adding the option
if default is not None:
params['default'] = default
if tpe is not None:
params['type'] = tpe
if self.dtype == list:
params['choices'] = self.choices
if self.doc is not None:
params['help'] = self.doc
for att in 'metavar dest'.split():
v = getattr(self, att)
if v is not None:
params[att] = v
if self.dtype == bool:
if self.default is True:
params['action'] = 'store_false'
else:
params['action'] = 'store_true'
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'params: {params}')
return optparse.Option(self.long_option, self.short_option, **params)
def _from_dictable(self, *args, **kwargs) -> Dict[str, Any]:
dct = super()._from_dictable(*args, **kwargs)
dct['dtype'] = self.dtype
if not self.is_choice:
del dct['choices']
else:
if self.default is not None:
dct['default'] = self.default.name
dct = {k: dct[k] for k in
filter(lambda x: dct[x] is not None, dct.keys())}
return dct
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
dct = self.asdict()
del dct['long_name']
self._write_line(self.long_name, depth, writer)
self._write_object(dct, depth + 1, writer)
@dataclass
class PositionalMetaData(Dictable, _MetavarFormatter):
"""A command line required argument that has no option switches.
"""
name: str = field()
"""The name of the positional argument. Used in the documentation and when
parsing the type.
"""
dtype: Type = field(default=str)
"""The type of the positional argument.
:see: :obj:`.Option.dtype`
"""
doc: str = field(default=None)
"""The documentation of the positional metadata or ``None`` if missing.
"""
choices: Tuple[str, ...] = field(default=None)
"""The constant list of choices when :obj:`dtype` is :class:`list`. Note
that this class is a tuple so instances are hashable in :class:`.ActionCli`.
"""
metavar: str = field(default=None, repr=False)
"""Used in the command line help for the type of the option."""
def __post_init__(self):
_MetavarFormatter.__post_init__(self)
class OptionFactory(object):
"""Creates commonly used options.
"""
@classmethod
def dry_run(cls: type, **kwargs) -> OptionMetaData:
"""A boolean dry run option."""
return OptionMetaData('dry_run', 'd', dtype=bool,
doc="don't do anything; just act like it",
**kwargs)
@classmethod
def file(cls: type, name: str, short_name: str, **kwargs):
"""A file :class:`~pathlib.Path` option."""
return OptionMetaData(name, short_name, dtype=Path,
doc=f'the path to the {name} file',
**kwargs)
@classmethod
def directory(cls: type, name: str, short_name: str, **kwargs):
"""A directory :class:`~pathlib.Path` option."""
return OptionMetaData(name, short_name, dtype=Path,
doc=f'the path to the {name} directory',
**kwargs)
@classmethod
def config_file(cls: type, **kwargs) -> OptionMetaData:
"""A subordinate file based configuration option."""
return cls.file('config', 'c', **kwargs)
@dataclass
class ActionMetaData(PersistableContainer, Dictable):
"""An action represents a link between a command line mnemonic *action* and
a method on a class to invoke.
"""
_DICTABLE_WRITABLE_DESCENDANTS = True
name: str = field(default=None)
"""The name of the action, which is also the mnemonic used on the command
line.
"""
doc: str = field(default=None)
"""A short human readable documentation string used in the usage."""
options: Tuple[OptionMetaData, ...] = field(default_factory=lambda: ())
"""The command line options for the action."""
positional: Tuple[PositionalMetaData, ...] = \
field(default_factory=lambda: ())
"""The positional arguments expected for the action."""
first_pass: bool = field(default=False)
"""If ``True`` this is a first pass action that is used with no mnemonic.
Examples include the ``-w``/``--whine`` logging level, which applies to the
entire application and can be configured in a separate class/process from
the main single action given as a mnemonic on the command line.
"""
is_usage_visible: bool = field(default=True)
"""Whether to display the action in the help usage. Applications such as
:class:`.ConfigFactoryAccessor`, which is only useful with a programatic
usage, is an example of where this is used.
"""
def __post_init__(self):
if self.first_pass and len(self.positional) > 0:
raise ActionCliError(
'A first pass action can not have positional arguments, ' +
f'but got {self.positional} for action: {self.name}')
@property
@persisted('_options_by_dest')
def options_by_dest(self) -> Dict[str, OptionMetaData]:
return frozendict({m.dest: m for m in self.options})
def _from_dictable(self, *args, **kwargs) -> Dict[str, Any]:
dct = super()._from_dictable(*args, **kwargs)
if len(dct['positional']) == 0:
del dct['positional']
        return dct

# end of file: zensols/cli/meta.py
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import Tuple, Iterable, List, Set, Union, Optional, Sequence
from dataclasses import dataclass, field
import logging
import os
import sys
import re
from itertools import chain
from pathlib import Path
from io import TextIOBase
from optparse import OptionParser
from zensols.introspect import IntegerSelection
from zensols.config import Writable, Dictable
from zensols.persist import persisted
from . import OptionMetaData, ActionMetaData, PositionalMetaData
logger = logging.getLogger(__name__)
@dataclass
class UsageConfig(Dictable):
"""Configuraiton information for the command line help.
"""
width: int = field(default=None)
"""The max width to print help."""
max_first_col: Union[float, int] = field(default=0.4)
"""Maximum width of the first column. If this is a float, then it is computed
as a percentage of the terminal width.
"""
max_metavar_len: Union[float, int] = field(default=0.15)
"""Max length of the option type."""
max_default_len: Union[float, int] = field(default=0.1)
"""Max length in characters of the default value."""
left_indent: int = field(default=2)
"""The number of left spaces for the option and positional arguments."""
inter_col_space: int = field(default=2)
"""The number of spaces between all three columns."""
sort_actions: bool = field(default=False)
"""If ``True`` sort mnemonic output."""
def __post_init__(self):
if self.width is None:
try:
self.width = os.get_terminal_size()[0]
except OSError as e:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'can not get terminal size: {e}')
self.width = 0
if self.width == 0:
self.width = 80
if self.max_first_col is None:
self.max_first_col = 0.4
if isinstance(self.max_first_col, float):
self.max_first_col = int(self.width * self.max_first_col)
if isinstance(self.max_metavar_len, float):
self.max_metavar_len = int(self.width * self.max_metavar_len)
if isinstance(self.max_default_len, float):
self.max_default_len = int(self.width * self.max_default_len)
class UsageActionOptionParser(OptionParser):
"""Implements a human readable implementation of :meth:`print_help` for action
based command line handlers.
**Implementation note**: we have to extend :class:`~optparser.OptionParser`
since the ``-h`` option invokes the print help behavior and then exists
printing the second pass action options. Instead, we look for the help
option in the first pass, print help with the correction options, then
exit.
"""
def __init__(self, actions: Tuple[ActionMetaData, ...],
options: Tuple[OptionMetaData, ...], usage_config: UsageConfig,
doc: str = None, default_action: str = None, *args, **kwargs):
super().__init__(*args, add_help_option=False, **kwargs)
help_op = OptionMetaData(
'help', 'h',
metavar='[actions]',
dtype=bool,
doc='show this help message and exit')
version_op = OptionMetaData(
'version', None, dtype=bool,
doc='show the program version and exit')
options = [help_op, version_op] + list(options)
self._usage_writer = _UsageWriter(
self, actions, options, doc, usage_config, default_action)
self.add_option(help_op.create_option())
def print_help(self, file: TextIOBase = sys.stdout,
include_actions: bool = True,
action_metas: Sequence[ActionMetaData] = None):
"""Write the usage information and help text.
:param include_actions: if ``True`` write each actions' usage as well
        :param action_metas: the list of actions to output, or ``None`` for all
"""
self._usage_writer.write(
writer=file,
include_actions=include_actions,
action_metas=action_metas)
@dataclass
class _Formatter(Writable):
"""A formattingn base class that has utility methods.
"""
_BACKTICKS_REGEX = re.compile(r"``([^`]+)``")
def _format_doc(self, doc: str = None) -> str:
doc = '' if doc is None else doc
doc = re.sub(self._BACKTICKS_REGEX, r'"\1"', doc)
return doc
def _write_three_col(self, a: str, b: str, c: str, depth: int = 0,
writer: TextIOBase = sys.stdout):
a = '' if a is None else a
b = '' if b is None else b
c = '' if c is None else c
w1 = self.usage_formatter.two_col_width
w2 = self.usage_formatter.three_col_width
a = self._trunc(a, self.usage_formatter.max_first_col)
fmt = '{:<' + str(w1) + '}{:<' + str(w2) + '}{}'
s = fmt.format(a, b, c)
sp = self._get_str_space(w1 + w2)
self._write_wrap(s, depth, writer, subsequent_indent=sp)
@dataclass
class _OptionFormatter(_Formatter):
"""Write the option, which includes the option name and documenation.
"""
    usage_formatter: _UsageFormatter
opt: OptionMetaData
usage_config: UsageConfig
def __post_init__(self):
self.WRITABLE_MAX_COL = self.usage_config.width
opt = self.opt
self.doc = self._format_doc(self.opt.doc)
left_indent: str = ' ' * self.usage_config.left_indent
max_olen: int = self.usage_config.max_metavar_len
sep: str = '' if opt.short_name is None else ', '
long_opt: str = opt.long_option
short_opt: str = '' if opt.short_option is None else opt.short_option
metavar: str = '' if opt.metavar is None else opt.metavar
mlen, over = self._get_min_default_len()
self._opt_str = f'{left_indent}{short_opt}{sep}{long_opt}'
if not issubclass(opt.dtype, IntegerSelection) and \
len(metavar) > max_olen:
if metavar.find('|') > -1:
metavar = metavar[1:-1]
if len(self.doc) > 0:
self.doc += ', '
self.doc += f"X is one of: {', '.join(metavar.split('|'))}"
self._opt_str += ' X'
else:
if len(self.doc) > 0:
self.doc += ', of '
self.doc += f'type {metavar}'
else:
self._opt_str += f' {metavar}'
if over:
self.doc += f' with default {self.opt.default_str}'
def _get_min_default_len(self) -> Tuple[Optional[int], bool]:
mdlen: int = None
over: bool = False
if self.opt.default is not None and self.opt.dtype != bool:
mdlen: int = self.usage_config.max_default_len
over = (len(self.opt.default_str) + 3) > mdlen
return mdlen, over
@property
def default(self) -> str:
mlen, over = self._get_min_default_len()
if mlen is not None:
s: str = self.opt.default_str
if over:
s = s[:mlen] + '...'
return s
else:
return ''
def add_first_col_width(self, widths: List[int]):
widths.append(len(self._opt_str))
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_three_col(
self._opt_str, self.default, self.doc, depth, writer)
@dataclass
class _PositionalFormatter(_Formatter):
usage_formatter: _UsageFormatter
pos: PositionalMetaData
def __post_init__(self):
self.WRITABLE_MAX_COL = self.usage_formatter.usage_config.width
spl = self.usage_formatter.writer.usage_config.left_indent
sp = self._get_str_space(spl)
mv = ''
if self.pos.metavar is not None:
mv = f' {self.pos.metavar}'
self.name = f'{sp}{self.pos.name}{mv}'
self.doc = self._format_doc(self.pos.doc)
def add_first_col_width(self, widths: List[int]):
widths.append(len(self.name))
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_three_col(self.name, '', self.doc, depth, writer)
@dataclass
class _ActionFormatter(_Formatter):
"""Write the action, which includes the name, positional arguments, and
documentation in one line, then the options afterward.
"""
usage_formatter: _UsageFormatter
action: ActionMetaData
usage_config: UsageConfig = field()
action_name: str = field(default=None)
opts: Tuple[_OptionFormatter] = field(default=None)
pos: Tuple[_PositionalFormatter] = field(default=None)
def __post_init__(self):
self.WRITABLE_MAX_COL = self.usage_config.width
action = self.action
if len(action.positional) == 0:
args = ''
else:
pargs = ', '.join(map(lambda p: p.name, action.positional))
args = f' <{pargs}>'
self.action_name = action.name + args
if self.usage_formatter.default_action == self.action_name:
self.action_name = f'{self.action_name} (default)'
self.opts = tuple(map(
lambda of: _OptionFormatter(
self.usage_formatter, of, self.usage_config),
action.options))
self.pos = tuple(map(
lambda pos: _PositionalFormatter(self.usage_formatter, pos),
action.positional))
self.doc = self._format_doc(self.action.doc)
def add_first_col_width(self, widths: List[int]):
widths.append(len(self.action_name))
for of in self.opts:
of.add_first_col_width(widths)
for pos in self.pos:
pos.add_first_col_width(widths)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout):
self._write_three_col(
self.action_name, '', self.doc, depth, writer)
for pos in self.pos:
self._write_object(pos, depth, writer)
for opt in self.opts:
self._write_object(opt, depth, writer)
@dataclass
class _UsageFormatter(_Formatter):
"""Write the global options and all actions.
"""
writer: _UsageWriter
actions: Tuple[ActionMetaData, ...]
usage_config: UsageConfig
global_options: Tuple[OptionMetaData, ...]
glob_option_formatters: List[_OptionFormatter] = field(default=None)
action_formatters: List[_ActionFormatter] = field(default=None)
pos_formatters: List[_PositionalFormatter] = field(default=None)
def __post_init__(self):
self.WRITABLE_MAX_COL = self.usage_config.width
self.glob_option_formatters = list(
map(lambda o: _OptionFormatter(self, o, self.usage_config),
self.global_options))
self.action_formatters = list(
map(lambda a: _ActionFormatter(self, a, self.usage_config),
self.actions))
self.pos_formatters = []
if self.is_singleton_action:
for af in self.action_formatters:
self.glob_option_formatters.extend(af.opts)
self.pos_formatters.extend(af.pos)
self.action_formatters.clear()
@property
def is_singleton_action(self) -> bool:
return len(self.actions) == 1
@property
def default_action(self):
return self.writer.default_action
@property
def max_first_col(self) -> int:
return self.writer.usage_config.max_first_col
def _get_opt_formatters(self) -> Iterable[_OptionFormatter]:
return chain.from_iterable(
[chain.from_iterable(
map(lambda f: f.opts, self.action_formatters)),
self.glob_option_formatters])
@property
@persisted('_two_col_width_pw')
def two_col_width(self) -> int:
widths = []
for af in self.action_formatters:
af.add_first_col_width(widths)
for go in self.glob_option_formatters:
go.add_first_col_width(widths)
for po in self.pos_formatters:
po.add_first_col_width(widths)
return max(widths) + self.usage_config.inter_col_space
@property
@persisted('_three_col_width_pw')
def three_col_width(self) -> int:
return max(len(a.default) for a in self._get_opt_formatters()) + \
self.usage_config.inter_col_space
def get_option_usage_names(self, expand: bool = True) -> str:
        actions: Iterable[ActionMetaData] = filter(
lambda a: a.is_usage_visible, self.actions)
action_names: Tuple[str, ...] = tuple(map(lambda a: a.name, actions))
if len(action_names) > 1:
if expand:
names = '|'.join(action_names)
else:
names = 'actions'
if self.default_action is None:
opts = f"<{names}> "
else:
opts = f"[{names}] "
elif len(action_names) > 0:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'action: {self.actions[0]}')
opts = ', '.join(map(lambda p: p.name, self.actions[0].positional))
if len(opts) > 0:
opts = f'<{opts}> '
else:
opts = ''
return opts
def _write_options(self, depth: int, writer: TextIOBase) -> bool:
n_fmt = len(self.glob_option_formatters)
has_opts = n_fmt > 0
if has_opts:
self._write_line('Options:', depth, writer)
for i, of in enumerate(self.glob_option_formatters):
of.write(depth, writer)
return has_opts
def _write_actions(self, depth: int, writer: TextIOBase,
action_metas: Sequence[ActionMetaData] = None):
am_set: Set[str] = None
if action_metas is not None:
am_set = set(map(lambda a: a.name, action_metas))
# get only visible actions
fmts: Tuple[_ActionFormatter] = tuple(filter(
lambda f: f.action.is_usage_visible and \
(am_set is None or f.action.name in am_set),
self.action_formatters))
n_fmt: int = len(fmts)
if n_fmt > 0:
self._write_line('Actions:', depth, writer)
i: int
fmt: _ActionFormatter
for i, fmt in enumerate(fmts):
am: ActionMetaData = fmt.action
self._write_object(fmt, depth, writer)
if i < n_fmt - 1:
self._write_empty(writer)
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_singleton_positional: bool = True,
include_options: bool = True,
include_actions: bool = True,
action_metas: Sequence[ActionMetaData] = None):
if self.is_singleton_action and include_singleton_positional and \
len(self.pos_formatters) > 0:
self._write_line('Positional:', depth, writer)
for po in self.pos_formatters:
self._write_object(po, depth, writer)
if include_options or include_actions:
self._write_empty(writer)
if include_options:
if self._write_options(depth, writer) and include_actions and \
len(self.action_formatters) > 0:
self._write_empty(writer)
if include_actions:
self._write_actions(depth, writer, action_metas)
@dataclass
class _UsageWriter(_Formatter):
"""Generates the usage and help messages for an :class:`optparse.OptionParser`.
"""
parser: OptionParser = field()
"""Parses the command line in to primitive Python data structures."""
actions: Tuple[ActionMetaData, ...] = field()
"""The set of actions to document as a usage."""
global_options: Tuple[OptionMetaData, ...] = field()
"""Application level options (i.e. level, config, verbose etc)."""
doc: str = field()
"""The application document string."""
usage_config: UsageConfig = field(default_factory=UsageConfig)
"""Configuraiton information for the command line help."""
default_action: str = field(default=None)
"""The default mnemonic use when the user does not supply one."""
usage_formatter: _UsageFormatter = field(default=None)
"""The usage formatter used to generate the documentation."""
def __post_init__(self):
self.WRITABLE_MAX_COL = self.usage_config.width
if self.usage_config.sort_actions:
actions = sorted(self.actions, key=lambda a: a.name)
else:
actions = self.actions
self.usage_formatter = _UsageFormatter(
self, actions, self.usage_config, self.global_options)
def get_prog_usage(self) -> str:
opt_usage: str = '[options]:'
prog: str = '<python>'
if len(sys.argv) > 0:
prog_path: Path = Path(sys.argv[0])
prog = prog_path.name
opts = self.usage_formatter.get_option_usage_names()
usage = f'{prog} {opts}{opt_usage}'
if len(usage) > (self.usage_config.width - len(opt_usage)):
opts = self.usage_formatter.get_option_usage_names(expand=False)
usage = f'{prog} {opts}{opt_usage}'
return usage
def write(self, depth: int = 0, writer: TextIOBase = sys.stdout,
include_singleton_positional: bool = True,
include_options: bool = True,
include_actions: bool = True,
action_metas: Sequence[ActionMetaData] = None):
prog = self.get_prog_usage()
# if user specified help action(s) on the command line, only print the
# action(s)
if action_metas is None:
self._write_line(f'Usage: {prog}', depth, writer)
self._write_empty(writer)
if self.doc is not None:
doc = self._format_doc(self.doc)
self._write_wrap(doc, depth, writer)
self._write_empty(writer)
else:
include_options = False
self.usage_formatter.write(
depth, writer,
include_singleton_positional=include_singleton_positional,
include_options=include_options,
include_actions=include_actions,
            action_metas=action_metas)

# end of file: zensols/cli/usage.py
__author__ = 'Paul Landes'
from typing import Dict, Any, Set, List, Tuple, Optional, Type, Union
from dataclasses import dataclass, field
import os
import logging
from string import Template
import parse as par
import re
from pathlib import Path
from zensols.util import PackageResource
from zensols.config import (
rawconfig, Configurable, ConfigurableFactory,
IniConfig, ImportIniConfig, StringConfig, DictionaryConfig,
)
from .. import (
Dictable, ActionCliError, ApplicationError, OptionMetaData, ActionMetaData,
ApplicationObserver, Action, Application,
)
logger = logging.getLogger(__name__)
class _ConfiguratorImporterTemplate(Template):
delimiter = '^'
class _PreLoadImportIniConfig(ImportIniConfig):
_PRELOAD_FORMAT = 'preload:{}'
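    # for example, format_preload('override') yields 'preload:override',
    # which parse_preload maps back to 'override'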
def __init__(self, *args, preloads: Dict[str, Configurable], **kwargs):
super().__init__(*args, **kwargs)
self.preloads = preloads
    @classmethod
    def format_preload(cls, name: str) -> str:
        return cls._PRELOAD_FORMAT.format(name)
    @classmethod
    def parse_preload(cls, s: str) -> Optional[str]:
        pres: par.Result = par.parse(cls._PRELOAD_FORMAT, s)
        if pres is not None:
            return pres[0]
def _create_config(self, section: str,
params: Dict[str, Any]) -> Configurable:
conf: Configurable = None
key: str = None
config_file: Union[Path, str] = params.get(self.SINGLE_CONFIG_FILE)
if isinstance(config_file, str):
key = self.parse_preload(config_file)
if key is not None:
conf = self.preloads.get(key)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f"create config '{key} -> {conf} ({type(conf)})")
if conf is None:
logger.warning(f"matched a preload format '{config_file}' " +
f"with non-existing preload: '{key}'")
if conf is None:
conf = super()._create_config(section, params)
return conf
@dataclass
class ConfigurationOverrider(object):
"""Overrides configuration in the app config. This is useful for replacing on
a per command line invocation basis. Examples could include changing the
number of epochs trained for a model.
The :obj:`override` field either contains a path to a file that contains
the configuration file to use to clobber the given sections/values, or a
string to be interpreted by :class:`.StringConfig`. This determination is
made by whether or not the string points to an existing file or directory.
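
    Example override values (sketches; the section and option names are
    illustrative)::

        model_settings.epochs=3,model_settings.learning_rate=0.001
        etc/override.conf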
"""
OVERRIDE_FIELD = 'override'
CLI_META = {'first_pass': True, # not a separate action
'mnemonic_includes': {'merge'},
# better/shorter long name, and reserve the short name
'option_overrides': {OVERRIDE_FIELD:
{'metavar': '<FILE|DIR|STRING>',
'short_name': None}},
                # only the path to the configuration should be exposed as an
                # option on the command line
'option_includes': {OVERRIDE_FIELD}}
config: Configurable = field()
"""The parent configuration, which is populated from the child configuration
(see class docs).
"""
override: str = field(default=None)
"""A config file/dir or a comma delimited section.key=value string that
overrides configuration.
"""
option_sep: str = field(default=',')
"""The string used to delimit the each key/value pair."""
disable: bool = field(default=False)
"""Whether to disable the application, which is useful to set to ``False`` when
used with :class:`.ConfigurationImporter`.
"""
def get_configurable(self) -> Optional[Configurable]:
if self.override is not None:
path = Path(self.override)
if path.exists():
cf = ConfigurableFactory()
overrides = cf.from_path(path)
else:
overrides = StringConfig(
self.override,
option_sep=self.option_sep
)
return overrides
def merge(self) -> Configurable:
"""Merge the string configuration with the application context."""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'overriding with: {self.override}')
if not self.disable:
conf: Optional[Configurable] = self.get_configurable()
if conf is not None:
self.config.merge(conf)
return self.config
def __call__(self) -> Configurable:
return self.merge()
@dataclass
class ConfigurationImporter(ApplicationObserver, Dictable):
"""This class imports a child configuration in to the application context. It
does this by:
1. Attempt to load the configuration indicated by the ``--config``
option.
2. If the option doesn't exist, attempt to get the path to load from an
environment variable (see :meth:`get_environ_var_from_app`).
3. Loads the *child* configuration.
4. Copy all sections from the child configuration to :obj:`config`.
The child configuration is created by :class:`.ConfigurableFactory`. If
the child has a `.conf` extension, :class:`.ImportIniConfig` is used with
its child set as :obj:`config` so the two can reference each other at
property/factory resolve time.
Special mnemonic ``^{config_path}`` can be used in an
    :class:`.ImportIniConfig` import section in the ``config_files`` property to
load the referred configuration file in any order with the other loaded
files. The special mnemonic ``^{override}`` does the same thing with the
:class:`.ConfigurationOverrider` one pass application as well.
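
    Example import section using the mnemonics (a sketch; the section and
    file names are illustrative)::

        [config_import]
        type = import
        config_files = list: ^{override}, ^{config_path}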
"""
_OVERRIDES_KEY = ConfigurationOverrider.OVERRIDE_FIELD
_OVERRIDES_PRELOAD = _PreLoadImportIniConfig.format_preload(_OVERRIDES_KEY)
CONFIG_PATH_FIELD = 'config_path'
"""The field name in this class of the child configuration path.
:see: :obj:`config_path`
"""
CLI_META = {'first_pass': True, # not a separate action
# the mnemonic must be unique and used to referece the method
'mnemonic_overrides': {'merge': '_merge_config_as_import'},
'mnemonic_includes': {'merge'},
# better/shorter long name, and reserve the short name
'option_overrides': {CONFIG_PATH_FIELD: {'long_name': 'config',
'short_name': 'c'}},
                # only the path to the configuration should be exposed as an
                # option on the command line
'option_includes': {CONFIG_PATH_FIELD}}
"""Command line meta data to avoid having to decorate this class in the
configuration. Given the complexity of this class, this configuration only
exposes the parts of this class necessary for the CLI.
"""
IMPORT_TYPE = 'import'
"""The string value of the ``type`` parameter in the section config identifying
an :class:`.ImportIniConfig` import section.
"""
ENVIRON_VAR_REGEX = re.compile(r'^.+\.([a-z]+?)$')
"""A regular expression to parse the name from the package name for the
environment variable that might hold the configuration
(i.e. ``APPNAMERC``).
"""
name: str = field()
"""The section name."""
config: Configurable = field()
"""The parent configuration, which is populated from the child configuration
(see class docs).
"""
expect: bool = field(default=True)
"""If ``True``, raise an :class:`.ApplicationError` if the option is not given.
"""
default: Path = field(default=None)
"""Use this file as the default when given on the command line, which is not
used unless :obj:``expect`` is set to ``False``.
If this is set to ``skip``, then do not load any file. This is useful when
the entire configuration is loaded by this class and there are
configuration mentions in the ``app.conf`` application context.
"""
config_path_environ_name: str = field(default=None)
"""An environment variable containing the default path to the configuration.
"""
type: str = field(default=None)
"""The type of :class:`.Configurable` use to create in
:class:`.ConfigurableFactory`. If this is not provided, the factory
decides based on the file extension.
:see: :class:`.ConfigurableFactory`
"""
arguments: Dict[str, Any] = field(default=None)
"""Additional arguments to pass to the :class:`.ConfigFactory` when created.
"""
section: str = field(default=None)
"""Additional which section to load as an import. This is only valid and used
when :obj:`type` is set to `import`. When it is, the section will replace
the string ``^{config_apth}`` (and any other field in this instance using
    the same syntax) and load the remaining indicated configuration using
:class:`~zensols.config.ImportIniConfig`.
See the `API documentation
<https://plandes.github.io/util/doc/config.html#import-ini-configuration>`_
for more information.
"""
config_path_option_name: str = field(default='config_path')
"""If not ``None``, the name of the option to set in the section defined for
this instance (section = :obj:`name`).
"""
debug: bool = field(default=False)
"""Printn the configuration after the merge operation."""
# name of this field must match
# :obj:`ConfigurationImporter.CONFIG_PATH_FIELD`
config_path: Path = field(default=None)
"""The configuration file."""
def get_environ_var_from_app(self) -> str:
"""Return the environment variable based on the name of the application. This
returns the :obj:`config_path_environ_name` if set, otherwise, it
generates it based on the name returned from the packge + ``RC`` and
capitalizes it.
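
        For example (a sketch), a package named ``zensols.myapp`` yields the
        environment variable ``MYAPPRC``.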
"""
if self.config_path_environ_name is not None:
name = self.config_path_environ_name
else:
pkg_res: PackageResource = self._app.factory.package_resource
name: str = pkg_res.name
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f"match environment variable '{name}' " +
f'on {self.ENVIRON_VAR_REGEX}')
m = self.ENVIRON_VAR_REGEX.match(name)
if m is not None:
name = m.group(1)
name = f'{name}rc'.upper()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f"using environment variable '{name}'")
return name
def _get_config_option(self) -> str:
"""Return the long option name (with dashes) as given on the command line.
"""
ameta: ActionMetaData = self._action.meta_data
ometa: OptionMetaData = ameta.options_by_dest[self.CONFIG_PATH_FIELD]
return ometa.long_option
def _application_created(self, app: Application, action: Action):
"""In this call back, set the app and action for using in the invocation
:meth:`merge`.
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'configurator created with {action}')
self._app = app
self._action = action
def _get_override_config(self) -> Configurable:
conf: Configurable = None
ac: Action
for ac in self._app.actions:
ctype: Type = ac.class_meta.class_type
if issubclass(ctype, ConfigurationOverrider):
opts: Dict[str, Any] = ac.command_action.options
sconf: str = opts.get(ConfigurationOverrider.OVERRIDE_FIELD)
params = {ConfigurationOverrider.OVERRIDE_FIELD: sconf}
co = ConfigurationOverrider(self.config, **params)
conf = co.get_configurable()
break
if conf is None:
conf = DictionaryConfig()
return conf
def _validate(self):
# the section attribute is only useful for ImportIniConfig imports
if self.type != self.IMPORT_TYPE and self.section is not None:
raise ActionCliError("Cannot have a 'section' entry " +
f"without type of '{self.IMPORT_TYPE}'")
def _populate_import_sections(self, config: Configurable) -> \
Tuple[Configurable, Set[str]]:
sec: Dict[str, str] = self.config.get_options(self.section)
secs: Dict[str, Dict[str, str]] = {}
populated_sec = {}
vals = self.asdict()
vals[self._OVERRIDES_KEY] = self._OVERRIDES_PRELOAD
for k, v in sec.items():
populated_sec[k] = v.format(**vals)
secs[ImportIniConfig.IMPORT_SECTION] = populated_sec
preload_keys: Set[str] = set()
if ImportIniConfig.SECTIONS_SECTION in populated_sec:
sub_secs: List[str] = self.config.serializer.parse_object(
self.config.get_option(
ImportIniConfig.SECTIONS_SECTION, self.section))
for sec in sub_secs:
repl_sec: Dict[str, str] = {}
secs[sec] = repl_sec
with rawconfig(config):
for k, v in config.get_options(sec).items():
tpl = _ConfiguratorImporterTemplate(v)
try:
vr = tpl.substitute(vals)
except KeyError as e:
raise ActionCliError(
f"Bad config load special key {e} in: '{v}'")
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'{sec}:{k}: {v} -> {vr}')
pls = self.config.serializer.parse_object(vr)
if isinstance(pls, (list, tuple)):
pls = map(
_PreLoadImportIniConfig.parse_preload, pls)
pls = filter(lambda x: x is not None, pls)
preload_keys.update(pls)
repl_sec[k] = vr
return DictionaryConfig(secs), preload_keys
def _load_configuration(self) -> Configurable:
"""Once we have the path and the class used to load the configuration, create
the instance and load it.
        Special handling is required to make options forward *and* backward
        propagate.
"""
if logger.isEnabledFor(logging.INFO):
logger.info('configurator loading section: ' +
f'{self.config.config_file}:[{self.name}]')
        # the modified configuration that will be returned
modified_config: Configurable = self.config
# sections created during this call to later be removed
secs_to_del: Set[str] = set()
# create the command line specified config
do_back_copy: bool = True
# config section sanity check
self._validate()
# create a configuration factory using the configuration file extension
if self.type is None:
args = {} if self.arguments is None else self.arguments
cf = ConfigurableFactory(kwargs=args)
if self.config_path is None:
# this happens when expect=False and no configuration is given
cl_config = DictionaryConfig()
else:
cl_config = cf.from_path(self.config_path)
logger.info(f'config loaded {self.config_path} as ' +
f'type {cl_config.__class__.__name__}')
# import using ImportIniConfig as a section
elif self.type == self.IMPORT_TYPE:
args: dict = {} if self.arguments is None else self.arguments
children: Tuple[Configurable, ...] = \
self._app.factory.children_configs
ini: IniConfig = IniConfig(self.config)
dconf: Configurable
preload_keys: Set[str]
dconf, preload_keys = self._populate_import_sections(ini)
preloads: Dict[str, Configurable] = {}
if self._OVERRIDES_KEY in preload_keys:
preloads[self._OVERRIDES_KEY] = self._get_override_config()
secs_to_del.update(dconf.sections)
dconf.copy_sections(ini)
if children is None:
# unit test cases will not have children configurables
children = ()
with rawconfig(ini):
cl_config = _PreLoadImportIniConfig(
preloads=preloads,
config_file=ini,
children=children,
**args)
with rawconfig(cl_config):
cl_config.copy_sections(self.config)
modified_config = cl_config
            # delete sections that were removed by the import
removed_secs: Set[str] = self.config.sections - cl_config.sections
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'removing additional sections: {removed_secs}')
secs_to_del.update(removed_secs)
# avoid the two way copy that happens later
do_back_copy = False
        # otherwise, use the type to tell the configuration factory how to
        # create it
else:
args = {'config_file': self.config_path}
if self.arguments is not None:
args.update(self.arguments)
cf = ConfigurableFactory(kwargs=args)
cl_config = cf.from_type(self.type)
logger.info(f'configurator loading {self.config_path} from ' +
f'type {self.type}')
# For non-import configs, first inject our app context (app.conf) to
# the command line specified configuration (--config) skipping sections
        # that have missing options.  Examples of those missing include
        # cyclical dependencies such as option references from our app context
        # to the command line context.
if do_back_copy:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'copying app config to {cl_config}')
with rawconfig(self.config):
self.config.copy_sections(cl_config)
# copy the command line config to our app context letting it barf with
# any missing properties this time
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'copying to app config: {cl_config}')
if do_back_copy:
with rawconfig(cl_config):
cl_config.copy_sections(self.config)
# if we imported, we created ImportIniConfig sections we need to remove
for sec in secs_to_del:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'removing (added) section: {sec}')
self.config.remove_section(sec)
if self.debug:
print(self.config.get_raw_str())
return modified_config
def _load(self) -> Configurable:
"""Load the configuration and update the application context.
"""
modified_config = self._load_configuration()
if self.config_path_option_name is not None:
val: str
if self.config_path is None:
val = 'None'
else:
val = f'path: {str(self.config_path)}'
self.config.set_option(self.config_path_option_name,
val, section=self.name)
return modified_config
def _reset(self):
"""Reset the Python logger configuration."""
root = logging.getLogger()
tuple(map(root.removeHandler, root.handlers[:]))
tuple(map(root.removeFilter, root.filters[:]))
def merge(self) -> Configurable:
"""Merge configuration at path to the current configuration.
:param config_path: the path to the configuration file
"""
        # the modified configuration that will be returned
modified_config: Configurable = self.config
env_var: str = None
rc_path: Path = None
if self.config_path is None:
env_var: str = self.get_environ_var_from_app()
env_var_path: str = os.environ.get(env_var)
if logger.isEnabledFor(logging.DEBUG):
            logger.debug('loading config from environment ' +
                         f"variable '{env_var}' = {env_var_path}")
if env_var_path is not None:
rc_path = Path(env_var_path)
if rc_path.exists():
self.config_path = rc_path
elif self.default is not None:
if self.default == 'skip':
self.config_path = None
else:
self.config_path = self.default
if self.config_path is None:
if self.expect:
lopt = self._get_config_option()
if env_var is not None and env_var_path is not None:
                    logger.warning(f'Environment variable {env_var} set to ' +
                                   f'non-existent path: {rc_path}')
raise ApplicationError(f'Missing option {lopt}')
else:
modified_config = self._load()
else:
modified_config = self._load()
return modified_config
def __call__(self) -> Configurable:
        return self.merge()
__author__ = 'Paul Landes'
from dataclasses import dataclass, field
import logging
import re
from zensols.config import Configurable, DictionaryConfig
from zensols.util import PackageResource
from .. import Action, Application, ApplicationObserver
logger = logging.getLogger(__name__)
@dataclass
class PackageInfoImporter(ApplicationObserver):
"""Adds a section to the configuration with the application package
information. The section to add is given in :obj:`section`, and the
key/values are:
* **name**: the package name (:obj:`.PackageResources.name`)
* **short_name**: a shorter package name useful for setting in logging
messages taken from :obj:`.PackageResources.name`
* **version**: the package version (:obj:`.PackageResources.version`)
This class is useful to configure the default application module given by
the package name for the :class:`.LogConfigurator` class.
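    Configuration example (the section name ``pkg_cli`` is illustrative)::
        [pkg_cli]
        class_name = zensols.cli.PackageInfoImporter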
"""
CLI_META = {'first_pass': True, # not a separate action
# since there are no options and this is a first pass, force
# the CLI API to invoke it as otherwise there's no indication
# to the CLI that it needs to be called
'always_invoke': True,
                # the mnemonic must be unique and used to reference the method
'mnemonic_overrides': {'add': '_add_package_info'},
'mnemonic_includes': {'add'},
                # no command line options are exposed since this first pass
                # action only adds a section with the package information
'option_includes': {}}
"""Command line meta data to avoid having to decorate this class in the
configuration. Given the complexity of this class, this configuration only
exposes the parts of this class necessary for the CLI.
"""
_BUT_FIRST_REGEX = re.compile(r'^[^.]+\.(.+)$')
config: Configurable = field()
"""The parent configuration, which is populated with the package
information.
"""
section: str = field(default='package')
"""The name of the section to create with the package information."""
def _application_created(self, app: Application, action: Action):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'configurator created with {action}')
self._app = app
self._action = action
def _short_name(self, pkg_res: PackageResource) -> str:
m: re.Match = self._BUT_FIRST_REGEX.match(pkg_res.name)
return pkg_res.name if m is None else m.group(1)
def add(self) -> Configurable:
"""Add package information to the configuration (see class docs).
"""
pkg_res: PackageResource = self._app.factory.package_resource
params = {'name': pkg_res.name,
'short_name': self._short_name(pkg_res),
'version': pkg_res.version}
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'adding package section: {self.section}={params}')
d_conf = DictionaryConfig({self.section: params})
d_conf.copy_sections(self.config)
return d_conf
def __call__(self) -> Configurable:
        return self.add()
__author__ = 'Paul Landes'
from typing import Dict, Type, Any, Optional, Tuple, Union
from dataclasses import dataclass, field
from enum import Enum, auto, EnumMeta
import sys
import os
import re
import logging
import inspect
from json import JSONEncoder
from io import TextIOBase
from pathlib import Path
import shutil
from zensols.config import (
Configurable, Dictable, ConfigFactory, DictionaryConfig
)
from zensols.introspect import ClassImporter
from .. import (
Action, ActionCli, ActionCliMethod, ActionMetaData, ActionCliManager,
Application, ApplicationObserver,
)
from .. import ConfigurationImporter
logger = logging.getLogger(__name__)
class ExportFormat(Enum):
"""The format for the environment export with the :class:`.ExportEnvironment`
first pass application.
"""
bash = auto()
make = auto()
class ListFormat(Enum):
"""Options for outputing the action list in :class:`.ListActions`.
"""
text = auto()
json = auto()
name = auto()
class ConfigFormat(Enum):
"""Options for outputing the action list in :class:`.ShowConfiguration`.
"""
text = auto()
ini = auto()
json = auto()
@dataclass
class DryRunApplication(object):
CLI_META = {'option_overrides': {'dry_run': {'short_name': 'd'}}}
dry_run: bool = field(default=False)
"""Don't do anything; just act like it."""
@dataclass
class ExportEnvironment(object):
"""The class dumps a list of bash shell export statements for sourcing in build
shell scripts.
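    For example, a section with the option ``data_dir = ./data`` is written in
    the default bash format as::
        export DATA_DIR="./data"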
"""
# we can't use "output_format" because ListActions would use the same
# causing a name collision
OUTPUT_FORMAT = 'export_output_format'
OUTPUT_PATH = 'output_path'
CLI_META = {'option_includes': {OUTPUT_FORMAT, OUTPUT_PATH},
'option_overrides':
{OUTPUT_FORMAT: {'long_name': 'expfmt',
'short_name': None},
OUTPUT_PATH: {'long_name': 'expout',
'short_name': None}}}
config: Configurable = field()
"""The configuration used to get section information used to generate the
export commands.
"""
section: str = field()
"""The section to dump as a series of export statements."""
output_path: Path = field(default=None)
"""The output file name for the export script."""
export_output_format: ExportFormat = field(default=ExportFormat.bash)
"""The output format."""
def _write(self, writer: TextIOBase):
exports: Dict[str, str] = self.config.populate(section=self.section)
if self.export_output_format == ExportFormat.bash:
fmt = 'export {k}="{v}"\n'
else:
fmt = '{k}={v}\n'
for k, v in exports.asdict().items():
writer.write(fmt.format(**{'k': k.upper(), 'v': v}))
def export(self) -> Path:
"""Create exports for shell sourcing."""
if self.output_path is None:
self._write(sys.stdout)
else:
with open(self.output_path, 'w') as f:
self._write(f)
return self.output_path
def __call__(self) -> Path:
return self.export()
@dataclass
class ListActions(ApplicationObserver, Dictable):
"""List command line actions with their help information.
"""
LONG_NAME_SKIP = 'add_ConfigFactoryAccessor_to_app_config'
"""The :class:`.ActionCliMethod` name to skip,which indicates the first pass
action used to get the application in the :class:`.CliHarness` used by
:class:`.ConfigFactoryAccessor`.
"""
# we can't use "output_format" because ExportEnvironment would use the same
# causing a name collision
OUTPUT_FORMAT = 'list_output_format'
CLI_META = {'option_includes': {OUTPUT_FORMAT},
'option_overrides':
{OUTPUT_FORMAT: {'long_name': 'lstfmt',
'short_name': None}}}
list_output_format: ListFormat = field(default=ListFormat.text)
"""The output format for the action listing."""
type_to_string: Dict[Type, str] = field(
default_factory=lambda: {Path: 'path'})
"""Map Python type to a string used in the JSON formatted list output."""
def __post_init__(self):
self._command_line = False
def _application_created(self, app: Application, action: Action):
"""In this call back, set the app and action for using in the invocation
:meth:`add`.
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'configurator created with {action}')
self._app = app
self._action = action
def _from_dictable(self, *args, **kwargs) -> Dict[str, Any]:
action_cli: ActionCli
ac_docs: Dict[str, str] = {}
for action_cli in self._app.factory.cli_manager.actions_ordered:
if not action_cli.first_pass:
name: str
meth: ActionCliMethod
for name, meth in action_cli.methods.items():
if name == self.LONG_NAME_SKIP:
continue
meta: ActionMetaData = meth.action_meta_data
if self._command_line:
md = meta.asdict()
del md['first_pass']
ac_docs[name] = md
else:
ac_docs[name] = meta.doc
return ac_docs
def list(self):
"""List all actions and help."""
class ActionEncoder(JSONEncoder):
def default(self, obj: Any) -> str:
if isinstance(obj, EnumMeta) or inspect.isclass(obj):
val = tm.get(obj)
if val is None:
val = ClassImporter.full_classname(obj)
return val
return JSONEncoder.default(self, obj)
tm = self.type_to_string
def list_json():
try:
self._command_line = True
print(self.asjson(indent=4, cls=ActionEncoder))
finally:
self._command_line = False
return {
ListFormat.name: lambda: print('\n'.join(self.asdict().keys())),
ListFormat.text: lambda: self.write(),
ListFormat.json: list_json,
}[self.list_output_format]()
def __call__(self):
return self.list()
@dataclass
class ShowConfiguration(object):
"""The class dumps a list of bash shell export statements for sourcing in build
shell scripts.
"""
# we can't use "output_format" because ListActions would use the same
# causing a name collision
OUTPUT_FORMAT = 'config_output_format'
OUTPUT_PATH = 'config_output_path'
SECTION_NAME = 'sections'
CLI_META = {'mnemonic_overrides': {'show_config': 'config'},
'option_includes': {OUTPUT_FORMAT, OUTPUT_PATH, SECTION_NAME},
'option_overrides':
{OUTPUT_FORMAT: {'long_name': 'cnffmt',
'short_name': None},
OUTPUT_PATH: {'long_name': 'cnfout',
'short_name': None},
SECTION_NAME: {'long_name': 'secs',
'short_name': None}}}
config_factory: ConfigFactory = field()
"""The configuration factory which is returned from the app."""
config_output_path: Path = field(default=None)
"""The output file name for the configuration."""
config_output_format: ConfigFormat = field(default=ConfigFormat.text)
"""The output format."""
def _write_config(self, writer: TextIOBase, fmt: ConfigFormat,
sections: str):
conf = self.config_factory.config
if sections is not None:
dconf = DictionaryConfig()
conf.copy_sections(dconf, re.split(r'\s*,\s*', sections))
conf = dconf
if fmt == ConfigFormat.text:
conf.write(writer=writer)
elif fmt == ConfigFormat.ini:
print(conf.get_raw_str().rstrip(), file=writer)
elif fmt == ConfigFormat.json:
print(conf.asjson(indent=4), file=writer)
def show_config(self, sections: str = None) -> Configurable:
"""Print the configuration and exit.
        :param sections: comma separated list of sections to show, all sections
                         if not given, or ``-`` to list only the section names
"""
fmt = self.config_output_format
if self.config_output_path is None:
writer = sys.stdout
else:
writer = open(self.config_output_path, 'w')
try:
if sections == '-':
secs = '\n'.join(sorted(self.config_factory.config.sections))
writer.write(secs + '\n')
else:
self._write_config(writer, fmt, sections)
finally:
if self.config_output_path is not None:
writer.close()
return self.config_factory
def __call__(self):
return self.show_config()
@dataclass
class EditConfiguration(object):
"""Edits the configuration file given on the command line. This must be added
*after* the :class:`~zensols.cli.ConfigurationImporter` class.
"""
CLI_META = {'option_includes': set(),
'mnemonic_overrides': {'edit_configuration': 'editconf'}}
config_factory: ConfigFactory = field()
"""The configuration factory which is returned from the app."""
section_name: str = field(default='config_cli')
"""The section of the CLI configuration that contains the entry."""
command: str = field(default='emacsclient -n {path}')
"""The command used on the :function:`os.system` command to edit the file.
"""
def edit_configuration(self):
"""Edit the configuration file."""
sec = self.config_factory(self.section_name)
attr: str = ConfigurationImporter.CONFIG_PATH_FIELD
path: Path = getattr(sec, attr)
path = str(path.absolute())
cmd = self.command.format(**dict(path=path))
if logger.isEnabledFor(logging.INFO):
logger.info(f'editing file: {path}')
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'system: {cmd}')
os.system(cmd)
@dataclass
class ProgramNameConfigurator(object):
"""Adds a section with the name of the program to use. This is useful for
    adding the program name to the beginning of logging lines to conform to
    UNIX line output standards.
To add it to the logging output add it to the
:class:`~zensols.cli.LogConfigurator` section's ``format`` property.
Example::
[add_prog_cli]
class_name = zensols.cli.ProgramNameConfigurator
default = someprog
[log_cli]
class_name = zensols.cli.LogConfigurator
format = ${prog:name}: %%(message)s
"""
CLI_META = {'first_pass': True, # not a separate action
# since there are no options and this is a first pass, force
# the CLI API to invoke it as otherwise there's no indication
# to the CLI that it needs to be called
'always_invoke': True,
# only the path to the configuration should be exposed as a
# an option on the comamnd line
'mnemonic_includes': {'add_program_name'},
'option_includes': {}}
config: Configurable = field()
"""The parent configuration, which is populated with the package
information.
"""
section: str = field(default='program')
"""The name of the section to create with the package information."""
default: str = field(default='prog')
"""The default progran name to use when can not be inferred."""
@classmethod
    def infer_program_name(cls, entry_path: str = None) -> Optional[str]:
"""Infer the program name using the system arguments.
        :param entry_path: used to infer the program name from the entry point
                           script, which defaults to ``sys.argv[0]``
"""
entry_path = sys.argv[0] if entry_path is None else entry_path
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'command line leading arg: <{entry_path}>')
if entry_path is not None and len(entry_path) > 0:
return Path(entry_path).stem
def create_section(self, entry_path: str = None) -> Dict[str, str]:
"""Return a dict with the contents of the program and name section.
        :param entry_path: used to infer the program name from the entry point
                           script, which defaults to ``sys.argv[0]``
"""
prog_name = self.infer_program_name(entry_path) or self.default
if logger.isEnabledFor(logging.INFO):
logger.info(f'using program name: {prog_name}')
return {self.section: {'name': prog_name}}
def add_program_name(self):
"""Add the program name as a single configuration section and parameter.
:see: :obj:`section`
:see: :obj:`default`
"""
d_conf = DictionaryConfig(self.create_section())
d_conf.copy_sections(self.config)
@dataclass
class Cleaner(DryRunApplication):
"""Clean (removes) files and directories not needed by the project. The first
tuple of paths will get deleted at any level, the next needs a level of 1
and so on.
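    A configuration sketch follows; the section name and paths are
    illustrative, and the ``eval:`` syntax for Python literals is assumed from
    the configuration API::
        [cleaner_cli]
        class_name = zensols.cli.Cleaner
        paths = eval: (('**/__pycache__',), ('target',))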
"""
CLASS_INSPECTOR = {}
CLI_META = ActionCliManager.combine_meta(
DryRunApplication,
{'mnemonic_includes': {'clean'},
'option_excludes': {'paths'},
'option_overrides': {'clean_level': {'long_name': 'clevel',
'short_name': None}}})
paths: Tuple[Tuple[Union[str, Path]]] = field(default=None)
"""Paths to delete (files or directories) with each group corresponding to a
level (see class docs).
"""
clean_level: int = field(default=0)
"""The level at which to delete."""
def __post_init__(self):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'cleaner: created with paths: {self.paths}')
def _remove_path(self, level: int, glob: Path) -> bool:
        if logger.isEnabledFor(logging.WARNING):
            logger.warning(f'cleaning at level {level} using {glob}')
if glob.parent.name == '**':
parent = glob.parent.parent
pat = f'{glob.parent}/{glob.name}'
else:
parent = glob.parent
pat = glob.name
for path in parent.glob(pat):
if path.exists():
                if logger.isEnabledFor(logging.WARNING):
                    logger.warning(f'removing (level {level}): {path}')
if not self.dry_run:
if path.is_dir():
shutil.rmtree(path)
else:
path.unlink()
def clean(self):
"""Clean up unecessary files."""
        if logger.isEnabledFor(logging.WARNING):
            logger.warning(f'cleaning at max level {self.clean_level}')
for level, paths in enumerate(self.paths):
if level <= self.clean_level:
for path in paths:
if isinstance(path, str):
path = Path(path).expanduser()
self._remove_path(level, path)
def __call__(self):
        self.clean()
__author__ = 'Paul Landes'
from typing import Dict, Union, Any
from dataclasses import dataclass, field
from enum import Enum
import logging
from logging import Logger
from pathlib import Path
from .. import ActionCliError, ApplicationError
logger = logging.getLogger(__name__)
class LogLevel(Enum):
"""Set of configurable log levels on the command line. Note that we don't
include all so as to not overwhelm the help usage.
"""
debug = logging.DEBUG
info = logging.INFO
warn = logging.WARNING
err = logging.ERROR
@dataclass
class LogConfigurator(object):
"""A simple log configuration utility.
"""
CLI_META = {'first_pass': True, # not a separate action
# don't add the '-l' as a short option
'option_overrides': {'level': {'short_name': None}},
# we configure this class, but use a better naming for
# debugging
'mnemonic_overrides': {'config': 'log'},
'mnemonic_includes': {'config'},
# only set 'level' as a command line option so we can configure
# the rest in the application context.
'option_includes': {'level'}}
"""Command line meta data to avoid having to decorate this class in the
configuration. Given the complexity of this class, this configuration only
exposes the parts of this class necessary for the CLI.
"""
log_name: str = field(default=None)
"""The log name space."""
default_level: LogLevel = field(default=None)
"""The root logger level."""
level: LogLevel = field(default=None)
"""The application logger level."""
default_app_level: LogLevel = field(default=LogLevel.info)
"""The default log level to set the application logger when not given on the
command line.
"""
config_file: Path = field(default=None)
"""If provided, configure the log system with this configuration file."""
format: str = field(default=None)
"""The format string to use for the logging system."""
loggers: Dict[str, Union[str, LogLevel]] = field(default=None)
"""Additional loggers to configure."""
debug: bool = field(default=False)
"""Print some logging to standard out to debug this class."""
def __post_init__(self):
if ((self.default_level is not None) or (self.format is not None)) \
and (self.config_file is not None):
raise ActionCliError(
"Cannot set 'default_level' or 'format' " +
"while setting a log configuration file 'config_file'")
if self.default_level is None:
self.default_level = LogLevel.warn
def _to_level(self, name: str, level: Any) -> int:
if isinstance(level, LogLevel):
level = level.value
elif isinstance(level, str):
obj = LogLevel.__members__.get(level)
if obj is None:
raise ApplicationError(f'No such level for {name}: {level}')
level = obj.value
if not isinstance(level, int):
raise ActionCliError(f'Unknown level: {level}({type(level)})')
return level
def _debug(self, msg: str):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(msg)
if self.debug:
print(msg)
def _config_file(self):
import logging.config
self._debug(f'configuring from file: {self.config_file}')
logging.config.fileConfig(self.config_file,
disable_existing_loggers=False)
def _config_basic(self):
self._debug(f'configuring root logger to {self.default_level}')
level: int = self._to_level('default', self.default_level)
params = {'level': level}
if self.format is not None:
params['format'] = self.format.replace('%%', '%')
self._debug(f'config log system with level {level} ' +
f'({self.default_level})')
logging.basicConfig(**params)
def config(self):
"""Configure the log system.
"""
modified_logger: Logger = None
if self.config_file is not None:
self._config_file()
else:
self._config_basic()
if self.log_name is not None:
app_level = self.default_app_level \
if self.level is None else self.level
level: int = self._to_level('app', app_level)
self._debug(f'setting logger {self.log_name} to {level} ' +
f'({app_level})')
modified_logger = logging.getLogger(self.log_name)
modified_logger.setLevel(level)
# avoid clobbering CLI given level with app config ``loggers`` entry
if self.level is None and self.loggers is not None:
for name, level in self.loggers.items():
level = self._to_level(name, level)
assert isinstance(level, int)
self._debug(f'setting logger: {name} -> {level}')
modified_logger = logging.getLogger(name)
modified_logger.setLevel(level)
return modified_logger
@classmethod
def set_format(cls, format: str):
"""Set the format of the logger after previously set using this class.
"""
root = logging.getLogger()
hdlr: logging.StreamHandler = root.handlers[0]
assert type(hdlr) == logging.StreamHandler
fmt = logging.Formatter(format)
hdlr.setFormatter(fmt)
@staticmethod
def reset():
"""Reset the logging system. All configuration is removed."""
from importlib import reload
logging.shutdown()
reload(logging)
def __call__(self):
        return self.config()
__author__ = 'Paul Landes'
from typing import Any, Set, Tuple, Union
import logging
import collections
from functools import reduce
from pathlib import Path
import shutil
from . import PersistableError, DirectoryStash
logger = logging.getLogger(__name__)
class MissingDataKeys(PersistableError):
def __init__(self, keys: Set[str]):
super().__init__(f'Missing data keys: {keys}')
self.keys = keys
class DirectoryCompositeStash(DirectoryStash):
"""A stash distributes the data of each item out over several directories.
On dumping, an attribute holding a ``dict`` is removed from the item, it's
data is persisted over multiple directories, then the attribute is restored
after pickling.
    The data is split up among groups of keys in the attribute ``dict`` of the
    item.  Persistence works similar to the parent :class:`DirectoryStash`,
    except the path points to a directory that has an instance of each item
    without the attribute (called the item instance directory), and the split
    data (called the composite data directory).
The composite data is grouped across keys from the composite attribute.
When the data is loaded, if no ``load_keys`` are requested from a group,
the data is not accessed. In this way, loading data becomes *much* faster
    for very large object data (i.e. matrices or tensors).
    For this reason, it is important to properly group your load keys so the
    most related data goes together.  This is because if even one key from a
    group is needed, the entire composite item for that group is loaded.
*Note:* If order of the data is important, use an instance of
:class:`collections.OrderedDict` as the attribute data.
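    A usage sketch, in which the path, groups and attribute name are
    illustrative::
        stash = DirectoryCompositeStash(
            path=Path('target/comp'),
            groups=({'counts', 'sums'}, {'matrix'}),
            attribute_name='results')
        # item.results must be a dict containing all grouped keys
        stash.dump('id1', item)
        item = stash.load('id1')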
"""
INSTANCE_DIRECTORY_NAME = 'inst'
COMPOSITE_DIRECTORY_NAME = 'comp'
def __init__(self, path: Path, groups: Tuple[Set[str]],
attribute_name: str, load_keys: Set[str] = None):
"""Initialize using the parent class's default pattern.
        :param path: the directory that will have two subdirectories with the
                     files, named :obj:`INSTANCE_DIRECTORY_NAME` and
                     :obj:`COMPOSITE_DIRECTORY_NAME`
:param groups: the groups of the ``dict`` composite attribute, which
are sets of keys, each of which are persisted to their
respective directory
:param attribute_name: the name of the attribute in each item to split
across groups/directories; the instance data to
persist has the composite attribute of type
``dict``
        :param load_keys: the keys used to load the data from the composite
                          stashes in to the attribute ``dict`` instance; only
                          these keys will exist in the loaded data, or ``None``
                          for all keys; this can be set after the creation of
                          the instance as well
"""
super().__init__(path)
self.attribute_name = attribute_name
self.load_keys = load_keys
if load_keys is not None and not isinstance(load_keys, set):
raise PersistableError(
f'Expecting set but got {load_keys} {type(load_keys)}')
self._top_level_dir = self.path
self.path = self.path / self.INSTANCE_DIRECTORY_NAME
self.groups = groups
@property
def groups(self) -> Tuple[Set[str]]:
"""The groups of the ``dict`` composite attribute, which are sets of
keys, each of which are persisted to their respective directory.
"""
return self._groups
@groups.setter
def groups(self, groups: Tuple[Set[str]]):
"""The groups of the ``dict`` composite attribute, which are sets of
keys, each of which are persisted to their respective directory.
"""
def map_group(group: Union[set, list, tuple]):
if not isinstance(group, (set, list, tuple)):
raise PersistableError(
f'Composition {group} is not type set: ({type(group)})')
return frozenset(group)
if len(groups) == 0:
raise PersistableError('Must have at least one group set')
groups = tuple(map(map_group, groups))
stashes = {}
comp_path: Path = self._top_level_dir / self.COMPOSITE_DIRECTORY_NAME
self._stash_by_group = {}
self._stash_by_attribute = stashes
self._all_keys = frozenset(reduce(lambda a, b: a | b, groups))
for group in groups:
name = '-'.join(sorted(group))
path = comp_path / name
comp_stash = DirectoryStash(path)
comp_stash.group = group
comp_stash.group_name = name
for k in group:
if k in stashes:
raise PersistableError(
f'Duplicate name \'{k}\' in {groups}')
stashes[k] = comp_stash
self._stash_by_group[name] = comp_stash
if logger.isEnabledFor(logging.INFO):
            logger.info(f'creating composite stash with groups: {groups}')
self._groups = groups
def _to_composite(self, data: dict) -> Tuple[str, Any, Tuple[str, Any]]:
"""Create the composite data used to by the composite stashes to
persist.
:param data: the data item stored as the attribute in ``inst`` to
persist
        :return: a tuple with the following:
            * context used when loading, which is the ordered keys for now
            * list of tuples, each having (stash group name, data dict)
"""
data_group = collections.defaultdict(lambda: {})
is_ordered = isinstance(data, collections.OrderedDict)
context = tuple(data.keys()) if is_ordered else None
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'keys: {data.keys()}, groups: {self._all_keys}')
missing_keys: Set[str] = self._all_keys - set(data.keys())
if len(missing_keys) > 0:
raise MissingDataKeys(missing_keys)
for k, v in data.items():
if k not in self._stash_by_attribute:
raise PersistableError(
                    f'Unmapped/ungrouped attribute: {k} in {self.groups}')
stash = self._stash_by_attribute[k]
data_group[stash.group_name][k] = v
data_group = tuple(data_group.items())
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'create group {data_group}')
return context, data_group
def dump(self, name: str, inst: Any):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'dump {name}({self.attribute_name}) ' +
f'-> {inst.__class__}')
org_attr_val = getattr(inst, self.attribute_name)
context, composite = self._to_composite(org_attr_val)
try:
setattr(inst, self.attribute_name, None)
for group_name, composite_inst in composite:
stash = self._stash_by_group[group_name]
stash.dump(name, composite_inst)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'dump composite {group_name}/{name}: ' +
f'context={context}, inst={composite_inst}')
super().dump(name, (inst, context))
finally:
setattr(inst, self.attribute_name, org_attr_val)
def _from_composite(self, name: str, context: Any) -> Any:
"""Restore the item's attribute ``dict`` values on load.
:param name: the ID key of the data item used in the composite stashes
:param context: the load context (see :meth:`_to_composite`)
"""
attr_name = self.attribute_name
comp_data = {}
attribs = set(self._stash_by_attribute.keys())
if self.load_keys is not None:
attribs = attribs & self.load_keys
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'load attribs: {attribs}')
for stash in self._stash_by_group.values():
if len(stash.group & attribs) > 0:
data = stash.load(name)
logger.debug(f'loaded: {data}')
if data is None:
raise PersistableError(
f'Missing composite data for id: {name}, ' +
f'stash: {stash.group}, path: {stash.path}, ' +
f'attribute: \'{attr_name}\'')
if self.load_keys is None:
comp_data.update(data)
else:
for k in set(data.keys()) & attribs:
comp_data[k] = data[k]
if context is not None:
ordered_data = collections.OrderedDict()
for k in context:
if k in comp_data:
ordered_data[k] = comp_data[k]
comp_data = ordered_data
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'comp_data: {comp_data}')
return comp_data
def load(self, name: str) -> Any:
inst, context = super().load(name)
attr_val = self._from_composite(name, context)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'loaded {name}({self.attribute_name})')
setattr(inst, self.attribute_name, attr_val)
return inst
def clear(self):
logger.info('DirectoryCompositeStash: clearing')
if self._top_level_dir.is_dir():
if logger.isEnabledFor(logging.INFO):
logger.info(f'deleting subtree: {self._top_level_dir}')
            shutil.rmtree(self._top_level_dir)
__author__ = 'Paul Landes'
import logging
from typing import Callable, Any, Iterable, Tuple
from dataclasses import dataclass, field, InitVar
from abc import ABCMeta
from itertools import chain
import parse
import pickle
from pathlib import Path
import zensols.util.time as time
from zensols.util import APIError
from . import (
PersistableError,
Stash,
ReadOnlyStash,
DelegateStash,
PrimablePreemptiveStash,
)
logger = logging.getLogger(__name__)
@dataclass
class OneShotFactoryStash(PrimablePreemptiveStash, metaclass=ABCMeta):
"""A stash that is populated by a callable or an iterable *worker*. The data
    is generated by the worker and dumped to the delegate.  This worker is
    either a callable (i.e. a function) or an iterable that returns tuples or
    lists of ``(key, object)`` pairs.
    To use this class, the worker must be set as the attribute ``worker``, or
    this class extended with ``worker`` defined as a property.
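    A subclass sketch follows; the class and keys are illustrative and a
    delegate stash is assumed to be configured by the superclass::
        @dataclass
        class RangeStash(OneShotFactoryStash):
            def worker(self):
                # generate (key, object) pairs dumped to the delegate
                return ((str(i), i * 2) for i in range(3))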
.. document private functions
.. automethod:: _get_worker_type
"""
def _get_worker_type(self) -> str:
"""Return the type of worker. This default implementation returns *unknown*.
If not implemented, when :meth:`_process_work` is called, the API has
use :func:`callable` to determine if the worker is a method or
property. Doing so accessing the property invoking potentially
unecessary work.
:return: ``u`` for unknown, ``m`` for method (callable), or ``a`` for
attribute or a property
"""
return 'u'
def _process_work(self):
"""Invoke the worker to generate the data and dump it to the delegate.
"""
wt = self._get_worker_type()
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'processing with {type(self.worker)}: type={wt}')
if wt == 'u':
if callable(self.worker):
wt = 'm'
else:
wt = 'a'
if wt == 'm':
itr = self.worker()
elif wt == 'a':
itr = self.worker
else:
raise APIError(f'Unknown worker type: {wt}')
for id, obj in itr:
self.delegate.dump(id, obj)
def prime(self):
has_data = self.has_data
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'asserting data: {has_data}')
if not has_data:
with time('processing work in OneShotFactoryStash'):
self._process_work()
self._reset_has_data()
@dataclass
class SortedStash(DelegateStash):
"""Specify an sorting to how keys in a stash are returned. This usually also
has an impact on the sort in which values are iterated since a call to get
the keys determins it.
"""
ATTR_EXP_META = ('sort_function',)
sort_function: Callable = field(default=None)
def __iter__(self):
return map(lambda x: (x, self.__getitem__(x),), self.keys())
def values(self) -> Iterable[Any]:
return map(lambda k: self.__getitem__(k), self.keys())
def items(self) -> Tuple[str, Any]:
return map(lambda k: (k, self.__getitem__(k)), self.keys())
def keys(self) -> Iterable[str]:
keys = super().keys()
if self.sort_function is None:
keys = sorted(keys)
else:
keys = sorted(keys, key=self.sort_function)
return keys
@dataclass
class DictionaryStash(Stash):
"""Use a dictionary as a backing store to the stash. If one is not provided in
the initializer a new ``dict`` is created.
"""
_data: dict = field(default_factory=dict)
"""The backing dictionary for the stash data."""
@property
def data(self):
return self._data
def load(self, name: str) -> Any:
return self.data.get(name)
def get(self, name: str, default: Any = None) -> Any:
return self.data.get(name, default)
def exists(self, name: str) -> bool:
return name in self.data
def dump(self, name: str, inst):
self.data[name] = inst
def delete(self, name=None):
del self.data[name]
def keys(self):
return self.data.keys()
def clear(self):
self.data.clear()
super().clear()
def __getitem__(self, key):
return self.data[key]
@dataclass
class CacheStash(DelegateStash):
"""Provide a dictionary based caching based stash.
"""
cache_stash: Stash = field(default_factory=lambda: DictionaryStash())
"""A stash used for caching (defaults to :class:`.DictionaryStash`)."""
def load(self, name: str):
if self.cache_stash.exists(name):
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'loading cached: {name}')
return self.cache_stash.load(name)
else:
obj = self.delegate.load(name)
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'loading from delegate, dumping to cache: {name}')
self.cache_stash.dump(name, obj)
return obj
def get(self, name: str, default: Any = None) -> Any:
item = self.load(name)
if item is None:
item = default
return item
def delete(self, name: str = None):
if self.cache_stash.exists(name):
self.cache_stash.delete(name)
if not isinstance(self.delegate, ReadOnlyStash):
self.delegate.delete(name)
def clear(self):
if not isinstance(self.delegate, ReadOnlyStash):
super().clear()
self.cache_stash.clear()
@dataclass
class DirectoryStash(Stash):
"""Creates a pickled data file with a file name in a directory with a given
pattern across all instances.
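    A usage sketch, in which the path and key are illustrative::
        stash = DirectoryStash(Path('target/data'))
        stash.dump('id1', {'some': 'data'})  # writes target/data/id1.dat
        obj = stash.load('id1')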
"""
ATTR_EXP_META = ('path', 'pattern')
path: Path = field()
"""The directory of where to store the files."""
pattern: str = field(default='{name}.dat')
"""The file name portion with ``name`` populating to the key of the data
value.
"""
def __post_init__(self):
if not isinstance(self.path, Path):
raise PersistableError(
f'Expecting pathlib.Path but got: {self.path.__class__}')
def assert_path_dir(self):
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'path {self.path}: {self.path.exists()}')
self.path.mkdir(parents=True, exist_ok=True)
def key_to_path(self, name: str) -> Path:
"""Return a path to the pickled data with key ``name``.
"""
fname = self.pattern.format(**{'name': name})
self.assert_path_dir()
return Path(self.path, fname)
def _load_file(self, path: Path) -> Any:
with open(path, 'rb') as f:
try:
return pickle.load(f)
except Exception as e:
raise PersistableError(f"Can not read {path}: {e}") from e
def _dump_file(self, inst: Any, path: Path):
with open(path, 'wb') as f:
pickle.dump(inst, f)
def load(self, name: str) -> Any:
path = self.key_to_path(name)
inst = None
if path.exists():
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'loading instance from {path}')
inst = self._load_file(path)
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'loaded instance: {name} -> {type(inst)}')
return inst
def exists(self, name) -> bool:
path = self.key_to_path(name)
return path.exists()
def keys(self) -> Iterable[str]:
def path_to_key(path):
p = parse.parse(self.pattern, path.name)
# avoid files that don't match the pattern
if p is not None:
p = p.named
if 'name' in p:
return p['name']
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'checking path {self.path} ({type(self)})')
if not self.path.is_dir():
keys = ()
else:
keys = filter(lambda x: x is not None,
map(path_to_key, self.path.iterdir()))
return keys
def dump(self, name: str, inst: Any):
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'saving instance: {name} -> {type(inst)}')
path = self.key_to_path(name)
self._dump_file(inst, path)
def delete(self, name: str):
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'deleting instance: {name}')
path = self.key_to_path(name)
if path.exists():
path.unlink()
def close(self):
pass
@dataclass
class IncrementKeyDirectoryStash(DirectoryStash):
"""A stash that increments integer value keys in a stash and dumps/loads using
the last key available in the stash.
"""
name: InitVar[str] = field(default='data')
"""The name of the :obj:`pattern` to use in the super class."""
def __post_init__(self, name: str):
super().__post_init__()
self.pattern = name + '-{name}.dat'
self._last_key = None
def get_last_key(self, inc: bool = False) -> str:
"""Get the last available (highest number) keys in the stash.
"""
if self._last_key is None:
keys = tuple(map(int, self.keys()))
if len(keys) == 0:
key = 0
else:
key = max(keys)
self._last_key = key
if inc:
self._last_key += 1
return str(self._last_key)
def keys(self) -> Iterable[str]:
def is_good_key(data):
try:
int(data)
return True
except ValueError:
return False
return filter(is_good_key, super().keys())
def dump(self, name_or_inst, inst=None):
"""If only one argument is given, it is used as the data and the key name is
derived from ``get_last_key``.
"""
if inst is None:
key = self.get_last_key(True)
inst = name_or_inst
else:
key = name_or_inst
path = self.key_to_path(key)
if logger.isEnabledFor(logging.DEBUG):
            self._debug(f'dumping result to {path}')
super().dump(key, inst)
def load(self, name: str = None) -> Any:
"""Just like ``Stash.load``, but if the key is omitted, return the value of the
last key in the stash.
"""
if name is None:
name = self.get_last_key(False)
if len(self) > 0:
return super().load(name)
@dataclass
class UnionStash(ReadOnlyStash):
"""A stash joins the data of many other stashes.
"""
stashes: Tuple[Stash, ...] = field()
"""The delegate constituent stashes used for each operation."""
def load(self, name: str) -> Any:
item = None
for stash in self.stashes:
item = stash.load(name)
if item is not None:
break
return item
def get(self, name: str, default: Any = None) -> Any:
return self.load(name)
def values(self) -> Iterable[Any]:
return chain.from_iterable(map(lambda s: s.values(), self.stashes))
def keys(self) -> Iterable[str]:
return chain.from_iterable(map(lambda s: s.keys(), self.stashes))
def exists(self, name: str) -> bool:
for stash in self.stashes:
if stash.exists(name):
return True
        return False
__author__ = 'Paul Landes'
from typing import Union, Any, Dict, Type, Tuple, ClassVar
import logging
import sys
import re
from copy import copy
import pickle
import time as tm
from datetime import datetime
import os
from pathlib import Path
from zensols.util import APIError
import zensols.util.time as time
from . import Deallocatable
logger = logging.getLogger(__name__)
class PersistableError(APIError):
"""Thrown for any persistable API error"""
pass
class FileTextUtil(object):
"""Basic file naming utility methods.
"""
_NORMALIZE_REGEX: ClassVar[re.Pattern] = re.compile(
r"""[ \\\[\]()\/()<>{}:;_`'"!@#$%^&*,+=.-]+""")
"""The default regular expression for :meth:`normalize_text`."""
@classmethod
def normalize_text(cls: Type, name: str, replace_char: str = '-',
lower: bool = True, regex: re.Pattern = None) -> str:
"""Normalize the name in to a string that is more file system friendly.
This removes special characters and replaces them with ``replace_char``.
:param name: the name to be normalized
:param replace_char: the character used to replace special characters
:param lower: whether to lowercase the text
:param regex: the regular expression that matches on text to remove
:return: the normalized name
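        For example::
            >>> FileTextUtil.normalize_text('A File (v2).txt')
            'a-file-v2-txt'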
"""
if lower:
name = name.lower()
regex = cls._NORMALIZE_REGEX if regex is None else regex
name = re.sub(regex, replace_char, name)
# remove beginning and trailing dashes
nlen = len(name)
if nlen > 1:
if name[0] == replace_char:
name = name[1:]
if nlen > 2 and name[-1] == replace_char:
name = name[:-1]
return name
@staticmethod
def byte_format(num: int, suffix: str = 'B') -> str:
"""Return a human readable string of the number of bytes ``num``.
:param num: the number of bytes to format
:param suffix: the suffix to append to the resulting string
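        For example, ``FileTextUtil.byte_format(2048)`` returns ``'2.0KiB'``.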
:attribution: `Fred Cirera <https://stackoverflow.com/questions/1094841/get-human-readable-version-of-file-size>`
"""
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(num) < 1024.0:
return f'{num:3.1f}{unit}{suffix}'
num /= 1024.0
return f'{num:.1f}Yi{suffix}'
@staticmethod
def unique_tracked_name(prefix: str, include_user: bool = True,
include_time: bool = True,
extension: str = None) -> str:
"""Create a unique file name useful for tracking files.
        :param prefix: the file name prefix used as the identifier
        :param include_user: whether to add the user name to the file name
        :param include_time: whether to add the current date and time to the
                             file name
        :param extension: the file extension to add, or ``None`` for no
                          extension
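        For example, ``unique_tracked_name('run', extension='log')`` might
        return something like ``'run-bob-mar02-1423.log'`` depending on the
        user and current time.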
"""
time: str = ''
user: str = ''
if include_time:
time = '-' + datetime.now().strftime('%b%d-%H%M')
if include_user:
user = os.environ['USER'] if 'USER' in os.environ else os.getlogin()
user = f'-{user}'
if extension is None:
extension = ''
else:
extension = f'.{extension}'
return f'{prefix}{user}{time}{extension}'.lower()
# class level persistence
class PersistedWork(Deallocatable):
"""This class caches data in the instance of the contained class and/or global
level. In addition, the data is also pickled to disk to avoid any
expensive recomputation of the data.
In order, it first looks for the data in ``owner``, then in globals (if
``cache_global`` is True), then it looks for the data on the file system.
If it can't find it after all of this it invokes function ``worker`` to
create the data and then pickles it to the disk.
This class is a callable itself, which is invoked to get or create the
work.
    There are two ways to implement the data/work creation: set the ``worker``
    attribute to a callable, or extend this class and override
    ``__do_work__``.
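    A usage sketch with a worker; the class, path and method names are
    illustrative::
        class Counter(object):
            def __init__(self):
                self._pw = PersistedWork(Path('target/counter.dat'), self)
                self._pw.worker = self._compute
            def _compute(self):
                return tuple(range(5))
            @property
            def counter(self):
                return self._pw()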
"""
def __init__(self, path: Union[str, Path], owner: object,
cache_global: bool = False, transient: bool = False,
initial_value: Any = None, mkdir: bool = False,
deallocate_recursive: bool = False,
recover_empty: bool = False):
"""Create an instance of the class.
        :param path: if type of ``pathlib.Path`` then use disk storage to cache
                     the pickled data, otherwise a string used to store in
                     the owner
:param owner: an owning class to get and retrieve as an attribute
        :param cache_global: cache the data in globals; this shares data across
                             instances but not classes
        :param transient: the data is not persisted to disk after invoking the
                          method
:param initial_value: if provided, the method is never called and this
value returned for all invocations
        :param mkdir: if ``path`` is a :class:`.Path` object, then recursively
                      create all directories needed to be able to persist the
                      file without missing directory IO errors
        :param deallocate_recursive: the ``recursive`` parameter passed to
                                     :meth:`.Deallocatable._try_deallocate` to
                                     try to deallocate the object graph
                                     recursively
:param recover_empty: if ``True`` and a ``path`` points to a zero size
file, treat it as data that has not yet been
generated; this is useful when a previous
exception was raised leaving a zero byte file
"""
super().__init__()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'pw inst: path={path}, global={cache_global}')
self.owner = owner
self.cache_global = cache_global
self.transient = transient
self.worker = None
if isinstance(path, Path):
self.path = path
self.use_disk = True
fname = FileTextUtil.normalize_text(str(self.path.absolute()), '_')
else:
self.path = Path(path)
self.use_disk = False
fname = str(path)
cstr = owner.__module__ + '.' + owner.__class__.__name__
self.varname = f'_{cstr}_{fname}_pwvinst'
if initial_value is not None:
self.set(initial_value)
self.mkdir = mkdir
self.deallocate_recursive = deallocate_recursive
self.recover_empty = recover_empty
def _info(self, msg, *args):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(self.varname + ': ' + msg, *args)
def clear_global(self):
"""Clear only any cached global data.
"""
vname = self.varname
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'global clearing {vname}')
if vname in globals():
if logger.isEnabledFor(logging.DEBUG):
logger.debug('removing global instance var: {}'.format(vname))
del globals()[vname]
def clear(self):
"""Clear the data, and thus, force it to be created on the next fetch.
This is done by removing the attribute from ``owner``, deleting it from
globals and removing the file from the disk.
"""
vname = self.varname
if self.use_disk and self.path.is_file():
if logger.isEnabledFor(logging.DEBUG):
logger.debug('deleting cached work: {}'.format(self.path))
self.path.unlink()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'owner exists: {self.owner is not None} ' +
f'has {vname}: {hasattr(self.owner, vname)}')
if self.owner is not None and hasattr(self.owner, vname):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('removing instance var: {}'.format(vname))
delattr(self.owner, vname)
self.clear_global()
def deallocate(self):
super().deallocate()
vname = self.varname
if self.owner is not None and hasattr(self.owner, vname):
obj = getattr(self.owner, vname)
self._try_deallocate(obj, self.deallocate_recursive)
delattr(self.owner, vname)
self.clear_global()
self.owner = None
def _do_work(self, *argv, **kwargs):
t0: float = tm.time()
obj: Any = self.__do_work__(*argv, **kwargs)
if logger.isEnabledFor(logging.INFO):
self._info('created work in {:2f}s, saving to {}'.format(
(tm.time() - t0), self.path))
return obj
def _load_or_create(self, *argv, **kwargs):
"""Invoke the file system operations to get the data, or create work.
If the file does not exist, calling ``__do_work__`` and save it.
"""
load_file: bool = self.path.is_file()
if load_file and self.recover_empty and self.path.stat().st_size == 0:
load_file = False
if load_file:
self._info('loading work from {}'.format(self.path))
with open(self.path, 'rb') as f:
try:
obj = pickle.load(f)
except EOFError as e:
raise PersistableError(f'Can not read: {self.path}') from e
else:
self._info('saving work to {}'.format(self.path))
if self.mkdir:
self.path.parent.mkdir(parents=True, exist_ok=True)
if not self.path.parent.is_dir():
raise PersistableError(
f'Parent directory does not exist: {self.path.parent}')
with open(self.path, 'wb') as f:
obj = self._do_work(*argv, **kwargs)
pickle.dump(obj, f)
return obj
def set(self, obj):
"""Set the contents of the object on the owner as if it were persisted
from the source. If this is a global cached instance, then add it to
global memory.
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'saving in memory value {type(obj)}')
vname = self.varname
if self.owner is None:
raise PersistableError(
f'Owner is not set for persistable: {vname}')
setattr(self.owner, vname, obj)
if self.cache_global:
if vname not in globals():
globals()[vname] = obj
def is_set(self) -> bool:
"""Return whether or not the persisted work has been engaged and has
data.
"""
vname = self.varname
if self.cache_global:
return vname in globals()
else:
return self.owner is not None and hasattr(self.owner, vname)
def __getstate__(self):
"""We must null out the owner and worker as they are not pickelable.
:see: :class:`.PersistableContainer`
"""
d = copy(self.__dict__)
d['owner'] = None
d['worker'] = None
return d
def __call__(self, *argv, **kwargs):
"""Return the cached data if it doesn't yet exist. If it doesn't exist,
create it and cache it on the file system, optionally ``owner`` and
optionally the globals.
"""
vname = self.varname
obj = None
if logger.isEnabledFor(logging.DEBUG):
logger.debug('call with vname: {}'.format(vname))
if self.owner is not None and hasattr(self.owner, vname):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('found in instance')
obj = getattr(self.owner, vname)
if obj is None and self.cache_global:
if vname in globals():
if logger.isEnabledFor(logging.DEBUG):
logger.debug('found in globals')
obj = globals()[vname]
if obj is None:
if self.use_disk:
obj = self._load_or_create(*argv, **kwargs)
else:
self._info('invoking worker')
obj = self._do_work(*argv, **kwargs)
self.set(obj)
return obj
def __do_work__(self, *argv, **kwargs):
"""You can extend this class and overriding this method. This method will
invoke the worker to do the work.
"""
return self.worker(*argv, **kwargs)
def write(self, indent=0, include_content=False, writer=sys.stdout):
sp = ' ' * indent
writer.write(f'{sp}{self}:\n')
sp = ' ' * (indent + 1)
writer.write(f'{sp}global: {self.cache_global}\n')
writer.write(f'{sp}transient: {self.transient}\n')
writer.write(f'{sp}type: {type(self())}\n')
if include_content:
writer.write(f'{sp}content: {self()}\n')
def _deallocate_str(self) -> str:
return f'{self.varname} => {type(self.owner)}'
def __str__(self):
return self.varname
def __repr__(self):
return self.__str__()
class PersistableContainerMetadata(object):
"""Provides metadata about :class:`.PersistedWork` definitions in the class.
"""
def __init__(self, container):
super().__init__()
self.container = container
@property
def persisted(self):
"""Return all ``PersistedWork`` instances on this object as a ``dict``.
"""
pws = {}
for k, v in self.container.__dict__.items():
if isinstance(v, PersistedWork):
pws[k] = v
return pws
def write(self, indent=0, include_content=False,
recursive=False, writer=sys.stdout):
sp = ' ' * indent
spe = ' ' * (indent + 1)
for k, v in self.container.__dict__.items():
if isinstance(v, PersistedWork):
v.write(indent, include_content, writer=writer)
else:
writer.write(f'{sp}{k}:\n')
writer.write(f'{spe}type: {type(v)}\n')
if include_content:
writer.write(f'{spe}content: {v}\n')
if recursive and isinstance(v, PersistableContainer):
cmeta = v._get_persistable_metadata()
cmeta.write(writer, indent + 2, include_content, True)
def clear(self):
"""Clear all ``PersistedWork`` instances on this object.
"""
for pw in self.persisted.values():
pw.clear()
class PersistableContainer(Deallocatable):
"""Classes can extend this that want to persist :class:`.PersistedWork`
instances, which otherwise are not persistable.
This class also manages the deallocation of all :class:`.PersistedWork`
attributes of the class, which might be another reason to use it even if
there isn't a persistence use case.
If the class level attribute ``_PERSITABLE_TRANSIENT_ATTRIBUTES`` is set,
all attributes given in this set will be set to ``None`` when pickled.
If the class level attribute ``_PERSITABLE_REMOVE_ATTRIBUTES`` is set, all
attributes given in this set will be set object deleted when pickled.
If the class level attribute ``_PERSITABLE_PROPERTIES`` is set, all
properties given will be accessed for force creation before pickling.
If the class level attribute ``_PERSITABLE_METHODS`` is set, all method
given will be accessed for force creation before pickling.
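    Example (a minimal sketch; ``compute_expensive_data`` is an illustrative
    placeholder)::
        class CachedClient(PersistableContainer):
            @property
            @persisted('_data')
            def data(self):
                # cached on the instance and deallocated with the container
                return compute_expensive_data()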
.. document private functions
.. automethod:: _clear_persistable_state
"""
def __getstate__(self) -> Dict[str, Any]:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'get state for {self.__class__}')
removes = set()
tran_attribute_name = '_PERSITABLE_TRANSIENT_ATTRIBUTES'
remove_attribute_name = '_PERSITABLE_REMOVE_ATTRIBUTES'
prop_attribute_name = '_PERSITABLE_PROPERTIES'
meth_attribute_name = '_PERSITABLE_METHODS'
if hasattr(self, prop_attribute_name):
for attr in getattr(self, prop_attribute_name):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'for property get: {attr}')
getattr(self, attr)
if hasattr(self, meth_attribute_name):
for attr in getattr(self, meth_attribute_name):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'for method get: {attr}')
getattr(self, attr)()
state = copy(self.__dict__)
if hasattr(self, tran_attribute_name):
tran_attribs = getattr(self, tran_attribute_name)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'transient attributes: {tran_attribs}')
removes.update(tran_attribs)
for k, v in state.items():
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'container get state: {k} => {type(v)}')
if isinstance(v, PersistedWork):
if v.transient:
removes.add(v.varname)
for k in removes:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'removed persistable attribute: {k}')
state[k] = None
if hasattr(self, remove_attribute_name):
remove_attribs = getattr(self, remove_attribute_name)
if logger.isEnabledFor(logging.DEBUG):
                logger.debug(f'remove attributes: {remove_attribs}')
for k in remove_attribs:
del state[k]
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'state keys for {self.__class__}: ' +
f'{", ".join(state.keys())}')
return state
def _clear_persistable_state(self):
"""Clear all cached state from all :class:`.PersistedWork` in this
instance.
"""
pws: Tuple[PersistedWork, ...] = tuple(filter(
lambda v: isinstance(v, PersistedWork),
self.__dict__.values()))
for v in pws:
v.clear()
def __setstate__(self, state: Dict[str, Any]):
"""Set the owner to containing instance and the worker function to the
owner's function by name.
"""
self.__dict__.update(state)
for k, v in state.items():
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'container set state: {k} => {type(v)}')
if isinstance(v, PersistedWork):
setattr(v, 'owner', self)
def _get_persistable_metadata(self) -> PersistableContainerMetadata:
"""Return the metadata for this container.
"""
return PersistableContainerMetadata(self)
def deallocate(self):
super().deallocate()
for pw in self._get_persistable_metadata().persisted.values():
pw.deallocate()
class persisted(object):
"""Class level annotation to further simplify usage with
:class:`.PersistedWork`.
:see: :class:`.PersistedWork`
For example::
class SomeClass(object):
@property
@persisted('_counter', 'tmp.dat')
def counter(self):
return tuple(range(5))
"""
def __init__(self, name: str, path: Path = None,
cache_global: bool = False, transient: bool = False,
allocation_track: bool = True, mkdir: bool = False,
deallocate_recursive: bool = False,
recover_empty: bool = False):
"""Initialize.
:param name: the name of the attribute on the instance to set with the
cached result of the method
        :param path: if set, the path where to store the cached result on the
file system
:param cache_global: if ``True``, globally cache the value at the class
definition level
        :param transient: if ``True``, persist only in memory and not on the
                          file system, which is needed when used with
                          :class:`.PersistableContainer`
:param allocation_track: if ``False``, immediately mark the backing
:class:`PersistedWork` as deallocated
        :param mkdir: if ``path`` is a :class:`.Path` object, then recursively
create all directories needed to be able to persist the
file without missing directory IO errors
        :param deallocate_recursive: the ``recursive`` parameter passed to
:meth:`.Deallocate._try_deallocate` to try to
deallocate the object graph recursively
:param recover_empty: if ``True`` and a ``path`` points to a zero size
file, treat it as data that has not yet been
generated; this is useful when a previous
exception was raised leaving a zero byte file
"""
super().__init__()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'persisted decorator on attr: {name}, ' +
f'global={cache_global}')
self.attr_name = name
self.path = path
self.cache_global = cache_global
self.transient = transient
self.allocation_track = allocation_track
self.mkdir = mkdir
self.deallocate_recursive = deallocate_recursive
self.recover_empty = recover_empty
def __call__(self, fn):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'call: {fn}:{self.attr_name}:{self.path}:' +
f'{self.cache_global}')
def wrapped(*argv, **kwargs):
inst = argv[0]
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'wrap: {fn}:{self.attr_name}:{self.path}:' +
f'{self.cache_global}')
pwork: PersistedWork
if hasattr(inst, self.attr_name):
pwork = getattr(inst, self.attr_name)
else:
if self.path is None:
path = self.attr_name
else:
path = Path(self.path)
pwork = PersistedWork(
path, owner=inst, cache_global=self.cache_global,
transient=self.transient,
mkdir=self.mkdir,
deallocate_recursive=self.deallocate_recursive,
recover_empty=self.recover_empty)
setattr(inst, self.attr_name, pwork)
if not self.allocation_track:
pwork._mark_deallocated()
if pwork is None:
raise PersistableError(
f'PersistedWork not found: {self.attr_name}')
pwork.worker = fn
return pwork(*argv, **kwargs)
# copy documentation over for Sphinx docs
wrapped.__doc__ = fn.__doc__
# copy annotations (i.e. type hints) over for Sphinx docs
wrapped.__annotations__ = fn.__annotations__
return wrapped
# resource/sql
class resource(object):
"""This annotation uses a template pattern to (de)allocate resources. For
example, you can declare class methods to create database connections and
    then close them.
    For example::
class CrudManager(object):
def _create_connection(self):
return sqlite3.connect(':memory:')
def _dispose_connection(self, conn):
conn.close()
@resource('_create_connection', '_dispose_connection')
def commit_work(self, conn, obj):
conn.execute(...)
"""
def __init__(self, create_method_name, destroy_method_name):
"""Create the instance based annotation.
:param create_method_name: the name of the method that allocates
:param destroy_method_name: the name of the method that deallocates
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'connection decorator {create_method_name} ' +
f'destructor method name: {destroy_method_name}')
self.create_method_name = create_method_name
self.destroy_method_name = destroy_method_name
def __call__(self, fn):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'connection call with fn: {fn}')
def wrapped(*argv, **kwargs):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'in wrapped {self.create_method_name}')
inst = argv[0]
resource = getattr(inst, self.create_method_name)()
try:
result = fn(inst, resource, *argv[1:], **kwargs)
finally:
getattr(inst, self.destroy_method_name)(resource)
return result
# copy documentation over for Sphinx docs
wrapped.__doc__ = fn.__doc__
# copy annotations (i.e. type hints) over for Sphinx docs
wrapped.__annotations__ = fn.__annotations__
return wrapped | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/persist/annotation.py | annotation.py |
__author__ = 'Paul Landes'
from typing import Any, Iterable, Tuple, Set
from dataclasses import dataclass, field, InitVar
from abc import abstractmethod, ABC, ABCMeta
import logging
import itertools as it
from . import PersistableError
logger = logging.getLogger(__name__)
class chunks(object):
"""An iterable that chunks any other iterable in to chunks. Each element
returned is a list of elemnets of the given size or smaller. That element
that might be smaller is the remainer of the iterable once it is exhausted.
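    Example (illustrative; output derived from the implementation)::
        >>> [chunk for chunk in chunks(range(7), 3)]
        [[0, 1, 2], [3, 4, 5], [6]]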
"""
def __init__(self, iterable: iter, size: int, enum: bool = False):
"""Initialize the chunker.
:param iterable: any iterable object
        :param size: the size of each chunk
        :param enum: if ``True``, each element of a chunk is a tuple of the
                     element's index within the chunk and the element itself
"""
self.iterable = iterable
self.size = size
self.enum = enum
def __iter__(self):
self.iterable_session = iter(self.iterable)
return self
def __next__(self):
ds = []
for e in range(self.size):
try:
obj = next(self.iterable_session)
except StopIteration:
break
if self.enum:
obj = (e, obj)
ds.append(obj)
if len(ds) == 0:
raise StopIteration()
return ds
class Stash(ABC):
"""A dictionary-like pure virtual class for CRUDing data, most of which read
and write to/from the file system. One major difference is dictionaries
iterate over keys while stashes iterate over items, which calls
:meth:`items`.
    Note that there are subtle differences between a :class:`Stash` and a
    ``dict`` when generating or accessing data. For example, when indexing,
    obtaining the value is sometimes *forced* by using some mechanism to
    create the item. When using :meth:`get`, this creation mechanism is
    relaxed for some implementations.
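    Example (a minimal sketch assuming ``stash`` is an instance of a concrete
    subclass)::
        stash['k'] = 1         # dispatches to dump('k', 1)
        print(stash['k'])      # dispatches to load; raises KeyError if absent
        print(stash.get('x'))  # returns None rather than raising
        for k, v in stash:     # iterates over (key, value) items
            print(k, v)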
"""
@abstractmethod
def load(self, name: str) -> Any:
"""Load a data value from the pickled data with key ``name``.
        Semantically, this method loads the data using the stash's
        implementation.
For example :class:`.DirectoryStash` loads the data from a file if it
exists, but factory type stashes will always re-generate the data.
:see: :meth:`get`
"""
pass
def get(self, name: str, default: Any = None) -> Any:
"""Load an object or a default if key ``name`` doesn't exist.
Semantically, this method tries not to re-create the data if it already
exists. This means that if a stash has built-in caching mechanisms,
this method uses it.
:see: :meth:`load`
"""
if self.exists(name):
item = self.load(name)
else:
item = default
return item
def exists(self, name: str) -> bool:
"""Return ``True`` if data with key ``name`` exists.
        **Implementation note**: This :meth:`.Stash.exists` method is very
        inefficient and should be overridden.
"""
for k in self.keys():
if k == name:
return True
return False
@abstractmethod
def dump(self, name: str, inst: Any):
"Persist data value ``inst`` with key ``name``."
pass
@abstractmethod
def delete(self, name: str = None):
"""Delete the resource for data pointed to by ``name`` or the entire
resource if ``name`` is not given.
"""
pass
def clear(self):
"""Delete all data from the from the stash.
**Important**: Exercise caution with this method, of course.
"""
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'clearing stash {self.__class__}')
for k in self.keys():
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'deleting key: {k}')
self.delete(k)
@abstractmethod
def keys(self) -> Iterable[str]:
"""Return an iterable of keys in the collection.
"""
pass
def key_groups(self, n):
"Return an iterable of groups of keys, each of size at least ``n``."
return chunks(self.keys(), n)
def values(self) -> Iterable[Any]:
"""Return the values in the hash.
"""
return map(lambda k: self.__getitem__(k), self.keys())
    def items(self) -> Iterable[Tuple[str, Any]]:
"""Return an iterable of all stash items."""
return map(lambda k: (k, self.__getitem__(k)), self.keys())
def _debug(self, msg: str):
"""Utility debugging method that adds the class name to the message to
document the source stash.
This makes no checks for if debugging is enabled since it is assumed
the caller will do so for avoiding double checks of the logger level.
"""
logger.debug(f'[{self.__class__.__name__}] {msg}')
def __getitem__(self, key):
item = self.get(key)
if item is None:
raise KeyError(key)
return item
def __setitem__(self, key, value):
self.dump(key, value)
def __delitem__(self, key):
self.delete(key)
def __contains__(self, key):
return self.exists(key)
def __iter__(self):
return map(lambda x: (x, self.__getitem__(x),), self.keys())
def __len__(self):
return len(tuple(self.keys()))
@dataclass
class NoopStash(Stash):
"""A stash that does nothing.
"""
def load(self, name: str) -> Any:
return None
def get(self, name: str, default: Any = None) -> Any:
return default
def exists(self, name: str) -> bool:
return False
def dump(self, name: str, inst: Any):
pass
def delete(self, name: str = None):
pass
def keys(self) -> Iterable[str]:
return iter(())
@dataclass
class ReadOnlyStash(Stash):
"""An abstract base class for subclasses that do not support write methods
(i.e. :meth:`dump`). This class is useful to extend for factory type
classes that generate data. Paired with container classes such as
:class:`.DictionaryStash` provide persistence in a reusable way.
    The only methods that need to be implemented are :meth:`load` and
:meth:`keys`. However, it is recommended to implement :meth:`exists` to
speed things up.
Setting attribute ``strict`` to ``True`` will raise a
:class:`.PersistableError` for any modification attempts. Otherwise,
setting it to ``False`` (the default) silently ignores and does nothing on
:meth:`.dump`, :meth:`delete` and :meth:`clear`.
Example::
class RangeStash(ReadOnlyStash):
def __init__(self, n, end: int = None):
super().__init__()
self.n = n
self.end = end
def load(self, name: str) -> Any:
if self.exists(name):
return name
def keys(self) -> Iterable[str]:
if self.end is not None:
return map(str, range(self.n, self.end))
else:
return map(str, range(self.n))
def exists(self, name: str) -> bool:
n = int(name)
if self.end is None:
if (n >= self.n):
return False
elif (n < self.n) or (n >= self.end):
return False
return True
"""
def __post_init__(self):
self.strict = False
def _ro_check(self, meth: str):
if self.strict:
meth: str = meth.capitalize()
raise PersistableError(
                f'{meth} not implemented for read only stashes ({type(self)})')
def dump(self, name: str, inst: Any):
self._ro_check('dump')
def delete(self, name: str = None):
self._ro_check('delete')
def clear(self):
self._ro_check('clear')
@dataclass
class CloseableStash(Stash):
"""Any stash that has a resource that needs to be closed.
"""
@abstractmethod
def close(self):
"Close all resources created by the stash."
pass
class DelegateDefaults(object):
"""Defaults set in :class:`.DelegateStash`.
"""
# setting to True breaks stash reloads from ImportConfigFactory, so set to
# True for tests etc
CLASS_CHECK = False
DELEGATE_ATTR = False
@dataclass
class DelegateStash(CloseableStash, metaclass=ABCMeta):
"""Delegate pattern. It can also be used as a no-op if no delegate is
given.
A minimum functioning implementation needs the :meth:`load` and
    :meth:`keys` methods overridden. Inheriting and implementing a
:class:`.Stash` such as this is usually used as the ``factory`` in a
:class:`.FactoryStash`.
This class delegates attribute fetches to the delegate for the
unimplemented methods and attributes using a decorator pattern when
attribute :py:obj:`delegate_attr` is set to ``True``.
    **Note:** Delegate attribute fetching can cause strange and unexpected
    behavior, so use this functionality with care. It is advised to leave it
    off if unexpected ``AttributeError`` exceptions are raised due to
    incorrect attribute access or method dispatching.
:see: :py:obj:`delegate_attr`
"""
delegate: Stash = field()
"""The stash to delegate method invocations."""
def __post_init__(self):
if self.delegate is None:
raise PersistableError('Delegate not set')
if not isinstance(self.delegate, Stash):
msg = f'not a stash: {self.delegate.__class__} or reloaded'
if DelegateDefaults.CLASS_CHECK:
raise PersistableError(msg)
else:
logger.warning(msg)
self.delegate_attr = DelegateDefaults.DELEGATE_ATTR
def __getattr__(self, attr, default=None):
if attr == 'delegate_attr':
return False
if self.delegate_attr:
try:
delegate = super().__getattribute__('delegate')
except AttributeError:
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute " +
f"'{attr}'; delegate not set'")
return delegate.__getattribute__(attr)
else:
return super().__getattribute__(attr)
def _debug_meth(self, meth: str):
if logger.isEnabledFor(logging.DEBUG):
self._debug(
f'calling method <{meth}> on delegate {type(self.delegate)}')
def load(self, name: str) -> Any:
self._debug_meth('load')
if self.delegate is not None:
return self.delegate.load(name)
def get(self, name: str, default: Any = None) -> Any:
"""Load an object or a default if key ``name`` doesn't exist.
        **Implementation note:** subclasses will probably want to override
        this method given the super method is cavalier about calling
        :meth:`exists` and :meth:`load`. Based on the implementation, this
can be problematic.
"""
self._debug_meth('get')
if self.delegate is None:
return super().get(name, default)
else:
return self.delegate.get(name, default)
def exists(self, name: str) -> bool:
self._debug_meth('exists')
if self.delegate is not None:
return self.delegate.exists(name)
else:
return False
def dump(self, name: str, inst):
self._debug_meth('dump')
if self.delegate is not None:
return self.delegate.dump(name, inst)
def delete(self, name=None):
self._debug_meth('delete')
if self.delegate is not None:
self.delegate.delete(name)
def keys(self) -> Iterable[str]:
self._debug_meth('keys')
if self.delegate is not None:
return self.delegate.keys()
return ()
def clear(self):
self._debug_meth('clear')
if self.delegate is not None:
if logger.isEnabledFor(logging.DEBUG):
self._debug(
f'calling super clear on {self.delegate.__class__}')
self.delegate.clear()
def close(self):
self._debug_meth('close')
if self.delegate is not None:
return self.delegate.close()
@dataclass
class ReadOnlyDelegateStash(DelegateStash, ReadOnlyStash):
"""Makes any stash read only.
"""
def __post_init__(self):
super().__post_init__()
ReadOnlyStash.__post_init__(self)
def dump(self, name: str, inst: Any):
ReadOnlyStash.dump(self, name, inst)
def delete(self, name: str = None):
ReadOnlyStash.delete(self, name)
def clear(self):
ReadOnlyStash.clear(self)
@dataclass
class KeyLimitStash(DelegateStash):
"""A stash that limits the number of generated keys useful for debugging.
For most stashes, this also limits the iteration output since that is based
on key mapping.
"""
ATTR_EXP_META = ('n_limit',)
n_limit: int = field()
"""The max number of keys provided as a slice of the delegate's keys."""
def keys(self) -> Iterable[str]:
ks = super().keys()
return it.islice(ks, self.n_limit)
def exists(self, name: str) -> bool:
return name in self.keys()
@dataclass
class KeySubsetStash(ReadOnlyDelegateStash):
"""A stash that exposes a subset of the keys available in the
:obj:`delegate`.
"""
key_subset: InitVar[Set[str]] = field()
"""A subset of the keys availble."""
dynamic_subset: InitVar[bool] = field()
"""Whether the delegate keys are dynamic, which forces inefficient key
checks on the delegate.
"""
def __post_init__(self, key_subset: Set[str], dynamic_subset: bool):
super().__post_init__()
self._key_subset = frozenset(key_subset)
self._dynamic_subset = dynamic_subset
def load(self, name: str) -> Any:
if self.exists(name):
return super().load(name)
def get(self, name: str, default: Any = None) -> Any:
if self.exists(name):
return super().get(name)
def keys(self) -> Iterable[str]:
        if self._dynamic_subset:
            # the delegate's keys may change, so compute membership on access
            return self._key_subset & frozenset(super().keys())
else:
return self._key_subset
def exists(self, name: str) -> bool:
if self._dynamic_subset and not super().exists(name):
return False
return name in self._key_subset
@dataclass
class PreemptiveStash(DelegateStash):
"""Provide support for preemptively creating data in a stash. It provides
this with :obj:`has_data` and provides a means of keeping track if the data
has yet been created.
**Implementation note**: This stash retrieves data from the delegate
without checking to see if it exists first since the data might not have
been (preemptively) yet created.
"""
def __post_init__(self):
super().__post_init__()
self._has_data = None
def get(self, name: str, default: Any = None) -> Any:
"""See class doc's implementation note."""
item = self.load(name)
if item is None:
item = default
return item
@property
def has_data(self) -> bool:
"""Return whether or not the stash has any data available or not.
"""
return self._calculate_has_data()
def _calculate_has_data(self) -> bool:
"""Return ``True`` if the delegate has keys.
"""
if self._has_data is None:
try:
next(iter(self.delegate.keys()))
self._has_data = True
except StopIteration:
self._has_data = False
return self._has_data
def _reset_has_data(self):
"""Reset the state of whether the stash has data or not.
"""
self._has_data = None
def _set_has_data(self, has_data: bool = True):
"""Set the state of whether the stash has data or not.
"""
self._has_data = has_data
def clear(self):
if logger.isEnabledFor(logging.DEBUG):
            self._debug('clearing stash and resetting has_data state')
super().clear()
self._reset_has_data()
class Primeable(ABC):
"""Any subclass that has the ability (and need) to do preprocessing. For
stashes, this means processing before an CRUD method is invoked. For all
other classes it usually is some processing that must be done in a single
process.
"""
def prime(self):
if logger.isEnabledFor(logging.INFO):
logger.info(f'priming {type(self)}...')
@dataclass
class PrimeableStash(Stash, Primeable):
"""Any subclass that has the ability to do processing before any CRUD method
is invoked.
"""
def prime(self):
if isinstance(self, DelegateStash) and \
isinstance(self.delegate, PrimeableStash):
self.delegate.prime()
def get(self, name: str, default: Any = None) -> Any:
self.prime()
return super().get(name, default)
def load(self, name: str) -> Any:
self.prime()
return super().load(name)
def keys(self) -> Iterable[str]:
self.prime()
return super().keys()
@dataclass
class PrimablePreemptiveStash(PrimeableStash, PreemptiveStash):
"""A stash that's primable and preemptive.
"""
pass
@dataclass
class ProtectiveStash(DelegateStash):
"""A stash that guards :meth:`dump` so that when :class:`Exception` is
    raised, the instance of the exception is dumped instead of the instance
    data.
"""
log_errors: bool = field()
"""When ``True`` log caught exceptions as warnings."""
def dump(self, name: str, inst: Any):
try:
super().dump(name, inst)
except Exception as e:
if self.log_errors:
logger.warning(f"Could not dump '{name}', using as value: {e}",
exc_info=True)
super().dump(name, e)
@dataclass
class FactoryStash(PreemptiveStash):
"""A stash that defers to creation of new items to another :obj:`factory`
    stash. It does this by first getting the data from the
    :obj:`delegate` stash; when it does not exist, it uses the
:obj:`factory` to create the data when loading with :meth:`load`.
Similarly, when accessing with :meth:`get` or indexing, the factory created
item is dumped back to the delegate when the delegate does not have it.
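    Example (a minimal sketch; assumes a dictionary backed delegate such as
    :class:`.DictionaryStash` and the ``RangeStash`` example defined above)::
        stash = FactoryStash(delegate=DictionaryStash(),
                             factory=RangeStash(5))
        stash['3']  # created by the factory, then cached in the delegate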
"""
ATTR_EXP_META = ('enable_preemptive',)
factory: Stash = field()
"""The stash used to create using ``load`` and ``keys``."""
enable_preemptive: bool = field(default=True)
"""If ``False``, do not invoke the super class's data calculation."""
dump_factory_nones: bool = field(default=True)
"""Whether to pass on ``None`` values to the delegate when the factory
creates them.
"""
def _calculate_has_data(self) -> bool:
if self.enable_preemptive:
return super()._calculate_has_data()
else:
return False
def load(self, name: str) -> Any:
item = super().load(name)
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'loaded item {name} -> {type(item)}')
if item is None:
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'resetting data and loading from factory: {name}')
item = self.factory.load(name)
if item is not None or self.dump_factory_nones:
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'dumping {name} -> {type(item)}')
super().dump(name, item)
self._reset_has_data()
if logger.isEnabledFor(logging.DEBUG):
self._debug(f'reset data: has_data={self.has_data}')
return item
def keys(self) -> Iterable[str]:
if self.has_data:
if logger.isEnabledFor(logging.DEBUG):
self._debug('super (delegate) keys')
ks = super().keys()
else:
if logger.isEnabledFor(logging.DEBUG):
self._debug('factory keys')
ks = self.factory.keys()
return ks
def clear(self):
super().clear()
if not isinstance(self.factory, ReadOnlyStash):
self.factory.clear()
@dataclass
class CacheFactoryStash(FactoryStash):
"""Like :class:`.FactoryStash` but suitable for :class:`.ReadOnlyStash`
factory instances that have a defined key set and only need a backing stash
for caching.
"""
dump_factory_nones: bool = field(default=False)
"""Whether to pass on ``None`` values to the delegate when the factory
creates them.
"""
def keys(self) -> Iterable[str]:
return self.factory.keys()
def exists(self, name: str) -> bool:
return self.factory.exists(name) | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/persist/domain.py | domain.py |
__author__ = 'Paul Landes'
from typing import Any, Union, Callable, Tuple, ClassVar, Dict
from abc import ABC
import logging
import collections
import traceback
from io import StringIO
from zensols.util import APIError
logger = logging.getLogger(__name__)
class Deallocatable(ABC):
"""All subclasses have the ability to deallocate any resources. This is
    useful for cases where there could be reference cycles, or where
    deallocation (i.e. of CUDA tensors) needs to happen immediately and
    faster than garbage collection would provide.
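    Example (a minimal sketch; the ``model`` attribute is illustrative)::
        class ModelHolder(Deallocatable):
            def deallocate(self):
                # free the attribute if it also extends Deallocatable
                self._deallocate_attribute('model')
                super().deallocate()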
.. document private functions
.. automethod:: _print_undeallocated
.. automethod:: _deallocate_attribute
.. automethod:: _try_deallocate
"""
PRINT_TRACE: ClassVar[bool] = False
"""When ``True``, print the stack trace when deallocating with
:meth:`deallocate`.
"""
ALLOCATION_TRACKING: ClassVar[bool] = False
"""Enables allocation tracking. When this if ``False``, this functionality
is not used and disabled.
"""
_ALLOCATIONS: Dict[int, Any] = {}
"""The data structure that retains all allocated instances.
"""
# when true, recurse through deallocatable instances while freeing
_RECURSIVE: ClassVar[bool] = False
def __init__(self):
super().__init__()
if self.ALLOCATION_TRACKING:
k = id(self)
sio = StringIO()
traceback.print_stack(file=sio)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'adding allocated key: {k} -> {type(self)}')
self._ALLOCATIONS[k] = (self, sio.getvalue())
def deallocate(self):
"""Deallocate all resources for this instance.
"""
k = id(self)
if self.PRINT_TRACE:
traceback.print_stack()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'deallocating {k}: {self._deallocate_str()}')
self._mark_deallocated(k)
@classmethod
def _num_deallocations(cls) -> int:
"""Return the number of objects currently allocated."""
return len(cls._ALLOCATIONS)
def _mark_deallocated(self, obj: Any = None):
"""Mark ``obj`` as deallocated regardless if it is, or ever will be
deallocated. After this is called, it will not be reported in such
methods as :meth:`_print_undeallocated`.
"""
if obj is None:
k = id(self)
else:
k = obj
if self.ALLOCATION_TRACKING:
if k in self._ALLOCATIONS:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'removing allocated key: {k}')
del self._ALLOCATIONS[k]
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'no key to deallocate: {k} ' +
f'({self._deallocate_str()})')
@staticmethod
def _try_deallocate(obj: Any, recursive: bool = False) -> bool:
"""If ``obj`` is a candidate for deallocation, deallocate it.
        :param obj: the object instance to deallocate
        :param recursive: if ``True``, recursively deallocate the elements of
                          tuples, lists, sets and the values of dicts
        :return: ``True`` if the object was deallocated, otherwise return
                 ``False`` indicating it could not be and was not deallocated
"""
cls = globals()['Deallocatable']
recursive = recursive or cls._RECURSIVE
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'trying to deallocate: {type(obj)}')
if isinstance(obj, cls):
obj.deallocate()
return True
elif recursive and isinstance(obj, (tuple, list, set)):
for o in obj:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'deallocate tuple item: {type(o)}')
cls._try_deallocate(o, recursive)
return True
elif recursive and isinstance(obj, dict):
for o in obj.values():
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'deallocate dict item: {type(o)}')
cls._try_deallocate(o, recursive)
return True
return False
def _deallocate_attribute(self, attrib: str) -> bool:
"""Deallocate attribute ``attrib`` if possible, which means it both
exists and extends from this class.
"""
deallocd = False
if hasattr(self, attrib):
inst = getattr(self, attrib)
deallocd = self._try_deallocate(inst)
if logger.isEnabledFor(logging.DEBUG):
                logger.debug(f'deallocated {type(self)}.{attrib}')
delattr(self, attrib)
return deallocd
def _deallocate_attributes(self, attribs: Tuple[str, ...]) -> int:
"""Deallocates all attributes in ``attribs`` using
:meth:`_deallocate_attribute`.
"""
cnt = 0
for attrib in attribs:
if self._deallocate_attribute(attrib):
cnt += 1
return cnt
@classmethod
def _print_undeallocated(cls, include_stack: bool = False,
only_counts: bool = False,
fail: bool = False):
"""Print all unallocated objects.
:param include_stack: if ``True`` print out the stack traces of all the
unallocated references; if ``only_counts`` is
``True``, this is ignored
:param only_counts: if ``True`` only print the counts of each
unallocated class with counts for each
:param fail: if ``True``, raise an exception if there are any
unallocated references found
"""
allocs = cls._ALLOCATIONS
if len(allocs) > 0:
print(f'total allocations: {len(allocs)}')
if only_counts:
cls_counts = collections.defaultdict(lambda: 0)
                # avoid shadowing this classmethod's ``cls`` parameter
                for typ in map(lambda o: type(o[0]), allocs.values()):
                    cls_counts[typ] += 1
for k in sorted(cls_counts.keys(), key=lambda x: x.__name__):
print(f'{k}: {cls_counts[k]}')
else:
for k, (v, stack) in allocs.items():
vstr = str(type(v))
if hasattr(v, 'name'):
vstr = f'{vstr} ({v.name})'
print(f'{k} -> {vstr}')
if include_stack:
print(stack)
if fail:
cls.assert_dealloc()
@classmethod
def _deallocate_all(cls):
"""Deallocate all the objects that have not yet been and clear the data
structure.
"""
allocs = cls._ALLOCATIONS
to_dealloc = tuple(allocs.values())
allocs.clear()
for obj, trace in to_dealloc:
obj.deallocate()
def _deallocate_str(self) -> str:
return str(self.__class__)
@classmethod
def assert_dealloc(cls):
cnt = len(cls._ALLOCATIONS)
if cnt > 0:
            raise APIError(f'resource leak with {cnt} instances')
class dealloc_recursive(object):
def __init__(self):
self.org_rec_state = Deallocatable._RECURSIVE
def __enter__(self):
Deallocatable._RECURSIVE = True
def __exit__(self, type, value, traceback):
Deallocatable._RECURSIVE = self.org_rec_state
class dealloc(object):
"""Object used with a ``with`` scope for deallocating any subclass of
:class:`.Deallocatable`. The first argument can also be a function, which
is useful when tracking deallocations when ``track`` is ``True``.
Example::
with dealloc(lambda: ImportClassFactory('some/path')) as fac:
return fac.instance('stash')
"""
def __init__(self, inst: Union[Callable, Deallocatable],
track: bool = False, include_stack: bool = False):
"""
:param inst: either an object instance to deallocate or a callable that
creates the instance to deallocate
:param track: when ``True``, set
:obj:`.Deallocatable.ALLOCATION_TRACKING` to ``True`` to
start tracking allocations
:param include_stack: adds stack traces in the call to
:meth:`.Deallocatable._print_undeallocated`
"""
self.track = track
self.include_stack = include_stack
self.org_track = Deallocatable.ALLOCATION_TRACKING
if track:
Deallocatable.ALLOCATION_TRACKING = True
if callable(inst) and not isinstance(inst, Deallocatable):
inst = inst()
self.inst = inst
def __enter__(self):
return self.inst
def __exit__(self, type, value, traceback):
self.inst.deallocate()
if self.track:
Deallocatable._print_undeallocated(self.include_stack)
Deallocatable.ALLOCATION_TRACKING = self.org_track | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/persist/dealloc.py | dealloc.py |
__author__ = 'Paul Landes'
from typing import Any, Iterable, Optional, List
from dataclasses import dataclass, field
import logging
import itertools as it
from pathlib import Path
import shelve as sh
from zensols.util.tempfile import tempfile
from zensols.persist import persisted, CloseableStash
logger = logging.getLogger(__name__)
@dataclass
class ShelveStash(CloseableStash):
"""Stash that uses Python's shelve library to store key/value pairs in DBM
databases.
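    Example (a minimal sketch; the path is illustrative)::
        stash = ShelveStash(path=Path('data/cache.db'))
        stash.dump('key', [1, 2, 3])
        assert stash.load('key') == [1, 2, 3]
        stash.close()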
"""
path: Path = field()
"""A file to be created to store and/or load for the data storage."""
writeback: bool = field(default=True)
"""The writeback parameter given to ``shelve``."""
auto_close: bool = field(default=True)
"""If ``True``, close the shelve for each operation."""
def __post_init__(self):
self.is_open = False
@classmethod
def get_extension(cls) -> str:
if not hasattr(cls, '_EXTENSION'):
ext: Optional[str] = None
with tempfile(create=False, remove=False) as path:
inst = sh.open(str(path.resolve()), writeback=False)
del_path: Path = None
try:
inst.close()
spaths: List[Path] = path.parent.glob(path.name + '*')
spath: Path
for spath in it.islice(spaths, 1):
ext = spath.suffix
if len(ext) > 1 and ext.startswith('.'):
ext = ext[1:]
del_path = spath
                    ext = None if ext is None or len(ext) == 0 else ext
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'found extension: <{ext}>')
finally:
if del_path is not None:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'deleting: {del_path}')
del_path.unlink()
cls._EXTENSION = ext
return cls._EXTENSION
@property
@persisted('_real_path')
def real_path(self) -> Path:
"""The path the shelve API created on this file system. This is provided since
:obj:`path` does *not* take in to account that some (G)DBM
implementations add an extension and others do not This differes across
libraries compiled against the Python interpreter and platorm.
"""
ext = ShelveStash.get_extension()
ext = '' if ext is None else f'.{ext}'
return self.path.parent / f'{self.path.name}{ext}'
@property
@persisted('_shelve')
def shelve(self):
"""Return an opened shelve mod:`shelve` object.
"""
if logger.isEnabledFor(logging.DEBUG):
exists: bool = self.real_path.exists()
logger.debug(f'creating shelve data, exists: {exists}')
if not self.is_open:
self.path.parent.mkdir(parents=True, exist_ok=True)
fname = str(self.path.absolute())
inst = sh.open(fname, writeback=self.writeback)
self.is_open = True
return inst
def _assert_auto_close(self):
if self.auto_close:
self.close()
def load(self, name: str) -> Any:
ret = None
if self.exists(name):
ret = self.shelve[name]
self._assert_auto_close()
return ret
def dump(self, name: str, inst: Any):
self.shelve[name] = inst
self._assert_auto_close()
def exists(self, name) -> bool:
exists = name in self.shelve
self._assert_auto_close()
return exists
def keys(self) -> Iterable[str]:
ret = self.shelve.keys()
if self.auto_close:
ret = tuple(ret)
self._assert_auto_close()
return ret
def delete(self, name: str = None):
"Delete the shelve data file."
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'deleting: {name}')
if name is None:
self.clear()
else:
del self.shelve[name]
def close(self):
"Close the shelve object, which is needed for data consistency."
if self.is_open:
logger.debug('closing shelve data')
            try:
                self.shelve.close()
                self._shelve.clear()
            finally:
                # mark closed so the property re-opens on the next access
                self.is_open = False
def clear(self):
self.close()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'clearing shelve data if exists: {self.real_path}')
if self.real_path.exists():
self.real_path.unlink()
class shelve(object):
"""Object used with a ``with`` scope that creates the closes a shelve object.
For example, the following opens a file ``path``, sets a temporary variable
``stash``, prints all the data from the shelve, and then closes it.
Example::
with shelve(path) as stash:
            for id, val in stash:
print(f'{id}: {val}')
"""
def __init__(self, *args, **kwargs):
self.shelve = ShelveStash(*args, **kwargs)
def __enter__(self):
return self.shelve
def __exit__(self, type, value, traceback):
self.shelve.close() | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/persist/shelve.py | shelve.py |
__author__ = 'Paul Landes'
from typing import Iterable, Union
from dataclasses import dataclass
from zipfile import ZipFile
from pathlib import Path
from . import PersistableError, persisted, PersistedWork, ReadOnlyStash
@dataclass(init=False)
class ZipStash(ReadOnlyStash):
"""Acesss a zip file by using the entry file names as keys and the content of
the entries as items. The returned items are either byte arrays if created
without an encoding, otherwise decode strings are returned.
A root path can be specified so the zip file appears to have been created
in a sub-directory.
*Implementation note*: keys are cached to speed up access and cleared if
    the path is set on the instance.
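    Example (a minimal sketch; the zip file and entry names are
    illustrative)::
        stash = ZipStash(Path('corpus.zip'), root='data', encoding='utf-8')
        text: str = stash['readme.txt']  # reads entry 'data/readme.txt'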
"""
def __init__(self, path: Path, root: str = None, encoding: str = None):
"""See class docs.
:param path: the zip file path
:param root: the sub-directory in the zip file to base look ups (see
class doc)
:param encoding: if provided, returned items will be strings decoded
with this encoding (such as ``utf-8``)
"""
super().__init__()
if root is not None and (root.startswith('/') or root.endswith('/')):
raise PersistableError(
f"Roots can not start or end with '/': {root}")
self._path = path
self._root = root
self._encoding = encoding
self._keys = PersistedWork('_keys', self)
@property
def path(self) -> Path:
"""The zip file path."""
return self._path
@path.setter
def path(self, path: Path):
"""The zip file path."""
self._path = path
self._keys.clear()
def _map_name(self, name: str):
"""Create an absolute entry name from the item name (key)."""
if self._root is not None:
name = self._root + '/' + name
return name
def load(self, name: str) -> Union[bytearray, str]:
if name in self._key_set():
name = self._map_name(name)
with ZipFile(self.path) as z:
with z.open(name) as myfile:
inst: bytearray = myfile.read()
if self._encoding is not None:
inst = inst.decode(self._encoding)
return inst
def keys(self) -> Iterable[str]:
return iter(self._key_set())
@persisted('_key_set_pw')
def _key_set(self) -> Iterable[str]:
root = self._root
rlen = None if self._root is None else len(root)
keys = []
with ZipFile(self.path) as z:
keys.extend(filter(lambda n: not n.endswith('/'), z.namelist()))
if self._root is not None:
keys = map(lambda k: k[rlen+1:] if k.startswith(root) else None,
keys)
keys = filter(lambda k: k is not None, keys)
return set(keys)
def exists(self, name: str) -> bool:
return name in self._key_set() | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/persist/zip.py | zip.py |
__author__ = 'Paul Landes'
from typing import Union
import logging
from logging import Logger
import inspect
import time as tm
import traceback as trc
from functools import wraps
from io import TextIOBase
import errno
import os
import signal
_time_logger = logging.getLogger(__name__)
TIMEOUT_DEFAULT = 10
class TimeoutError(Exception):
"""Raised when a time out even occurs in :func:`.timeout` or
:class:`.timeprotect`.
"""
pass
class time(object):
"""Used in a ``with`` scope that executes the body and logs the elapsed time.
Format f-strings are supported as the locals are taken from the calling
    frame on exit. This means you can do things like::
with time('processed {cnt} items'):
cnt = 5
tm.sleep(1)
    which produces: ``processed 5 items``.
See the initializer documentation about special treatment for global
loggers.
"""
def __init__(self, msg: str = 'finished', level=logging.INFO,
logger: Union[Logger, TextIOBase] = None):
"""Create the time object.
If a logger is not given, it is taken from the calling frame's global
variable named ``logger``. If this global doesn't exit it logs to
standard out. Otherwise, standard out/error can be used if given
:obj:`sys.stdout` or :obj:`sys.stderr`.
:param msg: the message log when exiting the closure
:param logger: the logger to use for logging or a file like object
                       (i.e. :obj:`sys.stdout`) as a data sink
:param level: the level at which the message is logged
"""
self.msg = msg
self.level = level
if logger is None:
frame = inspect.currentframe()
try:
globs = frame.f_back.f_globals
if 'logger' in globs:
logger = globs['logger']
except Exception as e:
_time_logger.error(
f"Error in initializing time: {e} with '{msg}'",
exc_info=True)
trc.print_exc()
self.logger = logger
@staticmethod
def format_elapse(msg: str, seconds: int):
mins = seconds / 60.
hours = mins / 60.
mins = int(mins % 60)
hours = int(hours)
sec_int = float(int(seconds % 60))
sec_dec = seconds - int(seconds)
lsd = sec_int + sec_dec
tparts = []
if hours > 0:
suffix = 's' if hours > 1 else ''
tparts.append(f'{hours} hour{suffix}')
if mins > 0:
suffix = 's' if mins > 1 else ''
tparts.append(f'{mins} minute{suffix}')
sfmt = '{:.0f}s'
else:
if sec_int > 0:
sfmt = '{:.2f}s'
else:
                # convert the fractional second to milliseconds
                lsd = int(lsd * 1000)
sfmt = '{:d}ms'
tparts.append(sfmt.format(lsd))
return f'{msg} in ' + ', '.join(tparts)
def __enter__(self):
self.t0 = tm.time()
def __exit__(self, type, value, traceback):
seconds = tm.time() - self.t0
msg = self.msg
frame = inspect.currentframe()
try:
locals = frame.f_back.f_locals
msg = msg.format(**locals)
except Exception as e:
_time_logger.error(
f"Error in exiting time: {e} with '{msg}'", exc_info=True)
msg = self.format_elapse(msg, seconds)
if self.logger is None:
print(msg)
elif isinstance(self.logger, Logger):
self.logger.log(self.level, msg, stacklevel=2)
else:
self.logger.write(msg + '\n')
def timeout(seconds=TIMEOUT_DEFAULT, error_message=os.strerror(errno.ETIME)):
"""This creates a decorator called @timeout that can be applied to any long
running functions.
So, in your application code, you can use the decorator like so::
from timeout import timeout
# Timeout a long running function with the default expiry of
# TIMEOUT_DEFAULT seconds.
        @timeout()
def long_running_function1():
pass
    This was derived from `David Narayan's <https://stackoverflow.com/questions/2281850/timeout-function-if-it-takes-too-long-to-finish>`_
StackOverflow thread.
"""
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wraps(func)(wrapper)
return decorator
class timeprotect(object):
"""Invokes a block and bails if not completed in a specified number of seconds.
:param seconds: the number of seconds to wait
:param timeout_handler: function that takes a single argument, which is
this ``timeprotect`` object instance; if ``None``,
then nothing is done if the block times out
:param context: an object accessible from the ``timeout_hander`` via
``self``, which defaults to ``None``
:see: :func:`timeout`
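    Example (a minimal sketch; the handler and block body are illustrative)::
        with timeprotect(seconds=5,
                         timeout_handler=lambda tp: print('timed out')):
            long_running_call()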
"""
def __init__(self, seconds=TIMEOUT_DEFAULT, timeout_handler=None,
context=None, error_message=os.strerror(errno.ETIME)):
self.seconds = seconds
self.timeout_handler = timeout_handler
self.context = context
self.error_message = error_message
self.timeout_handler_exception = None
def __enter__(self):
def _handle_timeout(signum, frame):
signal.alarm(0)
if self.timeout_handler is not None:
try:
self.timeout_handler(self)
except Exception as e:
_time_logger.exception(
f'could not recover from timeout handler: {e}')
self.timeout_handler_exception = e
raise TimeoutError(self.error_message)
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(self.seconds)
def __exit__(self, cls, value, traceback):
signal.alarm(0)
return True | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/util/time.py | time.py |
__author__ = 'Paul Landes'
from typing import Optional
from dataclasses import dataclass, field
import logging
from pathlib import Path
import pkg_resources as pkg
logger = logging.getLogger(__name__)
@dataclass
class PackageResource(object):
"""Contains resources of installed Python packages. It makes the
:obj:`distribution` available and provides access to to resource files with
:meth:`get_path` and as an index.
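    Example (a minimal sketch; the package and resource names are
    illustrative)::
        pr = PackageResource('zensols.someappname')
        if pr.exists:
            print(pr.version)
        path: Path = pr.get_path('resources/app.conf')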
"""
name: str = field()
"""The name of the module (i.e. zensols.someappname)."""
file_system_defer: bool = field(default=True)
"""Whether or not to return resource paths that point to the file system when
this package distribution does not exist.
:see: :meth:`get_path`
"""
@property
def distribution(self) -> Optional[pkg.DistInfoDistribution]:
"""The package distribution.
:return: the distribution or ``None`` if it is not installed
"""
if not hasattr(self, '_dist'):
try:
self._dist = pkg.get_distribution(self.name)
except pkg.DistributionNotFound:
logger.info(f'no distribution found: {self.name}')
self._dist = None
return self._dist
@property
def exists(self) -> bool:
"""Return if the package exists and installed.
"""
return self.distribution is not None
@property
def version(self) -> Optional[str]:
"""Return the version if the package exists.
"""
if self.exists:
return self.distribution.version
def get_path(self, resource: str) -> Optional[Path]:
"""Return a resource file name by name. Optionally return resource as a
relative path if the package does not exist.
:param resource: a forward slash (``/``) delimited path
(i.e. ``resources/app.conf``) of the resource name
:return: a path to that resource on the file system or ``None`` if the
package doesn't exist, the resource doesn't exist and
:obj:`file_system_defer` is ``False``
"""
res_name = str(Path(*resource.split('/')))
path = None
if self.exists and pkg.resource_exists(self.name, res_name):
path = pkg.resource_filename(self.name, res_name)
path = Path(path)
        elif self.file_system_defer:
            # fall back to a relative file system path per the docstring
            path = Path(res_name)
return path
def __getitem__(self, resource: str) -> Path:
if not self.exists:
raise KeyError(f'package does not exist: {self.name}')
res = self.get_path(resource)
if res is None:
raise KeyError(f'no such resource file: {resource}')
return res
def __str__(self) -> str:
if self.exists:
return str(self.distribution)
else:
return self.name | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/util/pkgres.py | pkgres.py |
__author__ = 'Paul Landes'
from typing import List, Union
import logging
from logging import Logger
import sys
import threading
from io import StringIO
class LoggerStream(object):
"""Each line of standard out/error becomes a logged line
"""
def __init__(self, logger, log_level=logging.INFO):
self.logger = logger
self.log_level = log_level
self.linebuf = ''
def write(self, c):
if c == '\n':
self.logger.log(self.log_level, self.linebuf.rstrip())
self.linebuf = ''
else:
self.linebuf += c
def flush(self):
if len(self.linebuf) > 0:
self.write('\n')
class LogLevelSetFilter(object):
def __init__(self, levels):
self.levels = levels
def filter(self, record):
return record.levelno in self.levels
class StreamLogDumper(threading.Thread):
"""Redirect stream output to a logger in a running thread.
"""
def __init__(self, stream, logger, level):
super().__init__()
self.stream = stream
self.logger = logger
self.level = level
def run(self):
with self.stream as s:
for line in iter(s.readline, b''):
line = line.decode('utf-8')
line = line.rstrip()
self.logger.log(self.level, line)
@staticmethod
def dump(stdout, stderr, logger: Logger):
StreamLogDumper(stdout, logger, logging.INFO).start()
StreamLogDumper(stderr, logger, logging.ERROR).start()
class LogConfigurer(object):
"""Configure logging to go to a file or Graylog.
"""
def __init__(self, logger=logging.getLogger(None),
log_format='%(asctime)s %(levelname)s %(message)s',
level=None):
self.log_format = log_format
self.logger = logger
if level is not None:
self.logger.setLevel(level)
self.level = level
def config_handler(self, handler):
if self.log_format is not None:
formatter = logging.Formatter(self.log_format)
handler.setFormatter(formatter)
self.logger.addHandler(handler)
return handler
def config_stream(self, stdout_stream, stderr_stream=None):
out = logging.StreamHandler(stdout_stream)
if stderr_stream is not None:
err = logging.StreamHandler(stderr_stream)
err.addFilter(LogLevelSetFilter({logging.ERROR}))
out.addFilter(LogLevelSetFilter(
{logging.WARNING, logging.INFO, logging.DEBUG}))
self.config_handler(err)
self.config_handler(out)
def config_buffer(self):
log_stream = StringIO()
self.config_stream(log_stream)
return log_stream
def config_file(self, file_name):
return self.config_handler(logging.FileHandler(file_name))
def config_basic(self):
logging.basicConfig(format=self.log_format, level=self.level)
def capture(self,
stdout_logger=logging.getLogger('STDOUT'),
stderr_logger=logging.getLogger('STDERR')):
if stdout_logger is not None:
sys.stdout = LoggerStream(stdout_logger, logging.INFO)
if stderr_logger is not None:
sys.stderr = LoggerStream(stderr_logger, logging.INFO)
class loglevel(object):
"""Object used with a ``with`` scope that sets the logging level temporarily
and sets it back.
Example::
with loglevel(__name__):
logger.debug('test')
with loglevel(['zensols.persist', 'zensols.config'], init=True):
logger.debug('test')
"""
def __init__(self, name: Union[List[str], str, None] = '',
level: int = logging.DEBUG, init: Union[bool, int] = None,
enable: bool = True):
"""Configure the temporary logging setup.
:param name: the name of the logger to set, or if a list is passed,
configure all loggers in the list; if a string, configure
all logger names split on spaces; if ``None`` or
``False``, do not configure anything (handy for REPL
prototyping); default to the root logger to log everything
:param level: the logging level, which defaults to :obj:`logging.DEBUG`
:param init: if not ``None``, initialize logging with
:func:`logging.basicConfig` using the given level or
``True`` to use :obj:`logging.WARNING`
:param enable: if ``False``, disable any logging configuration changes
for the block
"""
if name is None or not name:
name = ()
elif isinstance(name, str):
name = name.split()
if enable:
self.loggers = tuple(map(logging.getLogger, name))
else:
self.loggers = ()
self.initial_levels = tuple(map(lambda lg: lg.level, self.loggers))
self.level = level
if init is not None and enable:
if init is True:
init = logging.WARNING
logging.basicConfig(level=init)
def __enter__(self):
for lg in self.loggers:
lg.setLevel(self.level)
def __exit__(self, type, value, traceback):
for lg, lvl in zip(self.loggers, self.initial_levels):
lg.setLevel(lvl) | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/util/log.py | log.py |
__author__ = 'Paul Landes'
from typing import Union, Iterable, Optional
from dataclasses import dataclass, field
import logging
from logging import Logger
from pathlib import Path
import subprocess
from subprocess import Popen
from zensols.util import StreamLogDumper
logger = logging.getLogger(__name__)
@dataclass
class Executor(object):
"""Run a process and log output. The process is run in the foreground by
    default, or in the background. If the latter, a process object is
    returned from
:meth:`run`.
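    Example (a minimal sketch; the command is illustrative)::
        executor = Executor(logger)
        executor('ls -l')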
"""
logger: Logger = field()
"""The client logger used to log output of the process."""
dry_run: bool = field(default=False)
"""If ``True`` do not do anything, just log as if it were to act/do
something.
"""
check_exit_value: int = field(default=0)
"""Compare and raise an exception if the exit value of the process is not
this number, or ``None`` to not check.
"""
timeout: int = field(default=None)
"""The wait timeout in :meth:`wait`."""
async_proc: bool = field(default=False)
"""If ``True``, return a process from :meth:`run`, which calls
:meth:`wait`.
"""
working_dir: Path = field(default=None)
"""Used as the `cwd` when creating :class:`.Popen`.
"""
def __call__(self, cmd: Union[str, Iterable[str], Path]) -> \
Optional[Union[Popen, int]]:
"""Run a command.
:see: :meth:`.run`
"""
return self.run(cmd)
def run(self, cmd: Union[str, Iterable[str], Path]) -> \
Optional[Union[Popen, int]]:
"""Run a commmand.
:param cmd: either one string, a sequence of arguments or a path (see
:class:`subprocess.Popen`)
:return: the process if :obj:`async_proc` is ``True``, otherwise,
the exit status of the subprocess
"""
if logger.isEnabledFor(logging.INFO):
if isinstance(cmd, (tuple, list)):
cmd_str = ' '.join(cmd)
else:
cmd_str = str(cmd)
logger.info(f'system <{cmd_str}>')
if not self.dry_run:
params = {'shell': isinstance(cmd, (str, Path)),
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE}
if self.working_dir is not None:
params['cwd'] = str(self.working_dir)
proc = Popen(cmd, **params)
StreamLogDumper.dump(proc.stdout, proc.stderr, self.logger)
if self.async_proc:
return proc
else:
return self.wait(proc)
def wait(self, proc: Popen) -> int:
"""Wait for process ``proc`` to end and return the processes exit value.
"""
ex_val = self.check_exit_value
proc.wait(self.timeout)
ret = proc.returncode
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'exit value: {ret} =? {ex_val}')
if ex_val is not None and ret != ex_val:
raise OSError(f'command returned with {ret}, expecting {ex_val}')
return ret | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/util/executor.py | executor.py |
__author__ = 'Paul Landes'
from typing import ClassVar, Union
import logging
from pathlib import Path
from io import TextIOBase
import sys
logger = logging.getLogger(__name__)
class stdwrite(object):
"""Capture standard out/error.
"""
def __init__(self, stdout: TextIOBase = None, stderr: TextIOBase = None):
"""Initialize.
        :param stdout: the data sink for stdout (i.e. :class:`io.StringIO`)
        :param stderr: the data sink for stderr
"""
self._stdout = stdout
self._stderr = stderr
def __enter__(self):
self._sys_stdout = sys.stdout
self._sys_stderr = sys.stderr
if self._stdout is not None:
sys.stdout = self._stdout
if self._stderr is not None:
sys.stderr = self._stderr
def __exit__(self, type, value, traceback):
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = self._sys_stdout
sys.stderr = self._sys_stderr
class stdout(object):
    '''Write to a file or standard out. This is designed to be used in
    command line interface (CLI) applications with the :mod:`zensols.cli`
    module. Application classes can pass a :class:`pathlib.Path` to a method
    using this class.
Example::
def write(self, output_file: Path = Path('-')):
"""Write data.
:param output_file: the output file name, ``-`` for standard out, or
``+`` for a default
"""
with stdout(output_file, recommend_name='unknown-file-name',
extension='txt', capture=True, logger=logger):
print('write data')
'''
STANDARD_OUT_PATH: ClassVar[str] = '-'
"""The string used to indicate to write to standard out."""
FILE_RECOMMEND_NAME: ClassVar[str] = '+'
"""The string used to indicate to use the recommended file name."""
def __init__(self, path: Union[str, Path] = None, extension: str = None,
recommend_name: str = 'unnamed', capture: bool = False,
logger: logging.Logger = logger, open_args: str = 'w'):
"""Initailize where to write. If the path is ``None`` or its name is
:obj:`STANDARD_OUT_PATH`, then standard out is used instead of opening a
file. If ``path`` is set to :obj:`FILE_RECOMMEND_NAME`, it is
constructed from ``recommend_name``. If no suffix (file extension) is
provided for ``path`` then ``extesion`` is used if given.
:param path: the path to write, or ``None``
        :param extension: the extension (sans leading dot ``.``) to append to
                          the path if one is not provided in the file name
:param recommend_name: the name to use as the prefix if ``path`` is not
provided
:param capture: whether to redirect standard out (:obj:`sys.stdout`) to
the file provided by ``path`` if not already indicated
to be standard out
:param logger: used to log the successful output of the file, which
defaults to this module's logger
:param open_args: the arguments given to :func:`open`, which defaults to
``w`` if none are given
"""
path = Path(path) if isinstance(path, str) else path
if path is None or self.is_stdout(path):
path = None
elif (path is not None and
path.name == self.FILE_RECOMMEND_NAME and
recommend_name is not None):
path = Path(recommend_name)
if path is None:
self._path = None
self._args: str = None
else:
if len(path.suffix) == 0 and extension is not None:
path = path.parent / f'{path.name}.{extension}'
self._path: Path = path
self._args: str = open_args
self._logger: logging.Logger = logger
self._capture: bool = capture
self._stdwrite: stdwrite = None
@classmethod
    def is_stdout(cls, path: Path) -> bool:
        """Return whether the path indicates to use standard out."""
        return path.name == cls.STANDARD_OUT_PATH
def __enter__(self):
if self._path is None:
self._sink = sys.stdout
self._should_close = False
else:
self._sink = open(self._path, self._args)
self._should_close = True
if self._capture:
self._stdwrite = stdwrite(self._sink)
self._stdwrite.__enter__()
return self._sink
def __exit__(self, type, value, traceback):
self._sink.flush()
should_log: bool = False
if self._should_close:
try:
if self._stdwrite is not None:
self._stdwrite.__exit__(None, None, None)
self._sink.close()
should_log = value is None and \
self._logger.isEnabledFor(logging.INFO) and \
self._path is not None
except Exception as e:
                logger.error(f'Can not close stream: {e}', exc_info=True)
if should_log:
self._logger.info(f'wrote: {self._path}') | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/util/std.py | std.py |
from __future__ import annotations
__author__ = 'Paul Landes'
from typing import ClassVar, Union, Any, Set, Tuple, Iterable, Type, List
from enum import Enum, auto
from ..util import APIError
import re
class IntegerSelectionError(APIError):
"""Raised for errors parsing or working with :class:`.IntegerSelection`.
"""
pass
class Kind(Enum):
"""The kind of integer selection provided by :class:`.IntegerSelection`.
"""
single = auto()
list = auto()
interval = auto()
@staticmethod
def from_class(cls: Type) -> Kind:
kind: str = {
int: Kind.single,
list: Kind.list,
tuple: Kind.interval
}.get(cls)
if kind is None:
raise IntegerSelectionError(f'Unknown selection kind: {cls}')
return kind
class IntegerSelection(object):
    """Parses a string that selects integers.  These (:obj:`kind`) include:

      * :obj:`Kind.single`: ``<int>``: a single integer

      * :obj:`Kind.interval`: ``<int>:<int>``: all the integers in the
        inclusive interval

      * :obj:`Kind.list`: ``<int>,<int>,...``: a comma separated list (space
        optional)

    To use, create an instance with the initializer, then access
    :obj:`selection`, or use the instance as an iterable or callable.
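
    Example (an illustrative sketch based on the parsing rules above)::

        sel = IntegerSelection('2:4')
        sel.kind                        # Kind.interval
        tuple(sel)                      # (2, 3, 4)
        sel(['a', 'b', 'c', 'd', 'e'])  # select elements by index
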
"""
INTERVAL_DELIM: ClassVar[str] = ':'
_DICTABLE_ATTRIBUTES: ClassVar[Set[str]] = {'kind'}
_INTEGER_REGEX: ClassVar[re.Pattern] = re.compile(r'^[-]?\d+$')
_INTERVAL_REGEX: ClassVar[re.Pattern] = re.compile(
r'^(\d+?)' + INTERVAL_DELIM + r'(\d+)$')
_LIST_REGEX: ClassVar[re.Pattern] = re.compile(r'^\d+(?:,\s*\d+)+$')
    def __init__(self, raw: str) -> None:
"""Parse an integer selection from string ``raw``."""
v: Any = None
if self._INTEGER_REGEX.match(raw):
v = int(raw)
if v is None:
m: re.Match = self._INTERVAL_REGEX.match(raw)
if m is not None:
v = int(m.group(1)), int(m.group(2))
if v is None and self._LIST_REGEX.match(raw) is not None:
v = list(map(int, re.split(r'\s*,\s*', raw)))
if v is None:
raise IntegerSelectionError(f"Bad selection format: '{raw}'")
self._select = v
@property
def selection(self) -> Union[int, Tuple[int, int], Tuple[int]]:
"""The selection data based on what was parsed in the initializer (see
class docs).
"""
return self._select
@property
def kind(self) -> Kind:
"""The kind of selection (see class docs)."""
return Kind.from_class(type(self.selection))
    def select(self, arr: Tuple[Any, ...]) -> List[Any]:
"""Return element(s) ``arr`` based on the :obj:`selection`.
"""
if self.kind == Kind.single:
return [arr[self.selection]]
elif self.kind == Kind.interval:
            # the interval is documented and iterated as inclusive, so include
            # the end index in the slice
            return list(arr[self.selection[0]:self.selection[1] + 1])
else:
return list(map(lambda i: arr[i], self.selection))
    def __call__(self, arr: Tuple[Any, ...]) -> Union[Any, List[Any]]:
"""See :meth:`select`."""
return self.select(arr)
def __iter__(self) -> Iterable[int]:
return {
Kind.single: lambda: iter((self.selection,)),
Kind.interval: lambda: iter(
range(self.selection[0], self.selection[1] + 1)),
Kind.list: lambda: iter(self.selection),
}[self.kind]()
def __len__(self) -> int:
return sum(1 for _ in self)
def __str__(self) -> str:
return str(self.selection) | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/introspect/intsel.py | intsel.py |
__author__ = 'Paul Landes'
from typing import Any, Tuple, Type, Sequence, Dict, ClassVar
from types import ModuleType
from abc import ABC
import logging
import importlib
from functools import reduce
import textwrap
import re
logger = logging.getLogger(__name__)
class ClassImporterError(Exception):
"""Raised for any run time exceptions during resolving and instantiating
classes with :class:`.ClassImporter`.
"""
pass
class ClassImporter(object):
"""Utility class that reloads a module and instantiates a class from a string
class name. This is handy for prototyping code in a Python REPL.
"""
_CLASS_REGEX: ClassVar[re.Pattern] = re.compile(
r'^([a-zA-Z0-9_.]+)\.([a-zA-Z_][a-zA-Z0-9_]*)$')
def __init__(self, class_name: str, reload: bool = True):
"""Initialize with the class name.
        :param class_name: the fully qualified name of the class (including
                           the module portion of the class name)
:param reload: if ``True`` then reload the module before returning the
class
"""
self.class_name = class_name
self.reload = reload
@classmethod
def is_valid_class_name(cls: Type, class_name: str) -> bool:
"""Return whether a string represents a valid class name."""
return cls._CLASS_REGEX.match(class_name) is not None
@staticmethod
def full_classname(cls: Type) -> str:
"""Return a fully qualified class name string for class ``cls``.
"""
module = cls.__module__
if module is None or module == str.__class__.__module__:
return cls.__name__
else:
return module + '.' + cls.__name__
@staticmethod
def get_module(name: str, reload: bool = False) -> ModuleType:
"""Return the module that has ``name``.
        :param name: the string name, which can have dots (``.``) for
                     submodules
        :param reload: whether to reload the module after importing it
"""
pkg_s = name.split('.')
mod = reduce(lambda m, n: getattr(m, n), pkg_s[1:], __import__(name))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'mod: {mod}, reloading: {reload}')
if reload:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'reload: cls: {mod}')
mod = importlib.reload(mod)
return mod
    def parse_module_class(self) -> Sequence[str]:
        """Parse the module and class name parts of the fully qualified class
        name.
"""
cname: str = self.class_name
match: re.Match = re.match(self._CLASS_REGEX, cname)
if not match:
raise ClassImporterError(
f'Not a fully qualified class name: {cname}')
return match.groups()
def get_module_class(self, resolve_module: bool = False) -> \
Tuple[ModuleType, Type]:
"""Return the module and class as a tuple of the given class in the
initializer.
:param resolve_module: if ``True`` then resolve the module from the
class rather than the module portion of the
:obj:`class_name` string
:return: a tuple of the module and class represented by
:obj:`class_name`
"""
mod_name, cname = self.parse_module_class()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'mod_name: {mod_name}, class: {cname}')
mod = self.get_module(mod_name, self.reload)
if not hasattr(mod, cname):
raise ClassImporterError(
f"No class '{cname}' found in module '{mod}'")
cls = getattr(mod, cname)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'class: {cls}')
if resolve_module:
mod = self.get_module(cls.__module__, self.reload)
return mod, cls
def get_class(self) -> Type:
"""Return the given class in the initializer.
"""
return self.get_module_class()[1]
def get_class_or_global(self) -> Type:
"""Like :meth:`get_class` but try globals if the class isn't fully
qualified (i.e. sans module).
"""
if self.is_valid_class_name(self.class_name):
return self.get_class()
else:
cls = globals().get(self.class_name)
if cls is None:
raise ClassImporterError(
'Not a fully qualified class name and not in globals: ' +
self.class_name)
return cls
    def _bless(self, inst: Any) -> Any:
        """A template method to modify a newly created instance.  The returned
        instance is the one used by the client.
This base class implementation just returns ``inst``.
:param inst: the instance to bless
        :return: the instance returned to and used by the client
"""
return inst
def instance(self, *args, **kwargs):
"""Create an instance of the specified class in the initializer.
:param args: the arguments given to the initializer of the new class
:param kwargs: the keyword arguments given to the initializer of the
new class
"""
cls = self.get_class()
try:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'class importer creating instance of {cls}')
inst = cls(*args, **kwargs)
inst = self._bless(inst)
except Exception as e:
llen = 200
kwstr = textwrap.shorten(f'{args}, {kwargs}', llen)
msg = f'Can not instantiate {cls}({kwstr})'
logger.error(msg)
raise e
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'inst class: {type(inst)}')
return inst
    def set_log_level(self, level: int = logging.INFO):
        """Convenience method to set the log level of the module given in the
initializer of this class.
:param level: a logging level in :mod:`logging`
"""
mod, cls = self.parse_module_class()
logging.getLogger(mod).setLevel(level)
class ClassResolver(ABC):
"""Used to resolve a class from a string.
"""
@staticmethod
def full_classname(cls: type) -> str:
"""Return a fully qualified class name string for class ``cls``.
"""
return ClassImporter.full_classname(cls)
def find_class(self, class_name: str) -> Type:
"""Return a class given the name of the class.
:param class_name: represents the class name, which might or might not
have the module as part of that name
"""
pass
class DictionaryClassResolver(ClassResolver):
"""Resolve a class name from a list of registered class names without the
module part. This is used with the ``register`` method on
``ConfigFactory``.
:see: ConfigFactory.register
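
    Example (illustrative; assumes class ``Foo`` was previously registered)::

        resolver = DictionaryClassResolver({'Foo': Foo})
        cls = resolver.find_class('Foo')
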
"""
def __init__(self, instance_classes: Dict[str, type]):
self.instance_classes = instance_classes
def find_class(self, class_name: str) -> Type:
classes = {}
classes.update(globals())
classes.update(self.instance_classes)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'looking up class: {class_name}')
if class_name not in classes:
raise ClassImporterError(
f'Class {class_name} is not registered in factory {self}')
cls = classes[class_name]
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'found class: {cls}')
return cls | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/introspect/imp.py | imp.py |
__author__ = 'Paul Landes'
from typing import List, Tuple, Dict, Any, Type, Optional, ClassVar
from dataclasses import dataclass, field
import dataclasses
import logging
from collections import OrderedDict
import re
import ast
import inspect
from inspect import Parameter, Signature
from pathlib import Path
from . import ClassImporter, IntegerSelection
logger = logging.getLogger(__name__)
class ClassError(Exception):
    """Raised by :class:`.ClassInspector` when a class can not be inspected or
parsed by :mod:`ast`.
"""
pass
def _create_data_types() -> Dict[str, Type]:
types = {t.__name__: t for t in
[str, int, float, bool, list, dict, Path, IntegerSelection]}
types['pathlib.Path'] = Path
return types
DEFAULT_DATA_TYPES: Dict[str, Type] = _create_data_types()
@dataclass
class TypeMapper(object):
"""A utility class to map string types parsed from :class:`.ClassInspector`
to Python types.
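
    Example (illustrative; ``SomeClass`` is a hypothetical class used for
    module resolution)::

        mapper = TypeMapper(SomeClass)
        mapper.map_type('int')   # -> int
        mapper.map_type(None)    # -> str (the default type)
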
"""
DEFAULT_DATA_TYPES: ClassVar[Dict[str, Type]] = _create_data_types()
"""Supported data types mapped from data class fields."""
cls: Type = field()
"""The class to map."""
data_types: Dict[str, Type] = field(
default_factory=lambda: DEFAULT_DATA_TYPES)
"""Data type mapping for this instance."""
default_type: Type = field(default=str)
"""Default type for when no type is given."""
    allow_class: bool = field(default=True)
    """Whether or not to allow classes as acceptable types.  When the mapper
    encounters these classes, the class is loaded from the module and returned
    as a type.
"""
def _try_class(self, stype: str) -> Type:
"""Try to resolve ``stype`` as class."""
mod = self.cls.__module__
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'module: {mod}')
class_name = f'{mod}.{stype}'
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'trying to load {class_name}')
ci = ClassImporter(class_name, reload=False)
cls: type = ci.get_class()
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'successfully loaded class {cls}')
return cls
def map_type(self, stype: str) -> Type:
tpe: Optional[Type]
if stype is None:
tpe = self.default_type
else:
tpe = self.data_types.get(stype)
if tpe is None and self.allow_class:
try:
tpe = self._try_class(stype)
except Exception as e:
logger.error(f'Could not narrow to class: {stype}: {e}',
exc_info=True)
if tpe is None:
raise ClassError(f'Non-supported data type: {stype}')
return tpe
@dataclass(eq=True)
class ClassDoc(object):
    """Metadata for documentation at any level of the class code (methods,
    fields, etc.).
"""
PARAM_REGEX = re.compile(r'^\s*:param ([^:]+):\s*(.+)$')
"""Matches :param: documentation."""
text: str = field()
"""The text of the documentation."""
params: Dict[str, str] = field(default=None)
"""The parsed parameter documentation."""
def __post_init__(self):
doc, params = self._parse_params(self.text)
if doc is not None:
doc = doc.strip()
if len(doc) == 0:
doc = None
self.text = doc
self.params = params
    def _parse_params(self, text: str) -> Tuple[str, Dict[str, str]]:
doc_lines = []
params: Dict[str, List[str]] = {}
last_param: List[str] = None
param_sec = False
for line in text.split('\n'):
line = line.strip()
if len(line) > 0:
m = self.PARAM_REGEX.match(line)
if m is None:
if param_sec:
last_param.append(line)
else:
doc_lines.append(line)
else:
name, doc = m.groups()
last_param = [doc]
params[name] = last_param
param_sec = True
param_doc = {}
for k, v in params.items():
param_doc[k] = ' '.join(v)
doc = ' '.join(doc_lines)
return doc, param_doc
@dataclass(eq=True)
class ClassParam(object):
"""Represents a :class:`dataclasses.dataclass` field.
"""
name: str = field()
"""The name of the field."""
dtype: type = field()
"""The data type."""
doc: ClassDoc = field()
"""The documentation of the field."""
@dataclass(eq=True)
class ClassField(ClassParam):
"""Represents a :class:`dataclasses.dataclass` field.
"""
kwargs: Dict[str, Any] = field()
"""The field arguments."""
@property
def default(self) -> Any:
if self.kwargs is not None:
return self.kwargs.get('default')
@dataclass(eq=True)
class ClassMethodArg(ClassParam):
"""Meta data for an argument in a method.
"""
default: str = field()
"""The default if any, otherwise ``None``."""
is_positional: bool = field()
"""``True`` is the argument is positional vs. a keyword argument."""
@dataclass(eq=True)
class ClassMethod(object):
"""Meta data for a method in a dataclass.
"""
name: str = field()
"""The name of the method."""
doc: ClassDoc = field()
"""The docstring of the method."""
args: Tuple[ClassMethodArg, ...] = field()
"""The arguments of the method."""
@dataclass(eq=True)
class Class(object):
class_type: type = field()
"""The class that was inspected."""
doc: ClassDoc = field()
"""The docstring of the class."""
fields: Dict[str, ClassField] = field()
"""The fields of the class."""
methods: Dict[str, ClassMethod] = field()
"""The methods of the class."""
@property
def name(self) -> str:
"""The fully qualified class name."""
return ClassImporter.full_classname(self.class_type)
@property
def is_dataclass(self) -> bool:
"""Whether or not the class is a :class:`dataclasses.dataclass`."""
return dataclasses.is_dataclass(self.class_type)
@dataclass
class ClassInspector(object):
"""A utility class to return all :class:`dataclasses.dataclass` attribute
(field) documentation.
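
    Example (an illustrative sketch; ``SomeDataclass`` is a hypothetical
    :class:`dataclasses.dataclass`)::

        inspector = ClassInspector(SomeDataclass)
        meta: Class = inspector.get_class()
        for name, fld in meta.fields.items():
            print(name, fld.dtype, fld.doc)
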
"""
    INSPECT_META: ClassVar[str] = 'CLASS_INSPECTOR'
    """Attribute that, when set, indicates to traverse superclasses as well.
    This is set as an empty ``dict`` to allow future implementations to filter
    on what's traversed (i.e. ``include_fields``).
"""
DECORATOR_META: ClassVar[str] = 'CLASS_DECORATOR'
"""Attribute to set which must be a :class:`builtins.dict` with the
following keys:
* ``includes``: as a set of decorator names that can be set on methods to
indicate inclusion on introspected method set. Otherwise the decorated
method (such as `@property`) is omitted from the class metadata
"""
cls: type = field()
"""The class to inspect."""
    attrs: Tuple[str, ...] = field(default=None)
    """The class attributes to inspect, or all those found when ``None``.
"""
data_type_mapper: TypeMapper = field(default=None)
"""The mapper used for narrowing a type from a string parsed from the Python
AST.
"""
include_private: bool = field(default=False)
"""Whether to include private methods that start with ``_``."""
include_init: bool = field(default=False)
"""Whether to include the ``__init__`` method."""
strict: str = field(default='y')
"""Indicates what to do for undefined or unsupported structures.
One of:
* y: raise errors
* n: ignore
* w: log as warning
"""
def __post_init__(self):
        if self.data_type_mapper is None:
            self.data_type_mapper = TypeMapper(self.cls)
def _get_class_node(self) -> ast.AST:
fname = inspect.getfile(self.cls)
logger.debug(f'parsing source file: {fname}')
with open(fname, 'r') as f:
fstr = f.read()
for node in ast.walk(ast.parse(fstr)):
if isinstance(node, ast.ClassDef):
if node.name == self.cls.__name__:
return node
    def _map_default(self, item: str, def_node: ast.AST):
        """Map a default value from the AST, which at times is an
        :class:`ast.Name`.  This happens when an enum is used as a type, but
        ``name.id`` only gives the enum class name and not the enum value.
:param item: mapped target string used to create an error message
:param def_node: the node to map a default
"""
def map_arg(node):
if isinstance(node.value, str):
return f"'{node.value}'"
else:
return str(node.value)
try:
if isinstance(def_node, ast.Attribute):
enum_name: str = def_node.attr
cls: type = self.data_type_mapper.map_type(def_node.value.id)
if hasattr(cls, '__members__'):
default = cls.__members__[enum_name]
else:
msg = f'No default found for class: {cls}.{enum_name}'
if self.strict == 'y':
raise ClassError(msg)
elif self.strict == 'w' and \
logger.isEnabledFor(logging.WARN):
logger.warning(msg)
default = None
# ast.Num and ast.Str added for Python 3.7 backward compat
elif isinstance(def_node, ast.Num):
default = def_node.n
elif isinstance(def_node, ast.Str):
default = def_node.s
elif isinstance(def_node, ast.Call):
func = def_node.func.id
args = map(map_arg, def_node.args)
default = f'{func}({", ".join(args)})'
try:
evald = eval(default)
default = evald
except Exception as e:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'could not invoke: {default}: {e}')
elif isinstance(def_node, ast.UnaryOp):
op = def_node.operand
default = op.value
elif isinstance(def_node, ast.Name):
default = self.data_type_mapper.map_type(def_node.id)
elif hasattr(def_node, 'value'):
default = def_node.value
else:
default = str(def_node)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'default: {default} ({type(default)})/' +
f'({type(def_node)})')
return default
except Exception as e:
raise ClassError(f'Could not map {item}: {def_node}: {e}')
def _get_args(self, node: ast.arguments) -> List[ClassMethodArg]:
args = []
defaults = node.defaults
dsidx = len(node.args) - len(defaults)
for i, arg in enumerate(node.args):
name = arg.arg
try:
dtype = None
is_positional = True
default = None
didx = i - dsidx
if didx >= 0:
default = self._map_default(f'arg {arg}', defaults[didx])
is_positional = False
if arg.annotation is not None:
if isinstance(arg.annotation, ast.Subscript):
dtype = arg.annotation.value.id
else:
dtype = arg.annotation.id
mtype = self.data_type_mapper.map_type(dtype)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'mapped {name}:{dtype} -> {mtype}, ' +
f'default={default}')
arg = ClassMethodArg(name, mtype, None, default, is_positional)
except Exception as e:
raise ClassError(f'Could not map argument {name}: {e}')
args.append(arg)
return args
def _get_method(self, node: ast.FunctionDef) -> ClassMethod:
method: ClassMethod = None
decorators = filter(lambda n: n not in self._decorator_includes,
map(lambda n: hasattr(n, 'id') and n.id,
node.decorator_list))
decorators = tuple(decorators)
name: str = node.name
is_priv: bool = name.startswith('_')
is_prop: bool = any(decorators)
# only public methods (not properties) are parsed for now
if not is_prop and (self.include_private or not is_priv):
args = self._get_args(node.args)
node = None if len(node.body) == 0 else node.body[0]
# parse the docstring for instance methods only
if (node is not None) and (len(args) > 0) and \
(args[0].name == 'self'):
args = args[1:]
else:
args = ()
if isinstance(node, ast.Expr) and \
isinstance(node.value, ast.Constant):
doc = ClassDoc(node.value.value)
# ast.Str added for Python 3.7 backward compat
elif isinstance(node, ast.Expr) and \
isinstance(node.value, ast.Str):
doc = ClassDoc(node.value.s)
else:
doc = None
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'doc: {name}: {doc}')
method = ClassMethod(name, doc, args)
# copy the parsed parameter doc found in the method doc to the
# argument meta data
if (method.doc is not None) and \
(method.doc.params is not None) and \
(method.args is not None):
for arg in method.args:
param = method.doc.params.get(arg.name)
if (param is not None) and (arg.doc is None):
arg.doc = ClassDoc(param, None)
return method
def _get_inspect_method(self, cls: Type, meth_name: str) -> ClassMethod:
mems = filter(lambda t: t[0] == meth_name, inspect.getmembers(cls))
for mem_name, mem in mems:
sig: Signature = inspect.signature(mem)
meth_args: List[ClassMethodArg] = []
for param_name, param in sig.parameters.items():
if param_name == 'self':
continue
positional = param.kind == Parameter.POSITIONAL_ONLY
meth_args.append(ClassMethodArg(
name=param.name,
dtype=None if param.annotation == Parameter.empty else param.annotation,
doc=None,
default=None if param.default == Parameter.empty else param.default,
is_positional=positional))
return ClassMethod(
name=mem_name,
                doc=None if inspect.getdoc(mem) is None else
                ClassDoc(inspect.cleandoc(inspect.getdoc(mem))),
args=tuple(meth_args))
def _get_class(self, cls: Type) -> Class:
"""Return a dict of attribute (field) to metadata and docstring.
"""
attrs = self.attrs
if attrs is None:
attrs = tuple(filter(lambda i: i[:1] != '_', cls.__dict__.keys()))
cnode: ast.Node = self._get_class_node()
fields: List[ClassField] = []
methods: List[ClassMethod] = []
for node in cnode.body:
            # parse the dataclass attribute/field definition
if isinstance(node, ast.AnnAssign) and \
isinstance(node.annotation, (ast.Name, ast.Subscript)):
str_dtype: str
if isinstance(node.annotation, ast.Name):
str_dtype = node.annotation.id
elif isinstance(node.annotation, ast.Subscript):
str_dtype = node.annotation.value.id
name: str = node.target.id
dtype: type = self.data_type_mapper.map_type(str_dtype)
item: str = f"kwarg: '{name}'"
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'mapped dtype {name} {str_dtype} -> {dtype}')
if node.value is not None and hasattr(node.value, 'keywords'):
kwlst: List[ast.keyword] = node.value.keywords
kwargs = {k.arg: self._map_default(item, k.value)
for k in kwlst}
fields.append(ClassField(name, dtype, None, kwargs))
# parse documentation string right after the dataclass field
elif (isinstance(node, ast.Expr) and
isinstance(node.value, ast.Constant) and
len(fields) > 0):
doc = ClassDoc(node.value.value)
last_field: ClassField = fields[-1]
if last_field.doc is None:
last_field.doc = doc
# ast.Str added for Python 3.7 backward compat
elif (isinstance(node, ast.Expr) and
isinstance(node.value, ast.Str) and
len(fields) > 0):
doc = ClassDoc(node.value.s)
last_field: ClassField = fields[-1]
if last_field.doc is None:
last_field.doc = doc
# parse the method
elif isinstance(node, ast.FunctionDef):
try:
meth = self._get_method(node)
except Exception as e:
                    raise ClassError(
                        f'Could not parse method in {node}: {e}') from e
if meth is not None:
methods.append(meth)
elif isinstance(node, ast.AnnAssign):
if self.strict == 'w' and logger.isEnabledFor(logging.WARNING):
logger.warning(f'assign: {node.target.id}, {node.annotation}')
else:
msg = f'not processed node: {type(node)}: {node.value}'
if self.strict == 'w' and logger.isEnabledFor(logging.WARNING):
logger.warning(msg)
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(msg)
if self.include_init:
meth = self._get_inspect_method(cls, '__init__')
if meth is not None:
methods.append(meth)
field_dict = OrderedDict()
meth_dict = OrderedDict()
for f in fields:
field_dict[f.name] = f
for m in methods:
meth_dict[m.name] = m
return Class(
cls,
None if self.cls.__doc__ is None else ClassDoc(self.cls.__doc__),
fields=field_dict,
methods=meth_dict)
def _get_super_class(self, cls: Type) -> List[Class]:
"""Traverse all superclasses of ``cls``.
"""
supers = filter(lambda c: c is not object and c is not cls, cls.mro())
classes = []
for cls in supers:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'traversing super class: {cls}')
ci = self.__class__(cls)
clmeta = ci.get_class()
classes.append(clmeta)
return classes
def get_class(self) -> Class:
"""Return a dict of attribute (field) to metadata and docstring.
"""
if hasattr(self.cls, self.DECORATOR_META):
meta: Dict[str, Any] = getattr(self.cls, self.DECORATOR_META)
self._decorator_includes = meta.get('includes', set())
else:
self._decorator_includes = set()
cls = self._get_class(self.cls)
if hasattr(self.cls, self.INSPECT_META):
meta: Dict[str, str] = getattr(self.cls, self.INSPECT_META)
if not isinstance(meta, dict):
raise ClassError(
                    f'{self.INSPECT_META} must be a dict in {self.cls} ' +
                    f'but got type: {type(meta)}')
superclasses = self._get_super_class(self.cls)
superclasses.reverse()
superclasses.append(cls)
for sc in superclasses:
cls.fields.update(sc.fields)
cls.methods.update(sc.methods)
return cls | zensols.util | /zensols.util-1.13.1-py3-none-any.whl/zensols/introspect/insp.py | insp.py |
__author__ = 'Paul Landes'
from typing import List, Tuple, Dict, Union, Any, Type, Optional
import logging
import re
from zensols.zotsite import (
ZoteroApplicationError, Library, Visitor,
ZoteroObject, ItemMapper, Item, Note,
)
logger = logging.getLogger(__name__)
class NavCreateVisitor(Visitor):
"""This class creates the data structure used by the Javascript navigation
widget in the created website.
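
    Example (illustrative; ``lib`` comes from
    :meth:`.DatabaseReader.get_library` and ``mapper`` is an
    :class:`.ItemMapper`)::

        visitor = NavCreateVisitor(lib, mapper)
        UnsortedWalker().walk(lib, visitor)
        tree = visitor.primary_roots
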
"""
ITEM_ICONS = {'computerProgram': 'floppy-disk',
'conferencePaper': 'file',
'journalArticle': 'file',
'attachment': 'paperclip',
'bookSection': 'book',
'book': 'book',
'report': 'font',
'webpage': 'bookmark',
'thesis': 'education',
'patent': 'certificate',
'blogPost': 'pencil'}
UPPER = re.compile(r'([A-Z][a-z]+)')
PDF_EXT_REGEXP = re.compile(r'.*\.pdf$')
PDF_FULL_REGEXP = re.compile(r'^.*Full\s*[tT]ext PDF')
CAPS_META_KEYS = set('url'.split())
def __init__(self, lib: Library, item_mapper: ItemMapper):
"""Initialize the visitor object.
:param lib: the object graph returned from
``DatabaseReader.get_library``.
:param item_mapper: used for file name substitution so the widget uses
the correct names (i.e. underscore substitution)
"""
self._item_mapper = item_mapper
self._root = {'nodes': []}
self._parents = [self._root]
@classmethod
def _sort_nodes(cls: Type, lst: List[Dict[str, Any]],
by: str = 'item_title'):
"""Sort the nodes in the root node. The default is to sort by item
title.
"""
assert type(lst) == list
lst.sort(key=lambda n: n[by])
for n in lst:
if 'nodes' in n:
cls._sort_nodes(n['nodes'], by)
@property
def primary_roots(self) -> List[Dict[str, Any]]:
"""The (root level) collections."""
node: Dict[str, Union[str, List]] = self._root['nodes'][0]
if 'nodes' not in node:
raise ZoteroApplicationError(
'No collections found; maybe too restrictive collections ' +
'regular expression?')
target: List[Dict[str, Any]] = node['nodes']
self._sort_nodes(target)
return target
def icon_name(self, node) -> str:
"""Return the name of the icon name for ``node``."""
icon_name = None
if isinstance(node, Item):
if node.type in self.ITEM_ICONS:
icon_name = self.ITEM_ICONS[node.type]
else:
                # fall back to a generic icon for unmapped item types
logger.warning(f'no such icon found for {node.type}')
icon_name = 'unchecked'
elif isinstance(node, Note):
icon_name = 'text-background'
return icon_name
def _munge_meta_key(self, name: str) -> str:
if name in self.CAPS_META_KEYS:
name = name.upper()
elif not name.isupper():
parts = re.split(self.UPPER, name)
parts = filter(lambda s: len(s) > 0, parts)
parts = map(lambda s: s.capitalize(), parts)
name = ' '.join(parts)
return name
    def _node_metadata(self, item: Item) -> Optional[List[Tuple[str, Any]]]:
meta = item.metadata
if meta is not None:
mdarr = []
for k, v in meta.items():
k = self._munge_meta_key(k)
mdarr.append((k, v))
return mdarr
def _find_child_resource(self, item: Item, pat: re.Pattern):
res = tuple(filter(lambda p: p is not None and pat.match(p),
map(lambda c: self._item_mapper.get_resource_name(c),
item.children)))
if len(res) == 1:
return res[0]
def _find_child_name(self, item: Item, pat: re.Pattern):
res = tuple(filter(lambda p: p is not None and pat.match(p),
map(lambda c: c.name, item.children)))
if len(res) > 0:
for c in item.children:
if c.name == res[0]:
return self._item_mapper.get_resource_name(c)
def _create_node(self, item: Item) -> Dict[str, Any]:
"""Create a node for an item."""
node = {'text': item.title,
'item-id': item.id,
'nodes': []}
icon = self.icon_name(item)
if icon:
node['icon'] = 'glyphicon glyphicon-{}'.format(icon)
node['item_title'] = item.title
node['item_type'] = item.type
node['item_note'] = item.note
node['node_type'] = item.__class__.__name__.lower()
if isinstance(item, Item):
meta = self._node_metadata(item)
creators = item.creators
if meta is not None:
node['metadata'] = meta
res = self._item_mapper.get_resource_name(item)
if res is None:
res = self._find_child_resource(item, self.PDF_EXT_REGEXP)
if res is None:
res = self._find_child_name(item, self.PDF_FULL_REGEXP)
if res is not None:
node['resource'] = res
if creators is not None:
if meta is None:
meta = []
node['metadata'] = meta
meta.append(('Creators', ', '.join(map(str, creators))))
if meta is not None:
meta.sort()
return node
def enter_parent(self, parent: ZoteroObject):
new_par: Dict[str, Any] = self._create_node(parent)
cur_par: Dict[str, List[Dict]] = self._parents[-1]
cur_par['nodes'].append(new_par)
self._parents.append(new_par)
def visit_child(self, child: ZoteroObject):
pass
def leave_parent(self, parent: ZoteroObject):
node = self._parents.pop()
if len(node['nodes']) == 0:
del node['nodes']
else:
node['selectable'] = False | zensols.zotsite | /zensols.zotsite-0.8.1-py3-none-any.whl/zensols/zotsite/navvisitor.py | navvisitor.py |
__author__ = 'Paul Landes'
from dataclasses import dataclass, field
import re
import logging
from pathlib import Path
from . import SiteCreator
logger = logging.getLogger(__name__)
@dataclass
class Application(object):
"""This project exports your local Zotero library to a usable HTML website.
"""
site_creator: SiteCreator = field()
"""Creates the Zotero content web site."""
prune_pattern: str = field(default=None)
"""A regular expression used to filter ``Collection`` nodes."""
def _prepare_creator(self, output_dir: Path) -> Path:
if output_dir is not None:
self.site_creator.out_dir = output_dir
else:
output_dir = self.site_creator.out_dir
if self.prune_pattern is not None:
pat: re.Pattern = re.compile(self.prune_pattern)
self.site_creator.prune_visitor.prune_pattern = pat
return output_dir
def _show(self, index_file: Path):
from zensols.cli import CliHarness
from zensols.showfile import ApplicationFactory, Application
harness: CliHarness = ApplicationFactory.create_harness()
app: Application = harness.get_instance('config')
logger.info(f'showing {index_file}')
app.show(str(index_file))
def export(self, output_dir: Path = None, show: bool = False):
"""Generate and export the Zotero website.
        :param output_dir: the directory to dump the site, which defaults to
                           the value in the configuration file
:param show: whether to browse to the created site (needs ``pip install
zensols.showfile``)
"""
if logger.isEnabledFor(logging.INFO):
logger.info(f'exporting site: {output_dir}')
output_dir = self._prepare_creator(output_dir)
self.site_creator.export()
if show:
self._show(output_dir / 'index.html')
def print_structure(self):
"""Print (sub)collections and papers in those collections as a tree."""
self._prepare_creator(None)
self.site_creator.print_structure() | zensols.zotsite | /zensols.zotsite-0.8.1-py3-none-any.whl/zensols/zotsite/app.py | app.py |
__author__ = 'Paul Landes'
from abc import ABC, abstractmethod
import logging
import re
from zensols.zotsite import Item, Library
logger = logging.getLogger(__name__)
class ItemMapper(ABC):
"""Maps :class:`.Item` unique identifiers.
"""
EXT_RE = re.compile(r'.+\.(.+)?$')
def _item_to_ext(self, item: Item):
m = self.EXT_RE.match(item.path.name)
return f'.{m.group(1)}' if m is not None else ''
@abstractmethod
def get_resource_name(self, item: Item) -> str:
"""Return a resource used on the browser side for ``item``."""
pass
@abstractmethod
def get_file_name(self, item: Item) -> str:
"""Return a file path used on the browser side for ``item``."""
pass
class RegexItemMapper(ItemMapper):
"""Map by using regular expression replacements.
"""
def __init__(self, lib: Library, fmatch_re=None, repl_re=None):
self.lib = lib
if fmatch_re is not None:
self.fmatch_re = re.compile(fmatch_re)
else:
self.fmatch_re = None
if repl_re is not None:
self.repl_re = re.compile(repl_re)
else:
self.repl_re = None
    def _map(self, item: Item) -> str:
        """Return the regular expression matched/modified resource name for
        ``item``.
"""
fname = self.lib.attachment_resource(item)
if fname is not None:
if self.fmatch_re and self.repl_re and self.fmatch_re.match(fname):
fname = self.repl_re.sub('_', fname)
return fname
def get_resource_name(self, item: Item) -> str:
return self._map(item)
def get_file_name(self, item: Item) -> str:
return self._map(item)
class IdItemMapper(ItemMapper):
"""Map by using item IDs.
"""
def __init__(self, lib: Library, fmatch_re=None, repl_re=None):
self.lib = lib
if fmatch_re is not None:
self.fmatch_re = re.compile(fmatch_re)
else:
self.fmatch_re = None
if repl_re is not None:
self.repl_re = re.compile(repl_re)
else:
self.repl_re = None
    def _map(self, item: Item) -> str:
        """Return a path under the storage directory named by the item's ID.
"""
if item.type == 'attachment' and item.path is not None:
ext = self._item_to_ext(item)
return f'{self.lib.storage_dirname}/{item.id}{ext}'
def get_resource_name(self, item: Item) -> str:
return self._map(item)
def get_file_name(self, item: Item) -> str:
return self._map(item) | zensols.zotsite | /zensols.zotsite-0.8.1-py3-none-any.whl/zensols/zotsite/itemmap.py | itemmap.py |
__author__ = 'Paul Landes'
from typing import Callable, List, Optional
import logging
from abc import ABC, abstractmethod
import re
from io import TextIOBase
from pathlib import Path
from zensols.cli import ApplicationError
from zensols.persist import persisted
logger = logging.getLogger(__name__)
class ZoteroApplicationError(ApplicationError):
"""Thrown for application errors meant to be reported by the command line.
"""
pass
class ZoteroObject(ABC):
    """Represents any Zotero data object such as a collection or item.
"""
def __init__(self, children):
self._children = children
@property
def children(self):
return self._children
@abstractmethod
def get_id(self):
pass
@property
def id(self):
return self.get_id()
def __str__(self):
return '{} ({})'.format(self.__format_zobj__(), self.id)
def __repr__(self):
return self.__str__()
def __format_zobj__(self):
return self.name
@property
def title(self):
return self.name
@property
def note(self):
if hasattr(self, 'sel') and 'n_note' in self.sel:
return self.sel['n_note']
def short_title(self, str_len):
"""Return the short name of this object."""
lstr = self.title
return (lstr[:str_len] + '...') if len(lstr) > str_len else lstr
@property
def type(self):
"""Return the type this item is."""
if hasattr(self, 'sel') and 'type' in self.sel:
return self.sel['type']
class Note(ZoteroObject):
"""Represents a note Zotero data object.
"""
def __init__(self, sel):
self.sel = sel
super().__init__([])
def get_id(self):
return 'n' + str(self.sel['i_id'])
@property
def title(self):
return self.sel['n_title']
@property
def name(self):
return '<{}> [note]'.format(self.title)
class Name(object):
def __init__(self, first: str, last: str):
self.first = first
self.last = last
def __str__(self):
return f'{self.first} {self.last}'
def __repr__(self):
return self.__str__()
class Item(ZoteroObject):
    """Represents an attachment object, such as a PDF, site link, etc.
"""
def __init__(self, sel, children):
self.sel = sel
super().__init__(children)
self.storage_pat = re.compile('^(?:storage|attachments):(.+)$')
def get_db_id(self):
return self.sel['i_id']
def get_id(self):
if not hasattr(self, '_id'):
self._id = 'i' + str(self.get_db_id())
return self._id
def set_id(self, id):
self._id = id
@property
def name(self):
meta = self.sel['meta']
name = 'none'
for k in 'shortTitle title publicationTitle'.split(' '):
if k in meta:
name = meta[k]
break
return name
@property
def metadata(self):
return self.sel.get('meta')
@property
    def creators(self) -> Optional[List[Name]]:
return self.sel.get('creators')
@property
@persisted('_path')
def path(self):
abs_path = None
path = self.sel['path']
if path is not None:
m = self.storage_pat.match(path)
if m is None:
# assume ZoteroFile is used
abs_path = Path(path)
if not abs_path.exists():
raise ValueError(f'unknown storage and not a file: {path}')
else:
pdir = self.sel['key']
fpart = m.group(1)
abs_path = self.lib.get_storage_path() / f'{pdir}/{fpart}'
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'pdir={pdir}, fpart={fpart}, abs={abs_path}')
return abs_path
def __format_zobj__(self):
abs_path = self.path
its = self.sel.copy()
its.update({'name': self.name, 'abs_path': abs_path})
return '{name} [{type}]{abs_path}'.format(**its)
class Container(ZoteroObject):
"""Container class holds items and sub-collections.
"""
def __init__(self, items, collections):
self.items = items
self.collections = collections
super().__init__(None)
@property
def children(self):
ret = []
ret.extend(self.collections)
ret.extend(self.items)
return ret
class Collection(Container):
"""Represents a (sub)collection, which is a container for other collections and
items.
"""
def __init__(self, sel, items, collections):
self.sel = sel
super().__init__(items, collections)
def get_id(self):
return 'c{},i{}'.format(self.sel['c_id'], self.sel['c_iid'])
@property
def name(self):
return self.sel['c_name']
class Library(Container):
"""Represents the top level object that contains the root level collections.
"""
def __init__(self, data_dir, library_id, collections):
self.data_dir = data_dir
self.library_id = library_id
self.storage_dirname = 'storage'
super().__init__([], collections)
for c in collections:
self._init_child(c)
def _init_child(self, parent):
if isinstance(parent, Item):
parent.lib = self
for c in parent.children:
self._init_child(c)
def get_storage_path(self, fname=None):
path = Path(self.data_dir, self.storage_dirname)
if fname:
path = Path(path, fname)
return path
def get_id(self):
return 'l' + str(self.library_id)
def attachment_resource(self, item):
if item.type == 'attachment':
return f'{self.storage_dirname}/{item.path}'
@property
def name(self):
return 'lib'
@property
def title(self):
if self.library_id == 1:
return 'Personal Library'
else:
return 'Library'
class Visitor(ABC):
"""The visitor in the GoF *visitor pattern*.
"""
@abstractmethod
def enter_parent(self, parent: ZoteroObject):
"""Template method for traversing down/into a node."""
pass
@abstractmethod
def visit_child(self, child: ZoteroObject):
"""Template method for visiting a node."""
pass
@abstractmethod
def leave_parent(self, parent: ZoteroObject):
"""Template method for traversing up/out of a node."""
pass
class PrintVisitor(Visitor):
"""A visitor that prints items for debugging.
"""
def __init__(self, writer: TextIOBase):
self.writer = writer
self.depth = 0
def enter_parent(self, parent: ZoteroObject):
self.writer.write(f"{' ' * (self.depth * 4)}{str(parent)} " +
f'({parent.__class__.__name__})\n')
self.depth += 1
def visit_child(self, child: ZoteroObject):
pass
def leave_parent(self, parent: ZoteroObject):
self.depth -= 1
class Walker(ABC):
"""Iterates the Zotero data and calls the visitor for each node.
"""
@abstractmethod
def walk(self, parent: ZoteroObject, visitor: Visitor):
"""Recursively traverse the object graph."""
pass
class UnsortedWalker(Walker):
"""Iterates through the Zotero visiting children in whatever order is
provided by the database.
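
    Example (illustrative; ``lib`` is a :class:`.Library` instance)::

        UnsortedWalker().walk(lib, PrintVisitor(sys.stdout))
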
"""
def walk(self, parent: ZoteroObject, visitor: Visitor):
visitor.enter_parent(parent)
for c in parent.children:
visitor.visit_child(c)
self.walk(c, visitor)
visitor.leave_parent(parent)
class SortedWalker(Walker):
"""Iterates through the Zotero visiting children in sorted order.
"""
def __init__(self, key_fn: Callable = None, reverse: bool = False):
"""Initialize.
        :param key_fn: a function/callable used to sort the data that takes a
                       single argument to access the compared data, which
                       defaults to :func:`str`
:param reverse: whether or not to reverse the visited results
"""
if key_fn is None:
self.key_fn = str
else:
self.key_fn = key_fn
self.reverse = reverse
def walk(self, parent: ZoteroObject, visitor: Visitor):
visitor.enter_parent(parent)
kids = sorted(parent.children, key=self.key_fn, reverse=self.reverse)
for c in kids:
visitor.visit_child(c)
self.walk(c, visitor)
visitor.leave_parent(parent) | zensols.zotsite | /zensols.zotsite-0.8.1-py3-none-any.whl/zensols/zotsite/domain.py | domain.py |
__author__ = 'Paul Landes'
from dataclasses import dataclass, field
import logging
from pathlib import Path
import sqlite3
from zensols.zotsite.domain import Collection, Library, Item, Note, Name
logger = logging.getLogger(__name__)
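# Example (an illustrative sketch) of reading a library from a local Zotero
# data directory; the path below is an assumption for a typical install:
#
#     reader = DatabaseReader(Path('~/Zotero').expanduser())
#     lib: Library = reader.get_library()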
@dataclass
class DatabaseReader(object):
"""Database access to Zotero store.
"""
data_dir: Path = field()
"""Directory containing the Zotero DB files (sqlite and collections)."""
collection_like: str = field(default='%')
"""The SQL pattern to match against subcollection names."""
    library_id: int = field(default=1)
    """The DB id of the library to export."""
def _collection_sql(self, whparams):
"""Create an SQL string to get collections rows."""
return """
select c.collectionId c_id, ci.itemId c_iid,
c.parentCollectionId c_pid, c.collectionName c_name
from collections c
left join collectionItems ci on c.collectionId = ci.collectionId
where c.libraryId = %(library_id)s and
c.collectionName like '%(coll_name)s'
""" % whparams
def _item_sql(self, whparams):
"""Create an SQL string to get items (attachments) rows."""
return """
select c.collectionId c_id, c.parentCollectionId c_pid,
c.collectionName c_name,
it.itemId i_id, ia.parentItemId i_pid, it.key, iy.typeName type,
ia.contentType content_type, ia.path,
itn.title n_title, itn.note n_note, itn.parentItemId n_pid
from items it, itemTypes iy
left join itemAttachments ia on it.itemId = ia.itemId
left join collectionItems ci on ci.itemId = it.itemId
left join collections c on c.collectionId = ci.collectionId
left join itemNotes itn on it.itemId = itn.itemId
where it.itemTypeId = iy.itemTypeId and
it.itemId not in (select itemId from deletedItems)
order by ci.orderIndex;
""" % whparams
def _item_meta_sql(self, whparams):
"""Create an SQL string to get items metadata rows."""
return """
select f.fieldName name, iv.value
from items i, itemTypes it, itemData id, itemDataValues iv, fields f
where i.itemTypeId = it.itemTypeId and
i.itemId = id.itemId and
id.valueId = iv.valueId and
id.fieldId = f.fieldId and
i.itemId = %(item_id)s and
i.itemId not in (select itemId from deletedItems)""" % whparams
    def _item_creators_sql(self, whparams):
        """Return SQL for the creators (authors) of a single item."""
return """
select c.firstName, c.lastName
from itemCreators ic, creators c
where ic.creatorID = c.creatorID and
ic.itemID = %(item_id)s
order by ic.orderIndex""" % whparams
    def get_connection(self):
        """Return a database connection to the SQLite database.
"""
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
db_file = Path(self.data_dir, 'zotero.sqlite')
logger.info(f'reading SQLite file: {db_file}')
if not db_file.exists():
raise OSError(f'no such data file: {db_file}')
conn = sqlite3.connect(db_file)
conn.row_factory = dict_factory
return conn
def _get_item_meta(self, item, conn, whparams):
"""Return the item metadata from the database.
:param item: the item to fetch data for
:param conn: the DB connection
:param whparams: dict of parameters used for the metadata SQL query
"""
whparams['item_id'] = item['i_id']
meta = {}
for row in conn.execute(self._item_meta_sql(whparams)):
meta[row['name']] = row['value']
return meta
    def _get_item_creators(self, item, conn, whparams):
        """Return the item creators (authors) from the database.
:param item: the item to fetch data for
:param conn: the DB connection
:param whparams: dict of parameters used for the metadata SQL query
"""
whparams['item_id'] = item['i_id']
creators = []
for row in conn.execute(self._item_creators_sql(whparams)):
name = Name(row['firstName'], row['lastName'])
creators.append(name)
if len(creators) > 0:
return creators
def _select_items(self, conn):
"""Return items from the database.
:param conn: the DB connection
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'data_dir: {self.data_dir}')
wparams = {'library_id': self.library_id}
if logger.isEnabledFor(logging.DEBUG):
logger.debug('wparams: %s' % wparams)
items = {}
for item in conn.execute(self._item_sql(wparams)):
item['subs'] = []
if not item['i_pid'] and not item['c_pid']:
item['i_pid'] = item['n_pid']
iid = item['i_id']
if iid in items:
items[iid].append(item)
else:
items[iid] = [item]
for itemlst in items.values():
for item in itemlst:
meta = self._get_item_meta(item, conn, wparams)
item['meta'] = meta
creators = self._get_item_creators(item, conn, wparams)
item['creators'] = creators
for itemlst in items.values():
for item in itemlst:
i_pid = item['i_pid']
if i_pid in items:
for par in items[i_pid]:
par['subs'].append(item)
flst = []
for itemlst in items.values():
flst.extend(itemlst)
return flst
    def _select_collections(self, conn):
        """Return collections from the database.
:param conn: the DB connection
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'data_dir: {self.data_dir} ' +
f'pattern: {self.collection_like}')
wparams = {'library_id': self.library_id,
'coll_name': self.collection_like}
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'wparams: {wparams}')
colls = {}
for row in conn.execute(self._collection_sql(wparams)):
row['subs'] = []
colls[row['c_id']] = row
for coll in colls.values():
c_pid = coll['c_pid']
if c_pid not in colls:
coll['c_pid'] = None
c_pid = None
if c_pid:
par = colls[c_pid]
par['subs'].append(coll)
return list(filter(lambda x: x['c_pid'] is None and x['c_id'],
colls.values()))
    def _create_item(self, item):
        """Return a domain object that represents an item (i.e. PDF attachment,
link, note etc).
"""
children = list(map(lambda x: self._create_item(x), item['subs']))
if item['type'] == 'note':
item = Note(item)
else:
item = Item(item, children)
return item
def _create_collection(self, coll, by_cid):
"""Return a domain object that represents a Zotero DB (sub)collection.
        :param coll: the collection row to convert
        :param by_cid: a mapping from collection ID to the items it contains
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug('processing: {} ({}, {})'.
format(coll['c_name'], coll['c_id'], coll['c_iid']))
cid = coll['c_id']
items = []
if cid in by_cid:
toadd = by_cid[cid]
items.extend(toadd)
logger.debug('children items: %d' % len(toadd))
children = list(map(lambda x: self._create_collection(x, by_cid),
coll['subs']))
items = list(map(lambda x: self._create_item(x), items))
return Collection(coll, items, children)
    def _create_library(self, colls, items) -> Library:
        """Return a domain object graph that represents the entire Zotero
        library.
        :param colls: the root level collection rows
        :param items: the item rows across all collections
"""
by_cid = {}
for i in items:
cid = i['c_id']
if cid:
if cid in by_cid:
cid_lst = by_cid[cid]
else:
cid_lst = []
by_cid[cid] = cid_lst
cid_lst.append(i)
fcolls = []
for coll in colls:
fcoll = self._create_collection(coll, by_cid)
fcolls.append(fcoll)
return Library(self.data_dir, self.library_id, fcolls)
def get_library(self) -> Library:
"""Get an object graph representing the data in the Zotero database.
"""
conn = self.get_connection()
try:
colls = self._select_collections(conn)
items = self._select_items(conn)
lib = self._create_library(colls, items)
finally:
conn.close()
return lib | zensols.zotsite | /zensols.zotsite-0.8.1-py3-none-any.whl/zensols/zotsite/db.py | db.py |
__author__ = 'Paul Landes'
from typing import Dict
from dataclasses import dataclass, field
import logging
import sys
import json
from pathlib import Path
from io import TextIOBase
import shutil
from zensols.config import Settings, ConfigFactory
from zensols.persist import persisted
from zensols.zotsite import (
ZoteroApplicationError, DatabaseReader, RegexItemMapper, IdItemMapper,
Library, Walker,
NavCreateVisitor, FileSystemCopyVisitor, PruneVisitor, PrintVisitor,
BetterBibtexVisitor,
)
logger = logging.getLogger(__name__)
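# Example (an illustrative sketch) of creating the site; the configuration
# section name given to the factory is an assumption:
#
#     creator: SiteCreator = config_factory('site_creator')
#     creator.export()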
@dataclass
class SiteCreator(object):
"""Creates the Zotero content web site.
"""
config_factory: ConfigFactory = field()
"""The configuration factory used to create the :class:`.Walker` instance.
"""
    package: Settings = field()
    """Contains this Python package information used to create the site
metadata.
"""
    site_resource: Path = field()
    """The (resource) path to the static site files."""
db: DatabaseReader = field()
"""The database access object."""
prune_visitor: PruneVisitor = field()
"""A visitor that prunes collections based on a regular expression."""
sort_walkers: Settings = field()
"""A mapping of name to a :class:`.Walker` instance definition configuration
section.
"""
    sort: str = field(default='none')
    """Whether or not to sort items, either: ``none`` or ``case`` (non-case
might be added later).
"""
    id_mapping: str = field(default='none')
    """How to generate unique identifiers for URLs, either ``none`` or
    ``betterbib``.
"""
file_mapping: str = field(default='item')
"""Whether to use unique item IDs for the file names or the full PDF file
name; either: ``item`` or ``long``
"""
out_dir: Path = field(default=None)
"""The default output directory to store the collection."""
    robust_fs: bool = field(default=False)
    """Whether to log and continue on file system errors rather than raise an
    exception."""
@property
@persisted('_walker')
def walker(self) -> Walker:
walker_class_name = self.sort_walkers.get(self.sort)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'using walker: {walker_class_name}')
if walker_class_name is None:
raise ZoteroApplicationError(
f'Configuration error: no such walker: {self.sort}')
return self.config_factory(walker_class_name)
@property
@persisted('_library')
def library(self) -> Library:
lib: Library = self.db.get_library()
if self.prune_visitor.should_walk:
self.walker.walk(lib, self.prune_visitor)
if self.id_mapping == 'none':
pass
elif self.id_mapping == 'betterbib':
visitor = BetterBibtexVisitor(lib)
self.walker.walk(lib, visitor)
else:
raise ZoteroApplicationError(
f'Unknown ID mapping: {self.id_mapping}')
return lib
@property
@persisted('_item_mapper')
def item_mapper(self):
if self.file_mapping == 'long':
mapper = RegexItemMapper(self.library, r'.*\.pdf$', '[ ]')
elif self.file_mapping == 'item':
mapper = IdItemMapper(self.library)
else:
raise ZoteroApplicationError(
f'Unknown file mapping: {self.file_mapping}')
return mapper
def print_structure(self, writer: TextIOBase = sys.stdout):
"""Print (sub)collections and papers in those collections as a tree."""
self.walker.walk(self.library, PrintVisitor(writer))
    def _write_meta(self, path: Path):
        """Write version and other metadata to the website, which is used
        during rendering of the site.
"""
meta: Dict[str, str] = {'version': self.package.version or '<none>',
'project_name': self.package.name or '<none>'}
js: str = f'var zoteroMeta = {json.dumps(meta)};'
with open(path, 'w') as f:
f.write(js)
def _create_tree_data(self):
"""Create the table of contents/tree info used by the navigation widget.
"""
js_dir: Path = self.out_dir / 'js'
nav_file: Path = js_dir / 'zotero-tree-data.js'
if logger.isEnabledFor(logging.INFO):
logger.info(f'creating js nav tree: {nav_file}')
visitor = NavCreateVisitor(self.library, self.item_mapper)
self.walker.walk(self.library, visitor)
with open(nav_file, 'w') as f:
f.write("var tree =\n")
f.write(json.dumps(visitor.primary_roots, indent=2))
meta_file = Path(js_dir, 'zotero-meta.js')
if logger.isEnabledFor(logging.INFO):
logger.info(f'creating js metadata: {meta_file}')
self._write_meta(meta_file)
def _copy_storage(self):
"""Copy the storage contents, which is the location of the PDF (and other)
documents that will be rendered in the site GUI.
"""
dst: Path = self.out_dir
fsvisitor = FileSystemCopyVisitor(
self.library, dst, self.robust_fs, self.item_mapper)
if logger.isEnabledFor(logging.INFO):
logger.info(f'copying storage to {dst}')
self.walker.walk(self.library, fsvisitor)
def _copy_static_res(self, src: Path, dst: Path):
"""Copy static resources from the distribution package.
:param src: the source package directory
:param dst: the destination on the file system
"""
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'copy: {src} -> {dst}')
dst.mkdir(parents=True, exist_ok=True)
for res in src.iterdir():
res = res.name
src_file = src / res
dst_file = dst / res
if src_file.is_dir():
self._copy_static_res(src_file, dst_file)
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'copy: {src_file} -> {dst_file}')
shutil.copyfile(src_file, dst_file)
def _copy_static(self):
if logger.isEnabledFor(logging.INFO):
logger.info(f'copying static data -> {self.out_dir}')
res: Path = self.site_resource
if not res.exists():
raise ZoteroApplicationError(
f'Missing resource directory {res}')
for rpath in res.iterdir():
self._copy_static_res(rpath, self.out_dir)
def export(self):
"""Entry point method to export (create) the website.
"""
self._copy_static()
self._create_tree_data()
self._copy_storage()
def tmp(self):
print(self.package) | zensols.zotsite | /zensols.zotsite-0.8.1-py3-none-any.whl/zensols/zotsite/screate.py | screate.py |
__author__ = 'Paul Landes'
from typing import Union
from dataclasses import dataclass, field
import logging
import re
from zensols.zotsite import Visitor, Library, Collection, ZoteroObject, Item
logger = logging.getLogger(__name__)
@dataclass
class PruneVisitor(Visitor):
    """This visitor filters out ``Collection`` instances based on a regular
    expression.  Optionally, ``Item`` level nodes that match are also kept
    when ``match_children`` is set.
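
    Example (illustrative; the pattern and ``walker`` are assumptions)::

        visitor = PruneVisitor(prune_pattern=r'^Papers$')
        if visitor.should_walk:
            walker.walk(lib, visitor)
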
"""
prune_pattern: Union[re.Pattern, str] = field(default=None)
"""A regular expression used to filter ``Collection`` nodes."""
match_children: bool = field(default=False)
"""if ``True``, then also match ``Item`` level nodes."""
def __post_init__(self):
if isinstance(self.prune_pattern, str):
self.prune_pattern = re.compile(self.prune_pattern)
self._matched_coll = None
self._keep = []
self._keep_ids = set()
@property
def should_walk(self) -> bool:
return self.prune_pattern is not None
def _add_child_item_ids(self, parent: ZoteroObject):
self._keep_ids.add(parent.id)
for child in parent.children:
self._add_child_item_ids(child)
def _add(self, node: ZoteroObject):
        # we descend into parents first, so as long as we check first we won't
        # duplicate Item level nodes
if node.id not in self._keep_ids:
self._add_child_item_ids(node)
self._keep.append(node)
def enter_parent(self, parent: ZoteroObject):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'entering: {parent} ({parent.__class__.__name__})')
if isinstance(parent, Collection):
if self._matched_coll is None and \
self.prune_pattern.match(parent.name):
logger.debug(f'found: {parent.name}')
self._matched_coll = parent
self._add(parent)
def visit_child(self, child: ZoteroObject):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'visiting: {child}')
if isinstance(child, Item) and \
self.match_children and \
self._matched_coll is None and \
self.prune_pattern.match(child.name):
self._add(child)
def leave_parent(self, parent: ZoteroObject):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'leaving: {format(parent)}')
if isinstance(parent, Collection):
if self._matched_coll == parent:
logger.debug(f'leaving: {self._matched_coll}')
self._matched_coll = None
elif isinstance(parent, Library):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'leaving lib {parent}, setting col: {self._keep}')
parent.items = ()
parent.collections = self._keep | zensols.zotsite | /zensols.zotsite-0.8.1-py3-none-any.whl/zensols/zotsite/prunevisitor.py | prunevisitor.py |
__author__ = 'Paul Landes'
import logging
import shutil
from pathlib import Path
from . import Visitor, ItemMapper, Item, ZoteroObject, Library
logger = logging.getLogger(__name__)
class FileSystemCopyVisitor(Visitor):
"""This class copies all Item objects to their destination.
"""
def __init__(self, lib: Library, out_dir: Path, robust: bool,
itemmapper: ItemMapper):
"""Initialize the visitor object.
:param lib: the object graph returned from
``DatabaseReader.get_library``.
:param out_dir: the target directory to copy data
        :param robust: whether to log and continue on file system errors
                       rather than raise an exception
:param itemmapper: used for file name substitution so the widget uses
the correct names (i.e. underscore substitution)
"""
self._out_path = out_dir
self._robust = robust
self._itemmapper = itemmapper
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'out_path: {self._out_path}')
def enter_parent(self, parent: ZoteroObject):
pass
def visit_child(self, child: ZoteroObject):
if isinstance(child, Item):
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'child: {child.path}')
if child.path is not None:
src: Path = child.path
dst = self._out_path / self._itemmapper.get_file_name(child)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'copy: {src} -> {dst}')
parent = dst.parent
if not dst.is_file():
if not parent.is_dir():
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'create: {parent}')
parent.mkdir(parents=True, exist_ok=True)
src, dst = str(src), str(dst)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(f'shcopy: {src} -> {dst}')
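                    # copy the contents, then replicate the source's stat
                    # metadata (permission bits and timestamps)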
try:
shutil.copyfile(src, dst)
shutil.copystat(src, dst)
except OSError as e:
if self._robust:
logger.error(f'could not copy {src} to {dst}: {e}')
else:
raise e
def leave_parent(self, parent: ZoteroObject):
        pass

# file: zensols/zotsite/fscpvisitor.py (package: zensols.zotsite)
function ZoteroManager(levels, meta, isView) {
this.levels = levels;
this.meta = meta;
    this.isView = isView;
    var lastNode = null;
    // create the metadata table, which holds the key/value pairs given from
    // a zotero collection (sub folder) or attachment
function createMetaTable(meta) {
var tbl = document.createElement('table');
var tbdy = document.createElement('tbody');
var thead = document.createElement('thead');
var tr = document.createElement('tr');
var mlen = meta.length;
tbl.classList.add('table');
tbl.classList.add('meta-table');
tbl.classList.add('border');
thead.classList.add('meta-thead');
var th = document.createElement('th');
th.appendChild(document.createTextNode('Description'));
tr.appendChild(th);
th = document.createElement('th');
th.appendChild(document.createTextNode('Value'));
tr.appendChild(th);
thead.appendChild(tr);
// add metadata key/value pairs as rows in the table
for (var i = 0; i < mlen; i++) {
var td = document.createElement('td');
var tval = document.createElement('div');
var key = meta[i][0];
var val = meta[i][1];
tr = document.createElement('tr');
            td.appendChild(document.createTextNode(key));
            td.classList.add('meta-table-key');
            tr.appendChild(td);
            td = document.createElement('td');
            if (key == 'URL') {
                var anch = document.createElement('a');
                anch.setAttribute('href', val);
                anch.appendChild(document.createTextNode(val));
                tval.appendChild(anch);
            } else {
                tval.appendChild(document.createTextNode(val));
            }
            tval.classList.add('meta-table-val');
            td.appendChild(tval);
            tr.appendChild(td);
            tbdy.appendChild(tr);
}
tbl.appendChild(thead);
tbl.appendChild(tbdy);
return tbl;
}
// create the header pane containing title of attachment, collection or note
// params:
// node: the node currently selected in the left nav
// root: root element to append to, which is the table td element from the main
// table
function headerPane(node, root) {
var topPanel = document.createElement('div');
var head = document.createElement('h1');
var btn = document.createElement('button');
var topElem;
root.classList.add('max-cell');
// title of content at top
head.classList.add('bd-title')
head.appendChild(document.createTextNode(node.item_title));
// header element
topElem = document.createElement('div');
topElem.appendChild(head);
topElem.classList.add('nav-item');
topPanel.appendChild(topElem);
// view/download button
btn.classList.add('btn');
btn.classList.add('btn-primary');
btn.classList.add('btn-sm');
btn.classList.add('content-head-pane-btn');
btn.setAttribute('type', 'button');
btn.id = "view-button";
btn.appendChild(document.createTextNode('View'));
topElem = document.createElement('div');
topElem.appendChild(btn);
topElem.classList.add('nav-item');
topPanel.appendChild(topElem);
// header element
topPanel.classList.add('d-flex');
topPanel.classList.add('justify-content-between');
topPanel.classList.add('content-head-pane');
        // wire the button to the pdf/html attachment if one exists;
        // otherwise disable it and add a tooltip directing the user to a
        // child node
        if (node.resource) {
            btn.setAttribute('onClick', "location.href='" +
                             node.resource + "'");
} else {
btn.classList.add('disabled');
btn.setAttribute('data-toggle', 'tooltip');
btn.setAttribute('data-placement', 'left');
btn.setAttribute('data-html', 'true');
btn.setAttribute('title', 'Not an attachment,<br/>try child node');
// there's got to be a better way...
$(function () {
$('[data-toggle="tooltip"]').tooltip()
});
}
root.appendChild(topPanel);
}
// populate the link button and update main screen link
function updateLink(node) {
var itemDocLinkButton = document.getElementById(
"item-document-link-button");
var link = null;
if (node) {
link = createDocumentLink(node);
}
if (link) {
itemDocLinkButton.setAttribute('data-original-title', link);
itemDocLinkButton.classList.remove('disabled');
} else {
itemDocLinkButton.removeAttribute('data-original-title');
itemDocLinkButton.classList.add('disabled');
}
}
// create a link that points to the current document
function createDocumentLink(node) {
        var type = 'item';
        if (node['node_type'] == 'item') {
            var link;
            if (type == 'doc') {
                link = document.location.href;
var idx = link.lastIndexOf('/');
link = link.substring(0, idx);
link = link + '/' + node.resource;
} else {
var proto = window.location.protocol;
var host = window.location.host;
var path = window.location.pathname;
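                // builds e.g. "https://host/path/index.html?id=<item-id>"
                // (host and path here are illustrative)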
link = proto + "//" + host + path + '?id=' + node['item-id'];
}
return link;
}
}
// show an alert message on the screen for 2s
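    // e.g. showAlert('Link copied', link, 'success')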
function showAlert(message, subMessage, type) {
var alertClass = 'alert-' + type;
$("#alert-box").html('<strong>' + message + '</strong>: ' + subMessage);
$("#alert-box").removeClass(alertClass).addClass(alertClass);
$('#alert-box').slideDown("fast");
setTimeout(function() {
$('#alert-box').slideUp("fast");
}, 2000);
}
// when clicking the link button to copy the link
function itemDocLinkClicked(node) {
console.log('item document link clicked: ' + node);
        var link = createDocumentLink(node);
        if (link) {
            console.log('copying link: ' + link);
            setClipboardText(link);
showAlert('Link copied', link, 'success');
}
}
    // create the table with the collection information of the child paper
    // nodes
function createCollectionTable(node) {
function createTable(cols, rows) {
var tab = document.createElement('table');
var thead = document.createElement('thead');
var tbody = document.createElement('tbody');
var tr = document.createElement('tr');
var tabItemToJs = mapItemToJs();
function addHeaderCols(cols) {
for (var i = 0; i < cols.length; i++) {
var th = document.createElement('th');
th.appendChild(document.createTextNode(cols[i]));
tr.appendChild(th);
}
}
function addRow(cells) {
var tr = document.createElement('tr');
for (var i = 0; i < cells.length; i++) {
var cell = cells[i];
var itemId = cell[0];
var cellText = cell[1];
var td = document.createElement('td');
var tnode = document.createTextNode(cellText);
                    td.classList.add('word-wrap', 'break-word', 'meta-table-val');
tr.appendChild(td);
if (i == 0) {
var link = document.createElement('a');
link.href = itemId;
link.onclick = function(e) {
e.preventDefault();
showItem(itemId, tabItemToJs);
}
link.appendChild(tnode);
td.appendChild(link);
} else {
td.appendChild(tnode);
}
}
tbody.appendChild(tr);
}
tab.id = 'collections-table';
            tab.cellSpacing = 0;
tab.width = '100%';
thead.classList.add('meta-thead');
tab.appendChild(thead);
tab.appendChild(tbody);
thead.appendChild(tr);
addHeaderCols(cols);
for (var rix = 0; rix < rows.length; rix++) {
addRow(rows[rix]);
}
return tab;
}
var cols = ['Title', 'Creators', 'Date'];
var childs = node.nodes;
var rows = [];
var tab = null;
for (var i = 0; i < childs.length; i++) {
var c = childs[i];
var meta = c.metadata;
if (meta != null) {
var metaByCol = {};
var row = [];
for (var mix = 0; mix < meta.length; mix++) {
var mpair = meta[mix];
metaByCol[mpair[0]] = mpair[1];
}
for (var cix = 0; cix < cols.length; cix++) {
var col = cols[cix];
var cval = metaByCol[col];
if (cval == null) cval = '';
row.push([c['item-id'], cval]);
}
rows.push(row);
}
}
if (rows.length > 0) tab = createTable(cols, rows);
        return tab;
}
// create the main (right) content pane in the main top level table
// params:
// node: the node currently selected in the left nav
function createMain(node) {
console.log('create main: ' + node);
var cont = document.getElementById("zotero-main-content");
while (cont.firstChild) {
cont.removeChild(cont.firstChild);
}
if (node) {
var meta = node.metadata;
var sel = node.state.selected;
            var nodeType;
            var hasContent;
            var hasNote;
// determine the type of node in the tree we're visiting
if (node.item_type == 'attachment') {
nodeType = 'attachment';
} else if (node.item_type == 'note') {
nodeType = 'note';
} else if (meta != null) {
nodeType = 'meta';
}
hasNote = ((nodeType == 'note') && sel);
hasContent = (((nodeType == 'attachment') && sel) ||
(nodeType == 'meta'));
cont.className = '';
} else {
hasContent = false;
hasNote = false;
nodeType = null;
}
var initCollectionsTable = false;
// add the header pane
if (hasContent) {
headerPane(node, cont);
} else if (!hasNote) {
// add collection table if we find the metadata level node;
// otherwise give the "No Content" message
var noc = document.createElement('div');
var ctab = createCollectionTable(node);
if (ctab != null) {
console.log('adding collection table');
var root = document.createElement('div');
var title = document.createElement('div');
var header = document.createElement('h1');
header.classList.add('bd-title');
header.appendChild(document.createTextNode(node.item_title));
root.classList.add('nav-item');
ctab.classList.add('table', 'border', 'meta-table');
title.appendChild(header);
root.appendChild(title);
root.appendChild(ctab);
noc.appendChild(root);
initCollectionsTable = true;
} else {
                console.log('no collection data found');
cont.classList.add('center-cell');
noc.classList.add('disabled-text');
noc.appendChild(document.createTextNode('No Content'));
}
cont.appendChild(noc);
}
if (initCollectionsTable) {
if (!$.fn.DataTable.isDataTable('#collections-table')) {
$('#collections-table').DataTable({
// https://datatables.net/examples/basic_init/dom.html
dom: '<tp>',
'pageLength': 50,
});
}
}
// add metadata if there is any
if (meta &&
(((nodeType == 'attachment') && sel) || !(nodeType == 'attachment'))) {
var metaTable = createMetaTable(meta);
cont.appendChild(metaTable);
}
// add notes if there are any
if (hasNote) {
console.log('adding note: ' + node.resource);
var card = document.createElement('div');
var cardBlock = document.createElement('div');
var h = document.createElement('h3');
card.classList.add('card');
card.classList.add('center-pane');
card.classList.add('note-pane');
h.classList.add('card-header');
h.appendChild(document.createTextNode(node.item_title));
card.appendChild(h);
cardBlock.classList.add('card-block');
var p = document.createElement('p');
p.classList.add('card-text');
var divElem = document.createElement('div');
var text = node.item_note;
divElem.innerHTML = text;
p.appendChild(divElem);
cardBlock.appendChild(p);
card.appendChild(cardBlock);
cont.classList.add('content-note');
cont.appendChild(card);
}
        // add the (usually PDF or snapshot site) attachment
if ((nodeType == 'attachment') && sel) {
console.log('adding resource: ' + node.resource);
var aelem = document.createElement('div');
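            // html snapshots are fetched and inlined; anything else (usually
            // a PDF) is embedded via an <object> element below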
if (node.resource.endsWith('.html')) {
$.ajax({
url: node.resource,
type: 'GET',
dataType: 'html',
success: function(data) {
aelem.innerHTML = data;
}});
} else {
var objElem = document.createElement('object');
aelem.classList.add('embed-responsive');
aelem.classList.add('border');
aelem.classList.add('rounded');
aelem.classList.add('pdf-pane');
objElem.setAttribute('data', node.resource);
objElem.setAttribute('type', 'application/pdf');
objElem.appendChild(document.createTextNode('No PDF plugin'));
aelem.appendChild(objElem);
}
cont.appendChild(aelem);
}
updateLink(node);
}
// called when the user types in the search box and narrows the tree search
function onSearchChange(text) {
console.log('search updated: ' + text);
var tree = $('#tree').treeview(true);
if (text.length == 0) {
tree.clearSearch();
} else {
var options = {ignoreCase: true,
exactMatch: false,
revealResults: true}
            tree.search(text, options);
}
}
// map item zotero IDs to tree node IDs
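    // e.g. (illustrative): {'ABCD1234': 7} maps a Zotero item key to the
    // treeview nodeId used by revealNode/selectNode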
function mapItemToJsNodes(itemToJs, nodes) {
for (var i = 0; i < nodes.length; i++) {
            var node = nodes[i];
            var key = node['item-id'];
itemToJs[key] = node.nodeId;
if (typeof node.nodes != 'undefined') {
mapItemToJsNodes(itemToJs, node.nodes);
}
}
return itemToJs;
}
function mapItemToJs() {
var tree = $('#tree').treeview(true);
var itemToJs = {};
        mapItemToJsNodes(itemToJs, [tree.getNode(0)]);
        mapItemToJsNodes(itemToJs, tree.getSiblings(0));
return itemToJs;
}
function showItem(itemId, itemToJs) {
console.log('show item by id: ' + itemId);
var tree = $('#tree').treeview(true);
if (itemToJs[itemId] == undefined) {
console.log(' no such item ID: ' + itemId);
} else {
var nodeId = itemToJs[itemId];
tree.revealNode(nodeId);
tree.selectNode(nodeId);
}
}
    // show the nodes given by the search and hide all others
// used when the user uses the search button or presses enter
function searchNarrow() {
var tree = $('#tree').treeview(true);
var field = document.getElementById("termSearch");
var text = field.value;
if (text.length > 0) {
console.log('searching on text: ' + text);
var options = {ignoreCase: true,
exactMatch: false,
revealResults: false}
var nodes = tree.getExpanded();
var nlen = nodes.length;
for (var i = 0; i < nlen; i++) {
var node = nodes[i];
console.log('collapsing: ' + node);
tree.collapseNode(node.nodeId, {levels: 1});
}
nodes = tree.search(text, options);
nlen = nodes.length;
tree.collapseAll({silent: true});
for (var i = 0; i < nlen; i++) {
var node = nodes[i];
console.log('expanding (' + node.nodeId + '), ' + node.text);
tree.revealNode(node.nodeId, {levels: 1});
}
if (nlen == 1) {
tree.selectNode(node.nodeId);
}
}
}
function updateMain(event, node) {
console.log('updating: ' + node.nodeId + '( ' + node['item-id'] + ')');
console.log(node);
createMain(node);
lastNode = node;
}
function insertVersion() {
var verTextElem = $('#project-link-version a');
var verAnch = $('#project-link-version a')[0];
var verText = 'v' + meta.version;
verAnch.href = verAnch.href + verText;
verTextElem.text(verText);
}
this.reset = function() {
console.log('resetting');
var tree = $('#tree').treeview(true);
tree.collapseAll();
createMain(null);
updateLink(null);
lastNode = null;
}
// initialization called on page load
this.init = function(itemId) {
console.log('version: ' + meta.version);
$('#tree').treeview({
data: tree,
levels: levels,
onNodeSelected: updateMain,
onNodeUnselected: updateMain,
onNodeExpanded: updateMain,
onNodeCollapsed: updateMain,
            onNodeDisabled: updateMain,
            onNodeEnabled: updateMain,
});
$('#termSearch').on('keyup', function(e) {
if (e.keyCode == 13) {
searchNarrow();
} else {
onSearchChange(this.value);
}
});
$('#item-document-link-button').click(function() {
if (typeof lastNode != 'undefined') {
itemDocLinkClicked(lastNode);
}
});
        var linkButton = document.getElementById("item-document-link-button");
        var btn = linkButton;
btn.setAttribute('link-data-toggle', 'tooltip');
btn.setAttribute('link-data-placement', 'right');
btn.setAttribute('link-data-html', 'true');
$(function () {
$('[link-data-toggle="tooltip"]').tooltip()
});
insertVersion();
        var itemToJs = mapItemToJs();
console.log(itemToJs);
if (itemId) {
showItem(itemId, itemToJs);
}
console.log('isView: ' + isView);
if (isView) {
$('#view-button').click();
}
}
}

// file: zensols/zotsite/resources/site/src/js/zotero.js (package: zensols.zotsite)
!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?e(exports,require("jquery"),require("popper.js")):"function"==typeof define&&define.amd?define(["exports","jquery","popper.js"],e):e((t="undefined"!=typeof globalThis?globalThis:t||self).bootstrap={},t.jQuery,t.Popper)}(this,(function(t,e,n){"use strict";function i(t){return t&&"object"==typeof t&&"default"in t?t:{default:t}}var o=i(e),a=i(n);function s(t,e){for(var n=0;n<e.length;n++){var i=e[n];i.enumerable=i.enumerable||!1,i.configurable=!0,"value"in i&&(i.writable=!0),Object.defineProperty(t,i.key,i)}}function l(t,e,n){return e&&s(t.prototype,e),n&&s(t,n),Object.defineProperty(t,"prototype",{writable:!1}),t}function r(){return r=Object.assign?Object.assign.bind():function(t){for(var e=1;e<arguments.length;e++){var n=arguments[e];for(var i in n)Object.prototype.hasOwnProperty.call(n,i)&&(t[i]=n[i])}return t},r.apply(this,arguments)}function u(t,e){return u=Object.setPrototypeOf?Object.setPrototypeOf.bind():function(t,e){return t.__proto__=e,t},u(t,e)}var f="transitionend";var d={TRANSITION_END:"bsTransitionEnd",getUID:function(t){do{t+=~~(1e6*Math.random())}while(document.getElementById(t));return t},getSelectorFromElement:function(t){var e=t.getAttribute("data-target");if(!e||"#"===e){var n=t.getAttribute("href");e=n&&"#"!==n?n.trim():""}try{return document.querySelector(e)?e:null}catch(t){return null}},getTransitionDurationFromElement:function(t){if(!t)return 0;var e=o.default(t).css("transition-duration"),n=o.default(t).css("transition-delay"),i=parseFloat(e),a=parseFloat(n);return i||a?(e=e.split(",")[0],n=n.split(",")[0],1e3*(parseFloat(e)+parseFloat(n))):0},reflow:function(t){return t.offsetHeight},triggerTransitionEnd:function(t){o.default(t).trigger(f)},supportsTransitionEnd:function(){return Boolean(f)},isElement:function(t){return(t[0]||t).nodeType},typeCheckConfig:function(t,e,n){for(var i in n)if(Object.prototype.hasOwnProperty.call(n,i)){var o=n[i],a=e[i],s=a&&d.isElement(a)?"element":null===(l=a)||"undefined"==typeof l?""+l:{}.toString.call(l).match(/\s([a-z]+)/i)[1].toLowerCase();if(!new RegExp(o).test(s))throw new Error(t.toUpperCase()+': Option "'+i+'" provided type "'+s+'" but expected type "'+o+'".')}var l},findShadowRoot:function(t){if(!document.documentElement.attachShadow)return null;if("function"==typeof t.getRootNode){var e=t.getRootNode();return e instanceof ShadowRoot?e:null}return t instanceof ShadowRoot?t:t.parentNode?d.findShadowRoot(t.parentNode):null},jQueryDetection:function(){if("undefined"==typeof o.default)throw new TypeError("Bootstrap's JavaScript requires jQuery. 
jQuery must be included before Bootstrap's JavaScript.");var t=o.default.fn.jquery.split(" ")[0].split(".");if(t[0]<2&&t[1]<9||1===t[0]&&9===t[1]&&t[2]<1||t[0]>=4)throw new Error("Bootstrap's JavaScript requires at least jQuery v1.9.1 but less than v4.0.0")}};d.jQueryDetection(),o.default.fn.emulateTransitionEnd=function(t){var e=this,n=!1;return o.default(this).one(d.TRANSITION_END,(function(){n=!0})),setTimeout((function(){n||d.triggerTransitionEnd(e)}),t),this},o.default.event.special[d.TRANSITION_END]={bindType:f,delegateType:f,handle:function(t){if(o.default(t.target).is(this))return t.handleObj.handler.apply(this,arguments)}};var c="bs.alert",h=o.default.fn.alert,g=function(){function t(t){this._element=t}var e=t.prototype;return e.close=function(t){var e=this._element;t&&(e=this._getRootElement(t)),this._triggerCloseEvent(e).isDefaultPrevented()||this._removeElement(e)},e.dispose=function(){o.default.removeData(this._element,c),this._element=null},e._getRootElement=function(t){var e=d.getSelectorFromElement(t),n=!1;return e&&(n=document.querySelector(e)),n||(n=o.default(t).closest(".alert")[0]),n},e._triggerCloseEvent=function(t){var e=o.default.Event("close.bs.alert");return o.default(t).trigger(e),e},e._removeElement=function(t){var e=this;if(o.default(t).removeClass("show"),o.default(t).hasClass("fade")){var n=d.getTransitionDurationFromElement(t);o.default(t).one(d.TRANSITION_END,(function(n){return e._destroyElement(t,n)})).emulateTransitionEnd(n)}else this._destroyElement(t)},e._destroyElement=function(t){o.default(t).detach().trigger("closed.bs.alert").remove()},t._jQueryInterface=function(e){return this.each((function(){var n=o.default(this),i=n.data(c);i||(i=new t(this),n.data(c,i)),"close"===e&&i[e](this)}))},t._handleDismiss=function(t){return function(e){e&&e.preventDefault(),t.close(this)}},l(t,null,[{key:"VERSION",get:function(){return"4.6.2"}}]),t}();o.default(document).on("click.bs.alert.data-api",'[data-dismiss="alert"]',g._handleDismiss(new g)),o.default.fn.alert=g._jQueryInterface,o.default.fn.alert.Constructor=g,o.default.fn.alert.noConflict=function(){return o.default.fn.alert=h,g._jQueryInterface};var m="bs.button",p=o.default.fn.button,_="active",v='[data-toggle^="button"]',y='input:not([type="hidden"])',b=".btn",E=function(){function t(t){this._element=t,this.shouldAvoidTriggerChange=!1}var e=t.prototype;return e.toggle=function(){var t=!0,e=!0,n=o.default(this._element).closest('[data-toggle="buttons"]')[0];if(n){var i=this._element.querySelector(y);if(i){if("radio"===i.type)if(i.checked&&this._element.classList.contains(_))t=!1;else{var a=n.querySelector(".active");a&&o.default(a).removeClass(_)}t&&("checkbox"!==i.type&&"radio"!==i.type||(i.checked=!this._element.classList.contains(_)),this.shouldAvoidTriggerChange||o.default(i).trigger("change")),i.focus(),e=!1}}this._element.hasAttribute("disabled")||this._element.classList.contains("disabled")||(e&&this._element.setAttribute("aria-pressed",!this._element.classList.contains(_)),t&&o.default(this._element).toggleClass(_))},e.dispose=function(){o.default.removeData(this._element,m),this._element=null},t._jQueryInterface=function(e,n){return this.each((function(){var i=o.default(this),a=i.data(m);a||(a=new t(this),i.data(m,a)),a.shouldAvoidTriggerChange=n,"toggle"===e&&a[e]()}))},l(t,null,[{key:"VERSION",get:function(){return"4.6.2"}}]),t}();o.default(document).on("click.bs.button.data-api",v,(function(t){var 
e=t.target,n=e;if(o.default(e).hasClass("btn")||(e=o.default(e).closest(b)[0]),!e||e.hasAttribute("disabled")||e.classList.contains("disabled"))t.preventDefault();else{var i=e.querySelector(y);if(i&&(i.hasAttribute("disabled")||i.classList.contains("disabled")))return void t.preventDefault();"INPUT"!==n.tagName&&"LABEL"===e.tagName||E._jQueryInterface.call(o.default(e),"toggle","INPUT"===n.tagName)}})).on("focus.bs.button.data-api blur.bs.button.data-api",v,(function(t){var e=o.default(t.target).closest(b)[0];o.default(e).toggleClass("focus",/^focus(in)?$/.test(t.type))})),o.default(window).on("load.bs.button.data-api",(function(){for(var t=[].slice.call(document.querySelectorAll('[data-toggle="buttons"] .btn')),e=0,n=t.length;e<n;e++){var i=t[e],o=i.querySelector(y);o.checked||o.hasAttribute("checked")?i.classList.add(_):i.classList.remove(_)}for(var a=0,s=(t=[].slice.call(document.querySelectorAll('[data-toggle="button"]'))).length;a<s;a++){var l=t[a];"true"===l.getAttribute("aria-pressed")?l.classList.add(_):l.classList.remove(_)}})),o.default.fn.button=E._jQueryInterface,o.default.fn.button.Constructor=E,o.default.fn.button.noConflict=function(){return o.default.fn.button=p,E._jQueryInterface};var T="carousel",w="bs.carousel",C=o.default.fn[T],S="active",N="next",D="prev",A="slid.bs.carousel",I=".active.carousel-item",k={interval:5e3,keyboard:!0,slide:!1,pause:"hover",wrap:!0,touch:!0},O={interval:"(number|boolean)",keyboard:"boolean",slide:"(boolean|string)",pause:"(string|boolean)",wrap:"boolean",touch:"boolean"},j={TOUCH:"touch",PEN:"pen"},P=function(){function t(t,e){this._items=null,this._interval=null,this._activeElement=null,this._isPaused=!1,this._isSliding=!1,this.touchTimeout=null,this.touchStartX=0,this.touchDeltaX=0,this._config=this._getConfig(e),this._element=t,this._indicatorsElement=this._element.querySelector(".carousel-indicators"),this._touchSupported="ontouchstart"in document.documentElement||navigator.maxTouchPoints>0,this._pointerEvent=Boolean(window.PointerEvent||window.MSPointerEvent),this._addEventListeners()}var e=t.prototype;return e.next=function(){this._isSliding||this._slide(N)},e.nextWhenVisible=function(){var t=o.default(this._element);!document.hidden&&t.is(":visible")&&"hidden"!==t.css("visibility")&&this.next()},e.prev=function(){this._isSliding||this._slide(D)},e.pause=function(t){t||(this._isPaused=!0),this._element.querySelector(".carousel-item-next, .carousel-item-prev")&&(d.triggerTransitionEnd(this._element),this.cycle(!0)),clearInterval(this._interval),this._interval=null},e.cycle=function(t){t||(this._isPaused=!1),this._interval&&(clearInterval(this._interval),this._interval=null),this._config.interval&&!this._isPaused&&(this._updateInterval(),this._interval=setInterval((document.visibilityState?this.nextWhenVisible:this.next).bind(this),this._config.interval))},e.to=function(t){var e=this;this._activeElement=this._element.querySelector(I);var n=this._getItemIndex(this._activeElement);if(!(t>this._items.length-1||t<0))if(this._isSliding)o.default(this._element).one(A,(function(){return e.to(t)}));else{if(n===t)return this.pause(),void this.cycle();var i=t>n?N:D;this._slide(i,this._items[t])}},e.dispose=function(){o.default(this._element).off(".bs.carousel"),o.default.removeData(this._element,w),this._items=null,this._config=null,this._element=null,this._interval=null,this._isPaused=null,this._isSliding=null,this._activeElement=null,this._indicatorsElement=null},e._getConfig=function(t){return 
t=r({},k,t),d.typeCheckConfig(T,t,O),t},e._handleSwipe=function(){var t=Math.abs(this.touchDeltaX);if(!(t<=40)){var e=t/this.touchDeltaX;this.touchDeltaX=0,e>0&&this.prev(),e<0&&this.next()}},e._addEventListeners=function(){var t=this;this._config.keyboard&&o.default(this._element).on("keydown.bs.carousel",(function(e){return t._keydown(e)})),"hover"===this._config.pause&&o.default(this._element).on("mouseenter.bs.carousel",(function(e){return t.pause(e)})).on("mouseleave.bs.carousel",(function(e){return t.cycle(e)})),this._config.touch&&this._addTouchEventListeners()},e._addTouchEventListeners=function(){var t=this;if(this._touchSupported){var e=function(e){t._pointerEvent&&j[e.originalEvent.pointerType.toUpperCase()]?t.touchStartX=e.originalEvent.clientX:t._pointerEvent||(t.touchStartX=e.originalEvent.touches[0].clientX)},n=function(e){t._pointerEvent&&j[e.originalEvent.pointerType.toUpperCase()]&&(t.touchDeltaX=e.originalEvent.clientX-t.touchStartX),t._handleSwipe(),"hover"===t._config.pause&&(t.pause(),t.touchTimeout&&clearTimeout(t.touchTimeout),t.touchTimeout=setTimeout((function(e){return t.cycle(e)}),500+t._config.interval))};o.default(this._element.querySelectorAll(".carousel-item img")).on("dragstart.bs.carousel",(function(t){return t.preventDefault()})),this._pointerEvent?(o.default(this._element).on("pointerdown.bs.carousel",(function(t){return e(t)})),o.default(this._element).on("pointerup.bs.carousel",(function(t){return n(t)})),this._element.classList.add("pointer-event")):(o.default(this._element).on("touchstart.bs.carousel",(function(t){return e(t)})),o.default(this._element).on("touchmove.bs.carousel",(function(e){return function(e){t.touchDeltaX=e.originalEvent.touches&&e.originalEvent.touches.length>1?0:e.originalEvent.touches[0].clientX-t.touchStartX}(e)})),o.default(this._element).on("touchend.bs.carousel",(function(t){return n(t)})))}},e._keydown=function(t){if(!/input|textarea/i.test(t.target.tagName))switch(t.which){case 37:t.preventDefault(),this.prev();break;case 39:t.preventDefault(),this.next()}},e._getItemIndex=function(t){return this._items=t&&t.parentNode?[].slice.call(t.parentNode.querySelectorAll(".carousel-item")):[],this._items.indexOf(t)},e._getItemByDirection=function(t,e){var n=t===N,i=t===D,o=this._getItemIndex(e),a=this._items.length-1;if((i&&0===o||n&&o===a)&&!this._config.wrap)return e;var s=(o+(t===D?-1:1))%this._items.length;return-1===s?this._items[this._items.length-1]:this._items[s]},e._triggerSlideEvent=function(t,e){var n=this._getItemIndex(t),i=this._getItemIndex(this._element.querySelector(I)),a=o.default.Event("slide.bs.carousel",{relatedTarget:t,direction:e,from:i,to:n});return o.default(this._element).trigger(a),a},e._setActiveIndicatorElement=function(t){if(this._indicatorsElement){var e=[].slice.call(this._indicatorsElement.querySelectorAll(".active"));o.default(e).removeClass(S);var n=this._indicatorsElement.children[this._getItemIndex(t)];n&&o.default(n).addClass(S)}},e._updateInterval=function(){var t=this._activeElement||this._element.querySelector(I);if(t){var e=parseInt(t.getAttribute("data-interval"),10);e?(this._config.defaultInterval=this._config.defaultInterval||this._config.interval,this._config.interval=e):this._config.interval=this._config.defaultInterval||this._config.interval}},e._slide=function(t,e){var 
n,i,a,s=this,l=this._element.querySelector(I),r=this._getItemIndex(l),u=e||l&&this._getItemByDirection(t,l),f=this._getItemIndex(u),c=Boolean(this._interval);if(t===N?(n="carousel-item-left",i="carousel-item-next",a="left"):(n="carousel-item-right",i="carousel-item-prev",a="right"),u&&o.default(u).hasClass(S))this._isSliding=!1;else if(!this._triggerSlideEvent(u,a).isDefaultPrevented()&&l&&u){this._isSliding=!0,c&&this.pause(),this._setActiveIndicatorElement(u),this._activeElement=u;var h=o.default.Event(A,{relatedTarget:u,direction:a,from:r,to:f});if(o.default(this._element).hasClass("slide")){o.default(u).addClass(i),d.reflow(u),o.default(l).addClass(n),o.default(u).addClass(n);var g=d.getTransitionDurationFromElement(l);o.default(l).one(d.TRANSITION_END,(function(){o.default(u).removeClass(n+" "+i).addClass(S),o.default(l).removeClass("active "+i+" "+n),s._isSliding=!1,setTimeout((function(){return o.default(s._element).trigger(h)}),0)})).emulateTransitionEnd(g)}else o.default(l).removeClass(S),o.default(u).addClass(S),this._isSliding=!1,o.default(this._element).trigger(h);c&&this.cycle()}},t._jQueryInterface=function(e){return this.each((function(){var n=o.default(this).data(w),i=r({},k,o.default(this).data());"object"==typeof e&&(i=r({},i,e));var a="string"==typeof e?e:i.slide;if(n||(n=new t(this,i),o.default(this).data(w,n)),"number"==typeof e)n.to(e);else if("string"==typeof a){if("undefined"==typeof n[a])throw new TypeError('No method named "'+a+'"');n[a]()}else i.interval&&i.ride&&(n.pause(),n.cycle())}))},t._dataApiClickHandler=function(e){var n=d.getSelectorFromElement(this);if(n){var i=o.default(n)[0];if(i&&o.default(i).hasClass("carousel")){var a=r({},o.default(i).data(),o.default(this).data()),s=this.getAttribute("data-slide-to");s&&(a.interval=!1),t._jQueryInterface.call(o.default(i),a),s&&o.default(i).data(w).to(s),e.preventDefault()}}},l(t,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"Default",get:function(){return k}}]),t}();o.default(document).on("click.bs.carousel.data-api","[data-slide], [data-slide-to]",P._dataApiClickHandler),o.default(window).on("load.bs.carousel.data-api",(function(){for(var t=[].slice.call(document.querySelectorAll('[data-ride="carousel"]')),e=0,n=t.length;e<n;e++){var i=o.default(t[e]);P._jQueryInterface.call(i,i.data())}})),o.default.fn[T]=P._jQueryInterface,o.default.fn[T].Constructor=P,o.default.fn[T].noConflict=function(){return o.default.fn[T]=C,P._jQueryInterface};var L="collapse",R="bs.collapse",x=o.default.fn[L],q="show",F="collapse",Q="collapsing",B="collapsed",H="width",U='[data-toggle="collapse"]',M={toggle:!0,parent:""},W={toggle:"boolean",parent:"(string|element)"},V=function(){function t(t,e){this._isTransitioning=!1,this._element=t,this._config=this._getConfig(e),this._triggerArray=[].slice.call(document.querySelectorAll('[data-toggle="collapse"][href="#'+t.id+'"],[data-toggle="collapse"][data-target="#'+t.id+'"]'));for(var n=[].slice.call(document.querySelectorAll(U)),i=0,o=n.length;i<o;i++){var a=n[i],s=d.getSelectorFromElement(a),l=[].slice.call(document.querySelectorAll(s)).filter((function(e){return e===t}));null!==s&&l.length>0&&(this._selector=s,this._triggerArray.push(a))}this._parent=this._config.parent?this._getParent():null,this._config.parent||this._addAriaAndCollapsedClass(this._element,this._triggerArray),this._config.toggle&&this.toggle()}var e=t.prototype;return e.toggle=function(){o.default(this._element).hasClass(q)?this.hide():this.show()},e.show=function(){var 
e,n,i=this;if(!(this._isTransitioning||o.default(this._element).hasClass(q)||(this._parent&&0===(e=[].slice.call(this._parent.querySelectorAll(".show, .collapsing")).filter((function(t){return"string"==typeof i._config.parent?t.getAttribute("data-parent")===i._config.parent:t.classList.contains(F)}))).length&&(e=null),e&&(n=o.default(e).not(this._selector).data(R))&&n._isTransitioning))){var a=o.default.Event("show.bs.collapse");if(o.default(this._element).trigger(a),!a.isDefaultPrevented()){e&&(t._jQueryInterface.call(o.default(e).not(this._selector),"hide"),n||o.default(e).data(R,null));var s=this._getDimension();o.default(this._element).removeClass(F).addClass(Q),this._element.style[s]=0,this._triggerArray.length&&o.default(this._triggerArray).removeClass(B).attr("aria-expanded",!0),this.setTransitioning(!0);var l="scroll"+(s[0].toUpperCase()+s.slice(1)),r=d.getTransitionDurationFromElement(this._element);o.default(this._element).one(d.TRANSITION_END,(function(){o.default(i._element).removeClass(Q).addClass("collapse show"),i._element.style[s]="",i.setTransitioning(!1),o.default(i._element).trigger("shown.bs.collapse")})).emulateTransitionEnd(r),this._element.style[s]=this._element[l]+"px"}}},e.hide=function(){var t=this;if(!this._isTransitioning&&o.default(this._element).hasClass(q)){var e=o.default.Event("hide.bs.collapse");if(o.default(this._element).trigger(e),!e.isDefaultPrevented()){var n=this._getDimension();this._element.style[n]=this._element.getBoundingClientRect()[n]+"px",d.reflow(this._element),o.default(this._element).addClass(Q).removeClass("collapse show");var i=this._triggerArray.length;if(i>0)for(var a=0;a<i;a++){var s=this._triggerArray[a],l=d.getSelectorFromElement(s);null!==l&&(o.default([].slice.call(document.querySelectorAll(l))).hasClass(q)||o.default(s).addClass(B).attr("aria-expanded",!1))}this.setTransitioning(!0),this._element.style[n]="";var r=d.getTransitionDurationFromElement(this._element);o.default(this._element).one(d.TRANSITION_END,(function(){t.setTransitioning(!1),o.default(t._element).removeClass(Q).addClass(F).trigger("hidden.bs.collapse")})).emulateTransitionEnd(r)}}},e.setTransitioning=function(t){this._isTransitioning=t},e.dispose=function(){o.default.removeData(this._element,R),this._config=null,this._parent=null,this._element=null,this._triggerArray=null,this._isTransitioning=null},e._getConfig=function(t){return(t=r({},M,t)).toggle=Boolean(t.toggle),d.typeCheckConfig(L,t,W),t},e._getDimension=function(){return o.default(this._element).hasClass(H)?H:"height"},e._getParent=function(){var e,n=this;d.isElement(this._config.parent)?(e=this._config.parent,"undefined"!=typeof this._config.parent.jquery&&(e=this._config.parent[0])):e=document.querySelector(this._config.parent);var i='[data-toggle="collapse"][data-parent="'+this._config.parent+'"]',a=[].slice.call(e.querySelectorAll(i));return o.default(a).each((function(e,i){n._addAriaAndCollapsedClass(t._getTargetFromElement(i),[i])})),e},e._addAriaAndCollapsedClass=function(t,e){var n=o.default(t).hasClass(q);e.length&&o.default(e).toggleClass(B,!n).attr("aria-expanded",n)},t._getTargetFromElement=function(t){var e=d.getSelectorFromElement(t);return e?document.querySelector(e):null},t._jQueryInterface=function(e){return this.each((function(){var n=o.default(this),i=n.data(R),a=r({},M,n.data(),"object"==typeof e&&e?e:{});if(!i&&a.toggle&&"string"==typeof e&&/show|hide/.test(e)&&(a.toggle=!1),i||(i=new t(this,a),n.data(R,i)),"string"==typeof e){if("undefined"==typeof i[e])throw new TypeError('No 
method named "'+e+'"');i[e]()}}))},l(t,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"Default",get:function(){return M}}]),t}();o.default(document).on("click.bs.collapse.data-api",U,(function(t){"A"===t.currentTarget.tagName&&t.preventDefault();var e=o.default(this),n=d.getSelectorFromElement(this),i=[].slice.call(document.querySelectorAll(n));o.default(i).each((function(){var t=o.default(this),n=t.data(R)?"toggle":e.data();V._jQueryInterface.call(t,n)}))})),o.default.fn[L]=V._jQueryInterface,o.default.fn[L].Constructor=V,o.default.fn[L].noConflict=function(){return o.default.fn[L]=x,V._jQueryInterface};var z="dropdown",K="bs.dropdown",X=o.default.fn[z],Y=new RegExp("38|40|27"),$="disabled",J="show",G="dropdown-menu-right",Z="hide.bs.dropdown",tt="hidden.bs.dropdown",et="click.bs.dropdown.data-api",nt="keydown.bs.dropdown.data-api",it='[data-toggle="dropdown"]',ot=".dropdown-menu",at={offset:0,flip:!0,boundary:"scrollParent",reference:"toggle",display:"dynamic",popperConfig:null},st={offset:"(number|string|function)",flip:"boolean",boundary:"(string|element)",reference:"(string|element)",display:"string",popperConfig:"(null|object)"},lt=function(){function t(t,e){this._element=t,this._popper=null,this._config=this._getConfig(e),this._menu=this._getMenuElement(),this._inNavbar=this._detectNavbar(),this._addEventListeners()}var e=t.prototype;return e.toggle=function(){if(!this._element.disabled&&!o.default(this._element).hasClass($)){var e=o.default(this._menu).hasClass(J);t._clearMenus(),e||this.show(!0)}},e.show=function(e){if(void 0===e&&(e=!1),!(this._element.disabled||o.default(this._element).hasClass($)||o.default(this._menu).hasClass(J))){var n={relatedTarget:this._element},i=o.default.Event("show.bs.dropdown",n),s=t._getParentFromElement(this._element);if(o.default(s).trigger(i),!i.isDefaultPrevented()){if(!this._inNavbar&&e){if("undefined"==typeof a.default)throw new TypeError("Bootstrap's dropdowns require Popper (https://popper.js.org)");var l=this._element;"parent"===this._config.reference?l=s:d.isElement(this._config.reference)&&(l=this._config.reference,"undefined"!=typeof this._config.reference.jquery&&(l=this._config.reference[0])),"scrollParent"!==this._config.boundary&&o.default(s).addClass("position-static"),this._popper=new a.default(l,this._menu,this._getPopperConfig())}"ontouchstart"in document.documentElement&&0===o.default(s).closest(".navbar-nav").length&&o.default(document.body).children().on("mouseover",null,o.default.noop),this._element.focus(),this._element.setAttribute("aria-expanded",!0),o.default(this._menu).toggleClass(J),o.default(s).toggleClass(J).trigger(o.default.Event("shown.bs.dropdown",n))}}},e.hide=function(){if(!this._element.disabled&&!o.default(this._element).hasClass($)&&o.default(this._menu).hasClass(J)){var e={relatedTarget:this._element},n=o.default.Event(Z,e),i=t._getParentFromElement(this._element);o.default(i).trigger(n),n.isDefaultPrevented()||(this._popper&&this._popper.destroy(),o.default(this._menu).toggleClass(J),o.default(i).toggleClass(J).trigger(o.default.Event(tt,e)))}},e.dispose=function(){o.default.removeData(this._element,K),o.default(this._element).off(".bs.dropdown"),this._element=null,this._menu=null,null!==this._popper&&(this._popper.destroy(),this._popper=null)},e.update=function(){this._inNavbar=this._detectNavbar(),null!==this._popper&&this._popper.scheduleUpdate()},e._addEventListeners=function(){var 
t=this;o.default(this._element).on("click.bs.dropdown",(function(e){e.preventDefault(),e.stopPropagation(),t.toggle()}))},e._getConfig=function(t){return t=r({},this.constructor.Default,o.default(this._element).data(),t),d.typeCheckConfig(z,t,this.constructor.DefaultType),t},e._getMenuElement=function(){if(!this._menu){var e=t._getParentFromElement(this._element);e&&(this._menu=e.querySelector(ot))}return this._menu},e._getPlacement=function(){var t=o.default(this._element.parentNode),e="bottom-start";return t.hasClass("dropup")?e=o.default(this._menu).hasClass(G)?"top-end":"top-start":t.hasClass("dropright")?e="right-start":t.hasClass("dropleft")?e="left-start":o.default(this._menu).hasClass(G)&&(e="bottom-end"),e},e._detectNavbar=function(){return o.default(this._element).closest(".navbar").length>0},e._getOffset=function(){var t=this,e={};return"function"==typeof this._config.offset?e.fn=function(e){return e.offsets=r({},e.offsets,t._config.offset(e.offsets,t._element)),e}:e.offset=this._config.offset,e},e._getPopperConfig=function(){var t={placement:this._getPlacement(),modifiers:{offset:this._getOffset(),flip:{enabled:this._config.flip},preventOverflow:{boundariesElement:this._config.boundary}}};return"static"===this._config.display&&(t.modifiers.applyStyle={enabled:!1}),r({},t,this._config.popperConfig)},t._jQueryInterface=function(e){return this.each((function(){var n=o.default(this).data(K);if(n||(n=new t(this,"object"==typeof e?e:null),o.default(this).data(K,n)),"string"==typeof e){if("undefined"==typeof n[e])throw new TypeError('No method named "'+e+'"');n[e]()}}))},t._clearMenus=function(e){if(!e||3!==e.which&&("keyup"!==e.type||9===e.which))for(var n=[].slice.call(document.querySelectorAll(it)),i=0,a=n.length;i<a;i++){var s=t._getParentFromElement(n[i]),l=o.default(n[i]).data(K),r={relatedTarget:n[i]};if(e&&"click"===e.type&&(r.clickEvent=e),l){var u=l._menu;if(o.default(s).hasClass(J)&&!(e&&("click"===e.type&&/input|textarea/i.test(e.target.tagName)||"keyup"===e.type&&9===e.which)&&o.default.contains(s,e.target))){var f=o.default.Event(Z,r);o.default(s).trigger(f),f.isDefaultPrevented()||("ontouchstart"in document.documentElement&&o.default(document.body).children().off("mouseover",null,o.default.noop),n[i].setAttribute("aria-expanded","false"),l._popper&&l._popper.destroy(),o.default(u).removeClass(J),o.default(s).removeClass(J).trigger(o.default.Event(tt,r)))}}}},t._getParentFromElement=function(t){var e,n=d.getSelectorFromElement(t);return n&&(e=document.querySelector(n)),e||t.parentNode},t._dataApiKeydownHandler=function(e){if(!(/input|textarea/i.test(e.target.tagName)?32===e.which||27!==e.which&&(40!==e.which&&38!==e.which||o.default(e.target).closest(ot).length):!Y.test(e.which))&&!this.disabled&&!o.default(this).hasClass($)){var n=t._getParentFromElement(this),i=o.default(n).hasClass(J);if(i||27!==e.which){if(e.preventDefault(),e.stopPropagation(),!i||27===e.which||32===e.which)return 27===e.which&&o.default(n.querySelector(it)).trigger("focus"),void o.default(this).trigger("click");var a=[].slice.call(n.querySelectorAll(".dropdown-menu .dropdown-item:not(.disabled):not(:disabled)")).filter((function(t){return o.default(t).is(":visible")}));if(0!==a.length){var s=a.indexOf(e.target);38===e.which&&s>0&&s--,40===e.which&&s<a.length-1&&s++,s<0&&(s=0),a[s].focus()}}}},l(t,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"Default",get:function(){return at}},{key:"DefaultType",get:function(){return 
st}}]),t}();o.default(document).on(nt,it,lt._dataApiKeydownHandler).on(nt,ot,lt._dataApiKeydownHandler).on(et+" keyup.bs.dropdown.data-api",lt._clearMenus).on(et,it,(function(t){t.preventDefault(),t.stopPropagation(),lt._jQueryInterface.call(o.default(this),"toggle")})).on(et,".dropdown form",(function(t){t.stopPropagation()})),o.default.fn[z]=lt._jQueryInterface,o.default.fn[z].Constructor=lt,o.default.fn[z].noConflict=function(){return o.default.fn[z]=X,lt._jQueryInterface};var rt="bs.modal",ut=o.default.fn.modal,ft="modal-open",dt="fade",ct="show",ht="modal-static",gt="hidden.bs.modal",mt="show.bs.modal",pt="focusin.bs.modal",_t="resize.bs.modal",vt="click.dismiss.bs.modal",yt="keydown.dismiss.bs.modal",bt="mousedown.dismiss.bs.modal",Et=".fixed-top, .fixed-bottom, .is-fixed, .sticky-top",Tt={backdrop:!0,keyboard:!0,focus:!0,show:!0},wt={backdrop:"(boolean|string)",keyboard:"boolean",focus:"boolean",show:"boolean"},Ct=function(){function t(t,e){this._config=this._getConfig(e),this._element=t,this._dialog=t.querySelector(".modal-dialog"),this._backdrop=null,this._isShown=!1,this._isBodyOverflowing=!1,this._ignoreBackdropClick=!1,this._isTransitioning=!1,this._scrollbarWidth=0}var e=t.prototype;return e.toggle=function(t){return this._isShown?this.hide():this.show(t)},e.show=function(t){var e=this;if(!this._isShown&&!this._isTransitioning){var n=o.default.Event(mt,{relatedTarget:t});o.default(this._element).trigger(n),n.isDefaultPrevented()||(this._isShown=!0,o.default(this._element).hasClass(dt)&&(this._isTransitioning=!0),this._checkScrollbar(),this._setScrollbar(),this._adjustDialog(),this._setEscapeEvent(),this._setResizeEvent(),o.default(this._element).on(vt,'[data-dismiss="modal"]',(function(t){return e.hide(t)})),o.default(this._dialog).on(bt,(function(){o.default(e._element).one("mouseup.dismiss.bs.modal",(function(t){o.default(t.target).is(e._element)&&(e._ignoreBackdropClick=!0)}))})),this._showBackdrop((function(){return e._showElement(t)})))}},e.hide=function(t){var e=this;if(t&&t.preventDefault(),this._isShown&&!this._isTransitioning){var n=o.default.Event("hide.bs.modal");if(o.default(this._element).trigger(n),this._isShown&&!n.isDefaultPrevented()){this._isShown=!1;var i=o.default(this._element).hasClass(dt);if(i&&(this._isTransitioning=!0),this._setEscapeEvent(),this._setResizeEvent(),o.default(document).off(pt),o.default(this._element).removeClass(ct),o.default(this._element).off(vt),o.default(this._dialog).off(bt),i){var a=d.getTransitionDurationFromElement(this._element);o.default(this._element).one(d.TRANSITION_END,(function(t){return e._hideModal(t)})).emulateTransitionEnd(a)}else this._hideModal()}}},e.dispose=function(){[window,this._element,this._dialog].forEach((function(t){return o.default(t).off(".bs.modal")})),o.default(document).off(pt),o.default.removeData(this._element,rt),this._config=null,this._element=null,this._dialog=null,this._backdrop=null,this._isShown=null,this._isBodyOverflowing=null,this._ignoreBackdropClick=null,this._isTransitioning=null,this._scrollbarWidth=null},e.handleUpdate=function(){this._adjustDialog()},e._getConfig=function(t){return t=r({},Tt,t),d.typeCheckConfig("modal",t,wt),t},e._triggerBackdropTransition=function(){var t=this,e=o.default.Event("hidePrevented.bs.modal");if(o.default(this._element).trigger(e),!e.isDefaultPrevented()){var n=this._element.scrollHeight>document.documentElement.clientHeight;n||(this._element.style.overflowY="hidden"),this._element.classList.add(ht);var 
i=d.getTransitionDurationFromElement(this._dialog);o.default(this._element).off(d.TRANSITION_END),o.default(this._element).one(d.TRANSITION_END,(function(){t._element.classList.remove(ht),n||o.default(t._element).one(d.TRANSITION_END,(function(){t._element.style.overflowY=""})).emulateTransitionEnd(t._element,i)})).emulateTransitionEnd(i),this._element.focus()}},e._showElement=function(t){var e=this,n=o.default(this._element).hasClass(dt),i=this._dialog?this._dialog.querySelector(".modal-body"):null;this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE||document.body.appendChild(this._element),this._element.style.display="block",this._element.removeAttribute("aria-hidden"),this._element.setAttribute("aria-modal",!0),this._element.setAttribute("role","dialog"),o.default(this._dialog).hasClass("modal-dialog-scrollable")&&i?i.scrollTop=0:this._element.scrollTop=0,n&&d.reflow(this._element),o.default(this._element).addClass(ct),this._config.focus&&this._enforceFocus();var a=o.default.Event("shown.bs.modal",{relatedTarget:t}),s=function(){e._config.focus&&e._element.focus(),e._isTransitioning=!1,o.default(e._element).trigger(a)};if(n){var l=d.getTransitionDurationFromElement(this._dialog);o.default(this._dialog).one(d.TRANSITION_END,s).emulateTransitionEnd(l)}else s()},e._enforceFocus=function(){var t=this;o.default(document).off(pt).on(pt,(function(e){document!==e.target&&t._element!==e.target&&0===o.default(t._element).has(e.target).length&&t._element.focus()}))},e._setEscapeEvent=function(){var t=this;this._isShown?o.default(this._element).on(yt,(function(e){t._config.keyboard&&27===e.which?(e.preventDefault(),t.hide()):t._config.keyboard||27!==e.which||t._triggerBackdropTransition()})):this._isShown||o.default(this._element).off(yt)},e._setResizeEvent=function(){var t=this;this._isShown?o.default(window).on(_t,(function(e){return t.handleUpdate(e)})):o.default(window).off(_t)},e._hideModal=function(){var t=this;this._element.style.display="none",this._element.setAttribute("aria-hidden",!0),this._element.removeAttribute("aria-modal"),this._element.removeAttribute("role"),this._isTransitioning=!1,this._showBackdrop((function(){o.default(document.body).removeClass(ft),t._resetAdjustments(),t._resetScrollbar(),o.default(t._element).trigger(gt)}))},e._removeBackdrop=function(){this._backdrop&&(o.default(this._backdrop).remove(),this._backdrop=null)},e._showBackdrop=function(t){var e=this,n=o.default(this._element).hasClass(dt)?dt:"";if(this._isShown&&this._config.backdrop){if(this._backdrop=document.createElement("div"),this._backdrop.className="modal-backdrop",n&&this._backdrop.classList.add(n),o.default(this._backdrop).appendTo(document.body),o.default(this._element).on(vt,(function(t){e._ignoreBackdropClick?e._ignoreBackdropClick=!1:t.target===t.currentTarget&&("static"===e._config.backdrop?e._triggerBackdropTransition():e.hide())})),n&&d.reflow(this._backdrop),o.default(this._backdrop).addClass(ct),!t)return;if(!n)return void t();var i=d.getTransitionDurationFromElement(this._backdrop);o.default(this._backdrop).one(d.TRANSITION_END,t).emulateTransitionEnd(i)}else if(!this._isShown&&this._backdrop){o.default(this._backdrop).removeClass(ct);var a=function(){e._removeBackdrop(),t&&t()};if(o.default(this._element).hasClass(dt)){var s=d.getTransitionDurationFromElement(this._backdrop);o.default(this._backdrop).one(d.TRANSITION_END,a).emulateTransitionEnd(s)}else a()}else t&&t()},e._adjustDialog=function(){var 
t=this._element.scrollHeight>document.documentElement.clientHeight;!this._isBodyOverflowing&&t&&(this._element.style.paddingLeft=this._scrollbarWidth+"px"),this._isBodyOverflowing&&!t&&(this._element.style.paddingRight=this._scrollbarWidth+"px")},e._resetAdjustments=function(){this._element.style.paddingLeft="",this._element.style.paddingRight=""},e._checkScrollbar=function(){var t=document.body.getBoundingClientRect();this._isBodyOverflowing=Math.round(t.left+t.right)<window.innerWidth,this._scrollbarWidth=this._getScrollbarWidth()},e._setScrollbar=function(){var t=this;if(this._isBodyOverflowing){var e=[].slice.call(document.querySelectorAll(Et)),n=[].slice.call(document.querySelectorAll(".sticky-top"));o.default(e).each((function(e,n){var i=n.style.paddingRight,a=o.default(n).css("padding-right");o.default(n).data("padding-right",i).css("padding-right",parseFloat(a)+t._scrollbarWidth+"px")})),o.default(n).each((function(e,n){var i=n.style.marginRight,a=o.default(n).css("margin-right");o.default(n).data("margin-right",i).css("margin-right",parseFloat(a)-t._scrollbarWidth+"px")}));var i=document.body.style.paddingRight,a=o.default(document.body).css("padding-right");o.default(document.body).data("padding-right",i).css("padding-right",parseFloat(a)+this._scrollbarWidth+"px")}o.default(document.body).addClass(ft)},e._resetScrollbar=function(){var t=[].slice.call(document.querySelectorAll(Et));o.default(t).each((function(t,e){var n=o.default(e).data("padding-right");o.default(e).removeData("padding-right"),e.style.paddingRight=n||""}));var e=[].slice.call(document.querySelectorAll(".sticky-top"));o.default(e).each((function(t,e){var n=o.default(e).data("margin-right");"undefined"!=typeof n&&o.default(e).css("margin-right",n).removeData("margin-right")}));var n=o.default(document.body).data("padding-right");o.default(document.body).removeData("padding-right"),document.body.style.paddingRight=n||""},e._getScrollbarWidth=function(){var t=document.createElement("div");t.className="modal-scrollbar-measure",document.body.appendChild(t);var e=t.getBoundingClientRect().width-t.clientWidth;return document.body.removeChild(t),e},t._jQueryInterface=function(e,n){return this.each((function(){var i=o.default(this).data(rt),a=r({},Tt,o.default(this).data(),"object"==typeof e&&e?e:{});if(i||(i=new t(this,a),o.default(this).data(rt,i)),"string"==typeof e){if("undefined"==typeof i[e])throw new TypeError('No method named "'+e+'"');i[e](n)}else a.show&&i.show(n)}))},l(t,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"Default",get:function(){return Tt}}]),t}();o.default(document).on("click.bs.modal.data-api",'[data-toggle="modal"]',(function(t){var e,n=this,i=d.getSelectorFromElement(this);i&&(e=document.querySelector(i));var a=o.default(e).data(rt)?"toggle":r({},o.default(e).data(),o.default(this).data());"A"!==this.tagName&&"AREA"!==this.tagName||t.preventDefault();var s=o.default(e).one(mt,(function(t){t.isDefaultPrevented()||s.one(gt,(function(){o.default(n).is(":visible")&&n.focus()}))}));Ct._jQueryInterface.call(o.default(e),a,this)})),o.default.fn.modal=Ct._jQueryInterface,o.default.fn.modal.Constructor=Ct,o.default.fn.modal.noConflict=function(){return o.default.fn.modal=ut,Ct._jQueryInterface};var St=["background","cite","href","itemtype","longdesc","poster","src","xlink:href"],Nt=/^(?:(?:https?|mailto|ftp|tel|file|sms):|[^#&/:?]*(?:[#/?]|$))/i,Dt=/^data:(?:image\/(?:bmp|gif|jpeg|jpg|png|tiff|webp)|video\/(?:mpeg|mp4|ogg|webm)|audio\/(?:mp3|oga|ogg|opus));base64,[\d+/a-z]+=*$/i;function 
At(t,e,n){if(0===t.length)return t;if(n&&"function"==typeof n)return n(t);for(var i=(new window.DOMParser).parseFromString(t,"text/html"),o=Object.keys(e),a=[].slice.call(i.body.querySelectorAll("*")),s=function(t,n){var i=a[t],s=i.nodeName.toLowerCase();if(-1===o.indexOf(i.nodeName.toLowerCase()))return i.parentNode.removeChild(i),"continue";var l=[].slice.call(i.attributes),r=[].concat(e["*"]||[],e[s]||[]);l.forEach((function(t){(function(t,e){var n=t.nodeName.toLowerCase();if(-1!==e.indexOf(n))return-1===St.indexOf(n)||Boolean(Nt.test(t.nodeValue)||Dt.test(t.nodeValue));for(var i=e.filter((function(t){return t instanceof RegExp})),o=0,a=i.length;o<a;o++)if(i[o].test(n))return!0;return!1})(t,r)||i.removeAttribute(t.nodeName)}))},l=0,r=a.length;l<r;l++)s(l);return i.body.innerHTML}var It="tooltip",kt="bs.tooltip",Ot=o.default.fn.tooltip,jt=new RegExp("(^|\\s)bs-tooltip\\S+","g"),Pt=["sanitize","whiteList","sanitizeFn"],Lt="fade",Rt="show",xt="show",qt="out",Ft="hover",Qt="focus",Bt={AUTO:"auto",TOP:"top",RIGHT:"right",BOTTOM:"bottom",LEFT:"left"},Ht={animation:!0,template:'<div class="tooltip" role="tooltip"><div class="arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,selector:!1,placement:"top",offset:0,container:!1,fallbackPlacement:"flip",boundary:"scrollParent",customClass:"",sanitize:!0,sanitizeFn:null,whiteList:{"*":["class","dir","id","lang","role",/^aria-[\w-]*$/i],a:["target","href","title","rel"],area:[],b:[],br:[],col:[],code:[],div:[],em:[],hr:[],h1:[],h2:[],h3:[],h4:[],h5:[],h6:[],i:[],img:["src","srcset","alt","title","width","height"],li:[],ol:[],p:[],pre:[],s:[],small:[],span:[],sub:[],sup:[],strong:[],u:[],ul:[]},popperConfig:null},Ut={animation:"boolean",template:"string",title:"(string|element|function)",trigger:"string",delay:"(number|object)",html:"boolean",selector:"(string|boolean)",placement:"(string|function)",offset:"(number|string|function)",container:"(string|element|boolean)",fallbackPlacement:"(string|array)",boundary:"(string|element)",customClass:"(string|function)",sanitize:"boolean",sanitizeFn:"(null|function)",whiteList:"object",popperConfig:"(null|object)"},Mt={HIDE:"hide.bs.tooltip",HIDDEN:"hidden.bs.tooltip",SHOW:"show.bs.tooltip",SHOWN:"shown.bs.tooltip",INSERTED:"inserted.bs.tooltip",CLICK:"click.bs.tooltip",FOCUSIN:"focusin.bs.tooltip",FOCUSOUT:"focusout.bs.tooltip",MOUSEENTER:"mouseenter.bs.tooltip",MOUSELEAVE:"mouseleave.bs.tooltip"},Wt=function(){function t(t,e){if("undefined"==typeof a.default)throw new TypeError("Bootstrap's tooltips require Popper (https://popper.js.org)");this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._popper=null,this.element=t,this.config=this._getConfig(e),this.tip=null,this._setListeners()}var e=t.prototype;return e.enable=function(){this._isEnabled=!0},e.disable=function(){this._isEnabled=!1},e.toggleEnabled=function(){this._isEnabled=!this._isEnabled},e.toggle=function(t){if(this._isEnabled)if(t){var e=this.constructor.DATA_KEY,n=o.default(t.currentTarget).data(e);n||(n=new this.constructor(t.currentTarget,this._getDelegateConfig()),o.default(t.currentTarget).data(e,n)),n._activeTrigger.click=!n._activeTrigger.click,n._isWithActiveTrigger()?n._enter(null,n):n._leave(null,n)}else{if(o.default(this.getTipElement()).hasClass(Rt))return void 
this._leave(null,this);this._enter(null,this)}},e.dispose=function(){clearTimeout(this._timeout),o.default.removeData(this.element,this.constructor.DATA_KEY),o.default(this.element).off(this.constructor.EVENT_KEY),o.default(this.element).closest(".modal").off("hide.bs.modal",this._hideModalHandler),this.tip&&o.default(this.tip).remove(),this._isEnabled=null,this._timeout=null,this._hoverState=null,this._activeTrigger=null,this._popper&&this._popper.destroy(),this._popper=null,this.element=null,this.config=null,this.tip=null},e.show=function(){var t=this;if("none"===o.default(this.element).css("display"))throw new Error("Please use show on visible elements");var e=o.default.Event(this.constructor.Event.SHOW);if(this.isWithContent()&&this._isEnabled){o.default(this.element).trigger(e);var n=d.findShadowRoot(this.element),i=o.default.contains(null!==n?n:this.element.ownerDocument.documentElement,this.element);if(e.isDefaultPrevented()||!i)return;var s=this.getTipElement(),l=d.getUID(this.constructor.NAME);s.setAttribute("id",l),this.element.setAttribute("aria-describedby",l),this.setContent(),this.config.animation&&o.default(s).addClass(Lt);var r="function"==typeof this.config.placement?this.config.placement.call(this,s,this.element):this.config.placement,u=this._getAttachment(r);this.addAttachmentClass(u);var f=this._getContainer();o.default(s).data(this.constructor.DATA_KEY,this),o.default.contains(this.element.ownerDocument.documentElement,this.tip)||o.default(s).appendTo(f),o.default(this.element).trigger(this.constructor.Event.INSERTED),this._popper=new a.default(this.element,s,this._getPopperConfig(u)),o.default(s).addClass(Rt),o.default(s).addClass(this.config.customClass),"ontouchstart"in document.documentElement&&o.default(document.body).children().on("mouseover",null,o.default.noop);var c=function(){t.config.animation&&t._fixTransition();var e=t._hoverState;t._hoverState=null,o.default(t.element).trigger(t.constructor.Event.SHOWN),e===qt&&t._leave(null,t)};if(o.default(this.tip).hasClass(Lt)){var h=d.getTransitionDurationFromElement(this.tip);o.default(this.tip).one(d.TRANSITION_END,c).emulateTransitionEnd(h)}else c()}},e.hide=function(t){var e=this,n=this.getTipElement(),i=o.default.Event(this.constructor.Event.HIDE),a=function(){e._hoverState!==xt&&n.parentNode&&n.parentNode.removeChild(n),e._cleanTipClass(),e.element.removeAttribute("aria-describedby"),o.default(e.element).trigger(e.constructor.Event.HIDDEN),null!==e._popper&&e._popper.destroy(),t&&t()};if(o.default(this.element).trigger(i),!i.isDefaultPrevented()){if(o.default(n).removeClass(Rt),"ontouchstart"in document.documentElement&&o.default(document.body).children().off("mouseover",null,o.default.noop),this._activeTrigger.click=!1,this._activeTrigger.focus=!1,this._activeTrigger.hover=!1,o.default(this.tip).hasClass(Lt)){var s=d.getTransitionDurationFromElement(n);o.default(n).one(d.TRANSITION_END,a).emulateTransitionEnd(s)}else a();this._hoverState=""}},e.update=function(){null!==this._popper&&this._popper.scheduleUpdate()},e.isWithContent=function(){return Boolean(this.getTitle())},e.addAttachmentClass=function(t){o.default(this.getTipElement()).addClass("bs-tooltip-"+t)},e.getTipElement=function(){return this.tip=this.tip||o.default(this.config.template)[0],this.tip},e.setContent=function(){var t=this.getTipElement();this.setElementContent(o.default(t.querySelectorAll(".tooltip-inner")),this.getTitle()),o.default(t).removeClass("fade show")},e.setElementContent=function(t,e){"object"!=typeof 
e||!e.nodeType&&!e.jquery?this.config.html?(this.config.sanitize&&(e=At(e,this.config.whiteList,this.config.sanitizeFn)),t.html(e)):t.text(e):this.config.html?o.default(e).parent().is(t)||t.empty().append(e):t.text(o.default(e).text())},e.getTitle=function(){var t=this.element.getAttribute("data-original-title");return t||(t="function"==typeof this.config.title?this.config.title.call(this.element):this.config.title),t},e._getPopperConfig=function(t){var e=this;return r({},{placement:t,modifiers:{offset:this._getOffset(),flip:{behavior:this.config.fallbackPlacement},arrow:{element:".arrow"},preventOverflow:{boundariesElement:this.config.boundary}},onCreate:function(t){t.originalPlacement!==t.placement&&e._handlePopperPlacementChange(t)},onUpdate:function(t){return e._handlePopperPlacementChange(t)}},this.config.popperConfig)},e._getOffset=function(){var t=this,e={};return"function"==typeof this.config.offset?e.fn=function(e){return e.offsets=r({},e.offsets,t.config.offset(e.offsets,t.element)),e}:e.offset=this.config.offset,e},e._getContainer=function(){return!1===this.config.container?document.body:d.isElement(this.config.container)?o.default(this.config.container):o.default(document).find(this.config.container)},e._getAttachment=function(t){return Bt[t.toUpperCase()]},e._setListeners=function(){var t=this;this.config.trigger.split(" ").forEach((function(e){if("click"===e)o.default(t.element).on(t.constructor.Event.CLICK,t.config.selector,(function(e){return t.toggle(e)}));else if("manual"!==e){var n=e===Ft?t.constructor.Event.MOUSEENTER:t.constructor.Event.FOCUSIN,i=e===Ft?t.constructor.Event.MOUSELEAVE:t.constructor.Event.FOCUSOUT;o.default(t.element).on(n,t.config.selector,(function(e){return t._enter(e)})).on(i,t.config.selector,(function(e){return t._leave(e)}))}})),this._hideModalHandler=function(){t.element&&t.hide()},o.default(this.element).closest(".modal").on("hide.bs.modal",this._hideModalHandler),this.config.selector?this.config=r({},this.config,{trigger:"manual",selector:""}):this._fixTitle()},e._fixTitle=function(){var t=typeof this.element.getAttribute("data-original-title");(this.element.getAttribute("title")||"string"!==t)&&(this.element.setAttribute("data-original-title",this.element.getAttribute("title")||""),this.element.setAttribute("title",""))},e._enter=function(t,e){var n=this.constructor.DATA_KEY;(e=e||o.default(t.currentTarget).data(n))||(e=new this.constructor(t.currentTarget,this._getDelegateConfig()),o.default(t.currentTarget).data(n,e)),t&&(e._activeTrigger["focusin"===t.type?Qt:Ft]=!0),o.default(e.getTipElement()).hasClass(Rt)||e._hoverState===xt?e._hoverState=xt:(clearTimeout(e._timeout),e._hoverState=xt,e.config.delay&&e.config.delay.show?e._timeout=setTimeout((function(){e._hoverState===xt&&e.show()}),e.config.delay.show):e.show())},e._leave=function(t,e){var n=this.constructor.DATA_KEY;(e=e||o.default(t.currentTarget).data(n))||(e=new this.constructor(t.currentTarget,this._getDelegateConfig()),o.default(t.currentTarget).data(n,e)),t&&(e._activeTrigger["focusout"===t.type?Qt:Ft]=!1),e._isWithActiveTrigger()||(clearTimeout(e._timeout),e._hoverState=qt,e.config.delay&&e.config.delay.hide?e._timeout=setTimeout((function(){e._hoverState===qt&&e.hide()}),e.config.delay.hide):e.hide())},e._isWithActiveTrigger=function(){for(var t in this._activeTrigger)if(this._activeTrigger[t])return!0;return!1},e._getConfig=function(t){var e=o.default(this.element).data();return Object.keys(e).forEach((function(t){-1!==Pt.indexOf(t)&&delete 
e[t]})),"number"==typeof(t=r({},this.constructor.Default,e,"object"==typeof t&&t?t:{})).delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),d.typeCheckConfig(It,t,this.constructor.DefaultType),t.sanitize&&(t.template=At(t.template,t.whiteList,t.sanitizeFn)),t},e._getDelegateConfig=function(){var t={};if(this.config)for(var e in this.config)this.constructor.Default[e]!==this.config[e]&&(t[e]=this.config[e]);return t},e._cleanTipClass=function(){var t=o.default(this.getTipElement()),e=t.attr("class").match(jt);null!==e&&e.length&&t.removeClass(e.join(""))},e._handlePopperPlacementChange=function(t){this.tip=t.instance.popper,this._cleanTipClass(),this.addAttachmentClass(this._getAttachment(t.placement))},e._fixTransition=function(){var t=this.getTipElement(),e=this.config.animation;null===t.getAttribute("x-placement")&&(o.default(t).removeClass(Lt),this.config.animation=!1,this.hide(),this.show(),this.config.animation=e)},t._jQueryInterface=function(e){return this.each((function(){var n=o.default(this),i=n.data(kt),a="object"==typeof e&&e;if((i||!/dispose|hide/.test(e))&&(i||(i=new t(this,a),n.data(kt,i)),"string"==typeof e)){if("undefined"==typeof i[e])throw new TypeError('No method named "'+e+'"');i[e]()}}))},l(t,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"Default",get:function(){return Ht}},{key:"NAME",get:function(){return It}},{key:"DATA_KEY",get:function(){return kt}},{key:"Event",get:function(){return Mt}},{key:"EVENT_KEY",get:function(){return".bs.tooltip"}},{key:"DefaultType",get:function(){return Ut}}]),t}();o.default.fn.tooltip=Wt._jQueryInterface,o.default.fn.tooltip.Constructor=Wt,o.default.fn.tooltip.noConflict=function(){return o.default.fn.tooltip=Ot,Wt._jQueryInterface};var Vt="bs.popover",zt=o.default.fn.popover,Kt=new RegExp("(^|\\s)bs-popover\\S+","g"),Xt=r({},Wt.Default,{placement:"right",trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-header"></h3><div class="popover-body"></div></div>'}),Yt=r({},Wt.DefaultType,{content:"(string|element|function)"}),$t={HIDE:"hide.bs.popover",HIDDEN:"hidden.bs.popover",SHOW:"show.bs.popover",SHOWN:"shown.bs.popover",INSERTED:"inserted.bs.popover",CLICK:"click.bs.popover",FOCUSIN:"focusin.bs.popover",FOCUSOUT:"focusout.bs.popover",MOUSEENTER:"mouseenter.bs.popover",MOUSELEAVE:"mouseleave.bs.popover"},Jt=function(t){var e,n;function i(){return t.apply(this,arguments)||this}n=t,(e=i).prototype=Object.create(n.prototype),e.prototype.constructor=e,u(e,n);var a=i.prototype;return a.isWithContent=function(){return this.getTitle()||this._getContent()},a.addAttachmentClass=function(t){o.default(this.getTipElement()).addClass("bs-popover-"+t)},a.getTipElement=function(){return this.tip=this.tip||o.default(this.config.template)[0],this.tip},a.setContent=function(){var t=o.default(this.getTipElement());this.setElementContent(t.find(".popover-header"),this.getTitle());var e=this._getContent();"function"==typeof e&&(e=e.call(this.element)),this.setElementContent(t.find(".popover-body"),e),t.removeClass("fade show")},a._getContent=function(){return this.element.getAttribute("data-content")||this.config.content},a._cleanTipClass=function(){var t=o.default(this.getTipElement()),e=t.attr("class").match(Kt);null!==e&&e.length>0&&t.removeClass(e.join(""))},i._jQueryInterface=function(t){return this.each((function(){var e=o.default(this).data(Vt),n="object"==typeof 
t?t:null;if((e||!/dispose|hide/.test(t))&&(e||(e=new i(this,n),o.default(this).data(Vt,e)),"string"==typeof t)){if("undefined"==typeof e[t])throw new TypeError('No method named "'+t+'"');e[t]()}}))},l(i,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"Default",get:function(){return Xt}},{key:"NAME",get:function(){return"popover"}},{key:"DATA_KEY",get:function(){return Vt}},{key:"Event",get:function(){return $t}},{key:"EVENT_KEY",get:function(){return".bs.popover"}},{key:"DefaultType",get:function(){return Yt}}]),i}(Wt);o.default.fn.popover=Jt._jQueryInterface,o.default.fn.popover.Constructor=Jt,o.default.fn.popover.noConflict=function(){return o.default.fn.popover=zt,Jt._jQueryInterface};var Gt="scrollspy",Zt="bs.scrollspy",te=o.default.fn[Gt],ee="active",ne="position",ie=".nav, .list-group",oe={offset:10,method:"auto",target:""},ae={offset:"number",method:"string",target:"(string|element)"},se=function(){function t(t,e){var n=this;this._element=t,this._scrollElement="BODY"===t.tagName?window:t,this._config=this._getConfig(e),this._selector=this._config.target+" .nav-link,"+this._config.target+" .list-group-item,"+this._config.target+" .dropdown-item",this._offsets=[],this._targets=[],this._activeTarget=null,this._scrollHeight=0,o.default(this._scrollElement).on("scroll.bs.scrollspy",(function(t){return n._process(t)})),this.refresh(),this._process()}var e=t.prototype;return e.refresh=function(){var t=this,e=this._scrollElement===this._scrollElement.window?"offset":ne,n="auto"===this._config.method?e:this._config.method,i=n===ne?this._getScrollTop():0;this._offsets=[],this._targets=[],this._scrollHeight=this._getScrollHeight(),[].slice.call(document.querySelectorAll(this._selector)).map((function(t){var e,a=d.getSelectorFromElement(t);if(a&&(e=document.querySelector(a)),e){var s=e.getBoundingClientRect();if(s.width||s.height)return[o.default(e)[n]().top+i,a]}return null})).filter(Boolean).sort((function(t,e){return t[0]-e[0]})).forEach((function(e){t._offsets.push(e[0]),t._targets.push(e[1])}))},e.dispose=function(){o.default.removeData(this._element,Zt),o.default(this._scrollElement).off(".bs.scrollspy"),this._element=null,this._scrollElement=null,this._config=null,this._selector=null,this._offsets=null,this._targets=null,this._activeTarget=null,this._scrollHeight=null},e._getConfig=function(t){if("string"!=typeof(t=r({},oe,"object"==typeof t&&t?t:{})).target&&d.isElement(t.target)){var e=o.default(t.target).attr("id");e||(e=d.getUID(Gt),o.default(t.target).attr("id",e)),t.target="#"+e}return d.typeCheckConfig(Gt,t,ae),t},e._getScrollTop=function(){return this._scrollElement===window?this._scrollElement.pageYOffset:this._scrollElement.scrollTop},e._getScrollHeight=function(){return this._scrollElement.scrollHeight||Math.max(document.body.scrollHeight,document.documentElement.scrollHeight)},e._getOffsetHeight=function(){return this._scrollElement===window?window.innerHeight:this._scrollElement.getBoundingClientRect().height},e._process=function(){var t=this._getScrollTop()+this._config.offset,e=this._getScrollHeight(),n=this._config.offset+e-this._getOffsetHeight();if(this._scrollHeight!==e&&this.refresh(),t>=n){var i=this._targets[this._targets.length-1];this._activeTarget!==i&&this._activate(i)}else{if(this._activeTarget&&t<this._offsets[0]&&this._offsets[0]>0)return this._activeTarget=null,void this._clear();for(var o=this._offsets.length;o--;)this._activeTarget!==this._targets[o]&&t>=this._offsets[o]&&("undefined"==typeof 
this._offsets[o+1]||t<this._offsets[o+1])&&this._activate(this._targets[o])}},e._activate=function(t){this._activeTarget=t,this._clear();var e=this._selector.split(",").map((function(e){return e+'[data-target="'+t+'"],'+e+'[href="'+t+'"]'})),n=o.default([].slice.call(document.querySelectorAll(e.join(","))));n.hasClass("dropdown-item")?(n.closest(".dropdown").find(".dropdown-toggle").addClass(ee),n.addClass(ee)):(n.addClass(ee),n.parents(ie).prev(".nav-link, .list-group-item").addClass(ee),n.parents(ie).prev(".nav-item").children(".nav-link").addClass(ee)),o.default(this._scrollElement).trigger("activate.bs.scrollspy",{relatedTarget:t})},e._clear=function(){[].slice.call(document.querySelectorAll(this._selector)).filter((function(t){return t.classList.contains(ee)})).forEach((function(t){return t.classList.remove(ee)}))},t._jQueryInterface=function(e){return this.each((function(){var n=o.default(this).data(Zt);if(n||(n=new t(this,"object"==typeof e&&e),o.default(this).data(Zt,n)),"string"==typeof e){if("undefined"==typeof n[e])throw new TypeError('No method named "'+e+'"');n[e]()}}))},l(t,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"Default",get:function(){return oe}}]),t}();o.default(window).on("load.bs.scrollspy.data-api",(function(){for(var t=[].slice.call(document.querySelectorAll('[data-spy="scroll"]')),e=t.length;e--;){var n=o.default(t[e]);se._jQueryInterface.call(n,n.data())}})),o.default.fn[Gt]=se._jQueryInterface,o.default.fn[Gt].Constructor=se,o.default.fn[Gt].noConflict=function(){return o.default.fn[Gt]=te,se._jQueryInterface};var le="bs.tab",re=o.default.fn.tab,ue="active",fe="fade",de="show",ce=".active",he="> li > .active",ge=function(){function t(t){this._element=t}var e=t.prototype;return e.show=function(){var t=this;if(!(this._element.parentNode&&this._element.parentNode.nodeType===Node.ELEMENT_NODE&&o.default(this._element).hasClass(ue)||o.default(this._element).hasClass("disabled")||this._element.hasAttribute("disabled"))){var e,n,i=o.default(this._element).closest(".nav, .list-group")[0],a=d.getSelectorFromElement(this._element);if(i){var s="UL"===i.nodeName||"OL"===i.nodeName?he:ce;n=(n=o.default.makeArray(o.default(i).find(s)))[n.length-1]}var l=o.default.Event("hide.bs.tab",{relatedTarget:this._element}),r=o.default.Event("show.bs.tab",{relatedTarget:n});if(n&&o.default(n).trigger(l),o.default(this._element).trigger(r),!r.isDefaultPrevented()&&!l.isDefaultPrevented()){a&&(e=document.querySelector(a)),this._activate(this._element,i);var u=function(){var e=o.default.Event("hidden.bs.tab",{relatedTarget:t._element}),i=o.default.Event("shown.bs.tab",{relatedTarget:n});o.default(n).trigger(e),o.default(t._element).trigger(i)};e?this._activate(e,e.parentNode,u):u()}}},e.dispose=function(){o.default.removeData(this._element,le),this._element=null},e._activate=function(t,e,n){var i=this,a=(!e||"UL"!==e.nodeName&&"OL"!==e.nodeName?o.default(e).children(ce):o.default(e).find(he))[0],s=n&&a&&o.default(a).hasClass(fe),l=function(){return i._transitionComplete(t,a,n)};if(a&&s){var r=d.getTransitionDurationFromElement(a);o.default(a).removeClass(de).one(d.TRANSITION_END,l).emulateTransitionEnd(r)}else l()},e._transitionComplete=function(t,e,n){if(e){o.default(e).removeClass(ue);var i=o.default(e.parentNode).find("> .dropdown-menu 
.active")[0];i&&o.default(i).removeClass(ue),"tab"===e.getAttribute("role")&&e.setAttribute("aria-selected",!1)}o.default(t).addClass(ue),"tab"===t.getAttribute("role")&&t.setAttribute("aria-selected",!0),d.reflow(t),t.classList.contains(fe)&&t.classList.add(de);var a=t.parentNode;if(a&&"LI"===a.nodeName&&(a=a.parentNode),a&&o.default(a).hasClass("dropdown-menu")){var s=o.default(t).closest(".dropdown")[0];if(s){var l=[].slice.call(s.querySelectorAll(".dropdown-toggle"));o.default(l).addClass(ue)}t.setAttribute("aria-expanded",!0)}n&&n()},t._jQueryInterface=function(e){return this.each((function(){var n=o.default(this),i=n.data(le);if(i||(i=new t(this),n.data(le,i)),"string"==typeof e){if("undefined"==typeof i[e])throw new TypeError('No method named "'+e+'"');i[e]()}}))},l(t,null,[{key:"VERSION",get:function(){return"4.6.2"}}]),t}();o.default(document).on("click.bs.tab.data-api",'[data-toggle="tab"], [data-toggle="pill"], [data-toggle="list"]',(function(t){t.preventDefault(),ge._jQueryInterface.call(o.default(this),"show")})),o.default.fn.tab=ge._jQueryInterface,o.default.fn.tab.Constructor=ge,o.default.fn.tab.noConflict=function(){return o.default.fn.tab=re,ge._jQueryInterface};var me="bs.toast",pe=o.default.fn.toast,_e="hide",ve="show",ye="showing",be="click.dismiss.bs.toast",Ee={animation:!0,autohide:!0,delay:500},Te={animation:"boolean",autohide:"boolean",delay:"number"},we=function(){function t(t,e){this._element=t,this._config=this._getConfig(e),this._timeout=null,this._setListeners()}var e=t.prototype;return e.show=function(){var t=this,e=o.default.Event("show.bs.toast");if(o.default(this._element).trigger(e),!e.isDefaultPrevented()){this._clearTimeout(),this._config.animation&&this._element.classList.add("fade");var n=function(){t._element.classList.remove(ye),t._element.classList.add(ve),o.default(t._element).trigger("shown.bs.toast"),t._config.autohide&&(t._timeout=setTimeout((function(){t.hide()}),t._config.delay))};if(this._element.classList.remove(_e),d.reflow(this._element),this._element.classList.add(ye),this._config.animation){var i=d.getTransitionDurationFromElement(this._element);o.default(this._element).one(d.TRANSITION_END,n).emulateTransitionEnd(i)}else n()}},e.hide=function(){if(this._element.classList.contains(ve)){var t=o.default.Event("hide.bs.toast");o.default(this._element).trigger(t),t.isDefaultPrevented()||this._close()}},e.dispose=function(){this._clearTimeout(),this._element.classList.contains(ve)&&this._element.classList.remove(ve),o.default(this._element).off(be),o.default.removeData(this._element,me),this._element=null,this._config=null},e._getConfig=function(t){return t=r({},Ee,o.default(this._element).data(),"object"==typeof t&&t?t:{}),d.typeCheckConfig("toast",t,this.constructor.DefaultType),t},e._setListeners=function(){var t=this;o.default(this._element).on(be,'[data-dismiss="toast"]',(function(){return t.hide()}))},e._close=function(){var t=this,e=function(){t._element.classList.add(_e),o.default(t._element).trigger("hidden.bs.toast")};if(this._element.classList.remove(ve),this._config.animation){var n=d.getTransitionDurationFromElement(this._element);o.default(this._element).one(d.TRANSITION_END,e).emulateTransitionEnd(n)}else e()},e._clearTimeout=function(){clearTimeout(this._timeout),this._timeout=null},t._jQueryInterface=function(e){return this.each((function(){var n=o.default(this),i=n.data(me);if(i||(i=new t(this,"object"==typeof e&&e),n.data(me,i)),"string"==typeof e){if("undefined"==typeof i[e])throw new TypeError('No method named 
"'+e+'"');i[e](this)}}))},l(t,null,[{key:"VERSION",get:function(){return"4.6.2"}},{key:"DefaultType",get:function(){return Te}},{key:"Default",get:function(){return Ee}}]),t}();o.default.fn.toast=we._jQueryInterface,o.default.fn.toast.Constructor=we,o.default.fn.toast.noConflict=function(){return o.default.fn.toast=pe,we._jQueryInterface},t.Alert=g,t.Button=E,t.Carousel=P,t.Collapse=V,t.Dropdown=lt,t.Modal=Ct,t.Popover=Jt,t.Scrollspy=se,t.Tab=ge,t.Toast=we,t.Tooltip=Wt,t.Util=d,Object.defineProperty(t,"__esModule",{value:!0})}));
//# sourceMappingURL=bootstrap.min.js.map
/* package: zensols.zotsite | path: /zensols.zotsite-0.8.1-py3-none-any.whl/zensols/zotsite/resources/site/lib/js/bootstrap.min.js */
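/*
 * Minimal usage sketch for the plugins minified above (Bootstrap 4.6 jQuery
 * interface). The selectors and option values are illustrative assumptions,
 * not part of the bundled file; the option names match the Default objects
 * defined in the code (Ht for tooltips, Ee for toasts).
 */
$(function () {
  // Tooltips and popovers are opt-in and must be initialized explicitly.
  $('[data-toggle="tooltip"]').tooltip({
    placement: 'top',                  // Default.placement above
    delay: { show: 200, hide: 100 },   // "(number|object)" per DefaultType
    sanitize: true                     // titles pass through the whiteList sanitizer (At)
  });
  $('[data-toggle="popover"]').popover({ trigger: 'focus' });

  // Toasts fire show.bs.toast / hidden.bs.toast events around their lifecycle.
  $('.toast').toast({ autohide: true, delay: 500 }).toast('show');
});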
!function(a,b,c,d){"use strict";var e="treeview",f={};f.settings={injectStyle:!0,levels:2,expandIcon:"glyphicon glyphicon-plus",collapseIcon:"glyphicon glyphicon-minus",emptyIcon:"glyphicon",nodeIcon:"",selectedIcon:"",checkedIcon:"glyphicon glyphicon-check",uncheckedIcon:"glyphicon glyphicon-unchecked",color:d,backColor:d,borderColor:d,onhoverColor:"#F5F5F5",selectedColor:"#FFFFFF",selectedBackColor:"#428bca",searchResultColor:"#D9534F",searchResultBackColor:d,enableLinks:!1,highlightSelected:!0,highlightSearchResults:!0,showBorder:!0,showIcon:!0,showCheckbox:!1,showTags:!1,multiSelect:!1,onNodeChecked:d,onNodeCollapsed:d,onNodeDisabled:d,onNodeEnabled:d,onNodeExpanded:d,onNodeSelected:d,onNodeUnchecked:d,onNodeUnselected:d,onSearchComplete:d,onSearchCleared:d},f.options={silent:!1,ignoreChildren:!1},f.searchOptions={ignoreCase:!0,exactMatch:!1,revealResults:!0};var g=function(b,c){return this.$element=a(b),this.elementId=b.id,this.styleId=this.elementId+"-style",this.init(c),{options:this.options,init:a.proxy(this.init,this),remove:a.proxy(this.remove,this),getNode:a.proxy(this.getNode,this),getParent:a.proxy(this.getParent,this),getSiblings:a.proxy(this.getSiblings,this),getSelected:a.proxy(this.getSelected,this),getUnselected:a.proxy(this.getUnselected,this),getExpanded:a.proxy(this.getExpanded,this),getCollapsed:a.proxy(this.getCollapsed,this),getChecked:a.proxy(this.getChecked,this),getUnchecked:a.proxy(this.getUnchecked,this),getDisabled:a.proxy(this.getDisabled,this),getEnabled:a.proxy(this.getEnabled,this),selectNode:a.proxy(this.selectNode,this),unselectNode:a.proxy(this.unselectNode,this),toggleNodeSelected:a.proxy(this.toggleNodeSelected,this),collapseAll:a.proxy(this.collapseAll,this),collapseNode:a.proxy(this.collapseNode,this),expandAll:a.proxy(this.expandAll,this),expandNode:a.proxy(this.expandNode,this),toggleNodeExpanded:a.proxy(this.toggleNodeExpanded,this),revealNode:a.proxy(this.revealNode,this),checkAll:a.proxy(this.checkAll,this),checkNode:a.proxy(this.checkNode,this),uncheckAll:a.proxy(this.uncheckAll,this),uncheckNode:a.proxy(this.uncheckNode,this),toggleNodeChecked:a.proxy(this.toggleNodeChecked,this),disableAll:a.proxy(this.disableAll,this),disableNode:a.proxy(this.disableNode,this),enableAll:a.proxy(this.enableAll,this),enableNode:a.proxy(this.enableNode,this),toggleNodeDisabled:a.proxy(this.toggleNodeDisabled,this),search:a.proxy(this.search,this),clearSearch:a.proxy(this.clearSearch,this)}};g.prototype.init=function(b){this.tree=[],this.nodes=[],b.data&&("string"==typeof b.data&&(b.data=a.parseJSON(b.data)),this.tree=a.extend(!0,[],b.data),delete 
b.data),this.options=a.extend({},f.settings,b),this.destroy(),this.subscribeEvents(),this.setInitialStates({nodes:this.tree},0),this.render()},g.prototype.remove=function(){this.destroy(),a.removeData(this,e),a("#"+this.styleId).remove()},g.prototype.destroy=function(){this.initialized&&(this.$wrapper.remove(),this.$wrapper=null,this.unsubscribeEvents(),this.initialized=!1)},g.prototype.unsubscribeEvents=function(){this.$element.off("click"),this.$element.off("nodeChecked"),this.$element.off("nodeCollapsed"),this.$element.off("nodeDisabled"),this.$element.off("nodeEnabled"),this.$element.off("nodeExpanded"),this.$element.off("nodeSelected"),this.$element.off("nodeUnchecked"),this.$element.off("nodeUnselected"),this.$element.off("searchComplete"),this.$element.off("searchCleared")},g.prototype.subscribeEvents=function(){this.unsubscribeEvents(),this.$element.on("click",a.proxy(this.clickHandler,this)),"function"==typeof this.options.onNodeChecked&&this.$element.on("nodeChecked",this.options.onNodeChecked),"function"==typeof this.options.onNodeCollapsed&&this.$element.on("nodeCollapsed",this.options.onNodeCollapsed),"function"==typeof this.options.onNodeDisabled&&this.$element.on("nodeDisabled",this.options.onNodeDisabled),"function"==typeof this.options.onNodeEnabled&&this.$element.on("nodeEnabled",this.options.onNodeEnabled),"function"==typeof this.options.onNodeExpanded&&this.$element.on("nodeExpanded",this.options.onNodeExpanded),"function"==typeof this.options.onNodeSelected&&this.$element.on("nodeSelected",this.options.onNodeSelected),"function"==typeof this.options.onNodeUnchecked&&this.$element.on("nodeUnchecked",this.options.onNodeUnchecked),"function"==typeof this.options.onNodeUnselected&&this.$element.on("nodeUnselected",this.options.onNodeUnselected),"function"==typeof this.options.onSearchComplete&&this.$element.on("searchComplete",this.options.onSearchComplete),"function"==typeof this.options.onSearchCleared&&this.$element.on("searchCleared",this.options.onSearchCleared)},g.prototype.setInitialStates=function(b,c){if(b.nodes){c+=1;var d=b,e=this;a.each(b.nodes,function(a,b){b.nodeId=e.nodes.length,b.parentId=d.nodeId,b.hasOwnProperty("selectable")||(b.selectable=!0),b.state=b.state||{},b.state.hasOwnProperty("checked")||(b.state.checked=!1),b.state.hasOwnProperty("disabled")||(b.state.disabled=!1),b.state.hasOwnProperty("expanded")||(!b.state.disabled&&c<e.options.levels&&b.nodes&&b.nodes.length>0?b.state.expanded=!0:b.state.expanded=!1),b.state.hasOwnProperty("selected")||(b.state.selected=!1),e.nodes.push(b),b.nodes&&e.setInitialStates(b,c)})}},g.prototype.clickHandler=function(b){this.options.enableLinks||b.preventDefault();var c=a(b.target),d=this.findNode(c);if(d&&!d.state.disabled){var e=c.attr("class")?c.attr("class").split(" "):[];-1!==e.indexOf("expand-icon")?(this.toggleExpandedState(d,f.options),this.render()):-1!==e.indexOf("check-icon")?(this.toggleCheckedState(d,f.options),this.render()):(d.selectable?this.toggleSelectedState(d,f.options):this.toggleExpandedState(d,f.options),this.render())}},g.prototype.findNode=function(a){var b=a.closest("li.list-group-item").attr("data-nodeid"),c=this.nodes[b];return c||console.log("Error: node does not 
exist"),c},g.prototype.toggleExpandedState=function(a,b){a&&this.setExpandedState(a,!a.state.expanded,b)},g.prototype.setExpandedState=function(b,c,d){c!==b.state.expanded&&(c&&b.nodes?(b.state.expanded=!0,d.silent||this.$element.trigger("nodeExpanded",a.extend(!0,{},b))):c||(b.state.expanded=!1,d.silent||this.$element.trigger("nodeCollapsed",a.extend(!0,{},b)),b.nodes&&!d.ignoreChildren&&a.each(b.nodes,a.proxy(function(a,b){this.setExpandedState(b,!1,d)},this))))},g.prototype.toggleSelectedState=function(a,b){a&&this.setSelectedState(a,!a.state.selected,b)},g.prototype.setSelectedState=function(b,c,d){c!==b.state.selected&&(c?(this.options.multiSelect||a.each(this.findNodes("true","g","state.selected"),a.proxy(function(a,b){this.setSelectedState(b,!1,d)},this)),b.state.selected=!0,d.silent||this.$element.trigger("nodeSelected",a.extend(!0,{},b))):(b.state.selected=!1,d.silent||this.$element.trigger("nodeUnselected",a.extend(!0,{},b))))},g.prototype.toggleCheckedState=function(a,b){a&&this.setCheckedState(a,!a.state.checked,b)},g.prototype.setCheckedState=function(b,c,d){c!==b.state.checked&&(c?(b.state.checked=!0,d.silent||this.$element.trigger("nodeChecked",a.extend(!0,{},b))):(b.state.checked=!1,d.silent||this.$element.trigger("nodeUnchecked",a.extend(!0,{},b))))},g.prototype.setDisabledState=function(b,c,d){c!==b.state.disabled&&(c?(b.state.disabled=!0,this.setExpandedState(b,!1,d),this.setSelectedState(b,!1,d),this.setCheckedState(b,!1,d),d.silent||this.$element.trigger("nodeDisabled",a.extend(!0,{},b))):(b.state.disabled=!1,d.silent||this.$element.trigger("nodeEnabled",a.extend(!0,{},b))))},g.prototype.render=function(){this.initialized||(this.$element.addClass(e),this.$wrapper=a(this.template.list),this.injectStyle(),this.initialized=!0),this.$element.empty().append(this.$wrapper.empty()),this.buildTree(this.tree,0)},g.prototype.buildTree=function(b,c){if(b){c+=1;var d=this;a.each(b,function(b,e){for(var f=a(d.template.item).addClass("node-"+d.elementId).addClass(e.state.checked?"node-checked":"").addClass(e.state.disabled?"node-disabled":"").addClass(e.state.selected?"node-selected":"").addClass(e.searchResult?"search-result":"").attr("data-nodeid",e.nodeId).attr("style",d.buildStyleOverride(e)),g=0;c-1>g;g++)f.append(d.template.indent);var h=[];if(e.nodes?(h.push("expand-icon"),h.push(e.state.expanded?d.options.collapseIcon:d.options.expandIcon)):h.push(d.options.emptyIcon),f.append(a(d.template.icon).addClass(h.join(" "))),d.options.showIcon){var h=["node-icon"];h.push(e.icon||d.options.nodeIcon),e.state.selected&&(h.pop(),h.push(e.selectedIcon||d.options.selectedIcon||e.icon||d.options.nodeIcon)),f.append(a(d.template.icon).addClass(h.join(" ")))}if(d.options.showCheckbox){var h=["check-icon"];h.push(e.state.checked?d.options.checkedIcon:d.options.uncheckedIcon),f.append(a(d.template.icon).addClass(h.join(" ")))}return f.append(d.options.enableLinks?a(d.template.link).attr("href",e.href).append(e.text):e.text),d.options.showTags&&e.tags&&a.each(e.tags,function(b,c){f.append(a(d.template.badge).append(c))}),d.$wrapper.append(f),e.nodes&&e.state.expanded&&!e.state.disabled?d.buildTree(e.nodes,c):void 0})}},g.prototype.buildStyleOverride=function(a){if(a.state.disabled)return"";var b=a.color,c=a.backColor;return 
this.options.highlightSelected&&a.state.selected&&(this.options.selectedColor&&(b=this.options.selectedColor),this.options.selectedBackColor&&(c=this.options.selectedBackColor)),this.options.highlightSearchResults&&a.searchResult&&!a.state.disabled&&(this.options.searchResultColor&&(b=this.options.searchResultColor),this.options.searchResultBackColor&&(c=this.options.searchResultBackColor)),"color:"+b+";background-color:"+c+";"},g.prototype.injectStyle=function(){this.options.injectStyle&&!c.getElementById(this.styleId)&&a('<style type="text/css" id="'+this.styleId+'"> '+this.buildStyle()+" </style>").appendTo("head")},g.prototype.buildStyle=function(){var a=".node-"+this.elementId+"{";return this.options.color&&(a+="color:"+this.options.color+";"),this.options.backColor&&(a+="background-color:"+this.options.backColor+";"),this.options.showBorder?this.options.borderColor&&(a+="border:1px solid "+this.options.borderColor+";"):a+="border:none;",a+="}",this.options.onhoverColor&&(a+=".node-"+this.elementId+":not(.node-disabled):hover{background-color:"+this.options.onhoverColor+";}"),this.css+a},g.prototype.template={list:'<ul class="list-group"></ul>',item:'<li class="list-group-item"></li>',indent:'<span class="indent"></span>',icon:'<span class="icon"></span>',link:'<a href="#" style="color:inherit;"></a>',badge:'<span class="badge"></span>'},g.prototype.css=".treeview .list-group-item{cursor:pointer}.treeview span.indent{margin-left:10px;margin-right:10px}.treeview span.icon{width:12px;margin-right:5px}.treeview .node-disabled{color:silver;cursor:not-allowed}",g.prototype.getNode=function(a){return this.nodes[a]},g.prototype.getParent=function(a){var b=this.identifyNode(a);return this.nodes[b.parentId]},g.prototype.getSiblings=function(a){var b=this.identifyNode(a),c=this.getParent(b),d=c?c.nodes:this.tree;return d.filter(function(a){return a.nodeId!==b.nodeId})},g.prototype.getSelected=function(){return this.findNodes("true","g","state.selected")},g.prototype.getUnselected=function(){return this.findNodes("false","g","state.selected")},g.prototype.getExpanded=function(){return this.findNodes("true","g","state.expanded")},g.prototype.getCollapsed=function(){return this.findNodes("false","g","state.expanded")},g.prototype.getChecked=function(){return this.findNodes("true","g","state.checked")},g.prototype.getUnchecked=function(){return this.findNodes("false","g","state.checked")},g.prototype.getDisabled=function(){return this.findNodes("true","g","state.disabled")},g.prototype.getEnabled=function(){return this.findNodes("false","g","state.disabled")},g.prototype.selectNode=function(b,c){this.forEachIdentifier(b,c,a.proxy(function(a,b){this.setSelectedState(a,!0,b)},this)),this.render()},g.prototype.unselectNode=function(b,c){this.forEachIdentifier(b,c,a.proxy(function(a,b){this.setSelectedState(a,!1,b)},this)),this.render()},g.prototype.toggleNodeSelected=function(b,c){this.forEachIdentifier(b,c,a.proxy(function(a,b){this.toggleSelectedState(a,b)},this)),this.render()},g.prototype.collapseAll=function(b){var c=this.findNodes("true","g","state.expanded");this.forEachIdentifier(c,b,a.proxy(function(a,b){this.setExpandedState(a,!1,b)},this)),this.render()},g.prototype.collapseNode=function(b,c){this.forEachIdentifier(b,c,a.proxy(function(a,b){this.setExpandedState(a,!1,b)},this)),this.render()},g.prototype.expandAll=function(b){if(b=a.extend({},f.options,b),b&&b.levels)this.expandLevels(this.tree,b.levels,b);else{var 
c=this.findNodes("false","g","state.expanded");this.forEachIdentifier(c,b,a.proxy(function(a,b){this.setExpandedState(a,!0,b)},this))}this.render()},g.prototype.expandNode=function(b,c){this.forEachIdentifier(b,c,a.proxy(function(a,b){this.setExpandedState(a,!0,b),a.nodes&&b&&b.levels&&this.expandLevels(a.nodes,b.levels-1,b)},this)),this.render()},g.prototype.expandLevels=function(b,c,d){d=a.extend({},f.options,d),a.each(b,a.proxy(function(a,b){this.setExpandedState(b,c>0?!0:!1,d),b.nodes&&this.expandLevels(b.nodes,c-1,d)},this))},g.prototype.revealNode=function(b,c){this.forEachIdentifier(b,c,a.proxy(function(a,b){for(var c=this.getParent(a);c;)this.setExpandedState(c,!0,b),c=this.getParent(c)},this)),this.render()},g.prototype.toggleNodeExpanded=function(b,c){this.forEachIdentifier(b,c,a.proxy(function(a,b){this.toggleExpandedState(a,b)},this)),this.render()},g.prototype.checkAll=function(b){var c=this.findNodes("false","g","state.checked");this.forEachIdentifier(c,b,a.proxy(function(a,b){this.setCheckedState(a,!0,b)},this)),this.render()},g.prototype.checkNode=function(b,c){this.forEachIdentifier(b,c,a.proxy(function(a,b){this.setCheckedState(a,!0,b)},this)),this.render()},g.prototype.uncheckAll=function(b){var c=this.findNodes("true","g","state.checked");this.forEachIdentifier(c,b,a.proxy(function(a,b){this.setCheckedState(a,!1,b)},this)),this.render()},g.prototype.uncheckNode=function(b,c){this.forEachIdentifier(b,c,a.proxy(function(a,b){this.setCheckedState(a,!1,b)},this)),this.render()},g.prototype.toggleNodeChecked=function(b,c){this.forEachIdentifier(b,c,a.proxy(function(a,b){this.toggleCheckedState(a,b)},this)),this.render()},g.prototype.disableAll=function(b){var c=this.findNodes("false","g","state.disabled");this.forEachIdentifier(c,b,a.proxy(function(a,b){this.setDisabledState(a,!0,b)},this)),this.render()},g.prototype.disableNode=function(b,c){this.forEachIdentifier(b,c,a.proxy(function(a,b){this.setDisabledState(a,!0,b)},this)),this.render()},g.prototype.enableAll=function(b){var c=this.findNodes("true","g","state.disabled");this.forEachIdentifier(c,b,a.proxy(function(a,b){this.setDisabledState(a,!1,b)},this)),this.render()},g.prototype.enableNode=function(b,c){this.forEachIdentifier(b,c,a.proxy(function(a,b){this.setDisabledState(a,!1,b)},this)),this.render()},g.prototype.toggleNodeDisabled=function(b,c){this.forEachIdentifier(b,c,a.proxy(function(a,b){this.setDisabledState(a,!a.state.disabled,b)},this)),this.render()},g.prototype.forEachIdentifier=function(b,c,d){c=a.extend({},f.options,c),b instanceof Array||(b=[b]),a.each(b,a.proxy(function(a,b){d(this.identifyNode(b),c)},this))},g.prototype.identifyNode=function(a){return"number"==typeof a?this.nodes[a]:a},g.prototype.search=function(b,c){c=a.extend({},f.searchOptions,c),this.clearSearch({render:!1});var d=[];if(b&&b.length>0){c.exactMatch&&(b="^"+b+"$");var e="g";c.ignoreCase&&(e+="i"),d=this.findNodes(b,e),a.each(d,function(a,b){b.searchResult=!0})}return c.revealResults?this.revealNode(d):this.render(),this.$element.trigger("searchComplete",a.extend(!0,{},d)),d},g.prototype.clearSearch=function(b){b=a.extend({},{render:!0},b);var c=a.each(this.findNodes("true","g","searchResult"),function(a,b){b.searchResult=!1});b.render&&this.render(),this.$element.trigger("searchCleared",a.extend(!0,{},c))},g.prototype.findNodes=function(b,c,d){c=c||"g",d=d||"text";var e=this;return a.grep(this.nodes,function(a){var f=e.getNodeValue(a,d);return"string"==typeof f?f.match(new RegExp(b,c)):void 
0})},g.prototype.getNodeValue=function(a,b){var c=b.indexOf(".");if(c>0){var e=a[b.substring(0,c)],f=b.substring(c+1,b.length);return this.getNodeValue(e,f)}return a.hasOwnProperty(b)?a[b].toString():d};var h=function(a){b.console&&b.console.error(a)};a.fn[e]=function(b,c){var d;return this.each(function(){var f=a.data(this,e);"string"==typeof b?f?a.isFunction(f[b])&&"_"!==b.charAt(0)?(c instanceof Array||(c=[c]),d=f[b].apply(f,c)):h("No such method : "+b):h("Not initialized, can not call method : "+b):"boolean"==typeof b?d=f:a.data(this,e,new g(this,a.extend(!0,{},b)))}),d||this}}(jQuery,window,document);
/* package: zensols.zotsite | path: /zensols.zotsite-0.8.1-py3-none-any.whl/zensols/zotsite/resources/site/lib/js/bootstrap-treeview.min.js */
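/*
 * Minimal usage sketch for the bootstrap-treeview plugin minified above.
 * The element id and data array are illustrative assumptions; the option
 * and method names come from the plugin's own defaults (f.settings,
 * f.searchOptions) and the public API it returns.
 */
$('#tree').treeview({
  levels: 2,            // expand the first two levels on render
  enableLinks: false,
  data: [               // hierarchical node objects
    { text: 'Parent', nodes: [{ text: 'Child 1' }, { text: 'Child 2' }] }
  ],
  onNodeSelected: function (event, node) {
    console.log('selected: ' + node.text);
  }
});
// Methods are dispatched by name; extra arguments are passed as an array:
$('#tree').treeview('search', ['Child', { ignoreCase: true, revealResults: true }]);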
(function(e,t){'object'==typeof exports&&'undefined'!=typeof module?module.exports=t():'function'==typeof define&&define.amd?define(t):e.Popper=t()})(this,function(){'use strict';function e(e){return e&&'[object Function]'==={}.toString.call(e)}function t(e,t){if(1!==e.nodeType)return[];var o=getComputedStyle(e,null);return t?o[t]:o}function o(e){return'HTML'===e.nodeName?e:e.parentNode||e.host}function n(e){if(!e)return document.body;switch(e.nodeName){case'HTML':case'BODY':return e.ownerDocument.body;case'#document':return e.body;}var i=t(e),r=i.overflow,p=i.overflowX,s=i.overflowY;return /(auto|scroll)/.test(r+s+p)?e:n(o(e))}function r(e){var o=e&&e.offsetParent,i=o&&o.nodeName;return i&&'BODY'!==i&&'HTML'!==i?-1!==['TD','TABLE'].indexOf(o.nodeName)&&'static'===t(o,'position')?r(o):o:e?e.ownerDocument.documentElement:document.documentElement}function p(e){var t=e.nodeName;return'BODY'!==t&&('HTML'===t||r(e.firstElementChild)===e)}function s(e){return null===e.parentNode?e:s(e.parentNode)}function d(e,t){if(!e||!e.nodeType||!t||!t.nodeType)return document.documentElement;var o=e.compareDocumentPosition(t)&Node.DOCUMENT_POSITION_FOLLOWING,i=o?e:t,n=o?t:e,a=document.createRange();a.setStart(i,0),a.setEnd(n,0);var l=a.commonAncestorContainer;if(e!==l&&t!==l||i.contains(n))return p(l)?l:r(l);var f=s(e);return f.host?d(f.host,t):d(e,s(t).host)}function a(e){var t=1<arguments.length&&void 0!==arguments[1]?arguments[1]:'top',o='top'===t?'scrollTop':'scrollLeft',i=e.nodeName;if('BODY'===i||'HTML'===i){var n=e.ownerDocument.documentElement,r=e.ownerDocument.scrollingElement||n;return r[o]}return e[o]}function l(e,t){var o=2<arguments.length&&void 0!==arguments[2]&&arguments[2],i=a(t,'top'),n=a(t,'left'),r=o?-1:1;return e.top+=i*r,e.bottom+=i*r,e.left+=n*r,e.right+=n*r,e}function f(e,t){var o='x'===t?'Left':'Top',i='Left'==o?'Right':'Bottom';return parseFloat(e['border'+o+'Width'],10)+parseFloat(e['border'+i+'Width'],10)}function m(e,t,o,i){return J(t['offset'+e],t['scroll'+e],o['client'+e],o['offset'+e],o['scroll'+e],ie()?o['offset'+e]+i['margin'+('Height'===e?'Top':'Left')]+i['margin'+('Height'===e?'Bottom':'Right')]:0)}function h(){var e=document.body,t=document.documentElement,o=ie()&&getComputedStyle(t);return{height:m('Height',e,t,o),width:m('Width',e,t,o)}}function c(e){return se({},e,{right:e.left+e.width,bottom:e.top+e.height})}function g(e){var o={};if(ie())try{o=e.getBoundingClientRect();var i=a(e,'top'),n=a(e,'left');o.top+=i,o.left+=n,o.bottom+=i,o.right+=n}catch(e){}else o=e.getBoundingClientRect();var r={left:o.left,top:o.top,width:o.right-o.left,height:o.bottom-o.top},p='HTML'===e.nodeName?h():{},s=p.width||e.clientWidth||r.right-r.left,d=p.height||e.clientHeight||r.bottom-r.top,l=e.offsetWidth-s,m=e.offsetHeight-d;if(l||m){var g=t(e);l-=f(g,'x'),m-=f(g,'y'),r.width-=l,r.height-=m}return c(r)}function u(e,o){var i=ie(),r='HTML'===o.nodeName,p=g(e),s=g(o),d=n(e),a=t(o),f=parseFloat(a.borderTopWidth,10),m=parseFloat(a.borderLeftWidth,10),h=c({top:p.top-s.top-f,left:p.left-s.left-m,width:p.width,height:p.height});if(h.marginTop=0,h.marginLeft=0,!i&&r){var u=parseFloat(a.marginTop,10),b=parseFloat(a.marginLeft,10);h.top-=f-u,h.bottom-=f-u,h.left-=m-b,h.right-=m-b,h.marginTop=u,h.marginLeft=b}return(i?o.contains(d):o===d&&'BODY'!==d.nodeName)&&(h=l(h,o)),h}function b(e){var 
t=e.ownerDocument.documentElement,o=u(e,t),i=J(t.clientWidth,window.innerWidth||0),n=J(t.clientHeight,window.innerHeight||0),r=a(t),p=a(t,'left'),s={top:r-o.top+o.marginTop,left:p-o.left+o.marginLeft,width:i,height:n};return c(s)}function w(e){var i=e.nodeName;return'BODY'===i||'HTML'===i?!1:'fixed'===t(e,'position')||w(o(e))}function y(e,t,i,r){var p={top:0,left:0},s=d(e,t);if('viewport'===r)p=b(s);else{var a;'scrollParent'===r?(a=n(o(t)),'BODY'===a.nodeName&&(a=e.ownerDocument.documentElement)):'window'===r?a=e.ownerDocument.documentElement:a=r;var l=u(a,s);if('HTML'===a.nodeName&&!w(s)){var f=h(),m=f.height,c=f.width;p.top+=l.top-l.marginTop,p.bottom=m+l.top,p.left+=l.left-l.marginLeft,p.right=c+l.left}else p=l}return p.left+=i,p.top+=i,p.right-=i,p.bottom-=i,p}function E(e){var t=e.width,o=e.height;return t*o}function v(e,t,o,i,n){var r=5<arguments.length&&void 0!==arguments[5]?arguments[5]:0;if(-1===e.indexOf('auto'))return e;var p=y(o,i,r,n),s={top:{width:p.width,height:t.top-p.top},right:{width:p.right-t.right,height:p.height},bottom:{width:p.width,height:p.bottom-t.bottom},left:{width:t.left-p.left,height:p.height}},d=Object.keys(s).map(function(e){return se({key:e},s[e],{area:E(s[e])})}).sort(function(e,t){return t.area-e.area}),a=d.filter(function(e){var t=e.width,i=e.height;return t>=o.clientWidth&&i>=o.clientHeight}),l=0<a.length?a[0].key:d[0].key,f=e.split('-')[1];return l+(f?'-'+f:'')}function O(e,t,o){var i=d(t,o);return u(o,i)}function L(e){var t=getComputedStyle(e),o=parseFloat(t.marginTop)+parseFloat(t.marginBottom),i=parseFloat(t.marginLeft)+parseFloat(t.marginRight),n={width:e.offsetWidth+i,height:e.offsetHeight+o};return n}function x(e){var t={left:'right',right:'left',bottom:'top',top:'bottom'};return e.replace(/left|right|bottom|top/g,function(e){return t[e]})}function S(e,t,o){o=o.split('-')[0];var i=L(e),n={width:i.width,height:i.height},r=-1!==['right','left'].indexOf(o),p=r?'top':'left',s=r?'left':'top',d=r?'height':'width',a=r?'width':'height';return n[p]=t[p]+t[d]/2-i[d]/2,n[s]=o===s?t[s]-i[a]:t[x(s)],n}function T(e,t){return Array.prototype.find?e.find(t):e.filter(t)[0]}function D(e,t,o){if(Array.prototype.findIndex)return e.findIndex(function(e){return e[t]===o});var i=T(e,function(e){return e[t]===o});return e.indexOf(i)}function C(t,o,i){var n=void 0===i?t:t.slice(0,D(t,'name',i));return n.forEach(function(t){t['function']&&console.warn('`modifier.function` is deprecated, use `modifier.fn`!');var i=t['function']||t.fn;t.enabled&&e(i)&&(o.offsets.popper=c(o.offsets.popper),o.offsets.reference=c(o.offsets.reference),o=i(o,t))}),o}function N(){if(!this.state.isDestroyed){var e={instance:this,styles:{},arrowStyles:{},attributes:{},flipped:!1,offsets:{}};e.offsets.reference=O(this.state,this.popper,this.reference),e.placement=v(this.options.placement,e.offsets.reference,this.popper,this.reference,this.options.modifiers.flip.boundariesElement,this.options.modifiers.flip.padding),e.originalPlacement=e.placement,e.offsets.popper=S(this.popper,e.offsets.reference,e.placement),e.offsets.popper.position='absolute',e=C(this.modifiers,e),this.state.isCreated?this.options.onUpdate(e):(this.state.isCreated=!0,this.options.onCreate(e))}}function k(e,t){return e.some(function(e){var o=e.name,i=e.enabled;return i&&o===t})}function W(e){for(var t=[!1,'ms','Webkit','Moz','O'],o=e.charAt(0).toUpperCase()+e.slice(1),n=0;n<t.length-1;n++){var i=t[n],r=i?''+i+o:e;if('undefined'!=typeof document.body.style[r])return r}return null}function P(){return 
this.state.isDestroyed=!0,k(this.modifiers,'applyStyle')&&(this.popper.removeAttribute('x-placement'),this.popper.style.left='',this.popper.style.position='',this.popper.style.top='',this.popper.style[W('transform')]=''),this.disableEventListeners(),this.options.removeOnDestroy&&this.popper.parentNode.removeChild(this.popper),this}function B(e){var t=e.ownerDocument;return t?t.defaultView:window}function H(e,t,o,i){var r='BODY'===e.nodeName,p=r?e.ownerDocument.defaultView:e;p.addEventListener(t,o,{passive:!0}),r||H(n(p.parentNode),t,o,i),i.push(p)}function A(e,t,o,i){o.updateBound=i,B(e).addEventListener('resize',o.updateBound,{passive:!0});var r=n(e);return H(r,'scroll',o.updateBound,o.scrollParents),o.scrollElement=r,o.eventsEnabled=!0,o}function I(){this.state.eventsEnabled||(this.state=A(this.reference,this.options,this.state,this.scheduleUpdate))}function M(e,t){return B(e).removeEventListener('resize',t.updateBound),t.scrollParents.forEach(function(e){e.removeEventListener('scroll',t.updateBound)}),t.updateBound=null,t.scrollParents=[],t.scrollElement=null,t.eventsEnabled=!1,t}function R(){this.state.eventsEnabled&&(cancelAnimationFrame(this.scheduleUpdate),this.state=M(this.reference,this.state))}function U(e){return''!==e&&!isNaN(parseFloat(e))&&isFinite(e)}function Y(e,t){Object.keys(t).forEach(function(o){var i='';-1!==['width','height','top','right','bottom','left'].indexOf(o)&&U(t[o])&&(i='px'),e.style[o]=t[o]+i})}function j(e,t){Object.keys(t).forEach(function(o){var i=t[o];!1===i?e.removeAttribute(o):e.setAttribute(o,t[o])})}function F(e,t,o){var i=T(e,function(e){var o=e.name;return o===t}),n=!!i&&e.some(function(e){return e.name===o&&e.enabled&&e.order<i.order});if(!n){var r='`'+t+'`';console.warn('`'+o+'`'+' modifier is required by '+r+' modifier in order to work, be sure to include it before '+r+'!')}return n}function K(e){return'end'===e?'start':'start'===e?'end':e}function q(e){var t=1<arguments.length&&void 0!==arguments[1]&&arguments[1],o=ae.indexOf(e),i=ae.slice(o+1).concat(ae.slice(0,o));return t?i.reverse():i}function V(e,t,o,i){var n=e.match(/((?:\-|\+)?\d*\.?\d*)(.*)/),r=+n[1],p=n[2];if(!r)return e;if(0===p.indexOf('%')){var s;switch(p){case'%p':s=o;break;case'%':case'%r':default:s=i;}var d=c(s);return d[t]/100*r}if('vh'===p||'vw'===p){var a;return a='vh'===p?J(document.documentElement.clientHeight,window.innerHeight||0):J(document.documentElement.clientWidth,window.innerWidth||0),a/100*r}return r}function z(e,t,o,i){var n=[0,0],r=-1!==['right','left'].indexOf(i),p=e.split(/(\+|\-)/).map(function(e){return e.trim()}),s=p.indexOf(T(p,function(e){return-1!==e.search(/,|\s/)}));p[s]&&-1===p[s].indexOf(',')&&console.warn('Offsets separated by white space(s) are deprecated, use a comma (,) instead.');var d=/\s*,\s*|\s+/,a=-1===s?[p]:[p.slice(0,s).concat([p[s].split(d)[0]]),[p[s].split(d)[1]].concat(p.slice(s+1))];return a=a.map(function(e,i){var n=(1===i?!r:r)?'height':'width',p=!1;return e.reduce(function(e,t){return''===e[e.length-1]&&-1!==['+','-'].indexOf(t)?(e[e.length-1]=t,p=!0,e):p?(e[e.length-1]+=t,p=!1,e):e.concat(t)},[]).map(function(e){return V(e,n,t,o)})}),a.forEach(function(e,t){e.forEach(function(o,i){U(o)&&(n[t]+=o*('-'===e[i-1]?-1:1))})}),n}function G(e,t){var o,i=t.offset,n=e.placement,r=e.offsets,p=r.popper,s=r.reference,d=n.split('-')[0];return o=U(+i)?[+i,0]:z(i,p,s,d),'left'===d?(p.top+=o[0],p.left-=o[1]):'right'===d?(p.top+=o[0],p.left+=o[1]):'top'===d?(p.left+=o[0],p.top-=o[1]):'bottom'===d&&(p.left+=o[0],p.top+=o[1]),e.popper=p,e}for(var 
_=Math.min,X=Math.floor,J=Math.max,Q='undefined'!=typeof window&&'undefined'!=typeof document,Z=['Edge','Trident','Firefox'],$=0,ee=0;ee<Z.length;ee+=1)if(Q&&0<=navigator.userAgent.indexOf(Z[ee])){$=1;break}var i,te=Q&&window.Promise,oe=te?function(e){var t=!1;return function(){t||(t=!0,window.Promise.resolve().then(function(){t=!1,e()}))}}:function(e){var t=!1;return function(){t||(t=!0,setTimeout(function(){t=!1,e()},$))}},ie=function(){return void 0==i&&(i=-1!==navigator.appVersion.indexOf('MSIE 10')),i},ne=function(e,t){if(!(e instanceof t))throw new TypeError('Cannot call a class as a function')},re=function(){function e(e,t){for(var o,n=0;n<t.length;n++)o=t[n],o.enumerable=o.enumerable||!1,o.configurable=!0,'value'in o&&(o.writable=!0),Object.defineProperty(e,o.key,o)}return function(t,o,i){return o&&e(t.prototype,o),i&&e(t,i),t}}(),pe=function(e,t,o){return t in e?Object.defineProperty(e,t,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[t]=o,e},se=Object.assign||function(e){for(var t,o=1;o<arguments.length;o++)for(var i in t=arguments[o],t)Object.prototype.hasOwnProperty.call(t,i)&&(e[i]=t[i]);return e},de=['auto-start','auto','auto-end','top-start','top','top-end','right-start','right','right-end','bottom-end','bottom','bottom-start','left-end','left','left-start'],ae=de.slice(3),le={FLIP:'flip',CLOCKWISE:'clockwise',COUNTERCLOCKWISE:'counterclockwise'},fe=function(){function t(o,i){var n=this,r=2<arguments.length&&void 0!==arguments[2]?arguments[2]:{};ne(this,t),this.scheduleUpdate=function(){return requestAnimationFrame(n.update)},this.update=oe(this.update.bind(this)),this.options=se({},t.Defaults,r),this.state={isDestroyed:!1,isCreated:!1,scrollParents:[]},this.reference=o&&o.jquery?o[0]:o,this.popper=i&&i.jquery?i[0]:i,this.options.modifiers={},Object.keys(se({},t.Defaults.modifiers,r.modifiers)).forEach(function(e){n.options.modifiers[e]=se({},t.Defaults.modifiers[e]||{},r.modifiers?r.modifiers[e]:{})}),this.modifiers=Object.keys(this.options.modifiers).map(function(e){return se({name:e},n.options.modifiers[e])}).sort(function(e,t){return e.order-t.order}),this.modifiers.forEach(function(t){t.enabled&&e(t.onLoad)&&t.onLoad(n.reference,n.popper,n.options,t,n.state)}),this.update();var p=this.options.eventsEnabled;p&&this.enableEventListeners(),this.state.eventsEnabled=p}return re(t,[{key:'update',value:function(){return N.call(this)}},{key:'destroy',value:function(){return P.call(this)}},{key:'enableEventListeners',value:function(){return I.call(this)}},{key:'disableEventListeners',value:function(){return R.call(this)}}]),t}();return fe.Utils=('undefined'==typeof window?global:window).PopperUtils,fe.placements=de,fe.Defaults={placement:'bottom',eventsEnabled:!0,removeOnDestroy:!1,onCreate:function(){},onUpdate:function(){},modifiers:{shift:{order:100,enabled:!0,fn:function(e){var t=e.placement,o=t.split('-')[0],i=t.split('-')[1];if(i){var n=e.offsets,r=n.reference,p=n.popper,s=-1!==['bottom','top'].indexOf(o),d=s?'left':'top',a=s?'width':'height',l={start:pe({},d,r[d]),end:pe({},d,r[d]+r[a]-p[a])};e.offsets.popper=se({},p,l[i])}return e}},offset:{order:200,enabled:!0,fn:G,offset:0},preventOverflow:{order:300,enabled:!0,fn:function(e,t){var o=t.boundariesElement||r(e.instance.popper);e.instance.reference===o&&(o=r(o));var i=y(e.instance.popper,e.instance.reference,t.padding,o);t.boundaries=i;var n=t.priority,p=e.offsets.popper,s={primary:function(e){var o=p[e];return p[e]<i[e]&&!t.escapeWithReference&&(o=J(p[e],i[e])),pe({},e,o)},secondary:function(e){var 
o='right'===e?'left':'top',n=p[o];return p[e]>i[e]&&!t.escapeWithReference&&(n=_(p[o],i[e]-('right'===e?p.width:p.height))),pe({},o,n)}};return n.forEach(function(e){var t=-1===['left','top'].indexOf(e)?'secondary':'primary';p=se({},p,s[t](e))}),e.offsets.popper=p,e},priority:['left','right','top','bottom'],padding:5,boundariesElement:'scrollParent'},keepTogether:{order:400,enabled:!0,fn:function(e){var t=e.offsets,o=t.popper,i=t.reference,n=e.placement.split('-')[0],r=X,p=-1!==['top','bottom'].indexOf(n),s=p?'right':'bottom',d=p?'left':'top',a=p?'width':'height';return o[s]<r(i[d])&&(e.offsets.popper[d]=r(i[d])-o[a]),o[d]>r(i[s])&&(e.offsets.popper[d]=r(i[s])),e}},arrow:{order:500,enabled:!0,fn:function(e,o){var i;if(!F(e.instance.modifiers,'arrow','keepTogether'))return e;var n=o.element;if('string'==typeof n){if(n=e.instance.popper.querySelector(n),!n)return e;}else if(!e.instance.popper.contains(n))return console.warn('WARNING: `arrow.element` must be child of its popper element!'),e;var r=e.placement.split('-')[0],p=e.offsets,s=p.popper,d=p.reference,a=-1!==['left','right'].indexOf(r),l=a?'height':'width',f=a?'Top':'Left',m=f.toLowerCase(),h=a?'left':'top',g=a?'bottom':'right',u=L(n)[l];d[g]-u<s[m]&&(e.offsets.popper[m]-=s[m]-(d[g]-u)),d[m]+u>s[g]&&(e.offsets.popper[m]+=d[m]+u-s[g]),e.offsets.popper=c(e.offsets.popper);var b=d[m]+d[l]/2-u/2,w=t(e.instance.popper),y=parseFloat(w['margin'+f],10),E=parseFloat(w['border'+f+'Width'],10),v=b-e.offsets.popper[m]-y-E;return v=J(_(s[l]-u,v),0),e.arrowElement=n,e.offsets.arrow=(i={},pe(i,m,Math.round(v)),pe(i,h,''),i),e},element:'[x-arrow]'},flip:{order:600,enabled:!0,fn:function(e,t){if(k(e.instance.modifiers,'inner'))return e;if(e.flipped&&e.placement===e.originalPlacement)return e;var o=y(e.instance.popper,e.instance.reference,t.padding,t.boundariesElement),i=e.placement.split('-')[0],n=x(i),r=e.placement.split('-')[1]||'',p=[];switch(t.behavior){case le.FLIP:p=[i,n];break;case le.CLOCKWISE:p=q(i);break;case le.COUNTERCLOCKWISE:p=q(i,!0);break;default:p=t.behavior;}return p.forEach(function(s,d){if(i!==s||p.length===d+1)return e;i=e.placement.split('-')[0],n=x(i);var a=e.offsets.popper,l=e.offsets.reference,f=X,m='left'===i&&f(a.right)>f(l.left)||'right'===i&&f(a.left)<f(l.right)||'top'===i&&f(a.bottom)>f(l.top)||'bottom'===i&&f(a.top)<f(l.bottom),h=f(a.left)<f(o.left),c=f(a.right)>f(o.right),g=f(a.top)<f(o.top),u=f(a.bottom)>f(o.bottom),b='left'===i&&h||'right'===i&&c||'top'===i&&g||'bottom'===i&&u,w=-1!==['top','bottom'].indexOf(i),y=!!t.flipVariations&&(w&&'start'===r&&h||w&&'end'===r&&c||!w&&'start'===r&&g||!w&&'end'===r&&u);(m||b||y)&&(e.flipped=!0,(m||b)&&(i=p[d+1]),y&&(r=K(r)),e.placement=i+(r?'-'+r:''),e.offsets.popper=se({},e.offsets.popper,S(e.instance.popper,e.offsets.reference,e.placement)),e=C(e.instance.modifiers,e,'flip'))}),e},behavior:'flip',padding:5,boundariesElement:'viewport'},inner:{order:700,enabled:!1,fn:function(e){var t=e.placement,o=t.split('-')[0],i=e.offsets,n=i.popper,r=i.reference,p=-1!==['left','right'].indexOf(o),s=-1===['top','left'].indexOf(o);return n[p?'left':'top']=r[o]-(s?n[p?'width':'height']:0),e.placement=x(t),e.offsets.popper=c(n),e}},hide:{order:800,enabled:!0,fn:function(e){if(!F(e.instance.modifiers,'hide','preventOverflow'))return e;var t=e.offsets.reference,o=T(e.instance.modifiers,function(e){return'preventOverflow'===e.name}).boundaries;if(t.bottom<o.top||t.left>o.right||t.top>o.bottom||t.right<o.left){if(!0===e.hide)return 
e;e.hide=!0,e.attributes['x-out-of-boundaries']=''}else{if(!1===e.hide)return e;e.hide=!1,e.attributes['x-out-of-boundaries']=!1}return e}},computeStyle:{order:850,enabled:!0,fn:function(e,t){var o=t.x,i=t.y,n=e.offsets.popper,p=T(e.instance.modifiers,function(e){return'applyStyle'===e.name}).gpuAcceleration;void 0!==p&&console.warn('WARNING: `gpuAcceleration` option moved to `computeStyle` modifier and will not be supported in future versions of Popper.js!');var s,d,a=void 0===p?t.gpuAcceleration:p,l=r(e.instance.popper),f=g(l),m={position:n.position},h={left:X(n.left),top:X(n.top),bottom:X(n.bottom),right:X(n.right)},c='bottom'===o?'top':'bottom',u='right'===i?'left':'right',b=W('transform');if(d='bottom'==c?-f.height+h.bottom:h.top,s='right'==u?-f.width+h.right:h.left,a&&b)m[b]='translate3d('+s+'px, '+d+'px, 0)',m[c]=0,m[u]=0,m.willChange='transform';else{var w='bottom'==c?-1:1,y='right'==u?-1:1;m[c]=d*w,m[u]=s*y,m.willChange=c+', '+u}var E={"x-placement":e.placement};return e.attributes=se({},E,e.attributes),e.styles=se({},m,e.styles),e.arrowStyles=se({},e.offsets.arrow,e.arrowStyles),e},gpuAcceleration:!0,x:'bottom',y:'right'},applyStyle:{order:900,enabled:!0,fn:function(e){return Y(e.instance.popper,e.styles),j(e.instance.popper,e.attributes),e.arrowElement&&Object.keys(e.arrowStyles).length&&Y(e.arrowElement,e.arrowStyles),e},onLoad:function(e,t,o,i,n){var r=O(n,t,e),p=v(o.placement,r,t,e,o.modifiers.flip.boundariesElement,o.modifiers.flip.padding);return t.setAttribute('x-placement',p),Y(t,{position:'absolute'}),o},gpuAcceleration:void 0}}},fe});
//# sourceMappingURL=popper.min.js.map
/* package: zensols.zotsite | path: /zensols.zotsite-0.8.1-py3-none-any.whl/zensols/zotsite/resources/site/lib/js/popper.min.js */
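/*
 * Minimal usage sketch for Popper.js v1 (the UMD build above), which the
 * Bootstrap tooltip code requires. The element lookups are illustrative
 * assumptions; the placement and modifier names match the library's
 * Defaults object.
 */
var reference = document.querySelector('#toggle-button');
var popperEl = document.querySelector('#dropdown-menu');
var instance = new Popper(reference, popperEl, {
  placement: 'bottom-start',              // one of Popper.placements
  modifiers: {
    offset: { offset: '0,8' },            // 8px gap from the reference element
    preventOverflow: { boundariesElement: 'viewport' }
  }
});
// Reposition later (e.g. after the popper's content changes):
instance.scheduleUpdate();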
/*! Copyright 2008-2022 SpryMedia Ltd.
This source file is free software, available under the following license:
MIT license - http://datatables.net/license
This source file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the license files for details.
For details please refer to: http://www.datatables.net
DataTables 1.12.1
©2008-2022 SpryMedia Ltd - datatables.net/license
*/
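/*
 * Minimal usage sketch for DataTables 1.12, whose minified source follows.
 * The table id and option values are illustrative assumptions; the
 * camelCase options are mapped onto the internal Hungarian names
 * (e.g. paging -> bPaginate, pageLength -> iDisplayLength) by the
 * conversion code below.
 */
$(document).ready(function () {
  $('#example').DataTable({
    paging: true,
    pageLength: 25,
    ordering: true,
    searching: true
  });
});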
var $jscomp=$jscomp||{};$jscomp.scope={};$jscomp.findInternal=function(l,y,A){l instanceof String&&(l=String(l));for(var q=l.length,E=0;E<q;E++){var P=l[E];if(y.call(A,P,E,l))return{i:E,v:P}}return{i:-1,v:void 0}};$jscomp.ASSUME_ES5=!1;$jscomp.ASSUME_NO_NATIVE_MAP=!1;$jscomp.ASSUME_NO_NATIVE_SET=!1;$jscomp.SIMPLE_FROUND_POLYFILL=!1;$jscomp.ISOLATE_POLYFILLS=!1;
$jscomp.defineProperty=$jscomp.ASSUME_ES5||"function"==typeof Object.defineProperties?Object.defineProperty:function(l,y,A){if(l==Array.prototype||l==Object.prototype)return l;l[y]=A.value;return l};$jscomp.getGlobal=function(l){l=["object"==typeof globalThis&&globalThis,l,"object"==typeof window&&window,"object"==typeof self&&self,"object"==typeof global&&global];for(var y=0;y<l.length;++y){var A=l[y];if(A&&A.Math==Math)return A}throw Error("Cannot find global object");};$jscomp.global=$jscomp.getGlobal(this);
$jscomp.IS_SYMBOL_NATIVE="function"===typeof Symbol&&"symbol"===typeof Symbol("x");$jscomp.TRUST_ES6_POLYFILLS=!$jscomp.ISOLATE_POLYFILLS||$jscomp.IS_SYMBOL_NATIVE;$jscomp.polyfills={};$jscomp.propertyToPolyfillSymbol={};$jscomp.POLYFILL_PREFIX="$jscp$";var $jscomp$lookupPolyfilledValue=function(l,y){var A=$jscomp.propertyToPolyfillSymbol[y];if(null==A)return l[y];A=l[A];return void 0!==A?A:l[y]};
$jscomp.polyfill=function(l,y,A,q){y&&($jscomp.ISOLATE_POLYFILLS?$jscomp.polyfillIsolated(l,y,A,q):$jscomp.polyfillUnisolated(l,y,A,q))};$jscomp.polyfillUnisolated=function(l,y,A,q){A=$jscomp.global;l=l.split(".");for(q=0;q<l.length-1;q++){var E=l[q];if(!(E in A))return;A=A[E]}l=l[l.length-1];q=A[l];y=y(q);y!=q&&null!=y&&$jscomp.defineProperty(A,l,{configurable:!0,writable:!0,value:y})};
$jscomp.polyfillIsolated=function(l,y,A,q){var E=l.split(".");l=1===E.length;q=E[0];q=!l&&q in $jscomp.polyfills?$jscomp.polyfills:$jscomp.global;for(var P=0;P<E.length-1;P++){var la=E[P];if(!(la in q))return;q=q[la]}E=E[E.length-1];A=$jscomp.IS_SYMBOL_NATIVE&&"es6"===A?q[E]:null;y=y(A);null!=y&&(l?$jscomp.defineProperty($jscomp.polyfills,E,{configurable:!0,writable:!0,value:y}):y!==A&&($jscomp.propertyToPolyfillSymbol[E]=$jscomp.IS_SYMBOL_NATIVE?$jscomp.global.Symbol(E):$jscomp.POLYFILL_PREFIX+E,
E=$jscomp.propertyToPolyfillSymbol[E],$jscomp.defineProperty(q,E,{configurable:!0,writable:!0,value:y})))};$jscomp.polyfill("Array.prototype.find",function(l){return l?l:function(y,A){return $jscomp.findInternal(this,y,A).v}},"es6","es3");
(function(l){"function"===typeof define&&define.amd?define(["jquery"],function(y){return l(y,window,document)}):"object"===typeof exports?module.exports=function(y,A){y||(y=window);A||(A="undefined"!==typeof window?require("jquery"):require("jquery")(y));return l(A,y,y.document)}:window.DataTable=l(jQuery,window,document)})(function(l,y,A,q){function E(a){var b,c,d={};l.each(a,function(e,h){(b=e.match(/^([^A-Z]+?)([A-Z])/))&&-1!=="a aa ai ao as b fn i m o s ".indexOf(b[1]+" ")&&(c=e.replace(b[0],
b[2].toLowerCase()),d[c]=e,"o"===b[1]&&E(a[e]))});a._hungarianMap=d}function P(a,b,c){a._hungarianMap||E(a);var d;l.each(b,function(e,h){d=a._hungarianMap[e];d===q||!c&&b[d]!==q||("o"===d.charAt(0)?(b[d]||(b[d]={}),l.extend(!0,b[d],b[e]),P(a[d],b[d],c)):b[d]=b[e])})}function la(a){var b=u.defaults.oLanguage,c=b.sDecimal;c&&bb(c);if(a){var d=a.sZeroRecords;!a.sEmptyTable&&d&&"No data available in table"===b.sEmptyTable&&Y(a,a,"sZeroRecords","sEmptyTable");!a.sLoadingRecords&&d&&"Loading..."===b.sLoadingRecords&&
Y(a,a,"sZeroRecords","sLoadingRecords");a.sInfoThousands&&(a.sThousands=a.sInfoThousands);(a=a.sDecimal)&&c!==a&&bb(a)}}function Db(a){S(a,"ordering","bSort");S(a,"orderMulti","bSortMulti");S(a,"orderClasses","bSortClasses");S(a,"orderCellsTop","bSortCellsTop");S(a,"order","aaSorting");S(a,"orderFixed","aaSortingFixed");S(a,"paging","bPaginate");S(a,"pagingType","sPaginationType");S(a,"pageLength","iDisplayLength");S(a,"searching","bFilter");"boolean"===typeof a.sScrollX&&(a.sScrollX=a.sScrollX?"100%":
"");"boolean"===typeof a.scrollX&&(a.scrollX=a.scrollX?"100%":"");if(a=a.aoSearchCols)for(var b=0,c=a.length;b<c;b++)a[b]&&P(u.models.oSearch,a[b])}function Eb(a){S(a,"orderable","bSortable");S(a,"orderData","aDataSort");S(a,"orderSequence","asSorting");S(a,"orderDataType","sortDataType");var b=a.aDataSort;"number"!==typeof b||Array.isArray(b)||(a.aDataSort=[b])}function Fb(a){if(!u.__browser){var b={};u.__browser=b;var c=l("<div/>").css({position:"fixed",top:0,left:-1*l(y).scrollLeft(),height:1,
width:1,overflow:"hidden"}).append(l("<div/>").css({position:"absolute",top:1,left:1,width:100,overflow:"scroll"}).append(l("<div/>").css({width:"100%",height:10}))).appendTo("body"),d=c.children(),e=d.children();b.barWidth=d[0].offsetWidth-d[0].clientWidth;b.bScrollOversize=100===e[0].offsetWidth&&100!==d[0].clientWidth;b.bScrollbarLeft=1!==Math.round(e.offset().left);b.bBounding=c[0].getBoundingClientRect().width?!0:!1;c.remove()}l.extend(a.oBrowser,u.__browser);a.oScroll.iBarWidth=u.__browser.barWidth}
function Gb(a,b,c,d,e,h){var f=!1;if(c!==q){var g=c;f=!0}for(;d!==e;)a.hasOwnProperty(d)&&(g=f?b(g,a[d],d,a):a[d],f=!0,d+=h);return g}function cb(a,b){var c=u.defaults.column,d=a.aoColumns.length;c=l.extend({},u.models.oColumn,c,{nTh:b?b:A.createElement("th"),sTitle:c.sTitle?c.sTitle:b?b.innerHTML:"",aDataSort:c.aDataSort?c.aDataSort:[d],mData:c.mData?c.mData:d,idx:d});a.aoColumns.push(c);c=a.aoPreSearchCols;c[d]=l.extend({},u.models.oSearch,c[d]);Ia(a,d,l(b).data())}function Ia(a,b,c){b=a.aoColumns[b];
var d=a.oClasses,e=l(b.nTh);if(!b.sWidthOrig){b.sWidthOrig=e.attr("width")||null;var h=(e.attr("style")||"").match(/width:\s*(\d+[pxem%]+)/);h&&(b.sWidthOrig=h[1])}c!==q&&null!==c&&(Eb(c),P(u.defaults.column,c,!0),c.mDataProp===q||c.mData||(c.mData=c.mDataProp),c.sType&&(b._sManualType=c.sType),c.className&&!c.sClass&&(c.sClass=c.className),c.sClass&&e.addClass(c.sClass),h=b.sClass,l.extend(b,c),Y(b,c,"sWidth","sWidthOrig"),h!==b.sClass&&(b.sClass=h+" "+b.sClass),c.iDataSort!==q&&(b.aDataSort=[c.iDataSort]),
Y(b,c,"aDataSort"));var f=b.mData,g=ma(f),k=b.mRender?ma(b.mRender):null;c=function(m){return"string"===typeof m&&-1!==m.indexOf("@")};b._bAttrSrc=l.isPlainObject(f)&&(c(f.sort)||c(f.type)||c(f.filter));b._setter=null;b.fnGetData=function(m,n,p){var t=g(m,n,q,p);return k&&n?k(t,n,m,p):t};b.fnSetData=function(m,n,p){return ha(f)(m,n,p)};"number"!==typeof f&&(a._rowReadObject=!0);a.oFeatures.bSort||(b.bSortable=!1,e.addClass(d.sSortableNone));a=-1!==l.inArray("asc",b.asSorting);c=-1!==l.inArray("desc",
b.asSorting);b.bSortable&&(a||c)?a&&!c?(b.sSortingClass=d.sSortableAsc,b.sSortingClassJUI=d.sSortJUIAscAllowed):!a&&c?(b.sSortingClass=d.sSortableDesc,b.sSortingClassJUI=d.sSortJUIDescAllowed):(b.sSortingClass=d.sSortable,b.sSortingClassJUI=d.sSortJUI):(b.sSortingClass=d.sSortableNone,b.sSortingClassJUI="")}function sa(a){if(!1!==a.oFeatures.bAutoWidth){var b=a.aoColumns;db(a);for(var c=0,d=b.length;c<d;c++)b[c].nTh.style.width=b[c].sWidth}b=a.oScroll;""===b.sY&&""===b.sX||Ja(a);F(a,null,"column-sizing",
[a])}function ta(a,b){a=Ka(a,"bVisible");return"number"===typeof a[b]?a[b]:null}function ua(a,b){a=Ka(a,"bVisible");b=l.inArray(b,a);return-1!==b?b:null}function na(a){var b=0;l.each(a.aoColumns,function(c,d){d.bVisible&&"none"!==l(d.nTh).css("display")&&b++});return b}function Ka(a,b){var c=[];l.map(a.aoColumns,function(d,e){d[b]&&c.push(e)});return c}function eb(a){var b=a.aoColumns,c=a.aoData,d=u.ext.type.detect,e,h,f;var g=0;for(e=b.length;g<e;g++){var k=b[g];var m=[];if(!k.sType&&k._sManualType)k.sType=
k._sManualType;else if(!k.sType){var n=0;for(h=d.length;n<h;n++){var p=0;for(f=c.length;p<f;p++){m[p]===q&&(m[p]=T(a,p,g,"type"));var t=d[n](m[p],a);if(!t&&n!==d.length-1)break;if("html"===t&&!aa(m[p]))break}if(t){k.sType=t;break}}k.sType||(k.sType="string")}}}function Hb(a,b,c,d){var e,h,f,g=a.aoColumns;if(b)for(e=b.length-1;0<=e;e--){var k=b[e];var m=k.target!==q?k.target:k.targets!==q?k.targets:k.aTargets;Array.isArray(m)||(m=[m]);var n=0;for(h=m.length;n<h;n++)if("number"===typeof m[n]&&0<=m[n]){for(;g.length<=
m[n];)cb(a);d(m[n],k)}else if("number"===typeof m[n]&&0>m[n])d(g.length+m[n],k);else if("string"===typeof m[n]){var p=0;for(f=g.length;p<f;p++)("_all"==m[n]||l(g[p].nTh).hasClass(m[n]))&&d(p,k)}}if(c)for(e=0,a=c.length;e<a;e++)d(e,c[e])}function ia(a,b,c,d){var e=a.aoData.length,h=l.extend(!0,{},u.models.oRow,{src:c?"dom":"data",idx:e});h._aData=b;a.aoData.push(h);for(var f=a.aoColumns,g=0,k=f.length;g<k;g++)f[g].sType=null;a.aiDisplayMaster.push(e);b=a.rowIdFn(b);b!==q&&(a.aIds[b]=h);!c&&a.oFeatures.bDeferRender||
fb(a,e,c,d);return e}function La(a,b){var c;b instanceof l||(b=l(b));return b.map(function(d,e){c=gb(a,e);return ia(a,c.data,e,c.cells)})}function T(a,b,c,d){"search"===d?d="filter":"order"===d&&(d="sort");var e=a.iDraw,h=a.aoColumns[c],f=a.aoData[b]._aData,g=h.sDefaultContent,k=h.fnGetData(f,d,{settings:a,row:b,col:c});if(k===q)return a.iDrawError!=e&&null===g&&(ea(a,0,"Requested unknown parameter "+("function"==typeof h.mData?"{function}":"'"+h.mData+"'")+" for row "+b+", column "+c,4),a.iDrawError=
e),g;if((k===f||null===k)&&null!==g&&d!==q)k=g;else if("function"===typeof k)return k.call(f);if(null===k&&"display"===d)return"";"filter"===d&&(a=u.ext.type.search,a[h.sType]&&(k=a[h.sType](k)));return k}function Ib(a,b,c,d){a.aoColumns[c].fnSetData(a.aoData[b]._aData,d,{settings:a,row:b,col:c})}function hb(a){return l.map(a.match(/(\\.|[^\.])+/g)||[""],function(b){return b.replace(/\\\./g,".")})}function ib(a){return U(a.aoData,"_aData")}function Ma(a){a.aoData.length=0;a.aiDisplayMaster.length=
0;a.aiDisplay.length=0;a.aIds={}}function Na(a,b,c){for(var d=-1,e=0,h=a.length;e<h;e++)a[e]==b?d=e:a[e]>b&&a[e]--; -1!=d&&c===q&&a.splice(d,1)}function va(a,b,c,d){var e=a.aoData[b],h,f=function(k,m){for(;k.childNodes.length;)k.removeChild(k.firstChild);k.innerHTML=T(a,b,m,"display")};if("dom"!==c&&(c&&"auto"!==c||"dom"!==e.src)){var g=e.anCells;if(g)if(d!==q)f(g[d],d);else for(c=0,h=g.length;c<h;c++)f(g[c],c)}else e._aData=gb(a,e,d,d===q?q:e._aData).data;e._aSortData=null;e._aFilterData=null;f=
a.aoColumns;if(d!==q)f[d].sType=null;else{c=0;for(h=f.length;c<h;c++)f[c].sType=null;jb(a,e)}}function gb(a,b,c,d){var e=[],h=b.firstChild,f,g=0,k,m=a.aoColumns,n=a._rowReadObject;d=d!==q?d:n?{}:[];var p=function(x,w){if("string"===typeof x){var r=x.indexOf("@");-1!==r&&(r=x.substring(r+1),ha(x)(d,w.getAttribute(r)))}},t=function(x){if(c===q||c===g)f=m[g],k=x.innerHTML.trim(),f&&f._bAttrSrc?(ha(f.mData._)(d,k),p(f.mData.sort,x),p(f.mData.type,x),p(f.mData.filter,x)):n?(f._setter||(f._setter=ha(f.mData)),
f._setter(d,k)):d[g]=k;g++};if(h)for(;h;){var v=h.nodeName.toUpperCase();if("TD"==v||"TH"==v)t(h),e.push(h);h=h.nextSibling}else for(e=b.anCells,h=0,v=e.length;h<v;h++)t(e[h]);(b=b.firstChild?b:b.nTr)&&(b=b.getAttribute("id"))&&ha(a.rowId)(d,b);return{data:d,cells:e}}function fb(a,b,c,d){var e=a.aoData[b],h=e._aData,f=[],g,k;if(null===e.nTr){var m=c||A.createElement("tr");e.nTr=m;e.anCells=f;m._DT_RowIndex=b;jb(a,e);var n=0;for(g=a.aoColumns.length;n<g;n++){var p=a.aoColumns[n];e=(k=c?!1:!0)?A.createElement(p.sCellType):
d[n];e._DT_CellIndex={row:b,column:n};f.push(e);if(k||!(!p.mRender&&p.mData===n||l.isPlainObject(p.mData)&&p.mData._===n+".display"))e.innerHTML=T(a,b,n,"display");p.sClass&&(e.className+=" "+p.sClass);p.bVisible&&!c?m.appendChild(e):!p.bVisible&&c&&e.parentNode.removeChild(e);p.fnCreatedCell&&p.fnCreatedCell.call(a.oInstance,e,T(a,b,n),h,b,n)}F(a,"aoRowCreatedCallback",null,[m,h,b,f])}}function jb(a,b){var c=b.nTr,d=b._aData;if(c){if(a=a.rowIdFn(d))c.id=a;d.DT_RowClass&&(a=d.DT_RowClass.split(" "),
b.__rowc=b.__rowc?Oa(b.__rowc.concat(a)):a,l(c).removeClass(b.__rowc.join(" ")).addClass(d.DT_RowClass));d.DT_RowAttr&&l(c).attr(d.DT_RowAttr);d.DT_RowData&&l(c).data(d.DT_RowData)}}function Jb(a){var b,c,d=a.nTHead,e=a.nTFoot,h=0===l("th, td",d).length,f=a.oClasses,g=a.aoColumns;h&&(c=l("<tr/>").appendTo(d));var k=0;for(b=g.length;k<b;k++){var m=g[k];var n=l(m.nTh).addClass(m.sClass);h&&n.appendTo(c);a.oFeatures.bSort&&(n.addClass(m.sSortingClass),!1!==m.bSortable&&(n.attr("tabindex",a.iTabIndex).attr("aria-controls",
a.sTableId),kb(a,m.nTh,k)));m.sTitle!=n[0].innerHTML&&n.html(m.sTitle);lb(a,"header")(a,n,m,f)}h&&wa(a.aoHeader,d);l(d).children("tr").children("th, td").addClass(f.sHeaderTH);l(e).children("tr").children("th, td").addClass(f.sFooterTH);if(null!==e)for(a=a.aoFooter[0],k=0,b=a.length;k<b;k++)m=g[k],m.nTf=a[k].cell,m.sClass&&l(m.nTf).addClass(m.sClass)}function xa(a,b,c){var d,e,h=[],f=[],g=a.aoColumns.length;if(b){c===q&&(c=!1);var k=0;for(d=b.length;k<d;k++){h[k]=b[k].slice();h[k].nTr=b[k].nTr;for(e=
g-1;0<=e;e--)a.aoColumns[e].bVisible||c||h[k].splice(e,1);f.push([])}k=0;for(d=h.length;k<d;k++){if(a=h[k].nTr)for(;e=a.firstChild;)a.removeChild(e);e=0;for(b=h[k].length;e<b;e++){var m=g=1;if(f[k][e]===q){a.appendChild(h[k][e].cell);for(f[k][e]=1;h[k+g]!==q&&h[k][e].cell==h[k+g][e].cell;)f[k+g][e]=1,g++;for(;h[k][e+m]!==q&&h[k][e].cell==h[k][e+m].cell;){for(c=0;c<g;c++)f[k+c][e+m]=1;m++}l(h[k][e].cell).attr("rowspan",g).attr("colspan",m)}}}}}function ja(a,b){var c="ssp"==Q(a),d=a.iInitDisplayStart;
d!==q&&-1!==d&&(a._iDisplayStart=c?d:d>=a.fnRecordsDisplay()?0:d,a.iInitDisplayStart=-1);c=F(a,"aoPreDrawCallback","preDraw",[a]);if(-1!==l.inArray(!1,c))V(a,!1);else{c=[];var e=0;d=a.asStripeClasses;var h=d.length,f=a.oLanguage,g="ssp"==Q(a),k=a.aiDisplay,m=a._iDisplayStart,n=a.fnDisplayEnd();a.bDrawing=!0;if(a.bDeferLoading)a.bDeferLoading=!1,a.iDraw++,V(a,!1);else if(!g)a.iDraw++;else if(!a.bDestroying&&!b){Kb(a);return}if(0!==k.length)for(b=g?a.aoData.length:n,f=g?0:m;f<b;f++){g=k[f];var p=a.aoData[g];
null===p.nTr&&fb(a,g);var t=p.nTr;if(0!==h){var v=d[e%h];p._sRowStripe!=v&&(l(t).removeClass(p._sRowStripe).addClass(v),p._sRowStripe=v)}F(a,"aoRowCallback",null,[t,p._aData,e,f,g]);c.push(t);e++}else e=f.sZeroRecords,1==a.iDraw&&"ajax"==Q(a)?e=f.sLoadingRecords:f.sEmptyTable&&0===a.fnRecordsTotal()&&(e=f.sEmptyTable),c[0]=l("<tr/>",{"class":h?d[0]:""}).append(l("<td />",{valign:"top",colSpan:na(a),"class":a.oClasses.sRowEmpty}).html(e))[0];F(a,"aoHeaderCallback","header",[l(a.nTHead).children("tr")[0],
ib(a),m,n,k]);F(a,"aoFooterCallback","footer",[l(a.nTFoot).children("tr")[0],ib(a),m,n,k]);d=l(a.nTBody);d.children().detach();d.append(l(c));F(a,"aoDrawCallback","draw",[a]);a.bSorted=!1;a.bFiltered=!1;a.bDrawing=!1}}function ka(a,b){var c=a.oFeatures,d=c.bFilter;c.bSort&&Lb(a);d?ya(a,a.oPreviousSearch):a.aiDisplay=a.aiDisplayMaster.slice();!0!==b&&(a._iDisplayStart=0);a._drawHold=b;ja(a);a._drawHold=!1}function Mb(a){var b=a.oClasses,c=l(a.nTable);c=l("<div/>").insertBefore(c);var d=a.oFeatures,
e=l("<div/>",{id:a.sTableId+"_wrapper","class":b.sWrapper+(a.nTFoot?"":" "+b.sNoFooter)});a.nHolding=c[0];a.nTableWrapper=e[0];a.nTableReinsertBefore=a.nTable.nextSibling;for(var h=a.sDom.split(""),f,g,k,m,n,p,t=0;t<h.length;t++){f=null;g=h[t];if("<"==g){k=l("<div/>")[0];m=h[t+1];if("'"==m||'"'==m){n="";for(p=2;h[t+p]!=m;)n+=h[t+p],p++;"H"==n?n=b.sJUIHeader:"F"==n&&(n=b.sJUIFooter);-1!=n.indexOf(".")?(m=n.split("."),k.id=m[0].substr(1,m[0].length-1),k.className=m[1]):"#"==n.charAt(0)?k.id=n.substr(1,
n.length-1):k.className=n;t+=p}e.append(k);e=l(k)}else if(">"==g)e=e.parent();else if("l"==g&&d.bPaginate&&d.bLengthChange)f=Nb(a);else if("f"==g&&d.bFilter)f=Ob(a);else if("r"==g&&d.bProcessing)f=Pb(a);else if("t"==g)f=Qb(a);else if("i"==g&&d.bInfo)f=Rb(a);else if("p"==g&&d.bPaginate)f=Sb(a);else if(0!==u.ext.feature.length)for(k=u.ext.feature,p=0,m=k.length;p<m;p++)if(g==k[p].cFeature){f=k[p].fnInit(a);break}f&&(k=a.aanFeatures,k[g]||(k[g]=[]),k[g].push(f),e.append(f))}c.replaceWith(e);a.nHolding=
null}function wa(a,b){b=l(b).children("tr");var c,d,e;a.splice(0,a.length);var h=0;for(e=b.length;h<e;h++)a.push([]);h=0;for(e=b.length;h<e;h++){var f=b[h];for(c=f.firstChild;c;){if("TD"==c.nodeName.toUpperCase()||"TH"==c.nodeName.toUpperCase()){var g=1*c.getAttribute("colspan");var k=1*c.getAttribute("rowspan");g=g&&0!==g&&1!==g?g:1;k=k&&0!==k&&1!==k?k:1;var m=0;for(d=a[h];d[m];)m++;var n=m;var p=1===g?!0:!1;for(d=0;d<g;d++)for(m=0;m<k;m++)a[h+m][n+d]={cell:c,unique:p},a[h+m].nTr=f}c=c.nextSibling}}}
function Pa(a,b,c){var d=[];c||(c=a.aoHeader,b&&(c=[],wa(c,b)));b=0;for(var e=c.length;b<e;b++)for(var h=0,f=c[b].length;h<f;h++)!c[b][h].unique||d[h]&&a.bSortCellsTop||(d[h]=c[b][h].cell);return d}function Qa(a,b,c){F(a,"aoServerParams","serverParams",[b]);if(b&&Array.isArray(b)){var d={},e=/(.*?)\[\]$/;l.each(b,function(n,p){(n=p.name.match(e))?(n=n[0],d[n]||(d[n]=[]),d[n].push(p.value)):d[p.name]=p.value});b=d}var h=a.ajax,f=a.oInstance,g=function(n){var p=a.jqXHR?a.jqXHR.status:null;if(null===
n||"number"===typeof p&&204==p)n={},za(a,n,[]);(p=n.error||n.sError)&&ea(a,0,p);a.json=n;F(a,null,"xhr",[a,n,a.jqXHR]);c(n)};if(l.isPlainObject(h)&&h.data){var k=h.data;var m="function"===typeof k?k(b,a):k;b="function"===typeof k&&m?m:l.extend(!0,b,m);delete h.data}m={data:b,success:g,dataType:"json",cache:!1,type:a.sServerMethod,error:function(n,p,t){t=F(a,null,"xhr",[a,null,a.jqXHR]);-1===l.inArray(!0,t)&&("parsererror"==p?ea(a,0,"Invalid JSON response",1):4===n.readyState&&ea(a,0,"Ajax error",
7));V(a,!1)}};a.oAjaxData=b;F(a,null,"preXhr",[a,b]);a.fnServerData?a.fnServerData.call(f,a.sAjaxSource,l.map(b,function(n,p){return{name:p,value:n}}),g,a):a.sAjaxSource||"string"===typeof h?a.jqXHR=l.ajax(l.extend(m,{url:h||a.sAjaxSource})):"function"===typeof h?a.jqXHR=h.call(f,b,g,a):(a.jqXHR=l.ajax(l.extend(m,h)),h.data=k)}function Kb(a){a.iDraw++;V(a,!0);Qa(a,Tb(a),function(b){Ub(a,b)})}function Tb(a){var b=a.aoColumns,c=b.length,d=a.oFeatures,e=a.oPreviousSearch,h=a.aoPreSearchCols,f=[],g=oa(a);
var k=a._iDisplayStart;var m=!1!==d.bPaginate?a._iDisplayLength:-1;var n=function(x,w){f.push({name:x,value:w})};n("sEcho",a.iDraw);n("iColumns",c);n("sColumns",U(b,"sName").join(","));n("iDisplayStart",k);n("iDisplayLength",m);var p={draw:a.iDraw,columns:[],order:[],start:k,length:m,search:{value:e.sSearch,regex:e.bRegex}};for(k=0;k<c;k++){var t=b[k];var v=h[k];m="function"==typeof t.mData?"function":t.mData;p.columns.push({data:m,name:t.sName,searchable:t.bSearchable,orderable:t.bSortable,search:{value:v.sSearch,
regex:v.bRegex}});n("mDataProp_"+k,m);d.bFilter&&(n("sSearch_"+k,v.sSearch),n("bRegex_"+k,v.bRegex),n("bSearchable_"+k,t.bSearchable));d.bSort&&n("bSortable_"+k,t.bSortable)}d.bFilter&&(n("sSearch",e.sSearch),n("bRegex",e.bRegex));d.bSort&&(l.each(g,function(x,w){p.order.push({column:w.col,dir:w.dir});n("iSortCol_"+x,w.col);n("sSortDir_"+x,w.dir)}),n("iSortingCols",g.length));b=u.ext.legacy.ajax;return null===b?a.sAjaxSource?f:p:b?f:p}function Ub(a,b){var c=function(f,g){return b[f]!==q?b[f]:b[g]},
d=za(a,b),e=c("sEcho","draw"),h=c("iTotalRecords","recordsTotal");c=c("iTotalDisplayRecords","recordsFiltered");if(e!==q){if(1*e<a.iDraw)return;a.iDraw=1*e}d||(d=[]);Ma(a);a._iRecordsTotal=parseInt(h,10);a._iRecordsDisplay=parseInt(c,10);e=0;for(h=d.length;e<h;e++)ia(a,d[e]);a.aiDisplay=a.aiDisplayMaster.slice();ja(a,!0);a._bInitComplete||Ra(a,b);V(a,!1)}function za(a,b,c){a=l.isPlainObject(a.ajax)&&a.ajax.dataSrc!==q?a.ajax.dataSrc:a.sAjaxDataProp;if(!c)return"data"===a?b.aaData||b[a]:""!==a?ma(a)(b):
b;ha(a)(b,c)}function Ob(a){var b=a.oClasses,c=a.sTableId,d=a.oLanguage,e=a.oPreviousSearch,h=a.aanFeatures,f='<input type="search" class="'+b.sFilterInput+'"/>',g=d.sSearch;g=g.match(/_INPUT_/)?g.replace("_INPUT_",f):g+f;b=l("<div/>",{id:h.f?null:c+"_filter","class":b.sFilter}).append(l("<label/>").append(g));var k=function(n){var p=this.value?this.value:"";e.return&&"Enter"!==n.key||p==e.sSearch||(ya(a,{sSearch:p,bRegex:e.bRegex,bSmart:e.bSmart,bCaseInsensitive:e.bCaseInsensitive,"return":e.return}),
a._iDisplayStart=0,ja(a))};h=null!==a.searchDelay?a.searchDelay:"ssp"===Q(a)?400:0;var m=l("input",b).val(e.sSearch).attr("placeholder",d.sSearchPlaceholder).on("keyup.DT search.DT input.DT paste.DT cut.DT",h?mb(k,h):k).on("mouseup",function(n){setTimeout(function(){k.call(m[0],n)},10)}).on("keypress.DT",function(n){if(13==n.keyCode)return!1}).attr("aria-controls",c);l(a.nTable).on("search.dt.DT",function(n,p){if(a===p)try{m[0]!==A.activeElement&&m.val(e.sSearch)}catch(t){}});return b[0]}function ya(a,
b,c){var d=a.oPreviousSearch,e=a.aoPreSearchCols,h=function(g){d.sSearch=g.sSearch;d.bRegex=g.bRegex;d.bSmart=g.bSmart;d.bCaseInsensitive=g.bCaseInsensitive;d.return=g.return},f=function(g){return g.bEscapeRegex!==q?!g.bEscapeRegex:g.bRegex};eb(a);if("ssp"!=Q(a)){Vb(a,b.sSearch,c,f(b),b.bSmart,b.bCaseInsensitive,b.return);h(b);for(b=0;b<e.length;b++)Wb(a,e[b].sSearch,b,f(e[b]),e[b].bSmart,e[b].bCaseInsensitive);Xb(a)}else h(b);a.bFiltered=!0;F(a,null,"search",[a])}function Xb(a){for(var b=u.ext.search,
c=a.aiDisplay,d,e,h=0,f=b.length;h<f;h++){for(var g=[],k=0,m=c.length;k<m;k++)e=c[k],d=a.aoData[e],b[h](a,d._aFilterData,e,d._aData,k)&&g.push(e);c.length=0;l.merge(c,g)}}function Wb(a,b,c,d,e,h){if(""!==b){var f=[],g=a.aiDisplay;d=nb(b,d,e,h);for(e=0;e<g.length;e++)b=a.aoData[g[e]]._aFilterData[c],d.test(b)&&f.push(g[e]);a.aiDisplay=f}}function Vb(a,b,c,d,e,h){e=nb(b,d,e,h);var f=a.oPreviousSearch.sSearch,g=a.aiDisplayMaster;h=[];0!==u.ext.search.length&&(c=!0);var k=Yb(a);if(0>=b.length)a.aiDisplay=
g.slice();else{if(k||c||d||f.length>b.length||0!==b.indexOf(f)||a.bSorted)a.aiDisplay=g.slice();b=a.aiDisplay;for(c=0;c<b.length;c++)e.test(a.aoData[b[c]]._sFilterRow)&&h.push(b[c]);a.aiDisplay=h}}function nb(a,b,c,d){a=b?a:ob(a);c&&(a="^(?=.*?"+l.map(a.match(/"[^"]+"|[^ ]+/g)||[""],function(e){if('"'===e.charAt(0)){var h=e.match(/^"(.*)"$/);e=h?h[1]:e}return e.replace('"',"")}).join(")(?=.*?")+").*$");return new RegExp(a,d?"i":"")}function Yb(a){var b=a.aoColumns,c,d;var e=!1;var h=0;for(c=a.aoData.length;h<
c;h++){var f=a.aoData[h];if(!f._aFilterData){var g=[];e=0;for(d=b.length;e<d;e++){var k=b[e];k.bSearchable?(k=T(a,h,e,"filter"),null===k&&(k=""),"string"!==typeof k&&k.toString&&(k=k.toString())):k="";k.indexOf&&-1!==k.indexOf("&")&&(Sa.innerHTML=k,k=Bc?Sa.textContent:Sa.innerText);k.replace&&(k=k.replace(/[\r\n\u2028]/g,""));g.push(k)}f._aFilterData=g;f._sFilterRow=g.join(" ");e=!0}}return e}function Zb(a){return{search:a.sSearch,smart:a.bSmart,regex:a.bRegex,caseInsensitive:a.bCaseInsensitive}}
function $b(a){return{sSearch:a.search,bSmart:a.smart,bRegex:a.regex,bCaseInsensitive:a.caseInsensitive}}function Rb(a){var b=a.sTableId,c=a.aanFeatures.i,d=l("<div/>",{"class":a.oClasses.sInfo,id:c?null:b+"_info"});c||(a.aoDrawCallback.push({fn:ac,sName:"information"}),d.attr("role","status").attr("aria-live","polite"),l(a.nTable).attr("aria-describedby",b+"_info"));return d[0]}function ac(a){var b=a.aanFeatures.i;if(0!==b.length){var c=a.oLanguage,d=a._iDisplayStart+1,e=a.fnDisplayEnd(),h=a.fnRecordsTotal(),
f=a.fnRecordsDisplay(),g=f?c.sInfo:c.sInfoEmpty;f!==h&&(g+=" "+c.sInfoFiltered);g+=c.sInfoPostFix;g=bc(a,g);c=c.fnInfoCallback;null!==c&&(g=c.call(a.oInstance,a,d,e,h,f,g));l(b).html(g)}}function bc(a,b){var c=a.fnFormatNumber,d=a._iDisplayStart+1,e=a._iDisplayLength,h=a.fnRecordsDisplay(),f=-1===e;return b.replace(/_START_/g,c.call(a,d)).replace(/_END_/g,c.call(a,a.fnDisplayEnd())).replace(/_MAX_/g,c.call(a,a.fnRecordsTotal())).replace(/_TOTAL_/g,c.call(a,h)).replace(/_PAGE_/g,c.call(a,f?1:Math.ceil(d/
e))).replace(/_PAGES_/g,c.call(a,f?1:Math.ceil(h/e)))}function Aa(a){var b=a.iInitDisplayStart,c=a.aoColumns;var d=a.oFeatures;var e=a.bDeferLoading;if(a.bInitialised){Mb(a);Jb(a);xa(a,a.aoHeader);xa(a,a.aoFooter);V(a,!0);d.bAutoWidth&&db(a);var h=0;for(d=c.length;h<d;h++){var f=c[h];f.sWidth&&(f.nTh.style.width=K(f.sWidth))}F(a,null,"preInit",[a]);ka(a);c=Q(a);if("ssp"!=c||e)"ajax"==c?Qa(a,[],function(g){var k=za(a,g);for(h=0;h<k.length;h++)ia(a,k[h]);a.iInitDisplayStart=b;ka(a);V(a,!1);Ra(a,g)},
a):(V(a,!1),Ra(a))}else setTimeout(function(){Aa(a)},200)}function Ra(a,b){a._bInitComplete=!0;(b||a.oInit.aaData)&&sa(a);F(a,null,"plugin-init",[a,b]);F(a,"aoInitComplete","init",[a,b])}function pb(a,b){b=parseInt(b,10);a._iDisplayLength=b;qb(a);F(a,null,"length",[a,b])}function Nb(a){var b=a.oClasses,c=a.sTableId,d=a.aLengthMenu,e=Array.isArray(d[0]),h=e?d[0]:d;d=e?d[1]:d;e=l("<select/>",{name:c+"_length","aria-controls":c,"class":b.sLengthSelect});for(var f=0,g=h.length;f<g;f++)e[0][f]=new Option("number"===
typeof d[f]?a.fnFormatNumber(d[f]):d[f],h[f]);var k=l("<div><label/></div>").addClass(b.sLength);a.aanFeatures.l||(k[0].id=c+"_length");k.children().append(a.oLanguage.sLengthMenu.replace("_MENU_",e[0].outerHTML));l("select",k).val(a._iDisplayLength).on("change.DT",function(m){pb(a,l(this).val());ja(a)});l(a.nTable).on("length.dt.DT",function(m,n,p){a===n&&l("select",k).val(p)});return k[0]}function Sb(a){var b=a.sPaginationType,c=u.ext.pager[b],d="function"===typeof c,e=function(f){ja(f)};b=l("<div/>").addClass(a.oClasses.sPaging+
b)[0];var h=a.aanFeatures;d||c.fnInit(a,b,e);h.p||(b.id=a.sTableId+"_paginate",a.aoDrawCallback.push({fn:function(f){if(d){var g=f._iDisplayStart,k=f._iDisplayLength,m=f.fnRecordsDisplay(),n=-1===k;g=n?0:Math.ceil(g/k);k=n?1:Math.ceil(m/k);m=c(g,k);var p;n=0;for(p=h.p.length;n<p;n++)lb(f,"pageButton")(f,h.p[n],n,m,g,k)}else c.fnUpdate(f,e)},sName:"pagination"}));return b}function Ta(a,b,c){var d=a._iDisplayStart,e=a._iDisplayLength,h=a.fnRecordsDisplay();0===h||-1===e?d=0:"number"===typeof b?(d=b*
e,d>h&&(d=0)):"first"==b?d=0:"previous"==b?(d=0<=e?d-e:0,0>d&&(d=0)):"next"==b?d+e<h&&(d+=e):"last"==b?d=Math.floor((h-1)/e)*e:ea(a,0,"Unknown paging action: "+b,5);b=a._iDisplayStart!==d;a._iDisplayStart=d;b&&(F(a,null,"page",[a]),c&&ja(a));return b}function Pb(a){return l("<div/>",{id:a.aanFeatures.r?null:a.sTableId+"_processing","class":a.oClasses.sProcessing}).html(a.oLanguage.sProcessing).append("<div><div></div><div></div><div></div><div></div></div>").insertBefore(a.nTable)[0]}function V(a,
b){a.oFeatures.bProcessing&&l(a.aanFeatures.r).css("display",b?"block":"none");F(a,null,"processing",[a,b])}function Qb(a){var b=l(a.nTable),c=a.oScroll;if(""===c.sX&&""===c.sY)return a.nTable;var d=c.sX,e=c.sY,h=a.oClasses,f=b.children("caption"),g=f.length?f[0]._captionSide:null,k=l(b[0].cloneNode(!1)),m=l(b[0].cloneNode(!1)),n=b.children("tfoot");n.length||(n=null);k=l("<div/>",{"class":h.sScrollWrapper}).append(l("<div/>",{"class":h.sScrollHead}).css({overflow:"hidden",position:"relative",border:0,
width:d?d?K(d):null:"100%"}).append(l("<div/>",{"class":h.sScrollHeadInner}).css({"box-sizing":"content-box",width:c.sXInner||"100%"}).append(k.removeAttr("id").css("margin-left",0).append("top"===g?f:null).append(b.children("thead"))))).append(l("<div/>",{"class":h.sScrollBody}).css({position:"relative",overflow:"auto",width:d?K(d):null}).append(b));n&&k.append(l("<div/>",{"class":h.sScrollFoot}).css({overflow:"hidden",border:0,width:d?d?K(d):null:"100%"}).append(l("<div/>",{"class":h.sScrollFootInner}).append(m.removeAttr("id").css("margin-left",
0).append("bottom"===g?f:null).append(b.children("tfoot")))));b=k.children();var p=b[0];h=b[1];var t=n?b[2]:null;if(d)l(h).on("scroll.DT",function(v){v=this.scrollLeft;p.scrollLeft=v;n&&(t.scrollLeft=v)});l(h).css("max-height",e);c.bCollapse||l(h).css("height",e);a.nScrollHead=p;a.nScrollBody=h;a.nScrollFoot=t;a.aoDrawCallback.push({fn:Ja,sName:"scrolling"});return k[0]}function Ja(a){var b=a.oScroll,c=b.sX,d=b.sXInner,e=b.sY;b=b.iBarWidth;var h=l(a.nScrollHead),f=h[0].style,g=h.children("div"),k=
g[0].style,m=g.children("table");g=a.nScrollBody;var n=l(g),p=g.style,t=l(a.nScrollFoot).children("div"),v=t.children("table"),x=l(a.nTHead),w=l(a.nTable),r=w[0],C=r.style,G=a.nTFoot?l(a.nTFoot):null,ba=a.oBrowser,L=ba.bScrollOversize;U(a.aoColumns,"nTh");var O=[],I=[],H=[],fa=[],Z,Ba=function(D){D=D.style;D.paddingTop="0";D.paddingBottom="0";D.borderTopWidth="0";D.borderBottomWidth="0";D.height=0};var X=g.scrollHeight>g.clientHeight;if(a.scrollBarVis!==X&&a.scrollBarVis!==q)a.scrollBarVis=X,sa(a);
else{a.scrollBarVis=X;w.children("thead, tfoot").remove();if(G){X=G.clone().prependTo(w);var ca=G.find("tr");var Ca=X.find("tr");X.find("[id]").removeAttr("id")}var Ua=x.clone().prependTo(w);x=x.find("tr");X=Ua.find("tr");Ua.find("th, td").removeAttr("tabindex");Ua.find("[id]").removeAttr("id");c||(p.width="100%",h[0].style.width="100%");l.each(Pa(a,Ua),function(D,W){Z=ta(a,D);W.style.width=a.aoColumns[Z].sWidth});G&&da(function(D){D.style.width=""},Ca);h=w.outerWidth();""===c?(C.width="100%",L&&
(w.find("tbody").height()>g.offsetHeight||"scroll"==n.css("overflow-y"))&&(C.width=K(w.outerWidth()-b)),h=w.outerWidth()):""!==d&&(C.width=K(d),h=w.outerWidth());da(Ba,X);da(function(D){var W=y.getComputedStyle?y.getComputedStyle(D).width:K(l(D).width());H.push(D.innerHTML);O.push(W)},X);da(function(D,W){D.style.width=O[W]},x);l(X).css("height",0);G&&(da(Ba,Ca),da(function(D){fa.push(D.innerHTML);I.push(K(l(D).css("width")))},Ca),da(function(D,W){D.style.width=I[W]},ca),l(Ca).height(0));da(function(D,
W){D.innerHTML='<div class="dataTables_sizing">'+H[W]+"</div>";D.childNodes[0].style.height="0";D.childNodes[0].style.overflow="hidden";D.style.width=O[W]},X);G&&da(function(D,W){D.innerHTML='<div class="dataTables_sizing">'+fa[W]+"</div>";D.childNodes[0].style.height="0";D.childNodes[0].style.overflow="hidden";D.style.width=I[W]},Ca);Math.round(w.outerWidth())<Math.round(h)?(ca=g.scrollHeight>g.offsetHeight||"scroll"==n.css("overflow-y")?h+b:h,L&&(g.scrollHeight>g.offsetHeight||"scroll"==n.css("overflow-y"))&&
(C.width=K(ca-b)),""!==c&&""===d||ea(a,1,"Possible column misalignment",6)):ca="100%";p.width=K(ca);f.width=K(ca);G&&(a.nScrollFoot.style.width=K(ca));!e&&L&&(p.height=K(r.offsetHeight+b));c=w.outerWidth();m[0].style.width=K(c);k.width=K(c);d=w.height()>g.clientHeight||"scroll"==n.css("overflow-y");e="padding"+(ba.bScrollbarLeft?"Left":"Right");k[e]=d?b+"px":"0px";G&&(v[0].style.width=K(c),t[0].style.width=K(c),t[0].style[e]=d?b+"px":"0px");w.children("colgroup").insertBefore(w.children("thead"));
n.trigger("scroll");!a.bSorted&&!a.bFiltered||a._drawHold||(g.scrollTop=0)}}function da(a,b,c){for(var d=0,e=0,h=b.length,f,g;e<h;){f=b[e].firstChild;for(g=c?c[e].firstChild:null;f;)1===f.nodeType&&(c?a(f,g,d):a(f,d),d++),f=f.nextSibling,g=c?g.nextSibling:null;e++}}function db(a){var b=a.nTable,c=a.aoColumns,d=a.oScroll,e=d.sY,h=d.sX,f=d.sXInner,g=c.length,k=Ka(a,"bVisible"),m=l("th",a.nTHead),n=b.getAttribute("width"),p=b.parentNode,t=!1,v,x=a.oBrowser;d=x.bScrollOversize;(v=b.style.width)&&-1!==
v.indexOf("%")&&(n=v);for(v=0;v<k.length;v++){var w=c[k[v]];null!==w.sWidth&&(w.sWidth=cc(w.sWidthOrig,p),t=!0)}if(d||!t&&!h&&!e&&g==na(a)&&g==m.length)for(v=0;v<g;v++)k=ta(a,v),null!==k&&(c[k].sWidth=K(m.eq(v).width()));else{g=l(b).clone().css("visibility","hidden").removeAttr("id");g.find("tbody tr").remove();var r=l("<tr/>").appendTo(g.find("tbody"));g.find("thead, tfoot").remove();g.append(l(a.nTHead).clone()).append(l(a.nTFoot).clone());g.find("tfoot th, tfoot td").css("width","");m=Pa(a,g.find("thead")[0]);
for(v=0;v<k.length;v++)w=c[k[v]],m[v].style.width=null!==w.sWidthOrig&&""!==w.sWidthOrig?K(w.sWidthOrig):"",w.sWidthOrig&&h&&l(m[v]).append(l("<div/>").css({width:w.sWidthOrig,margin:0,padding:0,border:0,height:1}));if(a.aoData.length)for(v=0;v<k.length;v++)t=k[v],w=c[t],l(dc(a,t)).clone(!1).append(w.sContentPadding).appendTo(r);l("[name]",g).removeAttr("name");w=l("<div/>").css(h||e?{position:"absolute",top:0,left:0,height:1,right:0,overflow:"hidden"}:{}).append(g).appendTo(p);h&&f?g.width(f):h?
(g.css("width","auto"),g.removeAttr("width"),g.width()<p.clientWidth&&n&&g.width(p.clientWidth)):e?g.width(p.clientWidth):n&&g.width(n);for(v=e=0;v<k.length;v++)p=l(m[v]),f=p.outerWidth()-p.width(),p=x.bBounding?Math.ceil(m[v].getBoundingClientRect().width):p.outerWidth(),e+=p,c[k[v]].sWidth=K(p-f);b.style.width=K(e);w.remove()}n&&(b.style.width=K(n));!n&&!h||a._reszEvt||(b=function(){l(y).on("resize.DT-"+a.sInstance,mb(function(){sa(a)}))},d?setTimeout(b,1E3):b(),a._reszEvt=!0)}function cc(a,b){if(!a)return 0;
a=l("<div/>").css("width",K(a)).appendTo(b||A.body);b=a[0].offsetWidth;a.remove();return b}function dc(a,b){var c=ec(a,b);if(0>c)return null;var d=a.aoData[c];return d.nTr?d.anCells[b]:l("<td/>").html(T(a,c,b,"display"))[0]}function ec(a,b){for(var c,d=-1,e=-1,h=0,f=a.aoData.length;h<f;h++)c=T(a,h,b,"display")+"",c=c.replace(Cc,""),c=c.replace(/ /g," "),c.length>d&&(d=c.length,e=h);return e}function K(a){return null===a?"0px":"number"==typeof a?0>a?"0px":a+"px":a.match(/\d$/)?a+"px":a}function oa(a){var b=
[],c=a.aoColumns;var d=a.aaSortingFixed;var e=l.isPlainObject(d);var h=[];var f=function(n){n.length&&!Array.isArray(n[0])?h.push(n):l.merge(h,n)};Array.isArray(d)&&f(d);e&&d.pre&&f(d.pre);f(a.aaSorting);e&&d.post&&f(d.post);for(a=0;a<h.length;a++){var g=h[a][0];f=c[g].aDataSort;d=0;for(e=f.length;d<e;d++){var k=f[d];var m=c[k].sType||"string";h[a]._idx===q&&(h[a]._idx=l.inArray(h[a][1],c[k].asSorting));b.push({src:g,col:k,dir:h[a][1],index:h[a]._idx,type:m,formatter:u.ext.type.order[m+"-pre"]})}}return b}
function Lb(a){var b,c=[],d=u.ext.type.order,e=a.aoData,h=0,f=a.aiDisplayMaster;eb(a);var g=oa(a);var k=0;for(b=g.length;k<b;k++){var m=g[k];m.formatter&&h++;fc(a,m.col)}if("ssp"!=Q(a)&&0!==g.length){k=0;for(b=f.length;k<b;k++)c[f[k]]=k;h===g.length?f.sort(function(n,p){var t,v=g.length,x=e[n]._aSortData,w=e[p]._aSortData;for(t=0;t<v;t++){var r=g[t];var C=x[r.col];var G=w[r.col];C=C<G?-1:C>G?1:0;if(0!==C)return"asc"===r.dir?C:-C}C=c[n];G=c[p];return C<G?-1:C>G?1:0}):f.sort(function(n,p){var t,v=g.length,
x=e[n]._aSortData,w=e[p]._aSortData;for(t=0;t<v;t++){var r=g[t];var C=x[r.col];var G=w[r.col];r=d[r.type+"-"+r.dir]||d["string-"+r.dir];C=r(C,G);if(0!==C)return C}C=c[n];G=c[p];return C<G?-1:C>G?1:0})}a.bSorted=!0}function gc(a){var b=a.aoColumns,c=oa(a);a=a.oLanguage.oAria;for(var d=0,e=b.length;d<e;d++){var h=b[d];var f=h.asSorting;var g=h.ariaTitle||h.sTitle.replace(/<.*?>/g,"");var k=h.nTh;k.removeAttribute("aria-sort");h.bSortable&&(0<c.length&&c[0].col==d?(k.setAttribute("aria-sort","asc"==
c[0].dir?"ascending":"descending"),h=f[c[0].index+1]||f[0]):h=f[0],g+="asc"===h?a.sSortAscending:a.sSortDescending);k.setAttribute("aria-label",g)}}function rb(a,b,c,d){var e=a.aaSorting,h=a.aoColumns[b].asSorting,f=function(g,k){var m=g._idx;m===q&&(m=l.inArray(g[1],h));return m+1<h.length?m+1:k?null:0};"number"===typeof e[0]&&(e=a.aaSorting=[e]);c&&a.oFeatures.bSortMulti?(c=l.inArray(b,U(e,"0")),-1!==c?(b=f(e[c],!0),null===b&&1===e.length&&(b=0),null===b?e.splice(c,1):(e[c][1]=h[b],e[c]._idx=b)):
(e.push([b,h[0],0]),e[e.length-1]._idx=0)):e.length&&e[0][0]==b?(b=f(e[0]),e.length=1,e[0][1]=h[b],e[0]._idx=b):(e.length=0,e.push([b,h[0]]),e[0]._idx=0);ka(a);"function"==typeof d&&d(a)}function kb(a,b,c,d){var e=a.aoColumns[c];sb(b,{},function(h){!1!==e.bSortable&&(a.oFeatures.bProcessing?(V(a,!0),setTimeout(function(){rb(a,c,h.shiftKey,d);"ssp"!==Q(a)&&V(a,!1)},0)):rb(a,c,h.shiftKey,d))})}function Va(a){var b=a.aLastSort,c=a.oClasses.sSortColumn,d=oa(a),e=a.oFeatures,h;if(e.bSort&&e.bSortClasses){e=
0;for(h=b.length;e<h;e++){var f=b[e].src;l(U(a.aoData,"anCells",f)).removeClass(c+(2>e?e+1:3))}e=0;for(h=d.length;e<h;e++)f=d[e].src,l(U(a.aoData,"anCells",f)).addClass(c+(2>e?e+1:3))}a.aLastSort=d}function fc(a,b){var c=a.aoColumns[b],d=u.ext.order[c.sSortDataType],e;d&&(e=d.call(a.oInstance,a,b,ua(a,b)));for(var h,f=u.ext.type.order[c.sType+"-pre"],g=0,k=a.aoData.length;g<k;g++)if(c=a.aoData[g],c._aSortData||(c._aSortData=[]),!c._aSortData[b]||d)h=d?e[g]:T(a,g,b,"sort"),c._aSortData[b]=f?f(h):h}
function Da(a){if(!a._bLoadingState){var b={time:+new Date,start:a._iDisplayStart,length:a._iDisplayLength,order:l.extend(!0,[],a.aaSorting),search:Zb(a.oPreviousSearch),columns:l.map(a.aoColumns,function(c,d){return{visible:c.bVisible,search:Zb(a.aoPreSearchCols[d])}})};a.oSavedState=b;F(a,"aoStateSaveParams","stateSaveParams",[a,b]);a.oFeatures.bStateSave&&!a.bDestroying&&a.fnStateSaveCallback.call(a.oInstance,a,b)}}function hc(a,b,c){if(a.oFeatures.bStateSave)return b=a.fnStateLoadCallback.call(a.oInstance,
a,function(d){tb(a,d,c)}),b!==q&&tb(a,b,c),!0;c()}function tb(a,b,c){var d,e=a.aoColumns;a._bLoadingState=!0;var h=a._bInitComplete?new u.Api(a):null;if(b&&b.time){var f=F(a,"aoStateLoadParams","stateLoadParams",[a,b]);if(-1!==l.inArray(!1,f))a._bLoadingState=!1;else if(f=a.iStateDuration,0<f&&b.time<+new Date-1E3*f)a._bLoadingState=!1;else if(b.columns&&e.length!==b.columns.length)a._bLoadingState=!1;else{a.oLoadedState=l.extend(!0,{},b);b.length!==q&&(h?h.page.len(b.length):a._iDisplayLength=b.length);
b.start!==q&&(null===h?(a._iDisplayStart=b.start,a.iInitDisplayStart=b.start):Ta(a,b.start/a._iDisplayLength));b.order!==q&&(a.aaSorting=[],l.each(b.order,function(k,m){a.aaSorting.push(m[0]>=e.length?[0,m[1]]:m)}));b.search!==q&&l.extend(a.oPreviousSearch,$b(b.search));if(b.columns){f=0;for(d=b.columns.length;f<d;f++){var g=b.columns[f];g.visible!==q&&(h?h.column(f).visible(g.visible,!1):e[f].bVisible=g.visible);g.search!==q&&l.extend(a.aoPreSearchCols[f],$b(g.search))}h&&h.columns.adjust()}a._bLoadingState=
!1;F(a,"aoStateLoaded","stateLoaded",[a,b])}}else a._bLoadingState=!1;c()}function Wa(a){var b=u.settings;a=l.inArray(a,U(b,"nTable"));return-1!==a?b[a]:null}function ea(a,b,c,d){c="DataTables warning: "+(a?"table id="+a.sTableId+" - ":"")+c;d&&(c+=". For more information about this error, please see http://datatables.net/tn/"+d);if(b)y.console&&console.log&&console.log(c);else if(b=u.ext,b=b.sErrMode||b.errMode,a&&F(a,null,"error",[a,d,c]),"alert"==b)alert(c);else{if("throw"==b)throw Error(c);"function"==
typeof b&&b(a,d,c)}}function Y(a,b,c,d){Array.isArray(c)?l.each(c,function(e,h){Array.isArray(h)?Y(a,b,h[0],h[1]):Y(a,b,h)}):(d===q&&(d=c),b[c]!==q&&(a[d]=b[c]))}function ub(a,b,c){var d;for(d in b)if(b.hasOwnProperty(d)){var e=b[d];l.isPlainObject(e)?(l.isPlainObject(a[d])||(a[d]={}),l.extend(!0,a[d],e)):c&&"data"!==d&&"aaData"!==d&&Array.isArray(e)?a[d]=e.slice():a[d]=e}return a}function sb(a,b,c){l(a).on("click.DT",b,function(d){l(a).trigger("blur");c(d)}).on("keypress.DT",b,function(d){13===d.which&&
(d.preventDefault(),c(d))}).on("selectstart.DT",function(){return!1})}function R(a,b,c,d){c&&a[b].push({fn:c,sName:d})}function F(a,b,c,d){var e=[];b&&(e=l.map(a[b].slice().reverse(),function(h,f){return h.fn.apply(a.oInstance,d)}));null!==c&&(b=l.Event(c+".dt"),l(a.nTable).trigger(b,d),e.push(b.result));return e}function qb(a){var b=a._iDisplayStart,c=a.fnDisplayEnd(),d=a._iDisplayLength;b>=c&&(b=c-d);b-=b%d;if(-1===d||0>b)b=0;a._iDisplayStart=b}function lb(a,b){a=a.renderer;var c=u.ext.renderer[b];
return l.isPlainObject(a)&&a[b]?c[a[b]]||c._:"string"===typeof a?c[a]||c._:c._}function Q(a){return a.oFeatures.bServerSide?"ssp":a.ajax||a.sAjaxSource?"ajax":"dom"}function Ea(a,b){var c=ic.numbers_length,d=Math.floor(c/2);b<=c?a=pa(0,b):a<=d?(a=pa(0,c-2),a.push("ellipsis"),a.push(b-1)):(a>=b-1-d?a=pa(b-(c-2),b):(a=pa(a-d+2,a+d-1),a.push("ellipsis"),a.push(b-1)),a.splice(0,0,"ellipsis"),a.splice(0,0,0));a.DT_el="span";return a}function bb(a){l.each({num:function(b){return Xa(b,a)},"num-fmt":function(b){return Xa(b,
a,vb)},"html-num":function(b){return Xa(b,a,Ya)},"html-num-fmt":function(b){return Xa(b,a,Ya,vb)}},function(b,c){M.type.order[b+a+"-pre"]=c;b.match(/^html\-/)&&(M.type.search[b+a]=M.type.search.html)})}function jc(a,b,c,d,e){return y.moment?a[b](e):y.luxon?a[c](e):d?a[d](e):a}function Za(a,b,c){if(y.moment){var d=y.moment.utc(a,b,c,!0);if(!d.isValid())return null}else if(y.luxon){d=b?y.luxon.DateTime.fromFormat(a,b):y.luxon.DateTime.fromISO(a);if(!d.isValid)return null;d.setLocale(c)}else b?(kc||
alert("DataTables warning: Formatted date without Moment.js or Luxon - https://datatables.net/tn/17"),kc=!0):d=new Date(a);return d}function wb(a){return function(b,c,d,e){0===arguments.length?(d="en",b=c=null):1===arguments.length?(d="en",c=b,b=null):2===arguments.length&&(d=c,c=b,b=null);var h="datetime-"+c;u.ext.type.order[h]||(u.ext.type.detect.unshift(function(f){return f===h?h:!1}),u.ext.type.order[h+"-asc"]=function(f,g){f=f.valueOf();g=g.valueOf();return f===g?0:f<g?-1:1},u.ext.type.order[h+
"-desc"]=function(f,g){f=f.valueOf();g=g.valueOf();return f===g?0:f>g?-1:1});return function(f,g){if(null===f||f===q)"--now"===e?(f=new Date,f=new Date(Date.UTC(f.getFullYear(),f.getMonth(),f.getDate(),f.getHours(),f.getMinutes(),f.getSeconds()))):f="";if("type"===g)return h;if(""===f)return"sort"!==g?"":Za("0000-01-01 00:00:00",null,d);if(null!==c&&b===c&&"sort"!==g&&"type"!==g&&!(f instanceof Date))return f;var k=Za(f,b,d);if(null===k)return f;if("sort"===g)return k;f=null===c?jc(k,"toDate","toJSDate",
"")[a]():jc(k,"format","toFormat","toISOString",c);return"display"===g?$a(f):f}}}function lc(a){return function(){var b=[Wa(this[u.ext.iApiIndex])].concat(Array.prototype.slice.call(arguments));return u.ext.internal[a].apply(this,b)}}var u=function(a,b){if(this instanceof u)return l(a).DataTable(b);b=a;this.$=function(f,g){return this.api(!0).$(f,g)};this._=function(f,g){return this.api(!0).rows(f,g).data()};this.api=function(f){return f?new B(Wa(this[M.iApiIndex])):new B(this)};this.fnAddData=function(f,
g){var k=this.api(!0);f=Array.isArray(f)&&(Array.isArray(f[0])||l.isPlainObject(f[0]))?k.rows.add(f):k.row.add(f);(g===q||g)&&k.draw();return f.flatten().toArray()};this.fnAdjustColumnSizing=function(f){var g=this.api(!0).columns.adjust(),k=g.settings()[0],m=k.oScroll;f===q||f?g.draw(!1):(""!==m.sX||""!==m.sY)&&Ja(k)};this.fnClearTable=function(f){var g=this.api(!0).clear();(f===q||f)&&g.draw()};this.fnClose=function(f){this.api(!0).row(f).child.hide()};this.fnDeleteRow=function(f,g,k){var m=this.api(!0);
f=m.rows(f);var n=f.settings()[0],p=n.aoData[f[0][0]];f.remove();g&&g.call(this,n,p);(k===q||k)&&m.draw();return p};this.fnDestroy=function(f){this.api(!0).destroy(f)};this.fnDraw=function(f){this.api(!0).draw(f)};this.fnFilter=function(f,g,k,m,n,p){n=this.api(!0);null===g||g===q?n.search(f,k,m,p):n.column(g).search(f,k,m,p);n.draw()};this.fnGetData=function(f,g){var k=this.api(!0);if(f!==q){var m=f.nodeName?f.nodeName.toLowerCase():"";return g!==q||"td"==m||"th"==m?k.cell(f,g).data():k.row(f).data()||
null}return k.data().toArray()};this.fnGetNodes=function(f){var g=this.api(!0);return f!==q?g.row(f).node():g.rows().nodes().flatten().toArray()};this.fnGetPosition=function(f){var g=this.api(!0),k=f.nodeName.toUpperCase();return"TR"==k?g.row(f).index():"TD"==k||"TH"==k?(f=g.cell(f).index(),[f.row,f.columnVisible,f.column]):null};this.fnIsOpen=function(f){return this.api(!0).row(f).child.isShown()};this.fnOpen=function(f,g,k){return this.api(!0).row(f).child(g,k).show().child()[0]};this.fnPageChange=
function(f,g){f=this.api(!0).page(f);(g===q||g)&&f.draw(!1)};this.fnSetColumnVis=function(f,g,k){f=this.api(!0).column(f).visible(g);(k===q||k)&&f.columns.adjust().draw()};this.fnSettings=function(){return Wa(this[M.iApiIndex])};this.fnSort=function(f){this.api(!0).order(f).draw()};this.fnSortListener=function(f,g,k){this.api(!0).order.listener(f,g,k)};this.fnUpdate=function(f,g,k,m,n){var p=this.api(!0);k===q||null===k?p.row(g).data(f):p.cell(g,k).data(f);(n===q||n)&&p.columns.adjust();(m===q||m)&&
p.draw();return 0};this.fnVersionCheck=M.fnVersionCheck;var c=this,d=b===q,e=this.length;d&&(b={});this.oApi=this.internal=M.internal;for(var h in u.ext.internal)h&&(this[h]=lc(h));this.each(function(){var f={},g=1<e?ub(f,b,!0):b,k=0,m;f=this.getAttribute("id");var n=!1,p=u.defaults,t=l(this);if("table"!=this.nodeName.toLowerCase())ea(null,0,"Non-table node initialisation ("+this.nodeName+")",2);else{Db(p);Eb(p.column);P(p,p,!0);P(p.column,p.column,!0);P(p,l.extend(g,t.data()),!0);var v=u.settings;
k=0;for(m=v.length;k<m;k++){var x=v[k];if(x.nTable==this||x.nTHead&&x.nTHead.parentNode==this||x.nTFoot&&x.nTFoot.parentNode==this){var w=g.bRetrieve!==q?g.bRetrieve:p.bRetrieve;if(d||w)return x.oInstance;if(g.bDestroy!==q?g.bDestroy:p.bDestroy){x.oInstance.fnDestroy();break}else{ea(x,0,"Cannot reinitialise DataTable",3);return}}if(x.sTableId==this.id){v.splice(k,1);break}}if(null===f||""===f)this.id=f="DataTables_Table_"+u.ext._unique++;var r=l.extend(!0,{},u.models.oSettings,{sDestroyWidth:t[0].style.width,
sInstance:f,sTableId:f});r.nTable=this;r.oApi=c.internal;r.oInit=g;v.push(r);r.oInstance=1===c.length?c:t.dataTable();Db(g);la(g.oLanguage);g.aLengthMenu&&!g.iDisplayLength&&(g.iDisplayLength=Array.isArray(g.aLengthMenu[0])?g.aLengthMenu[0][0]:g.aLengthMenu[0]);g=ub(l.extend(!0,{},p),g);Y(r.oFeatures,g,"bPaginate bLengthChange bFilter bSort bSortMulti bInfo bProcessing bAutoWidth bSortClasses bServerSide bDeferRender".split(" "));Y(r,g,["asStripeClasses","ajax","fnServerData","fnFormatNumber","sServerMethod",
"aaSorting","aaSortingFixed","aLengthMenu","sPaginationType","sAjaxSource","sAjaxDataProp","iStateDuration","sDom","bSortCellsTop","iTabIndex","fnStateLoadCallback","fnStateSaveCallback","renderer","searchDelay","rowId",["iCookieDuration","iStateDuration"],["oSearch","oPreviousSearch"],["aoSearchCols","aoPreSearchCols"],["iDisplayLength","_iDisplayLength"]]);Y(r.oScroll,g,[["sScrollX","sX"],["sScrollXInner","sXInner"],["sScrollY","sY"],["bScrollCollapse","bCollapse"]]);Y(r.oLanguage,g,"fnInfoCallback");
R(r,"aoDrawCallback",g.fnDrawCallback,"user");R(r,"aoServerParams",g.fnServerParams,"user");R(r,"aoStateSaveParams",g.fnStateSaveParams,"user");R(r,"aoStateLoadParams",g.fnStateLoadParams,"user");R(r,"aoStateLoaded",g.fnStateLoaded,"user");R(r,"aoRowCallback",g.fnRowCallback,"user");R(r,"aoRowCreatedCallback",g.fnCreatedRow,"user");R(r,"aoHeaderCallback",g.fnHeaderCallback,"user");R(r,"aoFooterCallback",g.fnFooterCallback,"user");R(r,"aoInitComplete",g.fnInitComplete,"user");R(r,"aoPreDrawCallback",
g.fnPreDrawCallback,"user");r.rowIdFn=ma(g.rowId);Fb(r);var C=r.oClasses;l.extend(C,u.ext.classes,g.oClasses);t.addClass(C.sTable);r.iInitDisplayStart===q&&(r.iInitDisplayStart=g.iDisplayStart,r._iDisplayStart=g.iDisplayStart);null!==g.iDeferLoading&&(r.bDeferLoading=!0,f=Array.isArray(g.iDeferLoading),r._iRecordsDisplay=f?g.iDeferLoading[0]:g.iDeferLoading,r._iRecordsTotal=f?g.iDeferLoading[1]:g.iDeferLoading);var G=r.oLanguage;l.extend(!0,G,g.oLanguage);G.sUrl?(l.ajax({dataType:"json",url:G.sUrl,
success:function(I){P(p.oLanguage,I);la(I);l.extend(!0,G,I,r.oInit.oLanguage);F(r,null,"i18n",[r]);Aa(r)},error:function(){Aa(r)}}),n=!0):F(r,null,"i18n",[r]);null===g.asStripeClasses&&(r.asStripeClasses=[C.sStripeOdd,C.sStripeEven]);f=r.asStripeClasses;var ba=t.children("tbody").find("tr").eq(0);-1!==l.inArray(!0,l.map(f,function(I,H){return ba.hasClass(I)}))&&(l("tbody tr",this).removeClass(f.join(" ")),r.asDestroyStripes=f.slice());f=[];v=this.getElementsByTagName("thead");0!==v.length&&(wa(r.aoHeader,
v[0]),f=Pa(r));if(null===g.aoColumns)for(v=[],k=0,m=f.length;k<m;k++)v.push(null);else v=g.aoColumns;k=0;for(m=v.length;k<m;k++)cb(r,f?f[k]:null);Hb(r,g.aoColumnDefs,v,function(I,H){Ia(r,I,H)});if(ba.length){var L=function(I,H){return null!==I.getAttribute("data-"+H)?H:null};l(ba[0]).children("th, td").each(function(I,H){var fa=r.aoColumns[I];if(fa.mData===I){var Z=L(H,"sort")||L(H,"order");H=L(H,"filter")||L(H,"search");if(null!==Z||null!==H)fa.mData={_:I+".display",sort:null!==Z?I+".@data-"+Z:q,
type:null!==Z?I+".@data-"+Z:q,filter:null!==H?I+".@data-"+H:q},Ia(r,I)}})}var O=r.oFeatures;f=function(){if(g.aaSorting===q){var I=r.aaSorting;k=0;for(m=I.length;k<m;k++)I[k][1]=r.aoColumns[k].asSorting[0]}Va(r);O.bSort&&R(r,"aoDrawCallback",function(){if(r.bSorted){var Z=oa(r),Ba={};l.each(Z,function(X,ca){Ba[ca.src]=ca.dir});F(r,null,"order",[r,Z,Ba]);gc(r)}});R(r,"aoDrawCallback",function(){(r.bSorted||"ssp"===Q(r)||O.bDeferRender)&&Va(r)},"sc");I=t.children("caption").each(function(){this._captionSide=
l(this).css("caption-side")});var H=t.children("thead");0===H.length&&(H=l("<thead/>").appendTo(t));r.nTHead=H[0];var fa=t.children("tbody");0===fa.length&&(fa=l("<tbody/>").insertAfter(H));r.nTBody=fa[0];H=t.children("tfoot");0===H.length&&0<I.length&&(""!==r.oScroll.sX||""!==r.oScroll.sY)&&(H=l("<tfoot/>").appendTo(t));0===H.length||0===H.children().length?t.addClass(C.sNoFooter):0<H.length&&(r.nTFoot=H[0],wa(r.aoFooter,r.nTFoot));if(g.aaData)for(k=0;k<g.aaData.length;k++)ia(r,g.aaData[k]);else(r.bDeferLoading||
"dom"==Q(r))&&La(r,l(r.nTBody).children("tr"));r.aiDisplay=r.aiDisplayMaster.slice();r.bInitialised=!0;!1===n&&Aa(r)};R(r,"aoDrawCallback",Da,"state_save");g.bStateSave?(O.bStateSave=!0,hc(r,g,f)):f()}});c=null;return this},M,z,J,xb={},mc=/[\r\n\u2028]/g,Ya=/<.*?>/g,Dc=/^\d{2,4}[\.\/\-]\d{1,2}[\.\/\-]\d{1,2}([T ]{1}\d{1,2}[:\.]\d{2}([\.:]\d{2})?)?$/,Ec=/(\/|\.|\*|\+|\?|\||\(|\)|\[|\]|\{|\}|\\|\$|\^|\-)/g,vb=/['\u00A0,$£€¥%\u2009\u202F\u20BD\u20a9\u20BArfkɃΞ]/gi,aa=function(a){return a&&!0!==a&&"-"!==
a?!1:!0},nc=function(a){var b=parseInt(a,10);return!isNaN(b)&&isFinite(a)?b:null},oc=function(a,b){xb[b]||(xb[b]=new RegExp(ob(b),"g"));return"string"===typeof a&&"."!==b?a.replace(/\./g,"").replace(xb[b],"."):a},yb=function(a,b,c){var d="string"===typeof a;if(aa(a))return!0;b&&d&&(a=oc(a,b));c&&d&&(a=a.replace(vb,""));return!isNaN(parseFloat(a))&&isFinite(a)},pc=function(a,b,c){return aa(a)?!0:aa(a)||"string"===typeof a?yb(a.replace(Ya,""),b,c)?!0:null:null},U=function(a,b,c){var d=[],e=0,h=a.length;
if(c!==q)for(;e<h;e++)a[e]&&a[e][b]&&d.push(a[e][b][c]);else for(;e<h;e++)a[e]&&d.push(a[e][b]);return d},Fa=function(a,b,c,d){var e=[],h=0,f=b.length;if(d!==q)for(;h<f;h++)a[b[h]][c]&&e.push(a[b[h]][c][d]);else for(;h<f;h++)e.push(a[b[h]][c]);return e},pa=function(a,b){var c=[];if(b===q){b=0;var d=a}else d=b,b=a;for(a=b;a<d;a++)c.push(a);return c},qc=function(a){for(var b=[],c=0,d=a.length;c<d;c++)a[c]&&b.push(a[c]);return b},Oa=function(a){a:{if(!(2>a.length)){var b=a.slice().sort();for(var c=b[0],
d=1,e=b.length;d<e;d++){if(b[d]===c){b=!1;break a}c=b[d]}}b=!0}if(b)return a.slice();b=[];e=a.length;var h,f=0;d=0;a:for(;d<e;d++){c=a[d];for(h=0;h<f;h++)if(b[h]===c)continue a;b.push(c);f++}return b},rc=function(a,b){if(Array.isArray(b))for(var c=0;c<b.length;c++)rc(a,b[c]);else a.push(b);return a},sc=function(a,b){b===q&&(b=0);return-1!==this.indexOf(a,b)};Array.isArray||(Array.isArray=function(a){return"[object Array]"===Object.prototype.toString.call(a)});Array.prototype.includes||(Array.prototype.includes=
sc);String.prototype.trim||(String.prototype.trim=function(){return this.replace(/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,"")});String.prototype.includes||(String.prototype.includes=sc);u.util={throttle:function(a,b){var c=b!==q?b:200,d,e;return function(){var h=this,f=+new Date,g=arguments;d&&f<d+c?(clearTimeout(e),e=setTimeout(function(){d=q;a.apply(h,g)},c)):(d=f,a.apply(h,g))}},escapeRegex:function(a){return a.replace(Ec,"\\$1")},set:function(a){if(l.isPlainObject(a))return u.util.set(a._);if(null===
a)return function(){};if("function"===typeof a)return function(c,d,e){a(c,"set",d,e)};if("string"!==typeof a||-1===a.indexOf(".")&&-1===a.indexOf("[")&&-1===a.indexOf("("))return function(c,d){c[a]=d};var b=function(c,d,e){e=hb(e);var h=e[e.length-1];for(var f,g,k=0,m=e.length-1;k<m;k++){if("__proto__"===e[k]||"constructor"===e[k])throw Error("Cannot set prototype values");f=e[k].match(Ga);g=e[k].match(qa);if(f){e[k]=e[k].replace(Ga,"");c[e[k]]=[];h=e.slice();h.splice(0,k+1);f=h.join(".");if(Array.isArray(d))for(g=
0,m=d.length;g<m;g++)h={},b(h,d[g],f),c[e[k]].push(h);else c[e[k]]=d;return}g&&(e[k]=e[k].replace(qa,""),c=c[e[k]](d));if(null===c[e[k]]||c[e[k]]===q)c[e[k]]={};c=c[e[k]]}if(h.match(qa))c[h.replace(qa,"")](d);else c[h.replace(Ga,"")]=d};return function(c,d){return b(c,d,a)}},get:function(a){if(l.isPlainObject(a)){var b={};l.each(a,function(d,e){e&&(b[d]=u.util.get(e))});return function(d,e,h,f){var g=b[e]||b._;return g!==q?g(d,e,h,f):d}}if(null===a)return function(d){return d};if("function"===typeof a)return function(d,
e,h,f){return a(d,e,h,f)};if("string"!==typeof a||-1===a.indexOf(".")&&-1===a.indexOf("[")&&-1===a.indexOf("("))return function(d,e){return d[a]};var c=function(d,e,h){if(""!==h){var f=hb(h);for(var g=0,k=f.length;g<k;g++){h=f[g].match(Ga);var m=f[g].match(qa);if(h){f[g]=f[g].replace(Ga,"");""!==f[g]&&(d=d[f[g]]);m=[];f.splice(0,g+1);f=f.join(".");if(Array.isArray(d))for(g=0,k=d.length;g<k;g++)m.push(c(d[g],e,f));d=h[0].substring(1,h[0].length-1);d=""===d?m:m.join(d);break}else if(m){f[g]=f[g].replace(qa,
"");d=d[f[g]]();continue}if(null===d||d[f[g]]===q)return q;d=d[f[g]]}}return d};return function(d,e){return c(d,e,a)}}};var S=function(a,b,c){a[b]!==q&&(a[c]=a[b])},Ga=/\[.*?\]$/,qa=/\(\)$/,ma=u.util.get,ha=u.util.set,ob=u.util.escapeRegex,Sa=l("<div>")[0],Bc=Sa.textContent!==q,Cc=/<.*?>/g,mb=u.util.throttle,tc=[],N=Array.prototype,Fc=function(a){var b,c=u.settings,d=l.map(c,function(h,f){return h.nTable});if(a){if(a.nTable&&a.oApi)return[a];if(a.nodeName&&"table"===a.nodeName.toLowerCase()){var e=
l.inArray(a,d);return-1!==e?[c[e]]:null}if(a&&"function"===typeof a.settings)return a.settings().toArray();"string"===typeof a?b=l(a):a instanceof l&&(b=a)}else return[];if(b)return b.map(function(h){e=l.inArray(this,d);return-1!==e?c[e]:null}).toArray()};var B=function(a,b){if(!(this instanceof B))return new B(a,b);var c=[],d=function(f){(f=Fc(f))&&c.push.apply(c,f)};if(Array.isArray(a))for(var e=0,h=a.length;e<h;e++)d(a[e]);else d(a);this.context=Oa(c);b&&l.merge(this,b);this.selector={rows:null,
cols:null,opts:null};B.extend(this,this,tc)};u.Api=B;l.extend(B.prototype,{any:function(){return 0!==this.count()},concat:N.concat,context:[],count:function(){return this.flatten().length},each:function(a){for(var b=0,c=this.length;b<c;b++)a.call(this,this[b],b,this);return this},eq:function(a){var b=this.context;return b.length>a?new B(b[a],this[a]):null},filter:function(a){var b=[];if(N.filter)b=N.filter.call(this,a,this);else for(var c=0,d=this.length;c<d;c++)a.call(this,this[c],c,this)&&b.push(this[c]);
return new B(this.context,b)},flatten:function(){var a=[];return new B(this.context,a.concat.apply(a,this.toArray()))},join:N.join,indexOf:N.indexOf||function(a,b){b=b||0;for(var c=this.length;b<c;b++)if(this[b]===a)return b;return-1},iterator:function(a,b,c,d){var e=[],h,f,g=this.context,k,m=this.selector;"string"===typeof a&&(d=c,c=b,b=a,a=!1);var n=0;for(h=g.length;n<h;n++){var p=new B(g[n]);if("table"===b){var t=c.call(p,g[n],n);t!==q&&e.push(t)}else if("columns"===b||"rows"===b)t=c.call(p,g[n],
this[n],n),t!==q&&e.push(t);else if("column"===b||"column-rows"===b||"row"===b||"cell"===b){var v=this[n];"column-rows"===b&&(k=ab(g[n],m.opts));var x=0;for(f=v.length;x<f;x++)t=v[x],t="cell"===b?c.call(p,g[n],t.row,t.column,n,x):c.call(p,g[n],t,n,x,k),t!==q&&e.push(t)}}return e.length||d?(a=new B(g,a?e.concat.apply([],e):e),b=a.selector,b.rows=m.rows,b.cols=m.cols,b.opts=m.opts,a):this},lastIndexOf:N.lastIndexOf||function(a,b){return this.indexOf.apply(this.toArray.reverse(),arguments)},length:0,
map:function(a){var b=[];if(N.map)b=N.map.call(this,a,this);else for(var c=0,d=this.length;c<d;c++)b.push(a.call(this,this[c],c));return new B(this.context,b)},pluck:function(a){var b=u.util.get(a);return this.map(function(c){return b(c)})},pop:N.pop,push:N.push,reduce:N.reduce||function(a,b){return Gb(this,a,b,0,this.length,1)},reduceRight:N.reduceRight||function(a,b){return Gb(this,a,b,this.length-1,-1,-1)},reverse:N.reverse,selector:null,shift:N.shift,slice:function(){return new B(this.context,
this)},sort:N.sort,splice:N.splice,toArray:function(){return N.slice.call(this)},to$:function(){return l(this)},toJQuery:function(){return l(this)},unique:function(){return new B(this.context,Oa(this))},unshift:N.unshift});B.extend=function(a,b,c){if(c.length&&b&&(b instanceof B||b.__dt_wrapper)){var d,e=function(g,k,m){return function(){var n=k.apply(g,arguments);B.extend(n,n,m.methodExt);return n}};var h=0;for(d=c.length;h<d;h++){var f=c[h];b[f.name]="function"===f.type?e(a,f.val,f):"object"===
f.type?{}:f.val;b[f.name].__dt_wrapper=!0;B.extend(a,b[f.name],f.propExt)}}};B.register=z=function(a,b){if(Array.isArray(a))for(var c=0,d=a.length;c<d;c++)B.register(a[c],b);else{d=a.split(".");var e=tc,h;a=0;for(c=d.length;a<c;a++){var f=(h=-1!==d[a].indexOf("()"))?d[a].replace("()",""):d[a];a:{var g=0;for(var k=e.length;g<k;g++)if(e[g].name===f){g=e[g];break a}g=null}g||(g={name:f,val:{},methodExt:[],propExt:[],type:"object"},e.push(g));a===c-1?(g.val=b,g.type="function"===typeof b?"function":l.isPlainObject(b)?
"object":"other"):e=h?g.methodExt:g.propExt}}};B.registerPlural=J=function(a,b,c){B.register(a,c);B.register(b,function(){var d=c.apply(this,arguments);return d===this?this:d instanceof B?d.length?Array.isArray(d[0])?new B(d.context,d[0]):d[0]:q:d})};var uc=function(a,b){if(Array.isArray(a))return l.map(a,function(d){return uc(d,b)});if("number"===typeof a)return[b[a]];var c=l.map(b,function(d,e){return d.nTable});return l(c).filter(a).map(function(d){d=l.inArray(this,c);return b[d]}).toArray()};
z("tables()",function(a){return a!==q&&null!==a?new B(uc(a,this.context)):this});z("table()",function(a){a=this.tables(a);var b=a.context;return b.length?new B(b[0]):a});J("tables().nodes()","table().node()",function(){return this.iterator("table",function(a){return a.nTable},1)});J("tables().body()","table().body()",function(){return this.iterator("table",function(a){return a.nTBody},1)});J("tables().header()","table().header()",function(){return this.iterator("table",function(a){return a.nTHead},
1)});J("tables().footer()","table().footer()",function(){return this.iterator("table",function(a){return a.nTFoot},1)});J("tables().containers()","table().container()",function(){return this.iterator("table",function(a){return a.nTableWrapper},1)});z("draw()",function(a){return this.iterator("table",function(b){"page"===a?ja(b):("string"===typeof a&&(a="full-hold"===a?!1:!0),ka(b,!1===a))})});z("page()",function(a){return a===q?this.page.info().page:this.iterator("table",function(b){Ta(b,a)})});z("page.info()",
function(a){if(0===this.context.length)return q;a=this.context[0];var b=a._iDisplayStart,c=a.oFeatures.bPaginate?a._iDisplayLength:-1,d=a.fnRecordsDisplay(),e=-1===c;return{page:e?0:Math.floor(b/c),pages:e?1:Math.ceil(d/c),start:b,end:a.fnDisplayEnd(),length:c,recordsTotal:a.fnRecordsTotal(),recordsDisplay:d,serverSide:"ssp"===Q(a)}});z("page.len()",function(a){return a===q?0!==this.context.length?this.context[0]._iDisplayLength:q:this.iterator("table",function(b){pb(b,a)})});var vc=function(a,b,
c){if(c){var d=new B(a);d.one("draw",function(){c(d.ajax.json())})}if("ssp"==Q(a))ka(a,b);else{V(a,!0);var e=a.jqXHR;e&&4!==e.readyState&&e.abort();Qa(a,[],function(h){Ma(a);h=za(a,h);for(var f=0,g=h.length;f<g;f++)ia(a,h[f]);ka(a,b);V(a,!1)})}};z("ajax.json()",function(){var a=this.context;if(0<a.length)return a[0].json});z("ajax.params()",function(){var a=this.context;if(0<a.length)return a[0].oAjaxData});z("ajax.reload()",function(a,b){return this.iterator("table",function(c){vc(c,!1===b,a)})});
z("ajax.url()",function(a){var b=this.context;if(a===q){if(0===b.length)return q;b=b[0];return b.ajax?l.isPlainObject(b.ajax)?b.ajax.url:b.ajax:b.sAjaxSource}return this.iterator("table",function(c){l.isPlainObject(c.ajax)?c.ajax.url=a:c.ajax=a})});z("ajax.url().load()",function(a,b){return this.iterator("table",function(c){vc(c,!1===b,a)})});var zb=function(a,b,c,d,e){var h=[],f,g,k;var m=typeof b;b&&"string"!==m&&"function"!==m&&b.length!==q||(b=[b]);m=0;for(g=b.length;m<g;m++){var n=b[m]&&b[m].split&&
!b[m].match(/[\[\(:]/)?b[m].split(","):[b[m]];var p=0;for(k=n.length;p<k;p++)(f=c("string"===typeof n[p]?n[p].trim():n[p]))&&f.length&&(h=h.concat(f))}a=M.selector[a];if(a.length)for(m=0,g=a.length;m<g;m++)h=a[m](d,e,h);return Oa(h)},Ab=function(a){a||(a={});a.filter&&a.search===q&&(a.search=a.filter);return l.extend({search:"none",order:"current",page:"all"},a)},Bb=function(a){for(var b=0,c=a.length;b<c;b++)if(0<a[b].length)return a[0]=a[b],a[0].length=1,a.length=1,a.context=[a.context[b]],a;a.length=
0;return a},ab=function(a,b){var c=[],d=a.aiDisplay;var e=a.aiDisplayMaster;var h=b.search;var f=b.order;b=b.page;if("ssp"==Q(a))return"removed"===h?[]:pa(0,e.length);if("current"==b)for(f=a._iDisplayStart,a=a.fnDisplayEnd();f<a;f++)c.push(d[f]);else if("current"==f||"applied"==f)if("none"==h)c=e.slice();else if("applied"==h)c=d.slice();else{if("removed"==h){var g={};f=0;for(a=d.length;f<a;f++)g[d[f]]=null;c=l.map(e,function(k){return g.hasOwnProperty(k)?null:k})}}else if("index"==f||"original"==
f)for(f=0,a=a.aoData.length;f<a;f++)"none"==h?c.push(f):(e=l.inArray(f,d),(-1===e&&"removed"==h||0<=e&&"applied"==h)&&c.push(f));return c},Gc=function(a,b,c){var d;return zb("row",b,function(e){var h=nc(e),f=a.aoData;if(null!==h&&!c)return[h];d||(d=ab(a,c));if(null!==h&&-1!==l.inArray(h,d))return[h];if(null===e||e===q||""===e)return d;if("function"===typeof e)return l.map(d,function(k){var m=f[k];return e(k,m._aData,m.nTr)?k:null});if(e.nodeName){h=e._DT_RowIndex;var g=e._DT_CellIndex;if(h!==q)return f[h]&&
f[h].nTr===e?[h]:[];if(g)return f[g.row]&&f[g.row].nTr===e.parentNode?[g.row]:[];h=l(e).closest("*[data-dt-row]");return h.length?[h.data("dt-row")]:[]}if("string"===typeof e&&"#"===e.charAt(0)&&(h=a.aIds[e.replace(/^#/,"")],h!==q))return[h.idx];h=qc(Fa(a.aoData,d,"nTr"));return l(h).filter(e).map(function(){return this._DT_RowIndex}).toArray()},a,c)};z("rows()",function(a,b){a===q?a="":l.isPlainObject(a)&&(b=a,a="");b=Ab(b);var c=this.iterator("table",function(d){return Gc(d,a,b)},1);c.selector.rows=
a;c.selector.opts=b;return c});z("rows().nodes()",function(){return this.iterator("row",function(a,b){return a.aoData[b].nTr||q},1)});z("rows().data()",function(){return this.iterator(!0,"rows",function(a,b){return Fa(a.aoData,b,"_aData")},1)});J("rows().cache()","row().cache()",function(a){return this.iterator("row",function(b,c){b=b.aoData[c];return"search"===a?b._aFilterData:b._aSortData},1)});J("rows().invalidate()","row().invalidate()",function(a){return this.iterator("row",function(b,c){va(b,
c,a)})});J("rows().indexes()","row().index()",function(){return this.iterator("row",function(a,b){return b},1)});J("rows().ids()","row().id()",function(a){for(var b=[],c=this.context,d=0,e=c.length;d<e;d++)for(var h=0,f=this[d].length;h<f;h++){var g=c[d].rowIdFn(c[d].aoData[this[d][h]]._aData);b.push((!0===a?"#":"")+g)}return new B(c,b)});J("rows().remove()","row().remove()",function(){var a=this;this.iterator("row",function(b,c,d){var e=b.aoData,h=e[c],f,g;e.splice(c,1);var k=0;for(f=e.length;k<
f;k++){var m=e[k];var n=m.anCells;null!==m.nTr&&(m.nTr._DT_RowIndex=k);if(null!==n)for(m=0,g=n.length;m<g;m++)n[m]._DT_CellIndex.row=k}Na(b.aiDisplayMaster,c);Na(b.aiDisplay,c);Na(a[d],c,!1);0<b._iRecordsDisplay&&b._iRecordsDisplay--;qb(b);c=b.rowIdFn(h._aData);c!==q&&delete b.aIds[c]});this.iterator("table",function(b){for(var c=0,d=b.aoData.length;c<d;c++)b.aoData[c].idx=c});return this});z("rows.add()",function(a){var b=this.iterator("table",function(d){var e,h=[];var f=0;for(e=a.length;f<e;f++){var g=
a[f];g.nodeName&&"TR"===g.nodeName.toUpperCase()?h.push(La(d,g)[0]):h.push(ia(d,g))}return h},1),c=this.rows(-1);c.pop();l.merge(c,b);return c});z("row()",function(a,b){return Bb(this.rows(a,b))});z("row().data()",function(a){var b=this.context;if(a===q)return b.length&&this.length?b[0].aoData[this[0]]._aData:q;var c=b[0].aoData[this[0]];c._aData=a;Array.isArray(a)&&c.nTr&&c.nTr.id&&ha(b[0].rowId)(a,c.nTr.id);va(b[0],this[0],"data");return this});z("row().node()",function(){var a=this.context;return a.length&&
this.length?a[0].aoData[this[0]].nTr||null:null});z("row.add()",function(a){a instanceof l&&a.length&&(a=a[0]);var b=this.iterator("table",function(c){return a.nodeName&&"TR"===a.nodeName.toUpperCase()?La(c,a)[0]:ia(c,a)});return this.row(b[0])});l(A).on("plugin-init.dt",function(a,b){a=new B(b);a.on("stateSaveParams",function(d,e,h){d=e.rowIdFn;e=e.aoData;for(var f=[],g=0;g<e.length;g++)e[g]._detailsShow&&f.push("#"+d(e[g]._aData));h.childRows=f});var c=a.state.loaded();c&&c.childRows&&a.rows(l.map(c.childRows,
function(d){return d.replace(/:/g,"\\:")})).every(function(){F(b,null,"requestChild",[this])})});var Hc=function(a,b,c,d){var e=[],h=function(f,g){if(Array.isArray(f)||f instanceof l)for(var k=0,m=f.length;k<m;k++)h(f[k],g);else f.nodeName&&"tr"===f.nodeName.toLowerCase()?e.push(f):(k=l("<tr><td></td></tr>").addClass(g),l("td",k).addClass(g).html(f)[0].colSpan=na(a),e.push(k[0]))};h(c,d);b._details&&b._details.detach();b._details=l(e);b._detailsShow&&b._details.insertAfter(b.nTr)},wc=u.util.throttle(function(a){Da(a[0])},
500),Cb=function(a,b){var c=a.context;c.length&&(a=c[0].aoData[b!==q?b:a[0]])&&a._details&&(a._details.remove(),a._detailsShow=q,a._details=q,l(a.nTr).removeClass("dt-hasChild"),wc(c))},xc=function(a,b){var c=a.context;if(c.length&&a.length){var d=c[0].aoData[a[0]];d._details&&((d._detailsShow=b)?(d._details.insertAfter(d.nTr),l(d.nTr).addClass("dt-hasChild")):(d._details.detach(),l(d.nTr).removeClass("dt-hasChild")),F(c[0],null,"childRow",[b,a.row(a[0])]),Ic(c[0]),wc(c))}},Ic=function(a){var b=new B(a),
c=a.aoData;b.off("draw.dt.DT_details column-sizing.dt.DT_details destroy.dt.DT_details");0<U(c,"_details").length&&(b.on("draw.dt.DT_details",function(d,e){a===e&&b.rows({page:"current"}).eq(0).each(function(h){h=c[h];h._detailsShow&&h._details.insertAfter(h.nTr)})}),b.on("column-sizing.dt.DT_details",function(d,e,h,f){if(a===e)for(e=na(e),h=0,f=c.length;h<f;h++)d=c[h],d._details&&d._details.children("td[colspan]").attr("colspan",e)}),b.on("destroy.dt.DT_details",function(d,e){if(a===e)for(d=0,e=
c.length;d<e;d++)c[d]._details&&Cb(b,d)}))};z("row().child()",function(a,b){var c=this.context;if(a===q)return c.length&&this.length?c[0].aoData[this[0]]._details:q;!0===a?this.child.show():!1===a?Cb(this):c.length&&this.length&&Hc(c[0],c[0].aoData[this[0]],a,b);return this});z(["row().child.show()","row().child().show()"],function(a){xc(this,!0);return this});z(["row().child.hide()","row().child().hide()"],function(){xc(this,!1);return this});z(["row().child.remove()","row().child().remove()"],function(){Cb(this);
return this});z("row().child.isShown()",function(){var a=this.context;return a.length&&this.length?a[0].aoData[this[0]]._detailsShow||!1:!1});var Jc=/^([^:]+):(name|visIdx|visible)$/,yc=function(a,b,c,d,e){c=[];d=0;for(var h=e.length;d<h;d++)c.push(T(a,e[d],b));return c},Kc=function(a,b,c){var d=a.aoColumns,e=U(d,"sName"),h=U(d,"nTh");return zb("column",b,function(f){var g=nc(f);if(""===f)return pa(d.length);if(null!==g)return[0<=g?g:d.length+g];if("function"===typeof f){var k=ab(a,c);return l.map(d,
function(p,t){return f(t,yc(a,t,0,0,k),h[t])?t:null})}var m="string"===typeof f?f.match(Jc):"";if(m)switch(m[2]){case "visIdx":case "visible":g=parseInt(m[1],10);if(0>g){var n=l.map(d,function(p,t){return p.bVisible?t:null});return[n[n.length+g]]}return[ta(a,g)];case "name":return l.map(e,function(p,t){return p===m[1]?t:null});default:return[]}if(f.nodeName&&f._DT_CellIndex)return[f._DT_CellIndex.column];g=l(h).filter(f).map(function(){return l.inArray(this,h)}).toArray();if(g.length||!f.nodeName)return g;
g=l(f).closest("*[data-dt-column]");return g.length?[g.data("dt-column")]:[]},a,c)};z("columns()",function(a,b){a===q?a="":l.isPlainObject(a)&&(b=a,a="");b=Ab(b);var c=this.iterator("table",function(d){return Kc(d,a,b)},1);c.selector.cols=a;c.selector.opts=b;return c});J("columns().header()","column().header()",function(a,b){return this.iterator("column",function(c,d){return c.aoColumns[d].nTh},1)});J("columns().footer()","column().footer()",function(a,b){return this.iterator("column",function(c,
d){return c.aoColumns[d].nTf},1)});J("columns().data()","column().data()",function(){return this.iterator("column-rows",yc,1)});J("columns().dataSrc()","column().dataSrc()",function(){return this.iterator("column",function(a,b){return a.aoColumns[b].mData},1)});J("columns().cache()","column().cache()",function(a){return this.iterator("column-rows",function(b,c,d,e,h){return Fa(b.aoData,h,"search"===a?"_aFilterData":"_aSortData",c)},1)});J("columns().nodes()","column().nodes()",function(){return this.iterator("column-rows",
function(a,b,c,d,e){return Fa(a.aoData,e,"anCells",b)},1)});J("columns().visible()","column().visible()",function(a,b){var c=this,d=this.iterator("column",function(e,h){if(a===q)return e.aoColumns[h].bVisible;var f=e.aoColumns,g=f[h],k=e.aoData,m;if(a!==q&&g.bVisible!==a){if(a){var n=l.inArray(!0,U(f,"bVisible"),h+1);f=0;for(m=k.length;f<m;f++){var p=k[f].nTr;e=k[f].anCells;p&&p.insertBefore(e[h],e[n]||null)}}else l(U(e.aoData,"anCells",h)).detach();g.bVisible=a}});a!==q&&this.iterator("table",function(e){xa(e,
e.aoHeader);xa(e,e.aoFooter);e.aiDisplay.length||l(e.nTBody).find("td[colspan]").attr("colspan",na(e));Da(e);c.iterator("column",function(h,f){F(h,null,"column-visibility",[h,f,a,b])});(b===q||b)&&c.columns.adjust()});return d});J("columns().indexes()","column().index()",function(a){return this.iterator("column",function(b,c){return"visible"===a?ua(b,c):c},1)});z("columns.adjust()",function(){return this.iterator("table",function(a){sa(a)},1)});z("column.index()",function(a,b){if(0!==this.context.length){var c=
this.context[0];if("fromVisible"===a||"toData"===a)return ta(c,b);if("fromData"===a||"toVisible"===a)return ua(c,b)}});z("column()",function(a,b){return Bb(this.columns(a,b))});var Lc=function(a,b,c){var d=a.aoData,e=ab(a,c),h=qc(Fa(d,e,"anCells")),f=l(rc([],h)),g,k=a.aoColumns.length,m,n,p,t,v,x;return zb("cell",b,function(w){var r="function"===typeof w;if(null===w||w===q||r){m=[];n=0;for(p=e.length;n<p;n++)for(g=e[n],t=0;t<k;t++)v={row:g,column:t},r?(x=d[g],w(v,T(a,g,t),x.anCells?x.anCells[t]:null)&&
m.push(v)):m.push(v);return m}if(l.isPlainObject(w))return w.column!==q&&w.row!==q&&-1!==l.inArray(w.row,e)?[w]:[];r=f.filter(w).map(function(C,G){return{row:G._DT_CellIndex.row,column:G._DT_CellIndex.column}}).toArray();if(r.length||!w.nodeName)return r;x=l(w).closest("*[data-dt-row]");return x.length?[{row:x.data("dt-row"),column:x.data("dt-column")}]:[]},a,c)};z("cells()",function(a,b,c){l.isPlainObject(a)&&(a.row===q?(c=a,a=null):(c=b,b=null));l.isPlainObject(b)&&(c=b,b=null);if(null===b||b===
q)return this.iterator("table",function(n){return Lc(n,a,Ab(c))});var d=c?{page:c.page,order:c.order,search:c.search}:{},e=this.columns(b,d),h=this.rows(a,d),f,g,k,m;d=this.iterator("table",function(n,p){n=[];f=0;for(g=h[p].length;f<g;f++)for(k=0,m=e[p].length;k<m;k++)n.push({row:h[p][f],column:e[p][k]});return n},1);d=c&&c.selected?this.cells(d,c):d;l.extend(d.selector,{cols:b,rows:a,opts:c});return d});J("cells().nodes()","cell().node()",function(){return this.iterator("cell",function(a,b,c){return(a=
a.aoData[b])&&a.anCells?a.anCells[c]:q},1)});z("cells().data()",function(){return this.iterator("cell",function(a,b,c){return T(a,b,c)},1)});J("cells().cache()","cell().cache()",function(a){a="search"===a?"_aFilterData":"_aSortData";return this.iterator("cell",function(b,c,d){return b.aoData[c][a][d]},1)});J("cells().render()","cell().render()",function(a){return this.iterator("cell",function(b,c,d){return T(b,c,d,a)},1)});J("cells().indexes()","cell().index()",function(){return this.iterator("cell",
function(a,b,c){return{row:b,column:c,columnVisible:ua(a,c)}},1)});J("cells().invalidate()","cell().invalidate()",function(a){return this.iterator("cell",function(b,c,d){va(b,c,a,d)})});z("cell()",function(a,b,c){return Bb(this.cells(a,b,c))});z("cell().data()",function(a){var b=this.context,c=this[0];if(a===q)return b.length&&c.length?T(b[0],c[0].row,c[0].column):q;Ib(b[0],c[0].row,c[0].column,a);va(b[0],c[0].row,"data",c[0].column);return this});z("order()",function(a,b){var c=this.context;if(a===
q)return 0!==c.length?c[0].aaSorting:q;"number"===typeof a?a=[[a,b]]:a.length&&!Array.isArray(a[0])&&(a=Array.prototype.slice.call(arguments));return this.iterator("table",function(d){d.aaSorting=a.slice()})});z("order.listener()",function(a,b,c){return this.iterator("table",function(d){kb(d,a,b,c)})});z("order.fixed()",function(a){if(!a){var b=this.context;b=b.length?b[0].aaSortingFixed:q;return Array.isArray(b)?{pre:b}:b}return this.iterator("table",function(c){c.aaSortingFixed=l.extend(!0,{},a)})});
z(["columns().order()","column().order()"],function(a){var b=this;return this.iterator("table",function(c,d){var e=[];l.each(b[d],function(h,f){e.push([f,a])});c.aaSorting=e})});z("search()",function(a,b,c,d){var e=this.context;return a===q?0!==e.length?e[0].oPreviousSearch.sSearch:q:this.iterator("table",function(h){h.oFeatures.bFilter&&ya(h,l.extend({},h.oPreviousSearch,{sSearch:a+"",bRegex:null===b?!1:b,bSmart:null===c?!0:c,bCaseInsensitive:null===d?!0:d}),1)})});J("columns().search()","column().search()",
function(a,b,c,d){return this.iterator("column",function(e,h){var f=e.aoPreSearchCols;if(a===q)return f[h].sSearch;e.oFeatures.bFilter&&(l.extend(f[h],{sSearch:a+"",bRegex:null===b?!1:b,bSmart:null===c?!0:c,bCaseInsensitive:null===d?!0:d}),ya(e,e.oPreviousSearch,1))})});z("state()",function(){return this.context.length?this.context[0].oSavedState:null});z("state.clear()",function(){return this.iterator("table",function(a){a.fnStateSaveCallback.call(a.oInstance,a,{})})});z("state.loaded()",function(){return this.context.length?
this.context[0].oLoadedState:null});z("state.save()",function(){return this.iterator("table",function(a){Da(a)})});u.versionCheck=u.fnVersionCheck=function(a){var b=u.version.split(".");a=a.split(".");for(var c,d,e=0,h=a.length;e<h;e++)if(c=parseInt(b[e],10)||0,d=parseInt(a[e],10)||0,c!==d)return c>d;return!0};u.isDataTable=u.fnIsDataTable=function(a){var b=l(a).get(0),c=!1;if(a instanceof u.Api)return!0;l.each(u.settings,function(d,e){d=e.nScrollHead?l("table",e.nScrollHead)[0]:null;var h=e.nScrollFoot?
l("table",e.nScrollFoot)[0]:null;if(e.nTable===b||d===b||h===b)c=!0});return c};u.tables=u.fnTables=function(a){var b=!1;l.isPlainObject(a)&&(b=a.api,a=a.visible);var c=l.map(u.settings,function(d){if(!a||a&&l(d.nTable).is(":visible"))return d.nTable});return b?new B(c):c};u.camelToHungarian=P;z("$()",function(a,b){b=this.rows(b).nodes();b=l(b);return l([].concat(b.filter(a).toArray(),b.find(a).toArray()))});l.each(["on","one","off"],function(a,b){z(b+"()",function(){var c=Array.prototype.slice.call(arguments);
c[0]=l.map(c[0].split(/\s/),function(e){return e.match(/\.dt\b/)?e:e+".dt"}).join(" ");var d=l(this.tables().nodes());d[b].apply(d,c);return this})});z("clear()",function(){return this.iterator("table",function(a){Ma(a)})});z("settings()",function(){return new B(this.context,this.context)});z("init()",function(){var a=this.context;return a.length?a[0].oInit:null});z("data()",function(){return this.iterator("table",function(a){return U(a.aoData,"_aData")}).flatten()});z("destroy()",function(a){a=a||
!1;return this.iterator("table",function(b){var c=b.oClasses,d=b.nTable,e=b.nTBody,h=b.nTHead,f=b.nTFoot,g=l(d);e=l(e);var k=l(b.nTableWrapper),m=l.map(b.aoData,function(p){return p.nTr}),n;b.bDestroying=!0;F(b,"aoDestroyCallback","destroy",[b]);a||(new B(b)).columns().visible(!0);k.off(".DT").find(":not(tbody *)").off(".DT");l(y).off(".DT-"+b.sInstance);d!=h.parentNode&&(g.children("thead").detach(),g.append(h));f&&d!=f.parentNode&&(g.children("tfoot").detach(),g.append(f));b.aaSorting=[];b.aaSortingFixed=
[];Va(b);l(m).removeClass(b.asStripeClasses.join(" "));l("th, td",h).removeClass(c.sSortable+" "+c.sSortableAsc+" "+c.sSortableDesc+" "+c.sSortableNone);e.children().detach();e.append(m);h=b.nTableWrapper.parentNode;f=a?"remove":"detach";g[f]();k[f]();!a&&h&&(h.insertBefore(d,b.nTableReinsertBefore),g.css("width",b.sDestroyWidth).removeClass(c.sTable),(n=b.asDestroyStripes.length)&&e.children().each(function(p){l(this).addClass(b.asDestroyStripes[p%n])}));c=l.inArray(b,u.settings);-1!==c&&u.settings.splice(c,
1)})});l.each(["column","row","cell"],function(a,b){z(b+"s().every()",function(c){var d=this.selector.opts,e=this;return this.iterator(b,function(h,f,g,k,m){c.call(e[b](f,"cell"===b?g:d,"cell"===b?d:q),f,g,k,m)})})});z("i18n()",function(a,b,c){var d=this.context[0];a=ma(a)(d.oLanguage);a===q&&(a=b);c!==q&&l.isPlainObject(a)&&(a=a[c]!==q?a[c]:a._);return a.replace("%d",c)});u.version="1.12.1";u.settings=[];u.models={};u.models.oSearch={bCaseInsensitive:!0,sSearch:"",bRegex:!1,bSmart:!0,"return":!1};
u.models.oRow={nTr:null,anCells:null,_aData:[],_aSortData:null,_aFilterData:null,_sFilterRow:null,_sRowStripe:"",src:null,idx:-1};u.models.oColumn={idx:null,aDataSort:null,asSorting:null,bSearchable:null,bSortable:null,bVisible:null,_sManualType:null,_bAttrSrc:!1,fnCreatedCell:null,fnGetData:null,fnSetData:null,mData:null,mRender:null,nTh:null,nTf:null,sClass:null,sContentPadding:null,sDefaultContent:null,sName:null,sSortDataType:"std",sSortingClass:null,sSortingClassJUI:null,sTitle:null,sType:null,
sWidth:null,sWidthOrig:null};u.defaults={aaData:null,aaSorting:[[0,"asc"]],aaSortingFixed:[],ajax:null,aLengthMenu:[10,25,50,100],aoColumns:null,aoColumnDefs:null,aoSearchCols:[],asStripeClasses:null,bAutoWidth:!0,bDeferRender:!1,bDestroy:!1,bFilter:!0,bInfo:!0,bLengthChange:!0,bPaginate:!0,bProcessing:!1,bRetrieve:!1,bScrollCollapse:!1,bServerSide:!1,bSort:!0,bSortMulti:!0,bSortCellsTop:!1,bSortClasses:!0,bStateSave:!1,fnCreatedRow:null,fnDrawCallback:null,fnFooterCallback:null,fnFormatNumber:function(a){return a.toString().replace(/\B(?=(\d{3})+(?!\d))/g,
this.oLanguage.sThousands)},fnHeaderCallback:null,fnInfoCallback:null,fnInitComplete:null,fnPreDrawCallback:null,fnRowCallback:null,fnServerData:null,fnServerParams:null,fnStateLoadCallback:function(a){try{return JSON.parse((-1===a.iStateDuration?sessionStorage:localStorage).getItem("DataTables_"+a.sInstance+"_"+location.pathname))}catch(b){return{}}},fnStateLoadParams:null,fnStateLoaded:null,fnStateSaveCallback:function(a,b){try{(-1===a.iStateDuration?sessionStorage:localStorage).setItem("DataTables_"+
a.sInstance+"_"+location.pathname,JSON.stringify(b))}catch(c){}},fnStateSaveParams:null,iStateDuration:7200,iDeferLoading:null,iDisplayLength:10,iDisplayStart:0,iTabIndex:0,oClasses:{},oLanguage:{oAria:{sSortAscending:": activate to sort column ascending",sSortDescending:": activate to sort column descending"},oPaginate:{sFirst:"First",sLast:"Last",sNext:"Next",sPrevious:"Previous"},sEmptyTable:"No data available in table",sInfo:"Showing _START_ to _END_ of _TOTAL_ entries",sInfoEmpty:"Showing 0 to 0 of 0 entries",
sInfoFiltered:"(filtered from _MAX_ total entries)",sInfoPostFix:"",sDecimal:"",sThousands:",",sLengthMenu:"Show _MENU_ entries",sLoadingRecords:"Loading...",sProcessing:"",sSearch:"Search:",sSearchPlaceholder:"",sUrl:"",sZeroRecords:"No matching records found"},oSearch:l.extend({},u.models.oSearch),sAjaxDataProp:"data",sAjaxSource:null,sDom:"lfrtip",searchDelay:null,sPaginationType:"simple_numbers",sScrollX:"",sScrollXInner:"",sScrollY:"",sServerMethod:"GET",renderer:null,rowId:"DT_RowId"};E(u.defaults);
u.defaults.column={aDataSort:null,iDataSort:-1,asSorting:["asc","desc"],bSearchable:!0,bSortable:!0,bVisible:!0,fnCreatedCell:null,mData:null,mRender:null,sCellType:"td",sClass:"",sContentPadding:"",sDefaultContent:null,sName:"",sSortDataType:"std",sTitle:null,sType:null,sWidth:null};E(u.defaults.column);u.models.oSettings={oFeatures:{bAutoWidth:null,bDeferRender:null,bFilter:null,bInfo:null,bLengthChange:null,bPaginate:null,bProcessing:null,bServerSide:null,bSort:null,bSortMulti:null,bSortClasses:null,
bStateSave:null},oScroll:{bCollapse:null,iBarWidth:0,sX:null,sXInner:null,sY:null},oLanguage:{fnInfoCallback:null},oBrowser:{bScrollOversize:!1,bScrollbarLeft:!1,bBounding:!1,barWidth:0},ajax:null,aanFeatures:[],aoData:[],aiDisplay:[],aiDisplayMaster:[],aIds:{},aoColumns:[],aoHeader:[],aoFooter:[],oPreviousSearch:{},aoPreSearchCols:[],aaSorting:null,aaSortingFixed:[],asStripeClasses:null,asDestroyStripes:[],sDestroyWidth:0,aoRowCallback:[],aoHeaderCallback:[],aoFooterCallback:[],aoDrawCallback:[],
aoRowCreatedCallback:[],aoPreDrawCallback:[],aoInitComplete:[],aoStateSaveParams:[],aoStateLoadParams:[],aoStateLoaded:[],sTableId:"",nTable:null,nTHead:null,nTFoot:null,nTBody:null,nTableWrapper:null,bDeferLoading:!1,bInitialised:!1,aoOpenRows:[],sDom:null,searchDelay:null,sPaginationType:"two_button",iStateDuration:0,aoStateSave:[],aoStateLoad:[],oSavedState:null,oLoadedState:null,sAjaxSource:null,sAjaxDataProp:null,jqXHR:null,json:q,oAjaxData:q,fnServerData:null,aoServerParams:[],sServerMethod:null,
fnFormatNumber:null,aLengthMenu:null,iDraw:0,bDrawing:!1,iDrawError:-1,_iDisplayLength:10,_iDisplayStart:0,_iRecordsTotal:0,_iRecordsDisplay:0,oClasses:{},bFiltered:!1,bSorted:!1,bSortCellsTop:null,oInit:null,aoDestroyCallback:[],fnRecordsTotal:function(){return"ssp"==Q(this)?1*this._iRecordsTotal:this.aiDisplayMaster.length},fnRecordsDisplay:function(){return"ssp"==Q(this)?1*this._iRecordsDisplay:this.aiDisplay.length},fnDisplayEnd:function(){var a=this._iDisplayLength,b=this._iDisplayStart,c=b+
a,d=this.aiDisplay.length,e=this.oFeatures,h=e.bPaginate;return e.bServerSide?!1===h||-1===a?b+d:Math.min(b+a,this._iRecordsDisplay):!h||c>d||-1===a?d:c},oInstance:null,sInstance:null,iTabIndex:0,nScrollHead:null,nScrollFoot:null,aLastSort:[],oPlugins:{},rowIdFn:null,rowId:null};u.ext=M={buttons:{},classes:{},build:"bs4/dt-1.12.1",errMode:"alert",feature:[],search:[],selector:{cell:[],column:[],row:[]},internal:{},legacy:{ajax:null},pager:{},renderer:{pageButton:{},header:{}},order:{},type:{detect:[],
search:{},order:{}},_unique:0,fnVersionCheck:u.fnVersionCheck,iApiIndex:0,oJUIClasses:{},sVersion:u.version};l.extend(M,{afnFiltering:M.search,aTypes:M.type.detect,ofnSearch:M.type.search,oSort:M.type.order,afnSortData:M.order,aoFeatures:M.feature,oApi:M.internal,oStdClasses:M.classes,oPagination:M.pager});l.extend(u.ext.classes,{sTable:"dataTable",sNoFooter:"no-footer",sPageButton:"paginate_button",sPageButtonActive:"current",sPageButtonDisabled:"disabled",sStripeOdd:"odd",sStripeEven:"even",sRowEmpty:"dataTables_empty",
sWrapper:"dataTables_wrapper",sFilter:"dataTables_filter",sInfo:"dataTables_info",sPaging:"dataTables_paginate paging_",sLength:"dataTables_length",sProcessing:"dataTables_processing",sSortAsc:"sorting_asc",sSortDesc:"sorting_desc",sSortable:"sorting",sSortableAsc:"sorting_desc_disabled",sSortableDesc:"sorting_asc_disabled",sSortableNone:"sorting_disabled",sSortColumn:"sorting_",sFilterInput:"",sLengthSelect:"",sScrollWrapper:"dataTables_scroll",sScrollHead:"dataTables_scrollHead",sScrollHeadInner:"dataTables_scrollHeadInner",
sScrollBody:"dataTables_scrollBody",sScrollFoot:"dataTables_scrollFoot",sScrollFootInner:"dataTables_scrollFootInner",sHeaderTH:"",sFooterTH:"",sSortJUIAsc:"",sSortJUIDesc:"",sSortJUI:"",sSortJUIAscAllowed:"",sSortJUIDescAllowed:"",sSortJUIWrapper:"",sSortIcon:"",sJUIHeader:"",sJUIFooter:""});var ic=u.ext.pager;l.extend(ic,{simple:function(a,b){return["previous","next"]},full:function(a,b){return["first","previous","next","last"]},numbers:function(a,b){return[Ea(a,b)]},simple_numbers:function(a,b){return["previous",
Ea(a,b),"next"]},full_numbers:function(a,b){return["first","previous",Ea(a,b),"next","last"]},first_last_numbers:function(a,b){return["first",Ea(a,b),"last"]},_numbers:Ea,numbers_length:7});l.extend(!0,u.ext.renderer,{pageButton:{_:function(a,b,c,d,e,h){var f=a.oClasses,g=a.oLanguage.oPaginate,k=a.oLanguage.oAria.paginate||{},m,n,p=0,t=function(x,w){var r,C=f.sPageButtonDisabled,G=function(I){Ta(a,I.data.action,!0)};var ba=0;for(r=w.length;ba<r;ba++){var L=w[ba];if(Array.isArray(L)){var O=l("<"+(L.DT_el||
"div")+"/>").appendTo(x);t(O,L)}else{m=null;n=L;O=a.iTabIndex;switch(L){case "ellipsis":x.append('<span class="ellipsis">…</span>');break;case "first":m=g.sFirst;0===e&&(O=-1,n+=" "+C);break;case "previous":m=g.sPrevious;0===e&&(O=-1,n+=" "+C);break;case "next":m=g.sNext;if(0===h||e===h-1)O=-1,n+=" "+C;break;case "last":m=g.sLast;if(0===h||e===h-1)O=-1,n+=" "+C;break;default:m=a.fnFormatNumber(L+1),n=e===L?f.sPageButtonActive:""}null!==m&&(O=l("<a>",{"class":f.sPageButton+" "+n,"aria-controls":a.sTableId,
"aria-label":k[L],"data-dt-idx":p,tabindex:O,id:0===c&&"string"===typeof L?a.sTableId+"_"+L:null}).html(m).appendTo(x),sb(O,{action:L},G),p++)}}};try{var v=l(b).find(A.activeElement).data("dt-idx")}catch(x){}t(l(b).empty(),d);v!==q&&l(b).find("[data-dt-idx="+v+"]").trigger("focus")}}});l.extend(u.ext.type.detect,[function(a,b){b=b.oLanguage.sDecimal;return yb(a,b)?"num"+b:null},function(a,b){if(a&&!(a instanceof Date)&&!Dc.test(a))return null;b=Date.parse(a);return null!==b&&!isNaN(b)||aa(a)?"date":
null},function(a,b){b=b.oLanguage.sDecimal;return yb(a,b,!0)?"num-fmt"+b:null},function(a,b){b=b.oLanguage.sDecimal;return pc(a,b)?"html-num"+b:null},function(a,b){b=b.oLanguage.sDecimal;return pc(a,b,!0)?"html-num-fmt"+b:null},function(a,b){return aa(a)||"string"===typeof a&&-1!==a.indexOf("<")?"html":null}]);l.extend(u.ext.type.search,{html:function(a){return aa(a)?a:"string"===typeof a?a.replace(mc," ").replace(Ya,""):""},string:function(a){return aa(a)?a:"string"===typeof a?a.replace(mc," "):
a}});var Xa=function(a,b,c,d){if(0!==a&&(!a||"-"===a))return-Infinity;b&&(a=oc(a,b));a.replace&&(c&&(a=a.replace(c,"")),d&&(a=a.replace(d,"")));return 1*a};l.extend(M.type.order,{"date-pre":function(a){a=Date.parse(a);return isNaN(a)?-Infinity:a},"html-pre":function(a){return aa(a)?"":a.replace?a.replace(/<.*?>/g,"").toLowerCase():a+""},"string-pre":function(a){return aa(a)?"":"string"===typeof a?a.toLowerCase():a.toString?a.toString():""},"string-asc":function(a,b){return a<b?-1:a>b?1:0},"string-desc":function(a,
b){return a<b?1:a>b?-1:0}});bb("");l.extend(!0,u.ext.renderer,{header:{_:function(a,b,c,d){l(a.nTable).on("order.dt.DT",function(e,h,f,g){a===h&&(e=c.idx,b.removeClass(d.sSortAsc+" "+d.sSortDesc).addClass("asc"==g[e]?d.sSortAsc:"desc"==g[e]?d.sSortDesc:c.sSortingClass))})},jqueryui:function(a,b,c,d){l("<div/>").addClass(d.sSortJUIWrapper).append(b.contents()).append(l("<span/>").addClass(d.sSortIcon+" "+c.sSortingClassJUI)).appendTo(b);l(a.nTable).on("order.dt.DT",function(e,h,f,g){a===h&&(e=c.idx,
b.removeClass(d.sSortAsc+" "+d.sSortDesc).addClass("asc"==g[e]?d.sSortAsc:"desc"==g[e]?d.sSortDesc:c.sSortingClass),b.find("span."+d.sSortIcon).removeClass(d.sSortJUIAsc+" "+d.sSortJUIDesc+" "+d.sSortJUI+" "+d.sSortJUIAscAllowed+" "+d.sSortJUIDescAllowed).addClass("asc"==g[e]?d.sSortJUIAsc:"desc"==g[e]?d.sSortJUIDesc:c.sSortingClassJUI))})}}});var $a=function(a){Array.isArray(a)&&(a=a.join(","));return"string"===typeof a?a.replace(/&/g,"&amp;").replace(/</g,"&lt;").replace(/>/g,"&gt;").replace(/"/g,
"&quot;"):a},kc=!1,zc=",",Ac=".";if(Intl)try{for(var Ha=(new Intl.NumberFormat).formatToParts(100000.1),ra=0;ra<Ha.length;ra++)"group"===Ha[ra].type?zc=Ha[ra].value:"decimal"===Ha[ra].type&&(Ac=Ha[ra].value)}catch(a){}u.datetime=function(a,b){var c="datetime-detect-"+a;b||(b="en");u.ext.type.order[c]||(u.ext.type.detect.unshift(function(d){var e=Za(d,a,b);return""===d||e?c:!1}),u.ext.type.order[c+"-pre"]=function(d){return Za(d,a,b)||0})};u.render={date:wb("toLocaleDateString"),datetime:wb("toLocaleString"),
time:wb("toLocaleTimeString"),number:function(a,b,c,d,e){if(null===a||a===q)a=zc;if(null===b||b===q)b=Ac;return{display:function(h){if("number"!==typeof h&&"string"!==typeof h||""===h||null===h)return h;var f=0>h?"-":"",g=parseFloat(h);if(isNaN(g))return $a(h);g=g.toFixed(c);h=Math.abs(g);g=parseInt(h,10);h=c?b+(h-g).toFixed(c).substring(2):"";0===g&&0===parseFloat(h)&&(f="");return f+(d||"")+g.toString().replace(/\B(?=(\d{3})+(?!\d))/g,a)+h+(e||"")}}},text:function(){return{display:$a,filter:$a}}};
l.extend(u.ext.internal,{_fnExternApiFunc:lc,_fnBuildAjax:Qa,_fnAjaxUpdate:Kb,_fnAjaxParameters:Tb,_fnAjaxUpdateDraw:Ub,_fnAjaxDataSrc:za,_fnAddColumn:cb,_fnColumnOptions:Ia,_fnAdjustColumnSizing:sa,_fnVisibleToColumnIndex:ta,_fnColumnIndexToVisible:ua,_fnVisbleColumns:na,_fnGetColumns:Ka,_fnColumnTypes:eb,_fnApplyColumnDefs:Hb,_fnHungarianMap:E,_fnCamelToHungarian:P,_fnLanguageCompat:la,_fnBrowserDetect:Fb,_fnAddData:ia,_fnAddTr:La,_fnNodeToDataIndex:function(a,b){return b._DT_RowIndex!==q?b._DT_RowIndex:
null},_fnNodeToColumnIndex:function(a,b,c){return l.inArray(c,a.aoData[b].anCells)},_fnGetCellData:T,_fnSetCellData:Ib,_fnSplitObjNotation:hb,_fnGetObjectDataFn:ma,_fnSetObjectDataFn:ha,_fnGetDataMaster:ib,_fnClearTable:Ma,_fnDeleteIndex:Na,_fnInvalidate:va,_fnGetRowElements:gb,_fnCreateTr:fb,_fnBuildHead:Jb,_fnDrawHead:xa,_fnDraw:ja,_fnReDraw:ka,_fnAddOptionsHtml:Mb,_fnDetectHeader:wa,_fnGetUniqueThs:Pa,_fnFeatureHtmlFilter:Ob,_fnFilterComplete:ya,_fnFilterCustom:Xb,_fnFilterColumn:Wb,_fnFilter:Vb,
_fnFilterCreateSearch:nb,_fnEscapeRegex:ob,_fnFilterData:Yb,_fnFeatureHtmlInfo:Rb,_fnUpdateInfo:ac,_fnInfoMacros:bc,_fnInitialise:Aa,_fnInitComplete:Ra,_fnLengthChange:pb,_fnFeatureHtmlLength:Nb,_fnFeatureHtmlPaginate:Sb,_fnPageChange:Ta,_fnFeatureHtmlProcessing:Pb,_fnProcessingDisplay:V,_fnFeatureHtmlTable:Qb,_fnScrollDraw:Ja,_fnApplyToChildren:da,_fnCalculateColumnWidths:db,_fnThrottle:mb,_fnConvertToWidth:cc,_fnGetWidestNode:dc,_fnGetMaxLenString:ec,_fnStringToCss:K,_fnSortFlatten:oa,_fnSort:Lb,
_fnSortAria:gc,_fnSortListener:rb,_fnSortAttachListener:kb,_fnSortingClasses:Va,_fnSortData:fc,_fnSaveState:Da,_fnLoadState:hc,_fnImplementState:tb,_fnSettingsFromNode:Wa,_fnLog:ea,_fnMap:Y,_fnBindAction:sb,_fnCallbackReg:R,_fnCallbackFire:F,_fnLengthOverflow:qb,_fnRenderer:lb,_fnDataSource:Q,_fnRowAttributes:jb,_fnExtend:ub,_fnCalculateEnd:function(){}});l.fn.dataTable=u;u.$=l;l.fn.dataTableSettings=u.settings;l.fn.dataTableExt=u.ext;l.fn.DataTable=function(a){return l(this).dataTable(a).api()};
l.each(u,function(a,b){l.fn.DataTable[a]=b});return u});
/*!
DataTables Bootstrap 4 integration
©2011-2017 SpryMedia Ltd - datatables.net/license
*/
var $jscomp=$jscomp||{};$jscomp.scope={};$jscomp.findInternal=function(a,b,c){a instanceof String&&(a=String(a));for(var e=a.length,d=0;d<e;d++){var f=a[d];if(b.call(c,f,d,a))return{i:d,v:f}}return{i:-1,v:void 0}};$jscomp.ASSUME_ES5=!1;$jscomp.ASSUME_NO_NATIVE_MAP=!1;$jscomp.ASSUME_NO_NATIVE_SET=!1;$jscomp.SIMPLE_FROUND_POLYFILL=!1;$jscomp.ISOLATE_POLYFILLS=!1;
$jscomp.defineProperty=$jscomp.ASSUME_ES5||"function"==typeof Object.defineProperties?Object.defineProperty:function(a,b,c){if(a==Array.prototype||a==Object.prototype)return a;a[b]=c.value;return a};$jscomp.getGlobal=function(a){a=["object"==typeof globalThis&&globalThis,a,"object"==typeof window&&window,"object"==typeof self&&self,"object"==typeof global&&global];for(var b=0;b<a.length;++b){var c=a[b];if(c&&c.Math==Math)return c}throw Error("Cannot find global object");};$jscomp.global=$jscomp.getGlobal(this);
$jscomp.IS_SYMBOL_NATIVE="function"===typeof Symbol&&"symbol"===typeof Symbol("x");$jscomp.TRUST_ES6_POLYFILLS=!$jscomp.ISOLATE_POLYFILLS||$jscomp.IS_SYMBOL_NATIVE;$jscomp.polyfills={};$jscomp.propertyToPolyfillSymbol={};$jscomp.POLYFILL_PREFIX="$jscp$";var $jscomp$lookupPolyfilledValue=function(a,b){var c=$jscomp.propertyToPolyfillSymbol[b];if(null==c)return a[b];c=a[c];return void 0!==c?c:a[b]};
$jscomp.polyfill=function(a,b,c,e){b&&($jscomp.ISOLATE_POLYFILLS?$jscomp.polyfillIsolated(a,b,c,e):$jscomp.polyfillUnisolated(a,b,c,e))};$jscomp.polyfillUnisolated=function(a,b,c,e){c=$jscomp.global;a=a.split(".");for(e=0;e<a.length-1;e++){var d=a[e];if(!(d in c))return;c=c[d]}a=a[a.length-1];e=c[a];b=b(e);b!=e&&null!=b&&$jscomp.defineProperty(c,a,{configurable:!0,writable:!0,value:b})};
$jscomp.polyfillIsolated=function(a,b,c,e){var d=a.split(".");a=1===d.length;e=d[0];e=!a&&e in $jscomp.polyfills?$jscomp.polyfills:$jscomp.global;for(var f=0;f<d.length-1;f++){var l=d[f];if(!(l in e))return;e=e[l]}d=d[d.length-1];c=$jscomp.IS_SYMBOL_NATIVE&&"es6"===c?e[d]:null;b=b(c);null!=b&&(a?$jscomp.defineProperty($jscomp.polyfills,d,{configurable:!0,writable:!0,value:b}):b!==c&&($jscomp.propertyToPolyfillSymbol[d]=$jscomp.IS_SYMBOL_NATIVE?$jscomp.global.Symbol(d):$jscomp.POLYFILL_PREFIX+d,d=
$jscomp.propertyToPolyfillSymbol[d],$jscomp.defineProperty(e,d,{configurable:!0,writable:!0,value:b})))};$jscomp.polyfill("Array.prototype.find",function(a){return a?a:function(b,c){return $jscomp.findInternal(this,b,c).v}},"es6","es3");
(function(a){"function"===typeof define&&define.amd?define(["jquery","datatables.net"],function(b){return a(b,window,document)}):"object"===typeof exports?module.exports=function(b,c){b||(b=window);c&&c.fn.dataTable||(c=require("datatables.net")(b,c).$);return a(c,b,b.document)}:a(jQuery,window,document)})(function(a,b,c,e){var d=a.fn.dataTable;a.extend(!0,d.defaults,{dom:"<'row'<'col-sm-12 col-md-6'l><'col-sm-12 col-md-6'f>><'row'<'col-sm-12'tr>><'row'<'col-sm-12 col-md-5'i><'col-sm-12 col-md-7'p>>",
renderer:"bootstrap"});a.extend(d.ext.classes,{sWrapper:"dataTables_wrapper dt-bootstrap4",sFilterInput:"form-control form-control-sm",sLengthSelect:"custom-select custom-select-sm form-control form-control-sm",sProcessing:"dataTables_processing card",sPageButton:"paginate_button page-item"});d.ext.renderer.pageButton.bootstrap=function(f,l,A,B,m,t){var u=new d.Api(f),C=f.oClasses,n=f.oLanguage.oPaginate,D=f.oLanguage.oAria.paginate||{},h,k,v=0,y=function(q,w){var x,E=function(p){p.preventDefault();
a(p.currentTarget).hasClass("disabled")||u.page()==p.data.action||u.page(p.data.action).draw("page")};var r=0;for(x=w.length;r<x;r++){var g=w[r];if(Array.isArray(g))y(q,g);else{k=h="";switch(g){case "ellipsis":h="…";k="disabled";break;case "first":h=n.sFirst;k=g+(0<m?"":" disabled");break;case "previous":h=n.sPrevious;k=g+(0<m?"":" disabled");break;case "next":h=n.sNext;k=g+(m<t-1?"":" disabled");break;case "last":h=n.sLast;k=g+(m<t-1?"":" disabled");break;default:h=g+1,k=m===g?"active":""}if(h){var F=
a("<li>",{"class":C.sPageButton+" "+k,id:0===A&&"string"===typeof g?f.sTableId+"_"+g:null}).append(a("<a>",{href:"#","aria-controls":f.sTableId,"aria-label":D[g],"data-dt-idx":v,tabindex:f.iTabIndex,"class":"page-link"}).html(h)).appendTo(q);f.oApi._fnBindAction(F,{action:g},E);v++}}}};try{var z=a(l).find(c.activeElement).data("dt-idx")}catch(q){}y(a(l).empty().html('<ul class="pagination"/>').children("ul"),B);z!==e&&a(l).find("[data-dt-idx="+z+"]").trigger("focus")};return d}); | zensols.zotsite | /zensols.zotsite-0.8.1-py3-none-any.whl/zensols/zotsite/resources/site/lib/js/datatables.min.js | datatables.min.js |
# ZenSVI
This package handles downloading, cleaning, and analyzing street view imagery.
## Installation
```bash
$ pip install zensvi
```
## Usage
- TODO
## Contributing
Interested in contributing? Check out the contributing guidelines. Please note that this project is released with a Code of Conduct. By contributing to this project, you agree to abide by its terms.
## License
`zensvi` was created by koito19960406. It is licensed under the terms of the MIT license.
## Credits
`zensvi` was created with [`cookiecutter`](https://cookiecutter.readthedocs.io/en/latest/) and the `py-pkgs-cookiecutter` [template](https://github.com/py-pkgs/py-pkgs-cookiecutter).
| zensvi | /zensvi-0.8.2.tar.gz/zensvi-0.8.2/README.md | README.md |
The MIT License (MIT)
Copyright © 2021 Brown University, Providence, Rhode Island, USA
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
### Sub-licenses
Please note that some of the code in this repository has been copied from the [`pandas`](https://github.com/pandas-dev/pandas) project, and as such is licensed under the BSD 3-Clause License reproduced below:
BSD 3-Clause License
Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team
All rights reserved.
Copyright (c) 2011-2021, Open source contributors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| zentables | /ZenTables-0.2.4.tar.gz/ZenTables-0.2.4/LICENSE.md | LICENSE.md |
# ZenTables - Stress-Free Descriptive Tables in Python
`ZenTables` transforms your `pandas` DataFrames into beautiful, publishable tables in one line of code, which you can then transfer into Google Docs and other word processors with one click. Supercharge your workflow when you are writing papers and reports.
```python
import zentables as zen
df.zen.pretty()
```

## Features
* Beautiful tables in one line
* Google Docs/Word ready in one click
* Descriptive statistics at varying levels of aggregation
* Control table aesthetics globally
* and many more to come....
## Installation
Via `pip` from PyPI:
```sh
pip install zentables
```
Via `pip` from GitHub directly
```sh
pip install -U git+https://github.com/thepolicylab/ZenTables
```
## How to use `ZenTables`
### 1. How to format any `DataFrame`
First, import the package alongside `pandas`:
```python
import pandas as pd
import zentables as zen
```
Then, to format any `DataFrame`, simply use:
```python
df.zen.pretty()
```
And this is the result:

Click on the `Copy Table` button to transfer the table to Google Docs and Word. Formatting will be preserved.
Results in Google Docs (Tested on Chrome, Firefox, and Safari):

Results in Microsoft Word:

### 2. How to control my tables' appearance?
`ZenTables` provides two ways to control the aesthetics of the tables. You can use global settings to control the font and font size of the tables via:
```python
zen.set_options(font_family="Times New Roman, serif", font_size=12)
```
**Note:** When `font_size` is specified as an `int`, it will be interpreted as points (`pt`). All other CSS units are accepted as a `str`.
Or you can override any global options by specifying `font_family` and `font_size` in the `zen.pretty()` method:
```python
df.zen.pretty(font_family="Times New Roman, serif", font_size="12pt")
```
Both will result in a table that looks like this:

We are working on adding more customization options in a future release.
### 3. How to create common descriptive tables using `ZenTables`?
#### 3.1. Frequency Tables
Use `df.zen.freq_table()` to create simple frequency tables:
```python
freq_table = df.zen.freq_table(
index=["Segment", "Region"],
columns=["Category"],
values="Order ID",
props="index",
totals=True,
subtotals=True,
totals_names="Total"
subtotals_names="Subtotal",
)
freq_table.zen.pretty() # You can also chain the methods
```
Use `props` to control whether to add percentages of counts. When `props` is not set (the default), no percentages will be added. You can also specify `props` to calculate percentages over `"index"` (rows), `"columns"`, or `"all"` (over the totals of the immediate top level).
Use the `totals` and `subtotals` parameters to specify whether totals and subtotals will be added. Note that when `props` is not `None`, both `totals` and `subtotals` will be `True`, and setting `subtotals` to `True` also overrides the `totals` setting to `True`.
Additionally, you can control the names of the total and subtotal categories using `totals_names` and `subtotals_names` parameters.

#### 3.2. Mean and standard deviation tables
Use `df.zen.mean_sd_table()` to create descriptives with n, mean, and standard deviation:
```python
mean_sd_table = df.zen.mean_sd_table(
index=["Segment", "Region"],
columns=["Category"],
values="Sales",
margins=True,
margins_name="All",
submargins=True,
submargins_name="All Regions",
)
mean_sd_table.zen.pretty() # You can also chain the methods
```
Similar to `freq_table()`, you can use the `margins` and `submargins` parameters to specify whether aggregations at the top and intermediate levels will be added. Additionally, you can control the names of the total and subtotal categories using the `margins_name` and `submargins_name` parameters.

#### 3.3. Other descriptive statistics tables
For all other types of tables, `ZenTables` provides its own `df.zen.pivot_table()` method:
```python
mean_median_table = df.zen.pivot_table(
index=["Segment", "Region"],
columns=["Category"],
values="Sales",
aggfunc=["count", "mean", "median"],
margins=True,
margins_name="All",
submargins=True,
submargins_name="All Regions",
).rename( # rename columns
columns={
"count": "n",
"mean": "Mean",
"median": "Median",
}
)
mean_median_table.zen.pretty().format(precision=1) # Specify level of precision
```
There are two differences between this `pivot_table()` method and the `pandas` `pivot_table()` method. First, like `mean_sd_table()`, it provides `submargins` and `submargins_name` for creating intermediate-level aggregations. Second, results are grouped by `values`, `columns`, and `aggfunc`, instead of `aggfunc`, `values`, and `columns`, which makes the resulting table more readable than the `pandas` version.

### 4. Tips and tricks
1. `df.zen.pretty()` returns a subclass of the `pandas` `Styler`, which means you can chain any of the methods available on `df.style`. The `format()` call in the previous section is an example. For more formatting options, please see [this page in the `pandas` documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html)
2. All other methods in `ZenTables` return a regular `DataFrame` that can be modified further.
3. The names of the index and columns are by default hidden. You can get them back by doing this:
```python
df.zen.pretty().show_index_names().show_column_names()
```
4. You can also disable the `Copy Table` button like this:
```python
df.zen.pretty().hide_copy_button()
```
## TODO
- [ ] More tests on compatibility with `Styler` in `pandas`
- [ ] More options for customization
- [ ] A theming system
- [ ] More to come...
## Contributing
Contributions are welcome, and they are greatly appreciated! If you have a new idea for a simple table that we should add, please submit an issue.
## Contributors
Principally written by Paul Xu at [The Policy Lab](https://thepolicylab.brown.edu). Other contributors:
* Kevin H. Wilson
* Edward Huh
## Special thanks
* All the members of [The Policy Lab](https://thepolicylab.brown.edu) at Brown University for their feedback
* The [`sidetable` package](https://github.com/chris1610/sidetable) for ideas and inspiration.
| zentables | /ZenTables-0.2.4.tar.gz/ZenTables-0.2.4/README.md | README.md |
========
Zentinel
========
.. image:: https://img.shields.io/pypi/v/zentinel.svg
:target: https://pypi.python.org/pypi/zentinel
.. image:: https://github.com/symonk/zentinel/actions/workflows/python-package.yml/badge.svg
:target: https://github.com/symonk/zentinel/actions
.. image:: https://readthedocs.org/projects/zentinel/badge/?version=latest
:target: https://zentinel.readthedocs.io/en/latest/
:alt: Documentation Status
.. image:: https://codecov.io/gh/symonk/zentinel/branch/master/graph/badge.svg?token=E7SVA868NR
:target: https://codecov.io/gh/symonk/zentinel
Zentinel
=========
Zentinel is a fully async TCP port scanner written on top of asyncio. Its sole purpose is to let administrators
test services exposed on their infrastructure through a simple Python library. Zentinel is powerful
and highly configurable: by default it performs a full TCP connect scan on each port, but ``--half-open`` can be
specified to send a SYN -> ACK -> RST sequence instead, which avoids completing the full three-way handshake
and connection. Zentinel is lightning fast compared to multi-threaded approaches.
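To make this concrete, below is a minimal, self-contained sketch of the same
technique built directly on the standard library's ``asyncio``. It is an
illustration only: the ``probe`` and ``scan`` names and signatures are
hypothetical and are not Zentinel's actual API, and it performs full connect
scans rather than half-open ones::

    import asyncio

    async def probe(host: str, port: int, timeout: float = 1.0) -> bool:
        # Full TCP connect: success means the port accepted the handshake.
        try:
            _, writer = await asyncio.wait_for(
                asyncio.open_connection(host, port), timeout)
            writer.close()
            await writer.wait_closed()
            return True
        except (asyncio.TimeoutError, OSError):
            return False

    async def scan(host: str, ports: range) -> list:
        # Probe every port concurrently on a single event loop; no threads.
        results = await asyncio.gather(*(probe(host, p) for p in ports))
        return [p for p, is_open in zip(ports, results) if is_open]

    print(asyncio.run(scan("127.0.0.1", range(1, 1025))))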
----
Legal Disclaimer
-----------------
The use of code contained in this repository, either in part or in its totality, for engaging targets without prior mutual consent is illegal. It is the end user's responsibility to obey all applicable local, state and federal laws.
Developers assume no liability and are not responsible for misuses or damages caused by any code contained in this repository in any event that, accidentally or otherwise, it comes to be utilized by a threat agent or unauthorized entity as a means to compromise the security, privacy, confidentiality, integrity, and/or availability of systems and their associated resources by leveraging the exploitation of known or unknown vulnerabilities present in said systems, including, but not limited to, the implementation of security controls, human- or electronically-enabled.
The use of this code is only endorsed by the developers in those circumstances directly related to educational environments or authorized penetration testing engagements whose declared purpose is that of finding and mitigating vulnerabilities in systems, limiting their exposure to compromises and exploits employed by malicious agents as defined in their respective threat models.
| zentinel | /zentinel-0.0.3.tar.gz/zentinel-0.0.3/README.rst | README.rst |
import os
import sys
from zentool.lib.google_sheet import GoogleSheet
from .make_spreadsheet import MakeSpreadsheet
from .sync_spreadsheet import SyncSpreadsheet
from .issue_creator import IssueCreator
class SpreadsheetTools:
@classmethod
def configure(cls, subparsers):
spreadsheet_parser = subparsers.add_parser('spreadsheet',
description="Spreadsheet manipulation tools")
spreadsheet_parser.set_defaults(command='spreadsheet')
spreadsheet_parser.add_argument('spreadsheet_id', type=str)
subparsers = spreadsheet_parser.add_subparsers()
MakeSpreadsheet.configure(subparsers)
SyncSpreadsheet.configure(subparsers)
IssueCreator.configure(subparsers)
def __init__(self, combo):
self.combo = combo
self.sheet = None
self._check_google_auth_is_configured()
def run(self, args):
self.sheet = GoogleSheet(args.spreadsheet_id)
if args.subcommand == 'make':
MakeSpreadsheet(tools=self).run(args)
elif args.subcommand == 'sync':
SyncSpreadsheet(tools=self).run(args)
elif args.subcommand == 'create-issues':
IssueCreator(tools=self).run(args)
def cell_addr(self, row, col):
"""
row and col should be 1-based
"""
return f"{self.column_number_to_letter(col)}{row}"
@staticmethod
def column_number_to_letter(column_number):
"""
        column_number should be 1-based; only single-letter columns (1-26, i.e. A-Z) are supported
"""
return chr(65 + column_number - 1)
@staticmethod
def _check_google_auth_is_configured():
if not os.path.exists('credentials.json'):
sys.stderr.write("\nYou need to get credentials for accessing the Google Sheets API.\n"
"Go to https://developers.google.com/sheets/api/quickstart/python\n"
"click [ENABLE THE GOOGLE SHEETS API], then in the resulting dialog\n"
"click [DOWNLOAD CLIENT CONFIGURATION]\n"
"and save the file credentials.json to your working directory.\n\n")
            exit(1)
| zentool | /tools/spreadsheet_tools.py | spreadsheet_tools.py |
from .spreadsheet_processor import SpreadsheetProcessor
from ..lib.sheet_range import SheetRange
class IssueCreator:
"""
usage: zentool -r <repo> spreadsheet <spreadsheet_id> create-issues
    Create an issue for every 🛠 in the tracking spreadsheet.
"""
@classmethod
def configure(cls, subparsers):
sync_parser = subparsers.add_parser('create-issues')
sync_parser.set_defaults(subcommand='create-issues')
sync_parser.add_argument('epic_id', type=str, nargs='?')
def __init__(self, tools):
self.tools = tools
def run(self, args):
SpreadsheetProcessor(tools=self.tools, row_processor_class=self.CreateRowIssues).run(args)
class CreateRowIssues:
@property
def sheet(self):
return self.sheet_processor.sheet
def __init__(self, sheet_processor):
self.sheet_processor = sheet_processor
self.row = None
self.row_number = None
self.epic = None
self.row_range = SheetRange()
def process(self, row, row_number):
self.row = row
self.row_number = row_number
self._find_and_update_epic(epic_number=row[0])
for map_entry in self.sheet_processor.repo_map.map.values():
try:
cell_value = self.row[map_entry.column - 1]
except IndexError:
cell_value = None
if cell_value == '🛠':
issue = map_entry.repo.create_issue(self.epic.title, self.epic.body)
print(f"\tCreated issue {issue}")
self.update_issue_in_sheet(issue, map_entry)
print("\tAdding issue to epic")
self.epic.add_issues([issue])
if not self.row_range.is_empty:
self.sheet.update_range(self.row_range)
def _find_and_update_epic(self, epic_number):
self.epic = self.sheet_processor.repo.epic(epic_number)
print(self.epic)
self.row_range['B', self.row_number] = self.epic.title
def update_issue_in_sheet(self, issue, map_entry):
colname = self.sheet_processor.tools.column_number_to_letter(map_entry.column)
link_to_issue = '=HYPERLINK("https://github.com/{repo}/issues/{issue}","{issue}")'.format(
repo=issue.repo.full_name, issue=issue.number)
self.row_range[colname, self.row_number] = link_to_issue
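            # Shade the cell by issue state: green when closed, pale yellow otherwise.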
default_colors = {'red': 255, 'green': 255, 'blue': 200}
state_to_colors_map = {
'closed': {'red': 200, 'green': 255, 'blue': 200}
}
color = state_to_colors_map.get(issue.status, default_colors)
            self.row_range[colname, self.row_number].bg = color
| zentool | /tools/issue_creator.py | issue_creator.py |
from .repo_map import RepoMap
class SpreadsheetProcessor:
"""
Iterate through spreadsheet rows, calling the provided row processor
"""
REPO_HEADER_ROW = 2
REPO_HEADING_RANGE = "C2:Z"
DATA_START_ROW = 3 # all spreadsheet references are 1-based
def __init__(self, tools, row_processor_class):
self.args = None
self.tools = tools
self.repo = None
self.repo_map = RepoMap()
self.row_processor_class = row_processor_class
@property
def sheet(self):
return self.tools.sheet
def run(self, args):
self.args = args
self.repo = self.tools.combo.repo(args.repo_name)
self._check_sheet_matches_repo()
self._read_repo_headings()
self._process_rows()
def _check_sheet_matches_repo(self):
headings = self.sheet.get_cells("A1:B2")
assert headings[0][0] == 'Repo:', "Expected Cell A1 to contain the word 'Repo:'."
assert headings[0][1] == self.repo.full_name, (
f"Command line specified repo {self.repo.full_name} "
f"but spreadsheet is for repo {headings[0][1]}.")
assert headings[1][0] == 'Epic', "Expected cell A2 to contain the heading 'Epic'."
assert headings[1][1] == 'Description', "Expected cell B2 to contain the heading 'Description'."
def _read_repo_headings(self):
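        # Populate the repo -> column map from the spreadsheet's header row (range C2:Z).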
cells = self.sheet.get_cells(self.REPO_HEADING_RANGE)
if len(cells) == 0:
return
row = cells[0]
        col_number = 2  # first increment below yields 3, i.e. 1-based column C
for repo_name in row:
col_number += 1
if not self.repo_map.get_by_repo_name(repo_name):
repo = self.tools.combo.repo(repo_name)
self.repo_map.record(repo, col_number)
def _process_rows(self):
row_number = self.DATA_START_ROW - 1
sheet_data = self.sheet.get_cells(f"A{self.DATA_START_ROW}:Z9999")
for row in sheet_data:
row_number += 1
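            # When an epic id was given on the command line, process only matching rows.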
if self.args.epic_id and row[0] != self.args.epic_id:
continue
            self.row_processor_class(sheet_processor=self).process(row, row_number)
| zentool | /tools/spreadsheet_processor.py | spreadsheet_processor.py |
from .spreadsheet_processor import SpreadsheetProcessor
from ..lib.sheet_range import SheetRange
class SyncSpreadsheet:
"""
usage: zentool -r <repo> spreadsheet <spreadsheet_id> sync
Retrieve issue status for epics
"""
@classmethod
def configure(cls, subparsers):
sync_parser = subparsers.add_parser('sync')
sync_parser.set_defaults(subcommand='sync')
sync_parser.add_argument('epic_id', type=str, nargs='?')
def __init__(self, tools):
self.tools = tools
def run(self, args):
SpreadsheetProcessor(tools=self.tools, row_processor_class=self.SyncRowIssues).run(args)
class SyncRowIssues:
@property
def sheet(self):
return self.sheet_processor.sheet
def __init__(self, sheet_processor):
self.sheet_processor = sheet_processor
self.row = None
self.row_number = None
self.epic = None
self.header_range = SheetRange()
self.row_range = SheetRange()
def process(self, row, row_number):
self.row = row
self.row_number = row_number
self._find_and_update_epic(epic_number=row[0])
for issue in self.epic.issues():
print(f"\t{issue}")
map_entry = self._find_or_create_column_for_repo(issue.repo)
self._update_issue_in_sheet(issue, map_entry)
if not self.header_range.is_empty:
self.sheet.update_range(self.header_range)
if not self.row_range.is_empty:
self.sheet.update_range(self.row_range)
def _find_and_update_epic(self, epic_number):
self.epic = self.sheet_processor.repo.epic(epic_number)
print(self.epic)
self.row_range['B', self.row_number] = self.epic.title
def _find_or_create_column_for_repo(self, repo):
map_entry = self.sheet_processor.repo_map.get_by_repo_name(repo.full_name)
if map_entry:
print(f"\t\t{repo.full_name} = column {map_entry.column}")
else:
map_entry = self.sheet_processor.repo_map.create_new(repo)
print(f"\t\tassigning column {map_entry.column} to {repo.full_name}")
column_letter = self.sheet_processor.tools.column_number_to_letter(map_entry.column)
self.header_range[column_letter, 2] = repo.full_name
return map_entry
def _update_issue_in_sheet(self, issue, map_entry):
colname = self.sheet_processor.tools.column_number_to_letter(map_entry.column)
link_to_issue = '=HYPERLINK("https://github.com/{repo}/issues/{issue}","{issue}")'.format(
repo=issue.repo.full_name, issue=issue.number)
self.row_range[colname, self.row_number] = link_to_issue
default_colors = {'red': 255, 'green': 255, 'blue': 200}
state_to_colors_map = {
'closed': {'red': 200, 'green': 255, 'blue': 200}
}
color = state_to_colors_map.get(issue.status, default_colors)
            self.row_range[colname, self.row_number].bg = color
| zentool | /tools/sync_spreadsheet.py | sync_spreadsheet.py |
========
Overview
========

.. start-badges

.. list-table::
    :stub-columns: 1

    * - docs
      - |docs|
    * - package
      - | |version| |wheel| |supported-versions| |supported-implementations|
        | |commits-since|

.. |docs| image:: https://readthedocs.org/projects/zentropi/badge/?style=flat
    :target: https://readthedocs.org/projects/zentropi
    :alt: Documentation Status

.. |version| image:: https://img.shields.io/pypi/v/zentropi.svg
    :alt: PyPI Package latest release
    :target: https://pypi.org/project/zentropi

.. |wheel| image:: https://img.shields.io/pypi/wheel/zentropi.svg
    :alt: PyPI Wheel
    :target: https://pypi.org/project/zentropi

.. |supported-versions| image:: https://img.shields.io/pypi/pyversions/zentropi.svg
    :alt: Supported versions
    :target: https://pypi.org/project/zentropi

.. |supported-implementations| image:: https://img.shields.io/pypi/implementation/zentropi.svg
    :alt: Supported implementations
    :target: https://pypi.org/project/zentropi

.. |commits-since| image:: https://img.shields.io/github/commits-since/zentropi/python-zentropi/v2020.0.1.svg
    :alt: Commits since latest release
    :target: https://github.com/zentropi/python-zentropi/compare/v2020.0.1...master

.. end-badges

Zentropi Agent Framework: Script Your World

* Free software: BSD 3-Clause License

Installation
============

::

    pip install zentropi

You can also install the in-development version with::

    pip install https://github.com/zentropi/python-zentropi/archive/master.zip

Documentation
=============

https://zentropi.readthedocs.io/

Development
===========

To run all the tests run::

    tox

Note, to combine the coverage data from all the tox environments run:

.. list-table::
    :widths: 10 90
    :stub-columns: 1

    * - Windows
      - ::

            set PYTEST_ADDOPTS=--cov-append
            tox

    * - Other
      - ::

            PYTEST_ADDOPTS=--cov-append tox
| zentropi | /zentropi-2020.0.1.tar.gz/zentropi-2020.0.1/README.rst | README.rst |
============
Contributing
============

Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.

Bug reports
===========

When `reporting a bug <https://github.com/zentropi/python-zentropi/issues>`_ please include:

* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.

Documentation improvements
==========================

zentropi could always use more documentation, whether as part of the
official zentropi docs, in docstrings, or even on the web in blog posts,
articles, and such.

Feature requests and feedback
=============================

The best way to send feedback is to file an issue at https://github.com/zentropi/python-zentropi/issues.

If you are proposing a feature:

* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that code contributions are welcome :)

Development
===========

To set up `python-zentropi` for local development:

1. Fork `python-zentropi <https://github.com/zentropi/python-zentropi>`_
   (look for the "Fork" button).
2. Clone your fork locally::

    git clone [email protected]:zentropi/python-zentropi.git

3. Create a branch for local development::

    git checkout -b name-of-your-bugfix-or-feature

   Now you can make your changes locally.
4. When you're done making changes, run all the checks and build the docs with one `tox <https://tox.readthedocs.io/en/latest/install.html>`_ command::

    tox

5. Commit your changes and push your branch to GitHub::

    git add .
    git commit -m "Your detailed description of your changes."
    git push origin name-of-your-bugfix-or-feature

6. Submit a pull request through the GitHub website.

Pull Request Guidelines
-----------------------

If you need some code review or feedback while you're developing the code, just make the pull request.

For merging, you should:

1. Include passing tests (run ``tox``).
2. Update documentation when there's new API, functionality, etc.
3. Add a note to ``CHANGELOG.rst`` about the changes.
4. Add yourself to ``AUTHORS.rst``.

Tips
----

To run a subset of tests::

    tox -e envname -- pytest -k test_myfeature

To run all the test environments in *parallel* (you need to ``pip install detox``)::

    detox
| zentropi | /zentropi-2020.0.1.tar.gz/zentropi-2020.0.1/CONTRIBUTING.rst | CONTRIBUTING.rst |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals

import os
import subprocess
import sys
from os.path import abspath
from os.path import dirname
from os.path import exists
from os.path import join

base_path = dirname(dirname(abspath(__file__)))


def check_call(args):
    print("+", *args)
    subprocess.check_call(args)


def exec_in_env():
    env_path = join(base_path, ".tox", "bootstrap")
    if sys.platform == "win32":
        bin_path = join(env_path, "Scripts")
    else:
        bin_path = join(env_path, "bin")
    if not exists(env_path):
        print("Making bootstrap env in: {0} ...".format(env_path))
        # Fall back from the stdlib venv module to virtualenv (first as a
        # module, then as a script) so this works on older interpreters.
        try:
            check_call([sys.executable, "-m", "venv", env_path])
        except subprocess.CalledProcessError:
            try:
                check_call([sys.executable, "-m", "virtualenv", env_path])
            except subprocess.CalledProcessError:
                check_call(["virtualenv", env_path])
        print("Installing `jinja2` into bootstrap environment...")
        check_call([join(bin_path, "pip"), "install", "jinja2", "tox"])
    python_executable = join(bin_path, "python")
    if not os.path.exists(python_executable):
        python_executable += '.exe'

    print("Re-executing with: {0}".format(python_executable))
    print("+ exec", python_executable, __file__, "--no-env")
    os.execv(python_executable, [python_executable, __file__, "--no-env"])


def main():
    import jinja2

    print("Project path: {0}".format(base_path))

    jinja = jinja2.Environment(
        loader=jinja2.FileSystemLoader(join(base_path, "ci", "templates")),
        trim_blocks=True,
        lstrip_blocks=True,
        keep_trailing_newline=True
    )
    tox_environments = [
        line.strip()
        # 'tox' need not be installed globally, but must be importable
        # by the Python that is running this script.
        # This uses sys.executable the same way that the call in
        # cookiecutter-pylibrary/hooks/post_gen_project.py
        # invokes this bootstrap.py itself.
        for line in subprocess.check_output(
            [sys.executable, '-m', 'tox', '--listenvs'],
            universal_newlines=True,
        ).splitlines()
    ]
    tox_environments = [line for line in tox_environments if line.startswith('py')]

    # Render each template in ci/templates to a same-named file at the
    # project root, parameterized by the discovered tox environments.
    for name in os.listdir(join("ci", "templates")):
        with open(join(base_path, name), "w") as fh:
            fh.write(jinja.get_template(name).render(tox_environments=tox_environments))
        print("Wrote {}".format(name))
    print("DONE.")


if __name__ == "__main__":
    args = sys.argv[1:]
    if args == ["--no-env"]:
        main()
    elif not args:
        exec_in_env()
    else:
        print("Unexpected arguments {0}".format(args), file=sys.stderr)
        sys.exit(1)
| zentropi | /zentropi-2020.0.1.tar.gz/zentropi-2020.0.1/ci/bootstrap.py | bootstrap.py |
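
To make the render step concrete, here is a small self-contained sketch of what `main()` does for each template, using the same `trim_blocks`/`lstrip_blocks` options; the inline template text is an invented stand-in, not a real file from `ci/templates`:

```python
# Sketch only: the template below is hypothetical.
import jinja2

template = jinja2.Template(
    "envlist:\n"
    "{% for env in tox_environments %}\n"
    "  - {{ env }}\n"
    "{% endfor %}\n",
    trim_blocks=True,
    lstrip_blocks=True,
)
print(template.render(tox_environments=["py36", "py37", "py38"]))
# envlist:
#   - py36
#   - py37
#   - py38
```
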
[](https://gitlab.django-creation.net/zentux/zentuxlog-client/commits/master)
[](https://gitlab.django-creation.net/zentux/zentuxlog-client/commits/master)
# Utilisation
## Dans un script, une classe, un module, ...
```python
from zentuxlog_client.client import Client
APIKEY = "hkhfds56dfsdfjhdjk"
APISECRET = "KAP0dika43iH7"
USERNAME = "John"
PASSWORD = "Mypass
auth = {
'client_id': APIKEY,
'client_secret': APISECRET,
'username': USERNAME,
'password': PASSWORD
}
c = Client(auth=auth)
c.send(data="information à logguer", method="POST", path="logs/")
```
## Dans une exception personnalisée
```python
from zentuxlog_client.client import Client
APIKEY = "hkhfds56dfsdfjhdjk"
APISECRET = "KAP0dika43iH7"
USERNAME = "John"
PASSWORD = "Mypass"
auth = {
'client_id': APIKEY,
'client_secret': APISECRET,
'username': USERNAME,
'password': PASSWORD
}
c = Client(auth=auth)
class MyCustomError(Exception):
"""Erreur générique."""
def __init__(self, msg=''):
self.msg = msg
if msg:
c.send(msg, method="POST", path="logs/")
def __str__(self):
return self.msg
```
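
With this in place, raising the exception both ships the message to the log API and behaves like a normal exception. A small usage sketch, assuming the setup above:

```python
try:
    raise MyCustomError("database connection lost")
except MyCustomError as e:
    # Client.send() was already called by __init__ with the message;
    # handle the error locally as usual.
    print(f"handled: {e}")
```
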
| zentuxlog-client | /zentuxlog-client-1.0.3.tar.gz/zentuxlog-client-1.0.3/README.md | README.md |